Merge master.kernel.org:/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
author Linus Torvalds <torvalds@g5.osdl.org>
Sat, 14 Jan 2006 20:16:07 +0000 (12:16 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
Sat, 14 Jan 2006 20:16:07 +0000 (12:16 -0800)
967 files changed:
Documentation/SubmittingDrivers
Documentation/SubmittingPatches
Documentation/kernel-parameters.txt
Documentation/spi/butterfly [new file with mode: 0644]
Documentation/spi/spi-summary [new file with mode: 0644]
MAINTAINERS
arch/alpha/kernel/process.c
arch/alpha/kernel/ptrace.c
arch/alpha/kernel/smp.c
arch/arm/Kconfig
arch/arm/boot/compressed/head.S
arch/arm/common/locomo.c
arch/arm/common/rtctime.c
arch/arm/common/sa1111.c
arch/arm/configs/at91rm9200dk_defconfig [new file with mode: 0644]
arch/arm/configs/at91rm9200ek_defconfig [new file with mode: 0644]
arch/arm/configs/csb337_defconfig [new file with mode: 0644]
arch/arm/configs/csb637_defconfig [new file with mode: 0644]
arch/arm/kernel/ecard.c
arch/arm/kernel/fiq.c
arch/arm/kernel/process.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/smp.c
arch/arm/kernel/traps.c
arch/arm/lib/csumpartialcopy.S
arch/arm/lib/csumpartialcopygeneric.S
arch/arm/lib/csumpartialcopyuser.S
arch/arm/mach-aaec2000/clock.c
arch/arm/mach-at91rm9200/Makefile
arch/arm/mach-at91rm9200/board-csb337.c [new file with mode: 0644]
arch/arm/mach-at91rm9200/board-csb637.c [new file with mode: 0644]
arch/arm/mach-at91rm9200/board-dk.c [new file with mode: 0644]
arch/arm/mach-at91rm9200/board-ek.c [new file with mode: 0644]
arch/arm/mach-integrator/clock.c
arch/arm/mach-integrator/lm.c
arch/arm/mach-pxa/ssp.c
arch/arm/mach-realview/clock.c
arch/arm/mach-s3c2410/clock.c
arch/arm/mach-versatile/clock.c
arch/arm/mm/consistent.c
arch/arm/plat-omap/clock.c
arch/arm26/kernel/process.c
arch/arm26/kernel/ptrace.c
arch/arm26/kernel/traps.c
arch/cris/arch-v10/kernel/process.c
arch/cris/arch-v10/kernel/ptrace.c
arch/cris/arch-v32/kernel/process.c
arch/cris/arch-v32/kernel/ptrace.c
arch/cris/arch-v32/kernel/smp.c
arch/cris/arch-v32/mm/tlb.c
arch/frv/kernel/process.c
arch/h8300/kernel/process.c
arch/i386/kernel/process.c
arch/i386/kernel/smpboot.c
arch/i386/kernel/vm86.c
arch/ia64/hp/sim/simserial.c
arch/ia64/ia32/elfcore32.h
arch/ia64/ia32/ia32_signal.c
arch/ia64/ia32/ia32_support.c
arch/ia64/ia32/sys_ia32.c
arch/ia64/kernel/fsys.S
arch/ia64/kernel/jprobes.S
arch/ia64/kernel/kprobes.c
arch/ia64/kernel/mca.c
arch/ia64/kernel/mca_asm.S
arch/ia64/kernel/perfmon.c
arch/ia64/kernel/process.c
arch/ia64/kernel/ptrace.c
arch/ia64/kernel/salinfo.c
arch/ia64/kernel/setup.c
arch/ia64/kernel/signal.c
arch/ia64/kernel/sys_ia64.c
arch/ia64/kernel/traps.c
arch/ia64/mm/tlb.c
arch/ia64/sn/include/xtalk/hubdev.h
arch/ia64/sn/kernel/bte_error.c
arch/ia64/sn/kernel/huberror.c
arch/ia64/sn/kernel/io_init.c
arch/ia64/sn/kernel/tiocx.c
arch/ia64/sn/kernel/xpc.h [deleted file]
arch/ia64/sn/kernel/xpc_channel.c
arch/ia64/sn/kernel/xpc_main.c
arch/ia64/sn/kernel/xpc_partition.c
arch/ia64/sn/pci/pcibr/pcibr_dma.c
arch/ia64/sn/pci/pcibr/pcibr_provider.c
arch/m32r/kernel/process.c
arch/m32r/kernel/ptrace.c
arch/m32r/kernel/smpboot.c
arch/m68k/amiga/amiints.c
arch/m68k/amiga/amisound.c
arch/m68k/amiga/cia.c
arch/m68k/amiga/config.c
arch/m68k/apollo/config.c
arch/m68k/atari/config.c
arch/m68k/bvme6000/rtc.c
arch/m68k/hp300/config.c
arch/m68k/kernel/asm-offsets.c
arch/m68k/kernel/head.S
arch/m68k/kernel/process.c
arch/m68k/kernel/setup.c
arch/m68k/kernel/signal.c
arch/m68k/kernel/sys_m68k.c
arch/m68k/kernel/traps.c
arch/m68k/kernel/vmlinux-std.lds
arch/m68k/kernel/vmlinux-sun3.lds
arch/m68k/lib/checksum.c
arch/m68k/mac/config.c
arch/m68k/mac/iop.c
arch/m68k/mac/misc.c
arch/m68k/math-emu/multi_arith.h
arch/m68k/mm/kmap.c
arch/m68k/mvme16x/rtc.c
arch/m68k/q40/config.c
arch/m68k/sun3/config.c
arch/m68k/sun3x/config.c
arch/m68knommu/kernel/process.c
arch/mips/kernel/process.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/ptrace32.c
arch/mips/kernel/smp_mt.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/pmc-sierra/yosemite/smp.c
arch/mips/sgi-ip27/ip27-smp.c
arch/mips/sibyte/cfe/smp.c
arch/parisc/kernel/drivers.c
arch/parisc/kernel/process.c
arch/parisc/kernel/smp.c
arch/powerpc/Makefile
arch/powerpc/boot/Makefile
arch/powerpc/boot/crt0.S
arch/powerpc/boot/hack-coff.c [new file with mode: 0644]
arch/powerpc/boot/main.c
arch/powerpc/boot/prom.c
arch/powerpc/boot/prom.h
arch/powerpc/boot/rs6000.h [new file with mode: 0644]
arch/powerpc/boot/stdio.c [new file with mode: 0644]
arch/powerpc/boot/stdio.h
arch/powerpc/boot/string.S
arch/powerpc/boot/zImage.coff.lds [new file with mode: 0644]
arch/powerpc/configs/mpc834x_sys_defconfig [new file with mode: 0644]
arch/powerpc/kernel/Makefile
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/cpu_setup_power4.S
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/entry_32.S
arch/powerpc/kernel/entry_64.S
arch/powerpc/kernel/fpu.S
arch/powerpc/kernel/head_64.S
arch/powerpc/kernel/idle_power4.S
arch/powerpc/kernel/irq.c
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kernel/misc_32.S
arch/powerpc/kernel/misc_64.S
arch/powerpc/kernel/of_device.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/pci_64.c
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/process.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_parse.c
arch/powerpc/kernel/ptrace-common.h
arch/powerpc/kernel/rtas.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kernel/time.c
arch/powerpc/kernel/vio.c
arch/powerpc/lib/locks.c
arch/powerpc/oprofile/common.c
arch/powerpc/platforms/83xx/Kconfig
arch/powerpc/platforms/83xx/mpc834x_sys.c [new file with mode: 0644]
arch/powerpc/platforms/83xx/mpc834x_sys.h [new file with mode: 0644]
arch/powerpc/platforms/83xx/mpc83xx.h [new file with mode: 0644]
arch/powerpc/platforms/83xx/pci.c [new file with mode: 0644]
arch/powerpc/platforms/cell/pervasive.c
arch/powerpc/platforms/cell/setup.c
arch/powerpc/platforms/cell/smp.c
arch/powerpc/platforms/cell/spufs/syscalls.c
arch/powerpc/platforms/chrp/pci.c
arch/powerpc/platforms/chrp/setup.c
arch/powerpc/platforms/chrp/time.c
arch/powerpc/platforms/iseries/Makefile
arch/powerpc/platforms/iseries/iommu.c
arch/powerpc/platforms/iseries/iommu.h [new file with mode: 0644]
arch/powerpc/platforms/iseries/irq.c
arch/powerpc/platforms/iseries/lpardata.c
arch/powerpc/platforms/iseries/lpevents.c
arch/powerpc/platforms/iseries/mf.c
arch/powerpc/platforms/iseries/misc.S
arch/powerpc/platforms/iseries/pci.c
arch/powerpc/platforms/iseries/setup.c
arch/powerpc/platforms/iseries/smp.c
arch/powerpc/platforms/iseries/vio.c
arch/powerpc/platforms/iseries/viopath.c
arch/powerpc/platforms/powermac/setup.c
arch/powerpc/platforms/pseries/eeh.c
arch/powerpc/platforms/pseries/hvcserver.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/scanlog.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/platforms/pseries/xics.c
arch/powerpc/sysdev/Makefile
arch/powerpc/sysdev/dart_iommu.c
arch/powerpc/sysdev/fsl_soc.c [new file with mode: 0644]
arch/powerpc/sysdev/fsl_soc.h [new file with mode: 0644]
arch/powerpc/xmon/xmon.c
arch/ppc/4xx_io/serial_sicc.c
arch/ppc/amiga/amiints.c
arch/ppc/amiga/cia.c
arch/ppc/amiga/config.c
arch/ppc/kernel/Makefile
arch/ppc/kernel/head_8xx.S
arch/ppc/kernel/ppc_ksyms.c
arch/ppc/kernel/process.c [deleted file]
arch/ppc/kernel/smp.c
arch/ppc/platforms/83xx/mpc834x_sys.c
arch/ppc/platforms/85xx/mpc8540_ads.c
arch/ppc/platforms/85xx/mpc8560_ads.c
arch/ppc/platforms/85xx/mpc85xx_cds_common.c
arch/ppc/platforms/85xx/sbc8560.c
arch/ppc/platforms/85xx/stx_gp3.c
arch/ppc/platforms/85xx/tqm85xx.c
arch/ppc/platforms/apus_setup.c
arch/ppc/syslib/mpc83xx_devices.c
arch/ppc/syslib/mpc85xx_devices.c
arch/ppc/syslib/ocp.c
arch/ppc/xmon/xmon.c
arch/s390/kernel/binfmt_elf32.c
arch/s390/kernel/process.c
arch/s390/kernel/ptrace.c
arch/s390/kernel/smp.c
arch/s390/kernel/time.c
arch/s390/kernel/traps.c
arch/sh/kernel/cpu/bus.c
arch/sh/kernel/process.c
arch/sh/kernel/ptrace.c
arch/sh/kernel/smp.c
arch/sh64/kernel/process.c
arch/sh64/lib/dbg.c
arch/sparc/kernel/process.c
arch/sparc/kernel/ptrace.c
arch/sparc/kernel/sun4d_smp.c
arch/sparc/kernel/sun4m_smp.c
arch/sparc/kernel/traps.c
arch/sparc64/kernel/process.c
arch/sparc64/kernel/ptrace.c
arch/sparc64/kernel/setup.c
arch/sparc64/kernel/smp.c
arch/sparc64/kernel/traps.c
arch/um/kernel/process_kern.c
arch/um/kernel/skas/process_kern.c
arch/um/kernel/tt/exec_kern.c
arch/um/kernel/tt/process_kern.c
arch/v850/kernel/process.c
arch/v850/kernel/ptrace.c
arch/x86_64/ia32/ia32_binfmt.c
arch/x86_64/ia32/ptrace32.c
arch/x86_64/kernel/i387.c
arch/x86_64/kernel/i8259.c
arch/x86_64/kernel/process.c
arch/x86_64/kernel/ptrace.c
arch/x86_64/kernel/smpboot.c
arch/x86_64/kernel/traps.c
arch/xtensa/kernel/process.c
arch/xtensa/kernel/ptrace.c
block/elevator.c
drivers/Kconfig
drivers/Makefile
drivers/base/dd.c
drivers/base/driver.c
drivers/base/platform.c
drivers/base/power/shutdown.c
drivers/block/amiflop.c
drivers/block/ataflop.c
drivers/block/viodasd.c
drivers/cdrom/viocd.c
drivers/char/amiserial.c
drivers/char/drm/Makefile
drivers/char/drm/ati_pcigart.c
drivers/char/drm/drm.h
drivers/char/drm/drmP.h
drivers/char/drm/drm_agpsupport.c
drivers/char/drm/drm_bufs.c
drivers/char/drm/drm_context.c
drivers/char/drm/drm_core.h
drivers/char/drm/drm_drv.c
drivers/char/drm/drm_fops.c
drivers/char/drm/drm_init.c [deleted file]
drivers/char/drm/drm_ioctl.c
drivers/char/drm/drm_lock.c
drivers/char/drm/drm_memory.c
drivers/char/drm/drm_memory_debug.h
drivers/char/drm/drm_os_linux.h
drivers/char/drm/drm_pciids.h
drivers/char/drm/drm_proc.c
drivers/char/drm/drm_stub.c
drivers/char/drm/drm_sysfs.c
drivers/char/drm/i810_dma.c
drivers/char/drm/i810_drv.c
drivers/char/drm/i810_drv.h
drivers/char/drm/i830_dma.c
drivers/char/drm/i830_drv.c
drivers/char/drm/i830_drv.h
drivers/char/drm/i915_dma.c
drivers/char/drm/i915_drm.h
drivers/char/drm/i915_drv.c
drivers/char/drm/i915_drv.h
drivers/char/drm/i915_irq.c
drivers/char/drm/i915_mem.c
drivers/char/drm/mga_dma.c
drivers/char/drm/mga_drv.c
drivers/char/drm/mga_drv.h
drivers/char/drm/mga_state.c
drivers/char/drm/r128_cce.c
drivers/char/drm/r128_drm.h
drivers/char/drm/r128_drv.c
drivers/char/drm/r128_drv.h
drivers/char/drm/r128_irq.c
drivers/char/drm/r128_state.c
drivers/char/drm/r300_cmdbuf.c
drivers/char/drm/r300_reg.h
drivers/char/drm/radeon_cp.c
drivers/char/drm/radeon_drm.h
drivers/char/drm/radeon_drv.c
drivers/char/drm/radeon_drv.h
drivers/char/drm/radeon_state.c
drivers/char/drm/savage_bci.c
drivers/char/drm/savage_drv.c
drivers/char/drm/savage_drv.h
drivers/char/drm/savage_state.c
drivers/char/drm/sis_drm.h
drivers/char/drm/sis_drv.c
drivers/char/drm/sis_drv.h
drivers/char/drm/sis_ds.h
drivers/char/drm/sis_mm.c
drivers/char/drm/tdfx_drv.c
drivers/char/drm/tdfx_drv.h
drivers/char/drm/via_dma.c
drivers/char/drm/via_dmablit.c [new file with mode: 0644]
drivers/char/drm/via_dmablit.h [new file with mode: 0644]
drivers/char/drm/via_drm.h
drivers/char/drm/via_drv.c
drivers/char/drm/via_drv.h
drivers/char/drm/via_ds.c
drivers/char/drm/via_irq.c
drivers/char/drm/via_map.c
drivers/char/drm/via_mm.c
drivers/char/drm/via_verifier.c
drivers/char/drm/via_verifier.h
drivers/char/drm/via_video.c
drivers/char/dsp56k.c
drivers/char/esp.c
drivers/char/generic_serial.c
drivers/char/mem.c
drivers/char/riscom8.c
drivers/char/scc.h
drivers/char/serial167.c
drivers/char/specialix.c
drivers/char/synclink.c
drivers/char/viocons.c
drivers/dio/dio-driver.c
drivers/i2c/busses/i2c-pxa.c
drivers/i2c/i2c-core.c
drivers/ide/ide-cd.c
drivers/ide/ide-disk.c
drivers/ide/ide-floppy.c
drivers/ide/ide-io.c
drivers/ide/ide-probe.c
drivers/ide/ide-tape.c
drivers/ide/ide.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/device.c
drivers/infiniband/core/sysfs.c
drivers/infiniband/core/ucm.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/mthca/mthca_av.c
drivers/infiniband/hw/mthca/mthca_cmd.c
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/ulp/ipoib/ipoib.h
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/infiniband/ulp/ipoib/ipoib_verbs.c
drivers/infiniband/ulp/ipoib/ipoib_vlan.c
drivers/infiniband/ulp/srp/ib_srp.c
drivers/input/evdev.c
drivers/input/gameport/gameport.c
drivers/input/input.c
drivers/input/joystick/amijoy.c
drivers/input/mouse/alps.c
drivers/input/mouse/amimouse.c
drivers/input/mouse/logips2pp.c
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/psmouse.h
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/serio.c
drivers/input/touchscreen/Kconfig
drivers/input/touchscreen/Makefile
drivers/input/touchscreen/ads7846.c [new file with mode: 0644]
drivers/macintosh/adb-iop.c
drivers/macintosh/macio-adb.c
drivers/macintosh/macio_asic.c
drivers/macintosh/via-macii.c
drivers/macintosh/via-maciisi.c
drivers/macintosh/via-pmu68k.c
drivers/md/md.c
drivers/media/dvb/bt8xx/dvb-bt8xx.c
drivers/media/video/bttv-gpio.c
drivers/media/video/bttv.h
drivers/mfd/mcp-core.c
drivers/mfd/ucb1x00-core.c
drivers/mmc/mmc_block.c
drivers/mmc/mmc_sysfs.c
drivers/mtd/devices/Kconfig
drivers/mtd/devices/Makefile
drivers/mtd/devices/m25p80.c [new file with mode: 0644]
drivers/mtd/devices/mtd_dataflash.c [new file with mode: 0644]
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bonding.h
drivers/net/e100.c
drivers/net/gianfar.c
drivers/net/gianfar_mii.c
drivers/net/hplance.c
drivers/net/iseries_veth.c
drivers/net/mac8390.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/phy.c
drivers/net/sun3lance.c
drivers/net/tulip/uli526x.c
drivers/net/via-velocity.c
drivers/net/wireless/Kconfig
drivers/net/wireless/atmel.c
drivers/pci/pci-driver.c
drivers/pcmcia/ds.c
drivers/pcmcia/pxa2xx_mainstone.c
drivers/pcmcia/pxa2xx_sharpsl.c
drivers/pcmcia/socket_sysfs.c
drivers/pnp/driver.c
drivers/rapidio/rio-driver.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/css.c
drivers/s390/cio/css.h
drivers/s390/cio/device.c
drivers/sbus/char/aurora.c
drivers/scsi/Makefile
drivers/scsi/NCR53C9x.c
drivers/scsi/blz1230.c
drivers/scsi/blz2060.c
drivers/scsi/cyberstorm.c
drivers/scsi/cyberstormII.c
drivers/scsi/fastlane.c
drivers/scsi/oktagon_esp.c
drivers/scsi/scsi_debug.c
drivers/scsi/wd33c93.c
drivers/serial/68328serial.c
drivers/serial/8250.c
drivers/serial/Kconfig
drivers/serial/Makefile
drivers/serial/at91_serial.c [new file with mode: 0644]
drivers/serial/crisv10.c
drivers/serial/pmac_zilog.c
drivers/serial/serial_core.c
drivers/serial/serial_cs.c
drivers/serial/serial_txx9.c
drivers/sh/superhyway/superhyway.c
drivers/spi/Kconfig [new file with mode: 0644]
drivers/spi/Makefile [new file with mode: 0644]
drivers/spi/spi.c [new file with mode: 0644]
drivers/spi/spi_bitbang.c [new file with mode: 0644]
drivers/spi/spi_butterfly.c [new file with mode: 0644]
drivers/usb/gadget/ether.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/serial.c
drivers/usb/gadget/zero.c
drivers/usb/input/Kconfig
drivers/usb/input/hid-core.c
drivers/usb/input/hid-input.c
drivers/usb/input/hid.h
drivers/usb/input/pid.c
drivers/usb/input/wacom.c
drivers/usb/serial/bus.c
drivers/usb/serial/pl2303.c
drivers/video/amifb.c
drivers/video/aty/atyfb_base.c
drivers/video/macfb.c
drivers/zorro/proc.c
drivers/zorro/zorro-driver.c
fs/compat_ioctl.c
fs/proc/proc_devtree.c
fs/ufs/balloc.c
fs/ufs/ialloc.c
fs/ufs/inode.c
fs/ufs/super.c
fs/ufs/util.h
fs/xfs/linux-2.6/xfs_aops.c
fs/xfs/linux-2.6/xfs_aops.h
fs/xfs/linux-2.6/xfs_buf.c
fs/xfs/linux-2.6/xfs_buf.h
fs/xfs/linux-2.6/xfs_file.c
fs/xfs/linux-2.6/xfs_ioctl.c
fs/xfs/linux-2.6/xfs_iops.c
fs/xfs/linux-2.6/xfs_iops.h
fs/xfs/linux-2.6/xfs_linux.h
fs/xfs/linux-2.6/xfs_lrw.c
fs/xfs/linux-2.6/xfs_stats.c
fs/xfs/linux-2.6/xfs_stats.h
fs/xfs/linux-2.6/xfs_super.c
fs/xfs/linux-2.6/xfs_vnode.c
fs/xfs/linux-2.6/xfs_vnode.h
fs/xfs/quota/xfs_dquot_item.c
fs/xfs/quota/xfs_qm.c
fs/xfs/support/debug.c
fs/xfs/support/debug.h
fs/xfs/support/uuid.c
fs/xfs/xfs_arch.h
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_attr_leaf.h
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_clnt.h
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_dinode.h
fs/xfs/xfs_dir.c
fs/xfs/xfs_dir.h
fs/xfs/xfs_dir2.h
fs/xfs/xfs_dir_leaf.h
fs/xfs/xfs_error.c
fs/xfs/xfs_error.h
fs/xfs/xfs_fs.h
fs/xfs/xfs_fsops.c
fs/xfs/xfs_fsops.h
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_itable.c
fs/xfs/xfs_log.c
fs/xfs/xfs_log.h
fs/xfs/xfs_log_priv.h
fs/xfs/xfs_log_recover.c
fs/xfs/xfs_mount.c
fs/xfs/xfs_mount.h
fs/xfs/xfs_rename.c
fs/xfs/xfs_rw.c
fs/xfs/xfs_sb.h
fs/xfs/xfs_trans.c
fs/xfs/xfs_trans.h
fs/xfs/xfs_utils.c
fs/xfs/xfs_vfsops.c
fs/xfs/xfs_vnodeops.c
include/asm-alpha/mmu_context.h
include/asm-alpha/processor.h
include/asm-alpha/ptrace.h
include/asm-alpha/system.h
include/asm-alpha/thread_info.h
include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h [new file with mode: 0644]
include/asm-arm/arch-at91rm9200/at91rm9200_usart.h [new file with mode: 0644]
include/asm-arm/byteorder.h
include/asm-arm/mach/serial_at91rm9200.h [new file with mode: 0644]
include/asm-arm/memory.h
include/asm-arm/processor.h
include/asm-arm/system.h
include/asm-arm/thread_info.h
include/asm-arm26/system.h
include/asm-arm26/thread_info.h
include/asm-cris/arch-v10/processor.h
include/asm-cris/arch-v32/processor.h
include/asm-cris/processor.h
include/asm-cris/thread_info.h
include/asm-frv/thread_info.h
include/asm-h8300/thread_info.h
include/asm-i386/i387.h
include/asm-i386/processor.h
include/asm-i386/system.h
include/asm-i386/thread_info.h
include/asm-i386/topology.h
include/asm-ia64/compat.h
include/asm-ia64/kprobes.h
include/asm-ia64/processor.h
include/asm-ia64/ptrace.h
include/asm-ia64/sn/sn_sal.h
include/asm-ia64/sn/xp.h
include/asm-ia64/sn/xpc.h [new file with mode: 0644]
include/asm-ia64/system.h
include/asm-ia64/thread_info.h
include/asm-ia64/topology.h
include/asm-m32r/ptrace.h
include/asm-m32r/system.h
include/asm-m32r/thread_info.h
include/asm-m68k/amigahw.h
include/asm-m68k/amigaints.h
include/asm-m68k/checksum.h
include/asm-m68k/dsp56k.h
include/asm-m68k/floppy.h
include/asm-m68k/hardirq.h
include/asm-m68k/io.h
include/asm-m68k/irq.h
include/asm-m68k/machdep.h
include/asm-m68k/raw_io.h
include/asm-m68k/signal.h
include/asm-m68k/sun3_pgtable.h
include/asm-m68k/sun3ints.h
include/asm-m68k/sun3xflop.h
include/asm-m68k/thread_info.h
include/asm-m68k/uaccess.h
include/asm-m68k/zorro.h
include/asm-m68knommu/machdep.h
include/asm-m68knommu/thread_info.h
include/asm-mips/mach-ip27/topology.h
include/asm-mips/processor.h
include/asm-mips/system.h
include/asm-mips/thread_info.h
include/asm-parisc/system.h
include/asm-parisc/thread_info.h
include/asm-powerpc/atomic.h
include/asm-powerpc/bitops.h
include/asm-powerpc/cputable.h
include/asm-powerpc/elf.h
include/asm-powerpc/futex.h
include/asm-powerpc/hvcall.h
include/asm-powerpc/iommu.h
include/asm-powerpc/iseries/hv_call.h
include/asm-powerpc/iseries/hv_call_event.h
include/asm-powerpc/iseries/hv_call_sc.h
include/asm-powerpc/iseries/hv_lp_config.h
include/asm-powerpc/iseries/hv_lp_event.h
include/asm-powerpc/iseries/hv_types.h
include/asm-powerpc/iseries/iseries_io.h
include/asm-powerpc/iseries/it_exp_vpd_panel.h
include/asm-powerpc/iseries/it_lp_naca.h
include/asm-powerpc/iseries/it_lp_queue.h
include/asm-powerpc/iseries/it_lp_reg_save.h
include/asm-powerpc/iseries/lpar_map.h
include/asm-powerpc/iseries/mf.h
include/asm-powerpc/iseries/vio.h
include/asm-powerpc/lppaca.h
include/asm-powerpc/paca.h
include/asm-powerpc/pci-bridge.h
include/asm-powerpc/ppc_asm.h
include/asm-powerpc/prom.h
include/asm-powerpc/spinlock.h
include/asm-powerpc/synch.h
include/asm-powerpc/system.h
include/asm-powerpc/thread_info.h
include/asm-powerpc/time.h
include/asm-powerpc/topology.h
include/asm-ppc/system.h
include/asm-s390/elf.h
include/asm-s390/processor.h
include/asm-s390/system.h
include/asm-s390/thread_info.h
include/asm-sh/ptrace.h
include/asm-sh/system.h
include/asm-sh/thread_info.h
include/asm-sh64/thread_info.h
include/asm-sparc/system.h
include/asm-sparc/thread_info.h
include/asm-sparc64/elf.h
include/asm-sparc64/mmu_context.h
include/asm-sparc64/processor.h
include/asm-sparc64/system.h
include/asm-um/thread_info.h
include/asm-v850/processor.h
include/asm-v850/thread_info.h
include/asm-x86_64/compat.h
include/asm-x86_64/i387.h
include/asm-x86_64/processor.h
include/asm-x86_64/system.h
include/asm-x86_64/thread_info.h
include/asm-x86_64/topology.h
include/asm-xtensa/processor.h
include/asm-xtensa/ptrace.h
include/asm-xtensa/thread_info.h
include/linux/auxvec.h
include/linux/device.h
include/linux/fsl_devices.h
include/linux/hrtimer.h
include/linux/ide.h
include/linux/kernel.h
include/linux/ktime.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/x_tables.h [new file with mode: 0644]
include/linux/netfilter/xt_CLASSIFY.h [new file with mode: 0644]
include/linux/netfilter/xt_CONNMARK.h [new file with mode: 0644]
include/linux/netfilter/xt_MARK.h [new file with mode: 0644]
include/linux/netfilter/xt_NFQUEUE.h [new file with mode: 0644]
include/linux/netfilter/xt_comment.h [new file with mode: 0644]
include/linux/netfilter/xt_connbytes.h [new file with mode: 0644]
include/linux/netfilter/xt_connmark.h [new file with mode: 0644]
include/linux/netfilter/xt_conntrack.h [new file with mode: 0644]
include/linux/netfilter/xt_dccp.h [new file with mode: 0644]
include/linux/netfilter/xt_helper.h [new file with mode: 0644]
include/linux/netfilter/xt_length.h [new file with mode: 0644]
include/linux/netfilter/xt_limit.h [new file with mode: 0644]
include/linux/netfilter/xt_mac.h [new file with mode: 0644]
include/linux/netfilter/xt_mark.h [new file with mode: 0644]
include/linux/netfilter/xt_physdev.h [new file with mode: 0644]
include/linux/netfilter/xt_pkttype.h [new file with mode: 0644]
include/linux/netfilter/xt_realm.h [new file with mode: 0644]
include/linux/netfilter/xt_sctp.h [new file with mode: 0644]
include/linux/netfilter/xt_state.h [new file with mode: 0644]
include/linux/netfilter/xt_string.h [new file with mode: 0644]
include/linux/netfilter/xt_tcpmss.h [new file with mode: 0644]
include/linux/netfilter/xt_tcpudp.h [new file with mode: 0644]
include/linux/netfilter_arp/arp_tables.h
include/linux/netfilter_ipv4/ip_conntrack.h
include/linux/netfilter_ipv4/ip_tables.h
include/linux/netfilter_ipv4/ipt_CLASSIFY.h
include/linux/netfilter_ipv4/ipt_CONNMARK.h
include/linux/netfilter_ipv4/ipt_MARK.h
include/linux/netfilter_ipv4/ipt_NFQUEUE.h
include/linux/netfilter_ipv4/ipt_comment.h
include/linux/netfilter_ipv4/ipt_connbytes.h
include/linux/netfilter_ipv4/ipt_connmark.h
include/linux/netfilter_ipv4/ipt_conntrack.h
include/linux/netfilter_ipv4/ipt_dccp.h
include/linux/netfilter_ipv4/ipt_helper.h
include/linux/netfilter_ipv4/ipt_length.h
include/linux/netfilter_ipv4/ipt_limit.h
include/linux/netfilter_ipv4/ipt_mac.h
include/linux/netfilter_ipv4/ipt_mark.h
include/linux/netfilter_ipv4/ipt_physdev.h
include/linux/netfilter_ipv4/ipt_pkttype.h
include/linux/netfilter_ipv4/ipt_realm.h
include/linux/netfilter_ipv4/ipt_state.h
include/linux/netfilter_ipv4/ipt_string.h
include/linux/netfilter_ipv4/ipt_tcpmss.h
include/linux/netfilter_ipv6/ip6_tables.h
include/linux/netfilter_ipv6/ip6t_MARK.h
include/linux/netfilter_ipv6/ip6t_length.h
include/linux/netfilter_ipv6/ip6t_limit.h
include/linux/netfilter_ipv6/ip6t_mac.h
include/linux/netfilter_ipv6/ip6t_mark.h
include/linux/netfilter_ipv6/ip6t_physdev.h
include/linux/phy.h
include/linux/proc_fs.h
include/linux/sched.h
include/linux/serial_core.h
include/linux/socket.h
include/linux/spi/ads7846.h [new file with mode: 0644]
include/linux/spi/flash.h [new file with mode: 0644]
include/linux/spi/spi.h [new file with mode: 0644]
include/linux/spi/spi_bitbang.h [new file with mode: 0644]
include/linux/tipc.h [new file with mode: 0644]
include/linux/tipc_config.h [new file with mode: 0644]
include/linux/topology.h
include/net/genetlink.h
include/net/ieee80211.h
include/net/netfilter/ipv4/nf_conntrack_ipv4.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_conntrack_tuple.h
include/net/sctp/sctp.h
include/net/tipc/tipc.h [new file with mode: 0644]
include/net/tipc/tipc_bearer.h [new file with mode: 0644]
include/net/tipc/tipc_msg.h [new file with mode: 0644]
include/net/tipc/tipc_port.h [new file with mode: 0644]
include/rdma/ib_verbs.h
kernel/hrtimer.c
kernel/sched.c
mm/mempolicy.c
mm/page_alloc.c
mm/swap.c
mm/tiny-shmem.c
net/Kconfig
net/Makefile
net/bridge/netfilter/ebt_log.c
net/core/filter.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/arpt_mangle.c
net/ipv4/netfilter/arptable_filter.c
net/ipv4/netfilter/ip_conntrack_standalone.c
net/ipv4/netfilter/ip_nat_rule.c
net/ipv4/netfilter/ip_nat_standalone.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/netfilter/ipt_CLASSIFY.c [deleted file]
net/ipv4/netfilter/ipt_CLUSTERIP.c
net/ipv4/netfilter/ipt_CONNMARK.c [deleted file]
net/ipv4/netfilter/ipt_DSCP.c
net/ipv4/netfilter/ipt_ECN.c
net/ipv4/netfilter/ipt_LOG.c
net/ipv4/netfilter/ipt_MARK.c [deleted file]
net/ipv4/netfilter/ipt_MASQUERADE.c
net/ipv4/netfilter/ipt_NETMAP.c
net/ipv4/netfilter/ipt_NFQUEUE.c [deleted file]
net/ipv4/netfilter/ipt_NOTRACK.c [deleted file]
net/ipv4/netfilter/ipt_REDIRECT.c
net/ipv4/netfilter/ipt_REJECT.c
net/ipv4/netfilter/ipt_SAME.c
net/ipv4/netfilter/ipt_TCPMSS.c
net/ipv4/netfilter/ipt_TOS.c
net/ipv4/netfilter/ipt_TTL.c
net/ipv4/netfilter/ipt_ULOG.c
net/ipv4/netfilter/ipt_addrtype.c
net/ipv4/netfilter/ipt_ah.c
net/ipv4/netfilter/ipt_comment.c [deleted file]
net/ipv4/netfilter/ipt_connbytes.c [deleted file]
net/ipv4/netfilter/ipt_connmark.c [deleted file]
net/ipv4/netfilter/ipt_conntrack.c [deleted file]
net/ipv4/netfilter/ipt_dccp.c [deleted file]
net/ipv4/netfilter/ipt_dscp.c
net/ipv4/netfilter/ipt_ecn.c
net/ipv4/netfilter/ipt_esp.c
net/ipv4/netfilter/ipt_hashlimit.c
net/ipv4/netfilter/ipt_helper.c [deleted file]
net/ipv4/netfilter/ipt_iprange.c
net/ipv4/netfilter/ipt_length.c [deleted file]
net/ipv4/netfilter/ipt_limit.c [deleted file]
net/ipv4/netfilter/ipt_mac.c [deleted file]
net/ipv4/netfilter/ipt_mark.c [deleted file]
net/ipv4/netfilter/ipt_multiport.c
net/ipv4/netfilter/ipt_owner.c
net/ipv4/netfilter/ipt_physdev.c [deleted file]
net/ipv4/netfilter/ipt_pkttype.c [deleted file]
net/ipv4/netfilter/ipt_realm.c [deleted file]
net/ipv4/netfilter/ipt_recent.c
net/ipv4/netfilter/ipt_sctp.c [deleted file]
net/ipv4/netfilter/ipt_state.c [deleted file]
net/ipv4/netfilter/ipt_string.c [deleted file]
net/ipv4/netfilter/ipt_tcpmss.c [deleted file]
net/ipv4/netfilter/ipt_tos.c
net/ipv4/netfilter/ipt_ttl.c
net/ipv4/netfilter/iptable_filter.c
net/ipv4/netfilter/iptable_mangle.c
net/ipv4/netfilter/iptable_raw.c
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/xfrm4_state.c
net/ipv6/addrconf.c
net/ipv6/ah6.c
net/ipv6/anycast.c
net/ipv6/esp6.c
net/ipv6/icmp.c
net/ipv6/ip6_flowlabel.c
net/ipv6/ipcomp6.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6_tables.c
net/ipv6/netfilter/ip6t_HL.c
net/ipv6/netfilter/ip6t_LOG.c
net/ipv6/netfilter/ip6t_MARK.c [deleted file]
net/ipv6/netfilter/ip6t_NFQUEUE.c [deleted file]
net/ipv6/netfilter/ip6t_REJECT.c
net/ipv6/netfilter/ip6t_ah.c
net/ipv6/netfilter/ip6t_dst.c
net/ipv6/netfilter/ip6t_esp.c
net/ipv6/netfilter/ip6t_eui64.c
net/ipv6/netfilter/ip6t_frag.c
net/ipv6/netfilter/ip6t_hbh.c
net/ipv6/netfilter/ip6t_hl.c
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/ip6t_length.c [deleted file]
net/ipv6/netfilter/ip6t_limit.c [deleted file]
net/ipv6/netfilter/ip6t_mac.c [deleted file]
net/ipv6/netfilter/ip6t_mark.c [deleted file]
net/ipv6/netfilter/ip6t_multiport.c
net/ipv6/netfilter/ip6t_owner.c
net/ipv6/netfilter/ip6t_physdev.c [deleted file]
net/ipv6/netfilter/ip6t_rt.c
net/ipv6/netfilter/ip6table_filter.c
net/ipv6/netfilter/ip6table_mangle.c
net/ipv6/netfilter/ip6table_raw.c
net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/xfrm6_state.c
net/ipv6/xfrm6_tunnel.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/nf_conntrack_ftp.c
net/netfilter/nf_conntrack_standalone.c
net/netfilter/x_tables.c [new file with mode: 0644]
net/netfilter/xt_CLASSIFY.c [new file with mode: 0644]
net/netfilter/xt_CONNMARK.c [new file with mode: 0644]
net/netfilter/xt_MARK.c [new file with mode: 0644]
net/netfilter/xt_NFQUEUE.c [new file with mode: 0644]
net/netfilter/xt_NOTRACK.c [new file with mode: 0644]
net/netfilter/xt_comment.c [new file with mode: 0644]
net/netfilter/xt_connbytes.c [new file with mode: 0644]
net/netfilter/xt_connmark.c [new file with mode: 0644]
net/netfilter/xt_conntrack.c [new file with mode: 0644]
net/netfilter/xt_dccp.c [new file with mode: 0644]
net/netfilter/xt_helper.c [new file with mode: 0644]
net/netfilter/xt_length.c [new file with mode: 0644]
net/netfilter/xt_limit.c [new file with mode: 0644]
net/netfilter/xt_mac.c [new file with mode: 0644]
net/netfilter/xt_mark.c [new file with mode: 0644]
net/netfilter/xt_physdev.c [new file with mode: 0644]
net/netfilter/xt_pkttype.c [new file with mode: 0644]
net/netfilter/xt_realm.c [new file with mode: 0644]
net/netfilter/xt_sctp.c [new file with mode: 0644]
net/netfilter/xt_state.c [new file with mode: 0644]
net/netfilter/xt_string.c [new file with mode: 0644]
net/netfilter/xt_tcpmss.c [new file with mode: 0644]
net/netfilter/xt_tcpudp.c [new file with mode: 0644]
net/netlink/genetlink.c
net/sched/Kconfig
net/sched/act_ipt.c
net/sctp/ipv6.c
net/sctp/sm_statefuns.c
net/tipc/Kconfig [new file with mode: 0644]
net/tipc/Makefile [new file with mode: 0644]
net/tipc/addr.c [new file with mode: 0644]
net/tipc/addr.h [new file with mode: 0644]
net/tipc/bcast.c [new file with mode: 0644]
net/tipc/bcast.h [new file with mode: 0644]
net/tipc/bearer.c [new file with mode: 0644]
net/tipc/bearer.h [new file with mode: 0644]
net/tipc/cluster.c [new file with mode: 0644]
net/tipc/cluster.h [new file with mode: 0644]
net/tipc/config.c [new file with mode: 0644]
net/tipc/config.h [new file with mode: 0644]
net/tipc/core.c [new file with mode: 0644]
net/tipc/core.h [new file with mode: 0644]
net/tipc/dbg.c [new file with mode: 0644]
net/tipc/dbg.h [new file with mode: 0644]
net/tipc/discover.c [new file with mode: 0644]
net/tipc/discover.h [new file with mode: 0644]
net/tipc/eth_media.c [new file with mode: 0644]
net/tipc/handler.c [new file with mode: 0644]
net/tipc/link.c [new file with mode: 0644]
net/tipc/link.h [new file with mode: 0644]
net/tipc/msg.c [new file with mode: 0644]
net/tipc/msg.h [new file with mode: 0644]
net/tipc/name_distr.c [new file with mode: 0644]
net/tipc/name_distr.h [new file with mode: 0644]
net/tipc/name_table.c [new file with mode: 0644]
net/tipc/name_table.h [new file with mode: 0644]
net/tipc/net.c [new file with mode: 0644]
net/tipc/net.h [new file with mode: 0644]
net/tipc/netlink.c [new file with mode: 0644]
net/tipc/node.c [new file with mode: 0644]
net/tipc/node.h [new file with mode: 0644]
net/tipc/node_subscr.c [new file with mode: 0644]
net/tipc/node_subscr.h [new file with mode: 0644]
net/tipc/port.c [new file with mode: 0644]
net/tipc/port.h [new file with mode: 0644]
net/tipc/ref.c [new file with mode: 0644]
net/tipc/ref.h [new file with mode: 0644]
net/tipc/socket.c [new file with mode: 0644]
net/tipc/subscr.c [new file with mode: 0644]
net/tipc/subscr.h [new file with mode: 0644]
net/tipc/user_reg.c [new file with mode: 0644]
net/tipc/user_reg.h [new file with mode: 0644]
net/tipc/zone.c [new file with mode: 0644]
net/tipc/zone.h [new file with mode: 0644]
security/selinux/avc.c
sound/oss/dmasound/dmasound.h
sound/oss/dmasound/dmasound_atari.c
sound/oss/dmasound/dmasound_paula.c
sound/oss/dmasound/dmasound_q40.c
sound/oss/dmasound/trans_16.c

diff --git a/Documentation/SubmittingDrivers b/Documentation/SubmittingDrivers
index dd311cff1cc30b78ebb1dbe8593fb47866e0a78b..6bd30fdd0786b9a7c64aa4c34f5a732b4c7096e0 100644 (file)
@@ -143,7 +143,7 @@ KernelNewbies:
        http://kernelnewbies.org/
 
 Linux USB project:
-       http://linux-usb.sourceforge.net/
+       http://www.linux-usb.org/
 
 How to NOT write kernel driver by arjanv@redhat.com
        http://people.redhat.com/arjanv/olspaper.pdf
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 6198e5ebcf65be906f801acd1f5df836c3d4d93c..c2c85bcb3d438b983ccc1535acbbd185867e26c3 100644 (file)
@@ -478,10 +478,11 @@ Andrew Morton, "The perfect patch" (tpp).
 Jeff Garzik, "Linux kernel patch submission format."
   <http://linux.yyz.us/patch-format.html>
 
-Greg Kroah, "How to piss off a kernel subsystem maintainer".
+Greg Kroah-Hartman, "How to piss off a kernel subsystem maintainer".
   <http://www.kroah.com/log/2005/03/31/>
   <http://www.kroah.com/log/2005/07/08/>
   <http://www.kroah.com/log/2005/10/19/>
+  <http://www.kroah.com/log/2006/01/11/>
 
 NO!!!! No more huge patch bombs to linux-kernel@vger.kernel.org people!.
   <http://marc.theaimsgroup.com/?l=linux-kernel&m=112112749912944&w=2>
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index dd0bfc291a682e60c39441d07ef0be026dceabee..fe11fccf7e41bd7bb1abd38be8c73cb52775cc2d 100644 (file)
@@ -856,6 +856,49 @@ running once the system is up.
 
        mga=            [HW,DRM]
 
+       migration_cost=
+                       [KNL,SMP] debug: override scheduler migration costs
+                       Format: <level-1-usecs>,<level-2-usecs>,...
+                       This debugging option can be used to override the
+                       default scheduler migration cost matrix. The numbers
+                       are indexed by 'CPU domain distance'.
+                       E.g. migration_cost=1000,2000,3000 on an SMT NUMA
+                       box will set up an intra-core migration cost of
+                       1 msec, an inter-core migration cost of 2 msecs,
+                       and an inter-node migration cost of 3 msecs.
+
+                       WARNING: using the wrong values here can break
+                       scheduler performance, so it's only for scheduler
+                       development purposes, not production environments.
+
+       migration_debug=
+                       [KNL,SMP] migration cost auto-detect verbosity
+                       Format=<0|1|2>
+                       If a system's migration matrix reported at bootup
+                       seems erroneous then this option can be used to
+                       increase verbosity of the detection process.
+                       We default to 0 (no extra messages), 1 will print
+                       some more information, and 2 will be really
+                       verbose (probably only useful if you also have a
+                       serial console attached to the system).
+
+       migration_factor=
+                       [KNL,SMP] multiply/divide migration costs by a factor
+                       Format=<percent>
+                       This debug option can be used to proportionally
+                       increase or decrease the auto-detected migration
+                       costs for all entries of the migration matrix.
+                       E.g. migration_factor=150 will increase migration
+                       costs by 50% (and thus the scheduler will be less
+                       eager to migrate cache-hot tasks), while
+                       migration_factor=80 will decrease migration costs
+                       by 20% (and the scheduler will be more eager to
+                       migrate tasks).
+
+                       WARNING: using the wrong values here can break
+                       scheduler performance, so it's only for scheduler
+                       development purposes, not production environments.
+
        mousedev.tap_time=
                        [MOUSE] Maximum time between finger touching and
                        leaving touchpad surface for touch to be considered
diff --git a/Documentation/spi/butterfly b/Documentation/spi/butterfly
new file mode 100644 (file)
index 0000000..a2e8c8d
--- /dev/null
@@ -0,0 +1,57 @@
+spi_butterfly - parport-to-butterfly adapter driver
+===================================================
+
+This is a hardware and software project that includes building and using
+a parallel port adapter cable, together with an "AVR Butterfly" to run
+firmware for user interfacing and/or sensors.  A Butterfly is a $US20
+battery powered card with an AVR microcontroller and lots of goodies:
+sensors, LCD, flash, toggle stick, and more.  You can use AVR-GCC to
+develop firmware for this, and flash it using this adapter cable.
+
+You can make this adapter from an old printer cable and solder things
+directly to the Butterfly.  Or (if you have the parts and skills) you
+can come up with something fancier, providing circuit protection to the
+Butterfly and the printer port, or with a better power supply than two
+signal pins from the printer port.
+
+
+The first cable connections will hook Linux up to one SPI bus, with the
+AVR and a DataFlash chip; and to the AVR reset line.  This is all you
+need to reflash the firmware, and the pins are the standard Atmel "ISP"
+connector pins (used also on non-Butterfly AVR boards).
+
+       Signal    Butterfly       Parport (DB-25)
+       ------    ---------       ---------------
+       SCK     = J403.PB1/SCK  = pin 2/D0
+       RESET   = J403.nRST     = pin 3/D1
+       VCC     = J403.VCC_EXT  = pin 8/D6
+       MOSI    = J403.PB2/MOSI = pin 9/D7
+       MISO    = J403.PB3/MISO = pin 11/S7,nBUSY
+       GND     = J403.GND      = pin 23/GND
+
+Then to let Linux master that bus to talk to the DataFlash chip, you must
+(a) flash new firmware that disables SPI (set PRR.2, and disable pullups
+by clearing PORTB.[0-3]); (b) configure the mtd_dataflash driver; and
+(c) cable in the chipselect.
+
+       Signal    Butterfly       Parport (DB-25)
+       ------    ---------       ---------------
+       VCC     = J400.VCC_EXT  = pin 7/D5
+       SELECT  = J400.PB0/nSS  = pin 17/C3,nSELECT
+       GND     = J400.GND      = pin 24/GND
+
+The "USI" controller, using J405, can be used for a second SPI bus.  That
+would let you talk to the AVR over SPI, running firmware that makes it act
+as an SPI slave, while letting either Linux or the AVR use the DataFlash.
+There are plenty of spare parport pins to wire this one up, such as:
+
+       Signal    Butterfly       Parport (DB-25)
+       ------    ---------       ---------------
+       SCK     = J403.PE4/USCK = pin 5/D3
+       MOSI    = J403.PE5/DI   = pin 6/D4
+       MISO    = J403.PE6/DO   = pin 12/S5,nPAPEROUT
+       GND     = J403.GND      = pin 22/GND
+
+       IRQ     = J402.PF4      = pin 10/S6,ACK
+       GND     = J402.GND(P2)  = pin 25/GND
+
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
new file mode 100644 (file)
index 0000000..a5ffba3
--- /dev/null
@@ -0,0 +1,457 @@
+Overview of Linux kernel SPI support
+====================================
+
+02-Dec-2005
+
+What is SPI?
+------------
+The "Serial Peripheral Interface" (SPI) is a synchronous four wire serial
+link used to connect microcontrollers to sensors, memory, and peripherals.
+
+The three signal wires hold a clock (SCLK, often on the order of 10 MHz),
+and parallel data lines with "Master Out, Slave In" (MOSI) or "Master In,
+Slave Out" (MISO) signals.  (Other names are also used.)  There are four
+clocking modes through which data is exchanged; mode-0 and mode-3 are most
+commonly used.  Each clock cycle shifts data out and data in; the clock
+doesn't cycle except when there is data to shift.
+
+SPI masters may use a "chip select" line to activate a given SPI slave
+device, so those three signal wires may be connected to several chips
+in parallel.  All SPI slaves support chipselects.  Some devices have
+other signals, often including an interrupt to the master.
+
+Unlike serial busses like USB or SMBUS, even low level protocols for
+SPI slave functions are usually not interoperable between vendors
+(except for cases like SPI memory chips).
+
+  - SPI may be used for request/response style device protocols, as with
+    touchscreen sensors and memory chips.
+
+  - It may also be used to stream data in either direction (half duplex),
+    or both of them at the same time (full duplex).
+
+  - Some devices may use eight bit words.  Others may use different word
+    lengths, such as streams of 12-bit or 20-bit digital samples.
+
+In the same way, SPI slaves will only rarely support any kind of automatic
+discovery/enumeration protocol.  The tree of slave devices accessible from
+a given SPI master will normally be set up manually, with configuration
+tables.
+
+SPI is only one of the names used by such four-wire protocols, and
+most controllers have no problem handling "MicroWire" (think of it as
+half-duplex SPI, for request/response protocols), SSP ("Synchronous
+Serial Protocol"), PSP ("Programmable Serial Protocol"), and other
+related protocols.
+
+Microcontrollers often support both master and slave sides of the SPI
+protocol.  This document (and Linux) currently only supports the master
+side of SPI interactions.
+
+
+Who uses it?  On what kinds of systems?
+---------------------------------------
+Linux developers using SPI are probably writing device drivers for embedded
+systems boards.  SPI is used to control external chips, and it is also a
+protocol supported by every MMC or SD memory card.  (The older "DataFlash"
+cards, predating MMC cards but using the same connectors and card shape,
+support only SPI.)  Some PC hardware uses SPI flash for BIOS code.
+
+SPI slave chips range from digital/analog converters used for analog
+sensors and codecs, to memory, to peripherals like USB controllers
+or Ethernet adapters; and more.
+
+Most systems using SPI will integrate a few devices on a mainboard.
+Some provide SPI links on expansion connectors; in cases where no
+dedicated SPI controller exists, GPIO pins can be used to create a
+low speed "bitbanging" adapter.  Very few systems will "hotplug" an SPI
+controller; the reasons to use SPI focus on low cost and simple operation,
+and if dynamic reconfiguration is important, USB will often be a more
+appropriate low-pincount peripheral bus.
+
+Many microcontrollers that can run Linux integrate one or more I/O
+interfaces with SPI modes.  Given SPI support, they could use MMC or SD
+cards without needing a special purpose MMC/SD/SDIO controller.
+
+
+How do these driver programming interfaces work?
+------------------------------------------------
+The <linux/spi/spi.h> header file includes kerneldoc, as does the
+main source code, and you should certainly read that.  This is just
+an overview, so you get the big picture before the details.
+
+SPI requests always go into I/O queues.  Requests for a given SPI device
+are always executed in FIFO order, and complete asynchronously through
+completion callbacks.  There are also some simple synchronous wrappers
+for those calls, including ones for common transaction types like writing
+a command and then reading its response.
+
+There are two types of SPI driver, here called:
+
+  Controller drivers ... these are often built in to System-On-Chip
+       processors, and often support both Master and Slave roles.
+       These drivers touch hardware registers and may use DMA.
+       Or they can be PIO bitbangers, needing just GPIO pins.
+
+  Protocol drivers ... these pass messages through the controller
+       driver to communicate with a Slave or Master device on the
+       other side of an SPI link.
+
+So for example one protocol driver might talk to the MTD layer to export
+data to filesystems stored on SPI flash like DataFlash; and others might
+control audio interfaces, present touchscreen sensors as input interfaces,
+or monitor temperature and voltage levels during industrial processing.
+And those might all be sharing the same controller driver.
+
+A "struct spi_device" encapsulates the master-side interface between
+those two types of driver.  At this writing, Linux has no slave side
+programming interface.
+
+There is a minimal core of SPI programming interfaces, focussing on
+using driver model to connect controller and protocol drivers using
+device tables provided by board specific initialization code.  SPI
+shows up in sysfs in several locations:
+
+   /sys/devices/.../CTLR/spiB.C ... spi_device for on bus "B",
+       chipselect C, accessed through CTLR.
+
+   /sys/devices/.../CTLR/spiB.C/modalias ... identifies the driver
+       that should be used with this device (for hotplug/coldplug)
+
+   /sys/bus/spi/devices/spiB.C ... symlink to the physical
+       spiB-C device
+
+   /sys/bus/spi/drivers/D ... driver for one or more spi*.* devices
+
+   /sys/class/spi_master/spiB ... class device for the controller
+       managing bus "B".  All the spiB.* devices share the same
+       physical SPI bus segment, with SCLK, MOSI, and MISO.
+
+
+How does board-specific init code declare SPI devices?
+------------------------------------------------------
+Linux needs several kinds of information to properly configure SPI devices.
+That information is normally provided by board-specific code, even for
+chips that do support some sort of automated discovery/enumeration.
+
+DECLARE CONTROLLERS
+
+The first kind of information is a list of what SPI controllers exist.
+For System-on-Chip (SOC) based boards, these will usually be platform
+devices, and the controller may need some platform_data in order to
+operate properly.  The "struct platform_device" will include resources
+like the physical address of the controller's first register and its IRQ.
+
+Platforms will often abstract the "register SPI controller" operation,
+maybe coupling it with code to initialize pin configurations, so that
+the arch/.../mach-*/board-*.c files for several boards can all share the
+same basic controller setup code.  This is because most SOCs have several
+SPI-capable controllers, and only the ones actually usable on a given
+board should normally be set up and registered.
+
+So for example arch/.../mach-*/board-*.c files might have code like:
+
+       #include <asm/arch/spi.h>       /* for mysoc_spi_data */
+
+       /* if your mach-* infrastructure doesn't support kernels that can
+        * run on multiple boards, pdata wouldn't benefit from "__init".
+        */
+       static struct mysoc_spi_data __initdata pdata = { ... };
+
+       static __init board_init(void)
+       {
+               ...
+               /* this board only uses SPI controller #2 */
+               mysoc_register_spi(2, &pdata);
+               ...
+       }
+
+And SOC-specific utility code might look something like:
+
+       #include <asm/arch/spi.h>
+
+       static struct platform_device spi2 = { ... };
+
+       void mysoc_register_spi(unsigned n, struct mysoc_spi_data *pdata)
+       {
+               struct mysoc_spi_data *pdata2;
+
+               pdata2 = kmalloc(sizeof *pdata2, GFP_KERNEL);
+               *pdata2 = *pdata;
+               ...
+               if (n == 2) {
+                       spi2.dev.platform_data = pdata2;
+                       platform_device_register(&spi2);
+
+                       /* also: set up pin modes so the spi2 signals are
+                        * visible on the relevant pins ... bootloaders on
+                        * production boards may already have done this, but
+                        * developer boards will often need Linux to do it.
+                        */
+               }
+               ...
+       }
+
+Notice how the platform_data for boards may be different, even if the
+same SOC controller is used.  For example, on one board SPI might use
+an external clock, while another derives the SPI clock from current
+settings of some master clock.
+
+
+DECLARE SLAVE DEVICES
+
+The second kind of information is a list of what SPI slave devices exist
+on the target board, often with some board-specific data needed for the
+driver to work correctly.
+
+Normally your arch/.../mach-*/board-*.c files would provide a small table
+listing the SPI devices on each board.  (This would typically be only a
+small handful.)  That might look like:
+
+       static struct ads7846_platform_data ads_info = {
+               .vref_delay_usecs       = 100,
+               .x_plate_ohms           = 580,
+               .y_plate_ohms           = 410,
+       };
+
+       static struct spi_board_info spi_board_info[] __initdata = {
+       {
+               .modalias       = "ads7846",
+               .platform_data  = &ads_info,
+               .mode           = SPI_MODE_0,
+               .irq            = GPIO_IRQ(31),
+               .max_speed_hz   = 120000 /* max sample rate at 3V */ * 16,
+               .bus_num        = 1,
+               .chip_select    = 0,
+       },
+       };
+
+Again, notice how board-specific information is provided; each chip may need
+several types.  This example shows generic constraints like the fastest SPI
+clock to allow (a function of board voltage in this case) or how an IRQ pin
+is wired, plus chip-specific constraints like an important delay that's
+changed by the capacitance at one pin.
+
+(There's also "controller_data", information that may be useful to the
+controller driver.  An example would be peripheral-specific DMA tuning
+data or chipselect callbacks.  This is stored in spi_device later.)
+
+The board_info should provide enough information to let the system work
+without the chip's driver being loaded.  The most troublesome aspect of
+that is likely the SPI_CS_HIGH bit in the spi_device.mode field, since
+sharing a bus with a device that interprets chipselect "backwards" is
+not possible.
+
+Then your board initialization code would register that table with the SPI
+infrastructure, so that it's available later when the SPI master controller
+driver is registered:
+
+       spi_register_board_info(spi_board_info, ARRAY_SIZE(spi_board_info));
+
+Like with other static board-specific setup, you won't unregister those.
+
+The widely used "card" style computers bundle memory, cpu, and little else
+onto a card that's maybe just thirty square centimeters.  On such systems,
+your arch/.../mach-.../board-*.c file would primarily provide information
+about the devices on the mainboard into which such a card is plugged.  That
+certainly includes SPI devices hooked up through the card connectors!
+
+
+NON-STATIC CONFIGURATIONS
+
+Developer boards often play by different rules than product boards, and one
+example is the potential need to hotplug SPI devices and/or controllers.
+
+For those cases you might need to use spi_busnum_to_master() to look
+up the spi bus master, and will likely need spi_new_device() to provide the
+board info based on the board that was hotplugged.  Of course, you'd later
+call at least spi_unregister_device() when that board is removed.
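+
+As a rough illustration (only a sketch: the exact signatures should be
+checked against <linux/spi/spi.h>, and "hotplug_board_info" is a made-up
+spi_board_info describing the card), such hotplug support might look like:
+
+       /* a card was plugged in: find bus 1, then describe the new device */
+       struct spi_master       *master = spi_busnum_to_master(1);
+       struct spi_device       *card_dev = NULL;
+
+       if (master)
+               card_dev = spi_new_device(master, &hotplug_board_info);
+
+       /* ... and later, when that card is removed again */
+       if (card_dev)
+               spi_unregister_device(card_dev);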
+
+When Linux includes support for MMC/SD/SDIO/DataFlash cards through SPI, those
+configurations will also be dynamic.  Fortunately, those devices all support
+basic device identification probes, so that support should hotplug normally.
+
+
+How do I write an "SPI Protocol Driver"?
+----------------------------------------
+All SPI drivers are currently kernel drivers.  A userspace driver API
+would just be another kernel driver, probably offering some lowlevel
+access through aio_read(), aio_write(), and ioctl() calls and using the
+standard userspace sysfs mechanisms to bind to a given SPI device.
+
+SPI protocol drivers somewhat resemble platform device drivers:
+
+       static struct spi_driver CHIP_driver = {
+               .driver = {
+                       .name           = "CHIP",
+                       .bus            = &spi_bus_type,
+                       .owner          = THIS_MODULE,
+               },
+
+               .probe          = CHIP_probe,
+               .remove         = __devexit_p(CHIP_remove),
+               .suspend        = CHIP_suspend,
+               .resume         = CHIP_resume,
+       };
+
+The driver core will automatically attempt to bind this driver to any SPI
+device whose board_info gave a modalias of "CHIP".  Your probe() code
+might look like this unless you're creating a class_device:
+
+       static int __devinit CHIP_probe(struct spi_device *spi)
+       {
+               struct CHIP                     *chip;
+               struct CHIP_platform_data       *pdata;
+
+               /* assuming the driver requires board-specific data: */
+               pdata = spi->dev.platform_data;
+               if (!pdata)
+                       return -ENODEV;
+
+               /* get memory for driver's per-chip state */
+               chip = kzalloc(sizeof *chip, GFP_KERNEL);
+               if (!chip)
+                       return -ENOMEM;
+               dev_set_drvdata(&spi->dev, chip);
+
+               ... etc
+               return 0;
+       }
+
+As soon as it enters probe(), the driver may issue I/O requests to
+the SPI device using "struct spi_message".  When remove() returns,
+the driver guarantees that it won't submit any more such messages.
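+
+A matching remove() is usually just the mirror image of probe().  Assuming
+probe() allocated nothing beyond the per-chip state shown above, a minimal
+sketch (not taken from any real driver) could be:
+
+       static int __devexit CHIP_remove(struct spi_device *spi)
+       {
+               struct CHIP             *chip = dev_get_drvdata(&spi->dev);
+
+               /* no more spi_message submissions once this returns */
+               dev_set_drvdata(&spi->dev, NULL);
+               kfree(chip);
+               return 0;
+       }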
+
+  - An spi_message is a sequence of protocol operations, executed
+    as one atomic sequence.  SPI driver controls include:
+
+      + when bidirectional reads and writes start ... by how its
+        sequence of spi_transfer requests is arranged;
+
+      + optionally defining short delays after transfers ... using
+        the spi_transfer.delay_usecs setting;
+
+      + whether the chipselect becomes inactive after a transfer and
+        any delay ... by using the spi_transfer.cs_change flag;
+
+      + hinting whether the next message is likely to go to this same
+        device ... using the spi_transfer.cs_change flag on the last
+       transfer in that atomic group, and potentially saving costs
+       for chip deselect and select operations.
+
+  - Follow standard kernel rules, and provide DMA-safe buffers in
+    your messages.  That way controller drivers using DMA aren't forced
+    to make extra copies unless the hardware requires it (e.g. working
+    around hardware errata that force the use of bounce buffering).
+
+    If standard dma_map_single() handling of these buffers is inappropriate,
+    you can use spi_message.is_dma_mapped to tell the controller driver
+    that you've already provided the relevant DMA addresses.
+
+  - The basic I/O primitive is spi_async().  Async requests may be
+    issued in any context (irq handler, task, etc) and completion
+    is reported using a callback provided with the message.
+    After any detected error, the chip is deselected and processing
+    of that spi_message is aborted.  (A short sketch follows this list.)
+
+  - There are also synchronous wrappers like spi_sync(), and wrappers
+    like spi_read(), spi_write(), and spi_write_then_read().  These
+    may be issued only in contexts that may sleep, and they're all
+    clean (and small, and "optional") layers over spi_async().
+
+  - The spi_write_then_read() call, and convenience wrappers around
+    it, should only be used with small amounts of data where the
+    cost of an extra copy may be ignored.  It's designed to support
+    common RPC-style requests, such as writing an eight bit command
+    and reading a sixteen bit response -- spi_w8r16() being one of its
+    wrappers, doing exactly that.
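+
+Putting several of those pieces together, an asynchronous "write one command
+byte, read two response bytes" request might be built up as sketched below.
+This assumes the array-style layout set up by spi_message_alloc() and field
+names like transfers[], complete, and context; check <linux/spi/spi.h> for
+the authoritative definitions.  CHIP_msg_done is a completion handler the
+driver would supply, and "buf" must be DMA-safe heap memory (see below).
+
+       struct spi_message      *m = spi_message_alloc(2, GFP_KERNEL);
+
+       if (!m)
+               return -ENOMEM;
+       m->transfers[0].tx_buf = buf;           /* one command byte out */
+       m->transfers[0].len = 1;
+       m->transfers[1].rx_buf = buf + 1;       /* two response bytes in */
+       m->transfers[1].len = 2;
+       m->complete = CHIP_msg_done;            /* may run in irq context */
+       m->context = chip;
+
+       status = spi_async(spi, m);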
+
+Some drivers may need to modify spi_device characteristics like the
+transfer mode, wordsize, or clock rate.  This is done with spi_setup(),
+which would normally be called from probe() before the first I/O is
+done to the device.
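+
+For instance (purely illustrative; the mode and max_speed_hz fields are
+assumed to mirror the board_info fields of the same names), probe() might
+override conservative board defaults before the first transfer:
+
+       /* this chip revision handles a 2 MHz clock and SPI mode 0 */
+       spi->max_speed_hz = 2 * 1000 * 1000;
+       spi->mode = SPI_MODE_0;
+       status = spi_setup(spi);
+       if (status < 0)
+               return status;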
+
+While "spi_device" would be the bottom boundary of the driver, the
+upper boundaries might include sysfs (especially for sensor readings),
+the input layer, ALSA, networking, MTD, the character device framework,
+or other Linux subsystems.
+
+Note that there are two types of memory your driver must manage as part
+of interacting with SPI devices.
+
+  - I/O buffers use the usual Linux rules, and must be DMA-safe.
+    You'd normally allocate them from the heap or free page pool.
+    Don't use the stack, or anything that's declared "static".
+
+  - The spi_message and spi_transfer metadata used to glue those
+    I/O buffers into a group of protocol transactions.  These can
+    be allocated anywhere it's convenient, including as part of
+    other allocate-once driver data structures.  Zero-init these.
+
+If you like, spi_message_alloc() and spi_message_free() convenience
+routines are available to allocate and zero-initialize an spi_message
+with several transfers.
+
+
+How do I write an "SPI Master Controller Driver"?
+-------------------------------------------------
+An SPI controller will probably be registered on the platform_bus; write
+a driver to bind to the device, whichever bus is involved.
+
+The main task of this type of driver is to provide an "spi_master".
+Use spi_alloc_master() to allocate the master, and class_get_devdata()
+to get the driver-private data allocated for that device.
+
+       struct spi_master       *master;
+       struct CONTROLLER       *c;
+
+       master = spi_alloc_master(dev, sizeof *c);
+       if (!master)
+               return -ENODEV;
+
+       c = class_get_devdata(&master->cdev);
+
+The driver will initialize the fields of that spi_master, including the
+bus number (maybe the same as the platform device ID) and three methods
+used to interact with the SPI core and SPI protocol drivers.  It will
+also initialize its own internal state.
+
+    master->setup(struct spi_device *spi)
+       This sets up the device clock rate, SPI mode, and word sizes.
+       Drivers may change the defaults provided by board_info, and then
+       call spi_setup(spi) to invoke this routine.  It may sleep.
+
+    master->transfer(struct spi_device *spi, struct spi_message *message)
+       This must not sleep.  Its responsibility is to arrange that the
+       transfer happens and its complete() callback is issued; the two
+       will normally happen later, after other transfers complete.
+
+    master->cleanup(struct spi_device *spi)
+       Your controller driver may use spi_device.controller_state to hold
+       state it dynamically associates with that device.  If you do that,
+       be sure to provide the cleanup() method to free that state.
+
+The bulk of the driver will be managing the I/O queue fed by transfer().
+
+That queue could be purely conceptual.  For example, a driver used only
+    for low-frequency sensor access might be fine using synchronous PIO.
+
+But the queue will probably be very real, using message->queue, PIO,
+often DMA (especially if the root filesystem is in SPI flash), and
+execution contexts like IRQ handlers, tasklets, or workqueues (such
+as keventd).  Your driver can be as fancy, or as simple, as you need.
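+
+As a sketch of that queued approach, the transfer() method might just
+park the message and kick a worker, roughly as below.  Here
+"struct foo_controller" and its lock, msg_queue, workqueue, and work
+members are hypothetical driver-private state; only message->queue and
+the class_get_devdata() access come from the framework as described
+above.  The work function would then walk msg_queue, perform each
+transfer, and invoke every message's complete() callback.
+
+       /* must not sleep; just enqueue the message and poke the worker */
+       static int foo_transfer(struct spi_device *spi, struct spi_message *m)
+       {
+               struct foo_controller   *c;
+               unsigned long           flags;
+
+               c = class_get_devdata(&spi->master->cdev);
+
+               spin_lock_irqsave(&c->lock, flags);
+               list_add_tail(&m->queue, &c->msg_queue);
+               queue_work(c->workqueue, &c->work);
+               spin_unlock_irqrestore(&c->lock, flags);
+
+               return 0;
+       }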
+
+
+THANKS TO
+---------
+Contributors to Linux-SPI discussions include (in alphabetical order,
+by last name):
+
+David Brownell
+Russell King
+Dmitry Pervushin
+Stephen Street
+Mark Underwood
+Andrew Victor
+Vitaly Wool
+
index 0db72a36e2453e1f47007a6db14ad37ae3004829..71693c5c31372f55328ba4024230e78c98b501f4 100644 (file)
@@ -2519,6 +2519,19 @@ P:     Romain Lievin
 M:     roms@lpg.ticalc.org
 S:     Maintained
 
+TIPC NETWORK LAYER
+P:     Per Liden
+M:     per.liden@nospam.ericsson.com
+P:     Jon Maloy
+M:     jon.maloy@nospam.ericsson.com
+P:     Allan Stephens
+M:     allan.stephens@nospam.windriver.com
+L:     tipc-discussion@lists.sourceforge.net
+W:     http://tipc.sourceforge.net/
+W:     http://tipc.cslab.ericsson.net/
+T:     git tipc.cslab.ericsson.net:/pub/git/tipc.git
+S:     Maintained
+
 TLAN NETWORK DRIVER
 P:     Samuel Chessman
 M:     chessman@tux.org
index abb739b88ed15603400495cd6cadab48ff63a1ac..9924fd07743abfd5bb4cda9e78ac3e58ca72a9d7 100644 (file)
@@ -276,7 +276,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
        extern void ret_from_fork(void);
 
-       struct thread_info *childti = p->thread_info;
+       struct thread_info *childti = task_thread_info(p);
        struct pt_regs * childregs;
        struct switch_stack * childstack, *stack;
        unsigned long stack_offset, settls;
@@ -285,7 +285,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        if (!(regs->ps & 8))
                stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
        childregs = (struct pt_regs *)
-         (stack_offset + PAGE_SIZE + (long) childti);
+         (stack_offset + PAGE_SIZE + task_stack_page(p));
                
        *childregs = *regs;
        settls = regs->r20;
@@ -428,30 +428,15 @@ dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
 int
 dump_elf_task(elf_greg_t *dest, struct task_struct *task)
 {
-       struct thread_info *ti;
-       struct pt_regs *pt;
-
-       ti = task->thread_info;
-       pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
-
-       dump_elf_thread(dest, pt, ti);
-
+       dump_elf_thread(dest, task_pt_regs(task), task_thread_info(task));
        return 1;
 }
 
 int
 dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
 {
-       struct thread_info *ti;
-       struct pt_regs *pt;
-       struct switch_stack *sw;
-
-       ti = task->thread_info;
-       pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
-       sw = (struct switch_stack *)pt - 1;
-
+       struct switch_stack *sw = (struct switch_stack *)task_pt_regs(task) - 1;
        memcpy(dest, sw->fp, 32 * 8);
-
        return 1;
 }
 
@@ -492,8 +477,8 @@ out:
 unsigned long
 thread_saved_pc(task_t *t)
 {
-       unsigned long base = (unsigned long)t->thread_info;
-       unsigned long fp, sp = t->thread_info->pcb.ksp;
+       unsigned long base = (unsigned long)task_stack_page(t);
+       unsigned long fp, sp = task_thread_info(t)->pcb.ksp;
 
        if (sp > base && sp+6*8 < base + 16*1024) {
                fp = ((unsigned long*)sp)[6];
@@ -523,7 +508,7 @@ get_wchan(struct task_struct *p)
 
        pc = thread_saved_pc(p);
        if (in_sched_functions(pc)) {
-               schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
+               schedule_frame = ((unsigned long *)task_thread_info(p)->pcb.ksp)[6];
                return ((unsigned long *)schedule_frame)[12];
        }
        return pc;
index 9969d212e94d9d4b16da2f66921f8b206822612e..0cd060598f9aa25aee8d86a6629cc4d8514542ed 100644 (file)
@@ -72,6 +72,13 @@ enum {
        REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
 };
 
+#define PT_REG(reg) \
+  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
+
+#define SW_REG(reg) \
+ (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
+  + offsetof(struct switch_stack, reg))
+
 static int regoff[] = {
        PT_REG(    r0), PT_REG(    r1), PT_REG(    r2), PT_REG(   r3),
        PT_REG(    r4), PT_REG(    r5), PT_REG(    r6), PT_REG(   r7),
@@ -103,14 +110,14 @@ get_reg_addr(struct task_struct * task, unsigned long regno)
        unsigned long *addr;
 
        if (regno == 30) {
-               addr = &task->thread_info->pcb.usp;
+               addr = &task_thread_info(task)->pcb.usp;
        } else if (regno == 65) {
-               addr = &task->thread_info->pcb.unique;
+               addr = &task_thread_info(task)->pcb.unique;
        } else if (regno == 31 || regno > 65) {
                zero = 0;
                addr = &zero;
        } else {
-               addr = (void *)task->thread_info + regoff[regno];
+               addr = task_stack_page(task) + regoff[regno];
        }
        return addr;
 }
@@ -125,7 +132,7 @@ get_reg(struct task_struct * task, unsigned long regno)
        if (regno == 63) {
                unsigned long fpcr = *get_reg_addr(task, regno);
                unsigned long swcr
-                 = task->thread_info->ieee_state & IEEE_SW_MASK;
+                 = task_thread_info(task)->ieee_state & IEEE_SW_MASK;
                swcr = swcr_update_status(swcr, fpcr);
                return fpcr | swcr;
        }
@@ -139,8 +146,8 @@ static int
 put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
 {
        if (regno == 63) {
-               task->thread_info->ieee_state
-                 = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
+               task_thread_info(task)->ieee_state
+                 = ((task_thread_info(task)->ieee_state & ~IEEE_SW_MASK)
                     | (data & IEEE_SW_MASK));
                data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
        }
@@ -188,35 +195,35 @@ ptrace_set_bpt(struct task_struct * child)
                 * branch (emulation can be tricky for fp branches).
                 */
                displ = ((s32)(insn << 11)) >> 9;
-               child->thread_info->bpt_addr[nsaved++] = pc + 4;
+               task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
                if (displ)              /* guard against unoptimized code */
-                       child->thread_info->bpt_addr[nsaved++]
+                       task_thread_info(child)->bpt_addr[nsaved++]
                          = pc + 4 + displ;
                DBG(DBG_BPT, ("execing branch\n"));
        } else if (op_code == 0x1a) {
                reg_b = (insn >> 16) & 0x1f;
-               child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
+               task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b);
                DBG(DBG_BPT, ("execing jump\n"));
        } else {
-               child->thread_info->bpt_addr[nsaved++] = pc + 4;
+               task_thread_info(child)->bpt_addr[nsaved++] = pc + 4;
                DBG(DBG_BPT, ("execing normal insn\n"));
        }
 
        /* install breakpoints: */
        for (i = 0; i < nsaved; ++i) {
-               res = read_int(child, child->thread_info->bpt_addr[i],
+               res = read_int(child, task_thread_info(child)->bpt_addr[i],
                               (int *) &insn);
                if (res < 0)
                        return res;
-               child->thread_info->bpt_insn[i] = insn;
+               task_thread_info(child)->bpt_insn[i] = insn;
                DBG(DBG_BPT, ("    -> next_pc=%lx\n",
-                             child->thread_info->bpt_addr[i]));
-               res = write_int(child, child->thread_info->bpt_addr[i],
+                             task_thread_info(child)->bpt_addr[i]));
+               res = write_int(child, task_thread_info(child)->bpt_addr[i],
                                BREAKINST);
                if (res < 0)
                        return res;
        }
-       child->thread_info->bpt_nsaved = nsaved;
+       task_thread_info(child)->bpt_nsaved = nsaved;
        return 0;
 }
 
@@ -227,9 +234,9 @@ ptrace_set_bpt(struct task_struct * child)
 int
 ptrace_cancel_bpt(struct task_struct * child)
 {
-       int i, nsaved = child->thread_info->bpt_nsaved;
+       int i, nsaved = task_thread_info(child)->bpt_nsaved;
 
-       child->thread_info->bpt_nsaved = 0;
+       task_thread_info(child)->bpt_nsaved = 0;
 
        if (nsaved > 2) {
                printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
@@ -237,8 +244,8 @@ ptrace_cancel_bpt(struct task_struct * child)
        }
 
        for (i = 0; i < nsaved; ++i) {
-               write_int(child, child->thread_info->bpt_addr[i],
-                         child->thread_info->bpt_insn[i]);
+               write_int(child, task_thread_info(child)->bpt_addr[i],
+                         task_thread_info(child)->bpt_insn[i]);
        }
        return (nsaved != 0);
 }
@@ -355,7 +362,7 @@ do_sys_ptrace(long request, long pid, long addr, long data,
                if (!valid_signal(data))
                        break;
                /* Mark single stepping.  */
-               child->thread_info->bpt_nsaved = -1;
+               task_thread_info(child)->bpt_nsaved = -1;
                clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
                child->exit_code = data;
                wake_up_process(child);
index da0be34657915beec4cab3beeb15af6d33e0956c..4b873527ce1c56d354e86ddabb5825ba3d345f0a 100644 (file)
@@ -302,7 +302,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
                 + hwrpb->processor_offset
                 + cpuid * hwrpb->processor_size);
        hwpcb = (struct pcb_struct *) cpu->hwpcb;
-       ipcb = &idle->thread_info->pcb;
+       ipcb = &task_thread_info(idle)->pcb;
 
        /* Initialize the CPU's HWPCB to something just good enough for
           us to get started.  Immediately after starting, we'll swpctx
index 50b9afa8ae6d09cbb9655cb5654d04dc7ec162e9..3cfd82a05b20b1fb2cd2efbe6fd3c7520c6573dd 100644 (file)
@@ -729,6 +729,8 @@ source "drivers/char/Kconfig"
 
 source "drivers/i2c/Kconfig"
 
+source "drivers/spi/Kconfig"
+
 source "drivers/hwmon/Kconfig"
 
 #source "drivers/l3/Kconfig"
index 6abafb6f1844d9b82a3f74cbfcb3b77486f2e5b8..aaa47400eb9c19ce57a60731f81ca0b14addb1b3 100644 (file)
@@ -84,7 +84,7 @@
                kputc   #'\n'
                kphex   r5, 8           /* decompressed kernel start */
                kputc   #'-'
-               kphex   r8, 8           /* decompressed kernel end  */
+               kphex   r9, 8           /* decompressed kernel end  */
                kputc   #'>'
                kphex   r4, 8           /* kernel execution address */
                kputc   #'\n'
@@ -116,7 +116,7 @@ start:
                .word   start                   @ absolute load/run zImage address
                .word   _edata                  @ zImage end address
 1:             mov     r7, r1                  @ save architecture ID
-               mov     r8, #0                  @ save r0
+               mov     r8, r2                  @ save atags pointer
 
 #ifndef __ARM_ARCH_2__
                /*
@@ -144,7 +144,7 @@ not_angel:
 
                /*
                 * some architecture specific code can be inserted
-                * by the linker here, but it should preserve r7 and r8.
+                * by the linker here, but it should preserve r7, r8, and r9.
                 */
 
                .text
@@ -249,16 +249,17 @@ not_relocated:    mov     r0, #0
  * r5     = decompressed kernel start
  * r6     = processor ID
  * r7     = architecture ID
- * r8-r14 = unused
+ * r8     = atags pointer
+ * r9-r14 = corrupted
  */
                add     r1, r5, r0              @ end of decompressed kernel
                adr     r2, reloc_start
                ldr     r3, LC1
                add     r3, r2, r3
-1:             ldmia   r2!, {r8 - r13}         @ copy relocation code
-               stmia   r1!, {r8 - r13}
-               ldmia   r2!, {r8 - r13}
-               stmia   r1!, {r8 - r13}
+1:             ldmia   r2!, {r9 - r14}         @ copy relocation code
+               stmia   r1!, {r9 - r14}
+               ldmia   r2!, {r9 - r14}
+               stmia   r1!, {r9 - r14}
                cmp     r2, r3
                blo     1b
 
@@ -308,11 +309,12 @@ params:           ldr     r0, =params_phys
  *  r4 = kernel execution address
  *  r6 = processor ID
  *  r7 = architecture number
- *  r8 = run-time address of "start"
+ *  r8 = atags pointer
+ *  r9 = run-time address of "start"  (???)
  * On exit,
- *  r1, r2, r3, r8, r9, r12 corrupted
+ *  r1, r2, r3, r9, r10, r12 corrupted
  * This routine must preserve:
- *  r4, r5, r6, r7
+ *  r4, r5, r6, r7, r8
  */
                .align  5
 cache_on:      mov     r3, #8                  @ cache_on function
@@ -326,15 +328,15 @@ __setup_mmu:      sub     r3, r4, #16384          @ Page directory size
  * bits for the RAM area only.
  */
                mov     r0, r3
-               mov     r8, r0, lsr #18
-               mov     r8, r8, lsl #18         @ start of RAM
-               add     r9, r8, #0x10000000     @ a reasonable RAM size
+               mov     r9, r0, lsr #18
+               mov     r9, r9, lsl #18         @ start of RAM
+               add     r10, r9, #0x10000000    @ a reasonable RAM size
                mov     r1, #0x12
                orr     r1, r1, #3 << 10
                add     r2, r3, #16384
 1:             cmp     r1, r8                  @ if virt > start of RAM
                orrhs   r1, r1, #0x0c           @ set cacheable, bufferable
-               cmp     r1, r9                  @ if virt > end of RAM
+               cmp     r1, r10                 @ if virt > end of RAM
                bichs   r1, r1, #0x0c           @ clear cacheable, bufferable
                str     r1, [r0], #4            @ 1:1 mapping
                add     r1, r1, #1048576
@@ -403,26 +405,28 @@ __common_cache_on:
  * r5     = decompressed kernel start
  * r6     = processor ID
  * r7     = architecture ID
- * r8-r14 = unused
+ * r8     = atags pointer
+ * r9-r14 = corrupted
  */
                .align  5
-reloc_start:   add     r8, r5, r0
+reloc_start:   add     r9, r5, r0
                debug_reloc_start
                mov     r1, r4
 1:
                .rept   4
-               ldmia   r5!, {r0, r2, r3, r9 - r13}     @ relocate kernel
-               stmia   r1!, {r0, r2, r3, r9 - r13}
+               ldmia   r5!, {r0, r2, r3, r10 - r14}    @ relocate kernel
+               stmia   r1!, {r0, r2, r3, r10 - r14}
                .endr
 
-               cmp     r5, r8
+               cmp     r5, r9
                blo     1b
                debug_reloc_end
 
 call_kernel:   bl      cache_clean_flush
                bl      cache_off
-               mov     r0, #0
+               mov     r0, #0                  @ must be zero
                mov     r1, r7                  @ restore architecture number
+               mov     r2, r8                  @ restore atags pointer
                mov     pc, r4                  @ call kernel
 
 /*
index 1b7eaab02b9ec66263b0c66cd15e4522ab72f06f..159ad7ed7a40142c16093588fb0988963c675b29 100644 (file)
@@ -1103,14 +1103,14 @@ static int locomo_bus_remove(struct device *dev)
 struct bus_type locomo_bus_type = {
        .name           = "locomo-bus",
        .match          = locomo_match,
+       .probe          = locomo_bus_probe,
+       .remove         = locomo_bus_remove,
        .suspend        = locomo_bus_suspend,
        .resume         = locomo_bus_resume,
 };
 
 int locomo_driver_register(struct locomo_driver *driver)
 {
-       driver->drv.probe = locomo_bus_probe;
-       driver->drv.remove = locomo_bus_remove;
        driver->drv.bus = &locomo_bus_type;
        return driver_register(&driver->drv);
 }
index ffb82d5bedefaff24f5deb53e6215b16f333e65a..48b1e19b131f938faeed9b53bc4bb9e19d1438bc 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/spinlock.h>
 #include <linux/capability.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 
 #include <asm/rtc.h>
 #include <asm/semaphore.h>
@@ -35,7 +36,7 @@ static unsigned long rtc_irq_data;
 /*
  * rtc_sem protects rtc_inuse and rtc_ops
  */
-static DECLARE_MUTEX(rtc_sem);
+static DEFINE_MUTEX(rtc_mutex);
 static unsigned long rtc_inuse;
 static struct rtc_ops *rtc_ops;
 
@@ -356,7 +357,7 @@ static int rtc_open(struct inode *inode, struct file *file)
 {
        int ret;
 
-       down(&rtc_sem);
+       mutex_lock(&rtc_mutex);
 
        if (rtc_inuse) {
                ret = -EBUSY;
@@ -374,7 +375,7 @@ static int rtc_open(struct inode *inode, struct file *file)
                        rtc_inuse = 1;
                }
        }
-       up(&rtc_sem);
+       mutex_unlock(&rtc_mutex);
 
        return ret;
 }
@@ -480,7 +481,7 @@ int register_rtc(struct rtc_ops *ops)
 {
        int ret = -EBUSY;
 
-       down(&rtc_sem);
+       mutex_lock(&rtc_mutex);
        if (rtc_ops == NULL) {
                rtc_ops = ops;
 
@@ -489,7 +490,7 @@ int register_rtc(struct rtc_ops *ops)
                        create_proc_read_entry("driver/rtc", 0, NULL,
                                               rtc_read_proc, ops);
        }
-       up(&rtc_sem);
+       mutex_unlock(&rtc_mutex);
 
        return ret;
 }
@@ -497,12 +498,12 @@ EXPORT_SYMBOL(register_rtc);
 
 void unregister_rtc(struct rtc_ops *rtc)
 {
-       down(&rtc_sem);
+       mutex_lock(&rtc_mutex);
        if (rtc == rtc_ops) {
                remove_proc_entry("driver/rtc", NULL);
                misc_deregister(&rtc_miscdev);
                rtc_ops = NULL;
        }
-       up(&rtc_sem);
+       mutex_unlock(&rtc_mutex);
 }
 EXPORT_SYMBOL(unregister_rtc);
index d0d6e6d2d649e9887813fc6f3e08debb292e2b81..1475089f9b4265f7b2c9dc86696dea0a4970c027 100644 (file)
@@ -1247,14 +1247,14 @@ static int sa1111_bus_remove(struct device *dev)
 struct bus_type sa1111_bus_type = {
        .name           = "sa1111-rab",
        .match          = sa1111_match,
+       .probe          = sa1111_bus_probe,
+       .remove         = sa1111_bus_remove,
        .suspend        = sa1111_bus_suspend,
        .resume         = sa1111_bus_resume,
 };
 
 int sa1111_driver_register(struct sa1111_driver *driver)
 {
-       driver->drv.probe = sa1111_bus_probe;
-       driver->drv.remove = sa1111_bus_remove;
        driver->drv.bus = &sa1111_bus_type;
        return driver_register(&driver->drv);
 }
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
new file mode 100644 (file)
index 0000000..5cdd13a
--- /dev/null
@@ -0,0 +1,1009 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan  9 20:54:30 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+CONFIG_ARCH_AT91RM9200DK=y
+# CONFIG_MACH_AT91RM9200EK is not set
+# CONFIG_MACH_CSB337 is not set
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+# CONFIG_LEDS_CPU is not set
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x10000000
+CONFIG_MTD_PHYSMAP_LEN=0x200000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_AT91_DATAFLASH=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+CONFIG_AT91_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
new file mode 100644 (file)
index 0000000..20838cc
--- /dev/null
@@ -0,0 +1,998 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan  9 20:57:31 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+CONFIG_MACH_AT91RM9200EK=y
+# CONFIG_MACH_CSB337 is not set
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+# CONFIG_IP_PNP_DHCP is not set
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+# CONFIG_MTD_CFI_INTELEXT is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_AMDSTD_RETRY=0
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PHYSMAP_START=0x10000000
+CONFIG_MTD_PHYSMAP_LEN=0x800000
+CONFIG_MTD_PHYSMAP_BANKWIDTH=2
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_IMPA7 is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+CONFIG_MTD_AT91_DATAFLASH=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+# CONFIG_BLK_DEV_LOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+# CONFIG_RTC is not set
+CONFIG_AT91_RTC=y
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+CONFIG_FB=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+CONFIG_FB_S1D13XXX=y
+# CONFIG_FB_VIRTUAL is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+
+#
+# Logo configuration
+#
+# CONFIG_LOGO is not set
+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+# CONFIG_NFS_FS is not set
+# CONFIG_NFSD is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+# CONFIG_CRYPTO is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig
new file mode 100644 (file)
index 0000000..885a318
--- /dev/null
@@ -0,0 +1,1136 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan  9 21:51:31 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+# CONFIG_MACH_AT91RM9200EK is not set
+CONFIG_MACH_CSB337=y
+# CONFIG_MACH_CSB637 is not set
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+CONFIG_MTD_CSB337=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_AT91_DATAFLASH is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_AT91_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=y
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET_NET2280 is not set
+# CONFIG_USB_GADGET_PXA2XX is not set
+# CONFIG_USB_GADGET_GOKU is not set
+# CONFIG_USB_GADGET_LH7A40X is not set
+# CONFIG_USB_GADGET_OMAP is not set
+CONFIG_USB_GADGET_AT91=y
+CONFIG_USB_AT91=y
+# CONFIG_USB_GADGET_DUMMY_HCD is not set
+# CONFIG_USB_GADGET_DUALSPEED is not set
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FILE_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+
+#
+# MMC/SD Card support
+#
+CONFIG_MMC=y
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_BLOCK=y
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_AT91RM9200=y
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig
new file mode 100644 (file)
index 0000000..95a96a5
--- /dev/null
@@ -0,0 +1,1116 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15
+# Mon Jan  9 21:52:00 2006
+#
+CONFIG_ARM=y
+CONFIG_MMU=y
+CONFIG_UID16=y
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+CONFIG_HOTPLUG=y
+CONFIG_KOBJECT_UEVENT=y
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_EMBEDDED is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_ALL is not set
+# CONFIG_KALLSYMS_EXTRA_PASS is not set
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+CONFIG_KMOD=y
+
+#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
+# System Type
+#
+# CONFIG_ARCH_CLPS7500 is not set
+# CONFIG_ARCH_CLPS711X is not set
+# CONFIG_ARCH_CO285 is not set
+# CONFIG_ARCH_EBSA110 is not set
+# CONFIG_ARCH_CAMELOT is not set
+# CONFIG_ARCH_FOOTBRIDGE is not set
+# CONFIG_ARCH_INTEGRATOR is not set
+# CONFIG_ARCH_IOP3XX is not set
+# CONFIG_ARCH_IXP4XX is not set
+# CONFIG_ARCH_IXP2000 is not set
+# CONFIG_ARCH_L7200 is not set
+# CONFIG_ARCH_PXA is not set
+# CONFIG_ARCH_RPC is not set
+# CONFIG_ARCH_SA1100 is not set
+# CONFIG_ARCH_S3C2410 is not set
+# CONFIG_ARCH_SHARK is not set
+# CONFIG_ARCH_LH7A40X is not set
+# CONFIG_ARCH_OMAP is not set
+# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
+# CONFIG_ARCH_IMX is not set
+# CONFIG_ARCH_H720X is not set
+# CONFIG_ARCH_AAEC2000 is not set
+CONFIG_ARCH_AT91RM9200=y
+
+#
+# AT91RM9200 Implementations
+#
+
+#
+# AT91RM9200 Board Type
+#
+# CONFIG_ARCH_AT91RM9200DK is not set
+# CONFIG_MACH_AT91RM9200EK is not set
+# CONFIG_MACH_CSB337 is not set
+CONFIG_MACH_CSB637=y
+# CONFIG_MACH_CARMEVA is not set
+# CONFIG_MACH_KB9200 is not set
+# CONFIG_MACH_ATEB9200 is not set
+
+#
+# AT91RM9200 Feature Selections
+#
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+
+#
+# Processor Type
+#
+CONFIG_CPU_32=y
+CONFIG_CPU_ARM920T=y
+CONFIG_CPU_32v4=y
+CONFIG_CPU_ABRT_EV4T=y
+CONFIG_CPU_CACHE_V4WT=y
+CONFIG_CPU_CACHE_VIVT=y
+CONFIG_CPU_COPY_V4WB=y
+CONFIG_CPU_TLB_V4WBI=y
+
+#
+# Processor Features
+#
+# CONFIG_ARM_THUMB is not set
+# CONFIG_CPU_ICACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_DISABLE is not set
+# CONFIG_CPU_DCACHE_WRITETHROUGH is not set
+
+#
+# Bus support
+#
+CONFIG_ISA_DMA_API=y
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA_DEBUG is not set
+CONFIG_PCMCIA=y
+CONFIG_PCMCIA_LOAD_CIS=y
+CONFIG_PCMCIA_IOCTL=y
+
+#
+# PC-card bridges
+#
+CONFIG_AT91_CF=y
+
+#
+# Kernel Features
+#
+# CONFIG_PREEMPT is not set
+# CONFIG_NO_IDLE_HZ is not set
+# CONFIG_ARCH_DISCONTIGMEM_ENABLE is not set
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
+CONFIG_LEDS=y
+CONFIG_LEDS_TIMER=y
+CONFIG_LEDS_CPU=y
+CONFIG_ALIGNMENT_TRAP=y
+
+#
+# Boot options
+#
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
+# CONFIG_XIP_KERNEL is not set
+
+#
+# Floating point emulation
+#
+
+#
+# At least one emulation must be selected
+#
+CONFIG_FPE_NWFPE=y
+# CONFIG_FPE_NWFPE_XP is not set
+# CONFIG_FPE_FASTFPE is not set
+
+#
+# Userspace binary formats
+#
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+# CONFIG_ARTHUR is not set
+
+#
+# Power management options
+#
+# CONFIG_PM is not set
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+# CONFIG_IP_MULTICAST is not set
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_ARPD is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+# CONFIG_DEBUG_DRIVER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+CONFIG_MTD=y
+# CONFIG_MTD_DEBUG is not set
+# CONFIG_MTD_CONCAT is not set
+CONFIG_MTD_PARTITIONS=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+# CONFIG_MTD_AFS_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+# CONFIG_MTD_CFI_AMDSTD is not set
+# CONFIG_MTD_CFI_STAA is not set
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# CONFIG_MTD_XIP is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PHYSMAP is not set
+# CONFIG_MTD_ARM_INTEGRATOR is not set
+# CONFIG_MTD_PLATRAM is not set
+CONFIG_MTD_CSB637=y
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLKMTD is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOC2000 is not set
+# CONFIG_MTD_DOC2001 is not set
+# CONFIG_MTD_DOC2001PLUS is not set
+# CONFIG_MTD_AT91_DATAFLASH is not set
+
+#
+# NAND Flash Device Drivers
+#
+# CONFIG_MTD_NAND is not set
+
+#
+# OneNAND Flash Device Drivers
+#
+# CONFIG_MTD_ONENAND is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_UB is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+# CONFIG_BLK_DEV_SD is not set
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_CHR_DEV_OSST is not set
+# CONFIG_BLK_DEV_SR is not set
+# CONFIG_CHR_DEV_SG is not set
+# CONFIG_CHR_DEV_SCH is not set
+
+#
+# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
+#
+# CONFIG_SCSI_MULTI_LUN is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+
+#
+# SCSI Transport Attributes
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+
+#
+# SCSI low-level drivers
+#
+# CONFIG_ISCSI_TCP is not set
+# CONFIG_SCSI_SATA is not set
+# CONFIG_SCSI_DEBUG is not set
+
+#
+# PCMCIA SCSI adapter support
+#
+# CONFIG_PCMCIA_AHA152X is not set
+# CONFIG_PCMCIA_FDOMAIN is not set
+# CONFIG_PCMCIA_NINJA_SCSI is not set
+# CONFIG_PCMCIA_QLOGIC is not set
+# CONFIG_PCMCIA_SYM53C500 is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+
+#
+# I2O device support
+#
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_SMC91X is not set
+# CONFIG_DM9000 is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+
+#
+# Ethernet (10000 Mbit)
+#
+
+#
+# Token Ring devices
+#
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# PCMCIA network device support
+#
+# CONFIG_NET_PCMCIA is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_VT=y
+CONFIG_VT_CONSOLE=y
+CONFIG_HW_CONSOLE=y
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_AT91=y
+CONFIG_SERIAL_AT91_CONSOLE=y
+# CONFIG_SERIAL_AT91_TTYAT is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_AT91_WATCHDOG=y
+
+#
+# USB-based Watchdog Cards
+#
+# CONFIG_USBPCWATCHDOG is not set
+# CONFIG_NVRAM is not set
+CONFIG_RTC=y
+# CONFIG_AT91_RTC is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+
+#
+# PCMCIA character devices
+#
+# CONFIG_SYNCLINK_CS is not set
+# CONFIG_CARDMAN_4000 is not set
+# CONFIG_CARDMAN_4040 is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+CONFIG_AT91_SPI=y
+CONFIG_AT91_SPIDEV=y
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+CONFIG_I2C_AT91=y
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_BANDWIDTH is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_ISP116X_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+# CONFIG_USB_OHCI_BIG_ENDIAN is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+# CONFIG_USB_SL811_HCD is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# may also be needed; see USB_STORAGE Help for more information
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_DPCM is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+
+#
+# USB Input Devices
+#
+# CONFIG_USB_HID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# CONFIG_USB_AIPTEK is not set
+# CONFIG_USB_WACOM is not set
+# CONFIG_USB_ACECAD is not set
+# CONFIG_USB_KBTAB is not set
+# CONFIG_USB_POWERMATE is not set
+# CONFIG_USB_MTOUCH is not set
+# CONFIG_USB_ITMTOUCH is not set
+# CONFIG_USB_EGALAX is not set
+# CONFIG_USB_YEALINK is not set
+# CONFIG_USB_XPAD is not set
+# CONFIG_USB_ATI_REMOTE is not set
+# CONFIG_USB_KEYSPAN_REMOTE is not set
+# CONFIG_USB_APPLETOUCH is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+
+#
+# USB Multimedia devices
+#
+# CONFIG_USB_DABUSB is not set
+
+#
+# Video4Linux support is needed for USB Multimedia device support
+#
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_USBNET is not set
+CONFIG_USB_MON=y
+
+#
+# USB port drivers
+#
+
+#
+# USB Serial Converter support
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_AIRPRIME is not set
+# CONFIG_USB_SERIAL_ANYDATA is not set
+# CONFIG_USB_SERIAL_BELKIN is not set
+# CONFIG_USB_SERIAL_WHITEHEAT is not set
+# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
+# CONFIG_USB_SERIAL_CP2101 is not set
+# CONFIG_USB_SERIAL_CYPRESS_M8 is not set
+# CONFIG_USB_SERIAL_EMPEG is not set
+CONFIG_USB_SERIAL_FTDI_SIO=y
+# CONFIG_USB_SERIAL_VISOR is not set
+# CONFIG_USB_SERIAL_IPAQ is not set
+# CONFIG_USB_SERIAL_IR is not set
+# CONFIG_USB_SERIAL_EDGEPORT is not set
+# CONFIG_USB_SERIAL_EDGEPORT_TI is not set
+# CONFIG_USB_SERIAL_GARMIN is not set
+# CONFIG_USB_SERIAL_IPW is not set
+# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+# CONFIG_USB_SERIAL_KLSI is not set
+# CONFIG_USB_SERIAL_KOBIL_SCT is not set
+CONFIG_USB_SERIAL_MCT_U232=y
+# CONFIG_USB_SERIAL_PL2303 is not set
+# CONFIG_USB_SERIAL_HP4X is not set
+# CONFIG_USB_SERIAL_SAFE is not set
+# CONFIG_USB_SERIAL_TI is not set
+# CONFIG_USB_SERIAL_CYBERJACK is not set
+# CONFIG_USB_SERIAL_XIRCOM is not set
+# CONFIG_USB_SERIAL_OPTION is not set
+# CONFIG_USB_SERIAL_OMNINET is not set
+CONFIG_USB_EZUSB=y
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_AUERSWALD is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_PHIDGETKIT is not set
+# CONFIG_USB_PHIDGETSERVO is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TEST is not set
+
+#
+# USB DSL modem support
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+# CONFIG_EXT3_FS is not set
+# CONFIG_JBD is not set
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_MSDOS_PARTITION=y
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Profiling support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_MAGIC_SYSRQ is not set
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_DETECT_SOFTLOCKUP=y
+# CONFIG_SCHEDSTATS is not set
+# CONFIG_DEBUG_SLAB is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
+# CONFIG_DEBUG_KOBJECT is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_DEBUG_VM is not set
+CONFIG_FRAME_POINTER=y
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_DEBUG_WAITQ is not set
+# CONFIG_DEBUG_ERRORS is not set
+CONFIG_DEBUG_LL=y
+# CONFIG_DEBUG_ICEDCC is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+CONFIG_ZLIB_INFLATE=y
index dceb826bd216ece20e0e8399eaf785fc7d5cbde3..74ea29c3205eb6b476a2c725858bf70895f9288f 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/proc_fs.h>
 #include <linux/device.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 
 #include <asm/dma.h>
 #include <asm/ecard.h>
@@ -206,7 +207,7 @@ static void ecard_task_readbytes(struct ecard_request *req)
 
 static DECLARE_WAIT_QUEUE_HEAD(ecard_wait);
 static struct ecard_request *ecard_req;
-static DECLARE_MUTEX(ecard_sem);
+static DEFINE_MUTEX(ecard_mutex);
 
 /*
  * Set up the expansion card daemon's page tables.
@@ -299,7 +300,7 @@ static void ecard_call(struct ecard_request *req)
 
        req->complete = &completion;
 
-       down(&ecard_sem);
+       mutex_lock(&ecard_mutex);
        ecard_req = req;
        wake_up(&ecard_wait);
 
@@ -307,7 +308,7 @@ static void ecard_call(struct ecard_request *req)
         * Now wait for kecardd to run.
         */
        wait_for_completion(&completion);
-       up(&ecard_sem);
+       mutex_unlock(&ecard_mutex);
 }
 
 /* ======================= Mid-level card control ===================== */
@@ -1146,9 +1147,11 @@ static void ecard_drv_shutdown(struct device *dev)
        struct ecard_driver *drv = ECARD_DRV(dev->driver);
        struct ecard_request req;
 
-       if (drv->shutdown)
-               drv->shutdown(ec);
-       ecard_release(ec);
+       if (dev->driver) {
+               if (drv->shutdown)
+                       drv->shutdown(ec);
+               ecard_release(ec);
+       }
 
        /*
         * If this card has a loader, call the reset handler.
@@ -1163,9 +1166,6 @@ static void ecard_drv_shutdown(struct device *dev)
 int ecard_register_driver(struct ecard_driver *drv)
 {
        drv->drv.bus = &ecard_bus_type;
-       drv->drv.probe = ecard_drv_probe;
-       drv->drv.remove = ecard_drv_remove;
-       drv->drv.shutdown = ecard_drv_shutdown;
 
        return driver_register(&drv->drv);
 }
@@ -1194,6 +1194,9 @@ struct bus_type ecard_bus_type = {
        .name           = "ecard",
        .dev_attrs      = ecard_dev_attrs,
        .match          = ecard_match,
+       .probe          = ecard_drv_probe,
+       .remove         = ecard_drv_remove,
+       .shutdown       = ecard_drv_shutdown,
 };
 
 static int ecard_bus_init(void)
index 9299dfc25698220beee832edbac717ac948c2ae8..1ec3f7faa259eedba5ccf3973ca827fb869cdfcd 100644 (file)
@@ -101,7 +101,7 @@ void __attribute__((naked)) set_fiq_regs(struct pt_regs *regs)
        ldmia   %1, {r8 - r14}\n\
        msr     cpsr_c, %0      @ return to SVC mode\n\
        mov     r0, r0\n\
-       ldmea   fp, {fp, sp, pc}"
+       ldmfd   sp, {fp, sp, pc}"
        : "=&r" (tmp)
        : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
 }
@@ -119,7 +119,7 @@ void __attribute__((naked)) get_fiq_regs(struct pt_regs *regs)
        stmia   %1, {r8 - r14}\n\
        msr     cpsr_c, %0      @ return to SVC mode\n\
        mov     r0, r0\n\
-       ldmea   fp, {fp, sp, pc}"
+       ldmfd   sp, {fp, sp, pc}"
        : "=&r" (tmp)
        : "r" (&regs->ARM_r8), "I" (PSR_I_BIT | PSR_F_BIT | FIQ_MODE));
 }
index 54a21bdcba5cde6fe94bc1d56a3e083b5ac68af6..4b4e4cf79c8070e0cfd055bdf3a72a93efd61027 100644 (file)
@@ -342,10 +342,10 @@ void flush_thread(void)
 void release_thread(struct task_struct *dead_task)
 {
 #if defined(CONFIG_VFP)
-       vfp_release_thread(&dead_task->thread_info->vfpstate);
+       vfp_release_thread(&task_thread_info(dead_task)->vfpstate);
 #endif
 #if defined(CONFIG_IWMMXT)
-       iwmmxt_task_release(dead_task->thread_info);
+       iwmmxt_task_release(task_thread_info(dead_task));
 #endif
 }
 
@@ -355,10 +355,9 @@ int
 copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
            unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
 {
-       struct thread_info *thread = p->thread_info;
-       struct pt_regs *childregs;
+       struct thread_info *thread = task_thread_info(p);
+       struct pt_regs *childregs = task_pt_regs(p);
 
-       childregs = (void *)thread + THREAD_START_SP - sizeof(*regs);
        *childregs = *regs;
        childregs->ARM_r0 = 0;
        childregs->ARM_sp = stack_start;
@@ -460,8 +459,8 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
-       stack_start = (unsigned long)(p->thread_info + 1);
-       stack_end = ((unsigned long)p->thread_info) + THREAD_SIZE;
+       stack_start = (unsigned long)end_of_stack(p);
+       stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
        fp = thread_saved_fp(p);
        do {
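
This file and the ptrace, smp and traps conversions below replace open-coded thread_info arithmetic with the task_thread_info(), task_stack_page(), task_pt_regs() and end_of_stack() accessors. Roughly speaking (a sketch of their intent, not the exact <linux/sched.h> and ARM header definitions of this era), they behave like:

/* Approximate behaviour only -- see linux/sched.h and asm-arm/processor.h
 * for the authoritative definitions. */
#define task_thread_info(tsk)	((tsk)->thread_info)
#define task_stack_page(tsk)	((void *)(tsk)->thread_info)
#define end_of_stack(tsk)	((unsigned long *)(task_thread_info(tsk) + 1))

/* ARM keeps the saved user pt_regs just below THREAD_START_SP, i.e. 8 bytes
 * below the top of the THREAD_SIZE kernel stack. */
#define task_pt_regs(tsk) \
	((struct pt_regs *)(task_stack_page(tsk) + THREAD_START_SP) - 1)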
index 2b84f78d7b0f78585698e54bf137f3cde7634205..e591f72bcdeb0abf8d6714e94219de4cdc2d7720 100644 (file)
 #define BREAKINST_THUMB        0xde01
 #endif
 
-/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
-       return (struct pt_regs *)
-               ((unsigned long)task->thread_info + THREAD_SIZE -
-                                8 - sizeof(struct pt_regs));
-}
-
 /*
  * this routine will get a word off of the processes privileged stack.
  * the offset is how far from the base addr as stored in the THREAD.
@@ -79,7 +62,7 @@ get_user_regs(struct task_struct *task)
  */
 static inline long get_user_reg(struct task_struct *task, int offset)
 {
-       return get_user_regs(task)->uregs[offset];
+       return task_pt_regs(task)->uregs[offset];
 }
 
 /*
@@ -91,7 +74,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
 static inline int
 put_user_reg(struct task_struct *task, int offset, long data)
 {
-       struct pt_regs newregs, *regs = get_user_regs(task);
+       struct pt_regs newregs, *regs = task_pt_regs(task);
        int ret = -EINVAL;
 
        newregs = *regs;
@@ -421,7 +404,7 @@ void ptrace_set_bpt(struct task_struct *child)
        u32 insn;
        int res;
 
-       regs = get_user_regs(child);
+       regs = task_pt_regs(child);
        pc = instruction_pointer(regs);
 
        if (thumb_mode(regs)) {
@@ -572,7 +555,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  */
 static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
 {
-       struct pt_regs *regs = get_user_regs(tsk);
+       struct pt_regs *regs = task_pt_regs(tsk);
 
        return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
@@ -587,7 +570,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
 
        ret = -EFAULT;
        if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
-               struct pt_regs *regs = get_user_regs(tsk);
+               struct pt_regs *regs = task_pt_regs(tsk);
 
                ret = -EINVAL;
                if (valid_user_regs(&newregs)) {
@@ -604,7 +587,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
  */
 static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
 {
-       return copy_to_user(ufp, &tsk->thread_info->fpstate,
+       return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
                            sizeof(struct user_fp)) ? -EFAULT : 0;
 }
 
@@ -613,7 +596,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void __user *ufp)
  */
 static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
 {
-       struct thread_info *thread = tsk->thread_info;
+       struct thread_info *thread = task_thread_info(tsk);
        thread->used_cp[1] = thread->used_cp[2] = 1;
        return copy_from_user(&thread->fpstate, ufp,
                              sizeof(struct user_fp)) ? -EFAULT : 0;
@@ -626,7 +609,7 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
  */
 static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
 {
-       struct thread_info *thread = tsk->thread_info;
+       struct thread_info *thread = task_thread_info(tsk);
        void *ptr = &thread->fpstate;
 
        if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -643,7 +626,7 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
  */
 static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
 {
-       struct thread_info *thread = tsk->thread_info;
+       struct thread_info *thread = task_thread_info(tsk);
        void *ptr = &thread->fpstate;
 
        if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
@@ -779,7 +762,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 #endif
 
                case PTRACE_GET_THREAD_AREA:
-                       ret = put_user(child->thread_info->tp_value,
+                       ret = put_user(task_thread_info(child)->tp_value,
                                       (unsigned long __user *) data);
                        break;
 
index 2cab741ad0f8e2d31cbb9fec2b3fefc8b40a429c..c45d10d07bde95f73034f47fd26877c41548ba08 100644 (file)
@@ -205,7 +205,7 @@ static const char *proc_arch[] = {
        "5TE",
        "5TEJ",
        "6TEJ",
-       "?(10)",
+       "7",
        "?(11)",
        "?(12)",
        "?(13)",
@@ -258,14 +258,17 @@ int cpu_architecture(void)
 {
        int cpu_arch;
 
-       if ((processor_id & 0x0000f000) == 0) {
+       if ((processor_id & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
-       } else if ((processor_id & 0x0000f000) == 0x00007000) {
+       } else if ((processor_id & 0x0008f000) == 0x00007000) {
                cpu_arch = (processor_id & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
-       } else {
+       } else if ((processor_id & 0x00080000) == 0x00000000) {
                cpu_arch = (processor_id >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
+       } else {
+               /* the revised CPUID */
+               cpu_arch = ((processor_id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
        }
 
        return cpu_arch;
@@ -863,11 +866,11 @@ static int c_show(struct seq_file *m, void *v)
        seq_printf(m, "\nCPU implementer\t: 0x%02x\n", processor_id >> 24);
        seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
 
-       if ((processor_id & 0x0000f000) == 0x00000000) {
+       if ((processor_id & 0x0008f000) == 0x00000000) {
                /* pre-ARM7 */
                seq_printf(m, "CPU part\t\t: %07x\n", processor_id >> 4);
        } else {
-               if ((processor_id & 0x0000f000) == 0x00007000) {
+               if ((processor_id & 0x0008f000) == 0x00007000) {
                        /* ARM7 */
                        seq_printf(m, "CPU variant\t: 0x%02x\n",
                                   (processor_id >> 16) & 127);
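
The new else branch decodes the revised CPUID scheme: when bit 19 of the main ID register is set, the architecture can no longer be read directly from bits[19:16], so it is inferred from the top nibble of the part number (0xb for ARM11-class parts, 0xc for the following generation). A small worked example, using an illustrative ID value rather than one quoted from a datasheet:

/* Sketch of the revised-CPUID decode added above (example value only). */
static int example_cpu_arch(unsigned int id)
{
	if (id & 0x00080000)		/* revised scheme */
		return ((id >> 12) & 0xf) - 0xb + CPU_ARCH_ARMv6;
	return CPU_ARCH_UNKNOWN;	/* old-scheme handling omitted */
}

/* example_cpu_arch(0x410fb767): bits[15:12] == 0xb, so the result is
 * CPU_ARCH_ARMv6; a part with 0xc there decodes to CPU_ARCH_ARMv6 + 1,
 * which indexes the new "7" entry added to proc_arch[] above. */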
index 373c0959bc2f64ff02e72610e3091f53b6cd9763..7338948bd7d38046e355807e73c44413d3db40d7 100644 (file)
@@ -114,7 +114,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
         * We need to tell the secondary core where to find
         * its stack and the page tables.
         */
-       secondary_data.stack = (void *)idle->thread_info + THREAD_START_SP;
+       secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
        secondary_data.pgdir = virt_to_phys(pgd);
        wmb();
 
@@ -245,7 +245,7 @@ void __cpuexit cpu_die(void)
        __asm__("mov    sp, %0\n"
        "       b       secondary_start_kernel"
                :
-               : "r" ((void *)current->thread_info + THREAD_SIZE - 8));
+               : "r" (task_stack_page(current) + THREAD_SIZE - 8));
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
index c9fe6f5f7ee35ac6df900efcf06c85cd908e3a15..93cfd3ffcc72b6cbc90ff6c7ff8f808b15e3ea15 100644 (file)
@@ -164,7 +164,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        } else if (verify_stack(fp)) {
                printk("invalid frame pointer 0x%08x", fp);
                ok = 0;
-       } else if (fp < (unsigned long)(tsk->thread_info + 1))
+       } else if (fp < (unsigned long)end_of_stack(tsk))
                printk("frame pointer underflow");
        printk("\n");
 
@@ -210,7 +210,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
 
        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->ARM_sp,
-                        THREAD_SIZE + (unsigned long)tsk->thread_info);
+                        THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(regs);
        }
index 990ee63b246551f20e7db1f628d75a26a4a8ded0..21effe0dbf97e2b2f15ad1c3f6a2ab96a0ed9d43 100644 (file)
  */
 
                .macro  save_regs
+               mov     ip, sp
                stmfd   sp!, {r1, r4 - r8, fp, ip, lr, pc}
+               sub     fp, ip, #4
                .endm
 
-               .macro  load_regs,flags
-               LOADREGS(\flags,fp,{r1, r4 - r8, fp, sp, pc})
+               .macro  load_regs
+               ldmfd   sp, {r1, r4 - r8, fp, sp, pc}
                .endm
 
                .macro  load1b, reg1
index 4a4609c19095f9188005e4e2e9b68d235008b079..c50e8f5285d17e7851de9d03c5c7e7e25f3b86cd 100644 (file)
@@ -23,7 +23,7 @@ len   .req    r2
 sum    .req    r3
 
 .Lzero:                mov     r0, sum
-               load_regs       ea
+               load_regs
 
                /*
                 * Align an unaligned destination pointer.  We know that
@@ -87,9 +87,7 @@ sum   .req    r3
                b       .Ldone
 
 FN_ENTRY
-               mov     ip, sp
                save_regs
-               sub     fp, ip, #4
 
                cmp     len, #8                 @ Ensure that we have at least
                blo     .Lless8                 @ 8 bytes to copy.
@@ -163,7 +161,7 @@ FN_ENTRY
                ldr     sum, [sp, #0]           @ dst
                tst     sum, #1
                movne   r0, r0, ror #8
-               load_regs       ea
+               load_regs
 
 .Lsrc_not_aligned:
                adc     sum, sum, #0            @ include C from dst alignment
index 333bca292de93a5b0eec17405207089395dbf88f..c3b93e22ea25a05d80654016bb13e1c4f27879f5 100644 (file)
                .text
 
                .macro  save_regs
+               mov     ip, sp
                stmfd   sp!, {r1 - r2, r4 - r8, fp, ip, lr, pc}
+               sub     fp, ip, #4
                .endm
 
-               .macro  load_regs,flags
-               ldm\flags       fp, {r1, r2, r4-r8, fp, sp, pc}
+               .macro  load_regs
+               ldmfd   sp, {r1, r2, r4-r8, fp, sp, pc}
                .endm
 
                .macro  load1b, reg1
 6002:          teq     r2, r1
                strneb  r0, [r1], #1
                bne     6002b
-               load_regs       ea
+               load_regs
                .previous
index 1c84c60941e1b976bf4e9b4db6d6d308084f3b41..74aa7a39bb68d9d09c281dd7c97612c206291afd 100644 (file)
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/semaphore.h>
 
 #include "clock.h"
 
 static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 
 struct clk *clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return clk;
 }
@@ -78,18 +79,18 @@ EXPORT_SYMBOL(clk_set_rate);
 
 int clk_register(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
        return 0;
 }
 EXPORT_SYMBOL(clk_register);
 
 void clk_unregister(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_del(&clk->node);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 }
 EXPORT_SYMBOL(clk_unregister);
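
This and the near-identical clock.c conversions below (integrator, realview, s3c2410, versatile, plat-omap) swap the DECLARE_MUTEX semaphore guarding the clock list for a real struct mutex; the list is still only walked or modified in sleepable context, so the locking rules are unchanged and consumers are unaffected. A short usage sketch against the standard <linux/clk.h> consumer API ("uartclk" is a made-up clock name):

/* Illustrative consumer code, not part of the patch. */
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_probe(struct device *dev)
{
	struct clk *clk;
	unsigned long rate;

	clk = clk_get(dev, "uartclk");	/* walks the list under clocks_mutex */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	clk_enable(clk);
	rate = clk_get_rate(clk);
	dev_info(dev, "clock running at %lu Hz\n", rate);

	clk_disable(clk);
	clk_put(clk);
	return 0;
}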
 
index 1f2805ca6e21dc2544be4dab891025b527eec60a..75e6ee318dedc2d735a195445cde96179fdc232a 100644 (file)
@@ -8,10 +8,10 @@ obj-n         :=
 obj-           :=
 
 # Board-specific support
-#obj-$(CONFIG_ARCH_AT91RM9200DK)       += board-dk.o
-#obj-$(CONFIG_MACH_AT91RM9200EK)       += board-ek.o
-#obj-$(CONFIG_MACH_CSB337)     += board-csb337.o
-#obj-$(CONFIG_MACH_CSB637)     += board-csb637.o
+obj-$(CONFIG_ARCH_AT91RM9200DK)        += board-dk.o
+obj-$(CONFIG_MACH_AT91RM9200EK)        += board-ek.o
+obj-$(CONFIG_MACH_CSB337)      += board-csb337.o
+obj-$(CONFIG_MACH_CSB637)      += board-csb637.o
 #obj-$(CONFIG_MACH_CARMEVA)    += board-carmeva.o
 #obj-$(CONFIG_MACH_KB9200)     += board-kb9202.o
 
diff --git a/arch/arm/mach-at91rm9200/board-csb337.c b/arch/arm/mach-at91rm9200/board-csb337.c
new file mode 100644 (file)
index 0000000..4aec834
--- /dev/null
@@ -0,0 +1,143 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-csb337.c
+ *
+ *  Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init csb337_init_irq(void)
+{
+       /* Initialize AIC controller */
+       at91rm9200_init_irq(NULL);
+
+       /* Set up the GPIO interrupts */
+       at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ *    0 .. 3 = USART0 .. USART3
+ *    4      = DBGU
+ */
+#define CSB337_UART_MAP                { 4, 1, -1, -1, -1 }    /* ttyS0, ..., ttyS4 */
+#define CSB337_SERIAL_CONSOLE  0                       /* ttyS0 */
+
+static void __init csb337_map_io(void)
+{
+       int serial[AT91_NR_UART] = CSB337_UART_MAP;
+       int i;
+
+       at91rm9200_map_io();
+
+       /* Initialize clocks: 3.6864 MHz crystal */
+       at91_clock_init(3686400);
+
+#ifdef CONFIG_SERIAL_AT91
+       at91_console_port = CSB337_SERIAL_CONSOLE;
+       memcpy(at91_serial_map, serial, sizeof(serial));
+
+       /* Register UARTs */
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (serial[i] >= 0)
+                       at91_register_uart(i, serial[i]);
+       }
+#endif
+}
+
+static struct at91_eth_data __initdata csb337_eth_data = {
+       .phy_irq_pin    = AT91_PIN_PC2,
+       .is_rmii        = 0,
+};
+
+static struct at91_usbh_data __initdata csb337_usbh_data = {
+       .ports          = 2,
+};
+
+static struct at91_udc_data __initdata csb337_udc_data = {
+       // this has no VBUS sensing pin
+       .pullup_pin     = AT91_PIN_PA24,
+};
+
+static struct at91_cf_data __initdata csb337_cf_data = {
+       /*
+        * connector P4 on the CSB 337 mates to
+        * connector P8 on the CSB 300CF
+        */
+
+       /* CSB337 specific */
+       .det_pin        = AT91_PIN_PC3,
+
+       /* CSB300CF specific */
+       .irq_pin        = AT91_PIN_PA19,
+       .vcc_pin        = AT91_PIN_PD0,
+       .rst_pin        = AT91_PIN_PD2,
+};
+
+static struct at91_mmc_data __initdata csb337_mmc_data = {
+       .det_pin        = AT91_PIN_PD5,
+       .is_b           = 0,
+       .wire4          = 1,
+       .wp_pin         = AT91_PIN_PD6,
+};
+
+static void __init csb337_board_init(void)
+{
+       /* Ethernet */
+       at91_add_device_eth(&csb337_eth_data);
+       /* USB Host */
+       at91_add_device_usbh(&csb337_usbh_data);
+       /* USB Device */
+       at91_add_device_udc(&csb337_udc_data);
+       /* Compact Flash */
+       at91_set_gpio_input(AT91_PIN_PB22, 1);          /* IOIS16 */
+       at91_add_device_cf(&csb337_cf_data);
+       /* MMC */
+       at91_add_device_mmc(&csb337_mmc_data);
+}
+
+MACHINE_START(CSB337, "Cogent CSB337")
+       /* Maintainer: Bill Gatliff */
+       .phys_ram       = AT91_SDRAM_BASE,
+       .phys_io        = AT91_BASE_SYS,
+       .io_pg_offst    = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+       .boot_params    = AT91_SDRAM_BASE + 0x100,
+       .timer          = &at91rm9200_timer,
+       .map_io         = csb337_map_io,
+       .init_irq       = csb337_init_irq,
+       .init_machine   = csb337_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-csb637.c b/arch/arm/mach-at91rm9200/board-csb637.c
new file mode 100644 (file)
index 0000000..23e4cc2
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-csb637.c
+ *
+ *  Copyright (C) 2005 SAN People
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init csb637_init_irq(void)
+{
+       /* Initialize AIC controller */
+       at91rm9200_init_irq(NULL);
+
+       /* Set up the GPIO interrupts */
+       at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ *    0 .. 3 = USART0 .. USART3
+ *    4      = DBGU
+ */
+#define CSB637_UART_MAP                { 4, 1, -1, -1, -1 }    /* ttyS0, ..., ttyS4 */
+#define CSB637_SERIAL_CONSOLE  0                       /* ttyS0 */
+
+static void __init csb637_map_io(void)
+{
+       int serial[AT91_NR_UART] = CSB637_UART_MAP;
+       int i;
+
+       at91rm9200_map_io();
+
+       /* Initialize clocks: 3.6864 MHz crystal */
+       at91_clock_init(3686400);
+
+#ifdef CONFIG_SERIAL_AT91
+       at91_console_port = CSB637_SERIAL_CONSOLE;
+       memcpy(at91_serial_map, serial, sizeof(serial));
+
+       /* Register UARTs */
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (serial[i] >= 0)
+                       at91_register_uart(i, serial[i]);
+       }
+#endif
+}
+
+static struct at91_eth_data __initdata csb637_eth_data = {
+       .phy_irq_pin    = AT91_PIN_PC0,
+       .is_rmii        = 0,
+};
+
+static struct at91_usbh_data __initdata csb637_usbh_data = {
+       .ports          = 2,
+};
+
+static struct at91_udc_data __initdata csb637_udc_data = {
+       .vbus_pin     = AT91_PIN_PB28,
+       .pullup_pin   = AT91_PIN_PB1,
+};
+
+static void __init csb637_board_init(void)
+{
+       /* Ethernet */
+       at91_add_device_eth(&csb637_eth_data);
+       /* USB Host */
+       at91_add_device_usbh(&csb637_usbh_data);
+       /* USB Device */
+       at91_add_device_udc(&csb637_udc_data);
+}
+
+MACHINE_START(CSB637, "Cogent CSB637")
+       /* Maintainer: Bill Gatliff */
+       .phys_ram       = AT91_SDRAM_BASE,
+       .phys_io        = AT91_BASE_SYS,
+       .io_pg_offst    = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+       .boot_params    = AT91_SDRAM_BASE + 0x100,
+       .timer          = &at91rm9200_timer,
+       .map_io         = csb637_map_io,
+       .init_irq       = csb637_init_irq,
+       .init_machine   = csb637_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-dk.c b/arch/arm/mach-at91rm9200/board-dk.c
new file mode 100644 (file)
index 0000000..8c747a3
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-dk.c
+ *
+ *  Copyright (C) 2005 SAN People
+ *
+ *  Epson S1D framebuffer glue code is:
+ *     Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init dk_init_irq(void)
+{
+       /* Initialize AIC controller */
+       at91rm9200_init_irq(NULL);
+
+       /* Set up the GPIO interrupts */
+       at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ *    0 .. 3 = USART0 .. USART3
+ *    4      = DBGU
+ */
+#define DK_UART_MAP            { 4, 1, -1, -1, -1 }    /* ttyS0, ..., ttyS4 */
+#define DK_SERIAL_CONSOLE      0                       /* ttyS0 */
+
+static void __init dk_map_io(void)
+{
+       int serial[AT91_NR_UART] = DK_UART_MAP;
+       int i;
+
+       at91rm9200_map_io();
+
+       /* Initialize clocks: 18.432 MHz crystal */
+       at91_clock_init(18432000);
+
+#ifdef CONFIG_SERIAL_AT91
+       at91_console_port = DK_SERIAL_CONSOLE;
+       memcpy(at91_serial_map, serial, sizeof(serial));
+
+       /* Register UARTs */
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (at91_serial_map[i] >= 0)
+                       at91_register_uart(i, at91_serial_map[i]);
+       }
+#endif
+}
+
+static struct at91_eth_data __initdata dk_eth_data = {
+       .phy_irq_pin    = AT91_PIN_PC4,
+       .is_rmii        = 1,
+};
+
+static struct at91_usbh_data __initdata dk_usbh_data = {
+       .ports          = 2,
+};
+
+static struct at91_udc_data __initdata dk_udc_data = {
+       .vbus_pin       = AT91_PIN_PD4,
+       .pullup_pin     = AT91_PIN_PD5,
+};
+
+static struct at91_cf_data __initdata dk_cf_data = {
+       .det_pin        = AT91_PIN_PB0,
+       .rst_pin        = AT91_PIN_PC5,
+       // .irq_pin     = ... not connected
+       // .vcc_pin     = ... always powered
+};
+
+static struct at91_mmc_data __initdata dk_mmc_data = {
+       .is_b           = 0,
+       .wire4          = 1,
+};
+
+static void __init dk_board_init(void)
+{
+       /* Ethernet */
+       at91_add_device_eth(&dk_eth_data);
+       /* USB Host */
+       at91_add_device_usbh(&dk_usbh_data);
+       /* USB Device */
+       at91_add_device_udc(&dk_udc_data);
+       /* Compact Flash */
+       at91_add_device_cf(&dk_cf_data);
+       /* MMC */
+       at91_set_gpio_output(AT91_PIN_PB7, 1);  /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */
+       at91_add_device_mmc(&dk_mmc_data);
+       /* VGA */
+//     dk_add_device_video();
+}
+
+MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK")
+       /* Maintainer: SAN People/Atmel */
+       .phys_ram       = AT91_SDRAM_BASE,
+       .phys_io        = AT91_BASE_SYS,
+       .io_pg_offst    = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+       .boot_params    = AT91_SDRAM_BASE + 0x100,
+       .timer          = &at91rm9200_timer,
+       .map_io         = dk_map_io,
+       .init_irq       = dk_init_irq,
+       .init_machine   = dk_board_init,
+MACHINE_END
diff --git a/arch/arm/mach-at91rm9200/board-ek.c b/arch/arm/mach-at91rm9200/board-ek.c
new file mode 100644 (file)
index 0000000..d140645
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * linux/arch/arm/mach-at91rm9200/board-ek.c
+ *
+ *  Copyright (C) 2005 SAN People
+ *
+ *  Epson S1D framebuffer glue code is:
+ *     Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/setup.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/irq.h>
+
+#include <asm/arch/hardware.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+
+#include "generic.h"
+
+static void __init ek_init_irq(void)
+{
+       /* Initialize AIC controller */
+       at91rm9200_init_irq(NULL);
+
+       /* Set up the GPIO interrupts */
+       at91_gpio_irq_setup(BGA_GPIO_BANKS);
+}
+
+/*
+ * Serial port configuration.
+ *    0 .. 3 = USART0 .. USART3
+ *    4      = DBGU
+ */
+#define EK_UART_MAP            { 4, 1, -1, -1, -1 }    /* ttyS0, ..., ttyS4 */
+#define EK_SERIAL_CONSOLE      0                       /* ttyS0 */
+
+static void __init ek_map_io(void)
+{
+       int serial[AT91_NR_UART] = EK_UART_MAP;
+       int i;
+
+       at91rm9200_map_io();
+
+       /* Initialize clocks: 18.432 MHz crystal */
+       at91_clock_init(18432000);
+
+#ifdef CONFIG_SERIAL_AT91
+       at91_console_port = EK_SERIAL_CONSOLE;
+       memcpy(at91_serial_map, serial, sizeof(serial));
+
+       /* Register UARTs */
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (serial[i] >= 0)
+                       at91_register_uart(i, serial[i]);
+       }
+#endif
+}
+
+static struct at91_eth_data __initdata ek_eth_data = {
+       .phy_irq_pin    = AT91_PIN_PC4,
+       .is_rmii        = 1,
+};
+
+static struct at91_usbh_data __initdata ek_usbh_data = {
+       .ports          = 2,
+};
+
+static struct at91_udc_data __initdata ek_udc_data = {
+       .vbus_pin       = AT91_PIN_PD4,
+       .pullup_pin     = AT91_PIN_PD5,
+};
+
+static struct at91_mmc_data __initdata ek_mmc_data = {
+       .det_pin        = AT91_PIN_PB27,
+       .is_b           = 0,
+       .wire4          = 1,
+       .wp_pin         = AT91_PIN_PA17,
+};
+
+static void __init ek_board_init(void)
+{
+       /* Ethernet */
+       at91_add_device_eth(&ek_eth_data);
+       /* USB Host */
+       at91_add_device_usbh(&ek_usbh_data);
+       /* USB Device */
+       at91_add_device_udc(&ek_udc_data);
+       /* MMC */
+       at91_set_gpio_output(AT91_PIN_PB22, 1); /* this MMC card slot can optionally use SPI signaling (CS3). default: MMC */
+       at91_add_device_mmc(&ek_mmc_data);
+       /* VGA */
+//     ek_add_device_video();
+}
+
+MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK")
+       /* Maintainer: SAN People/Atmel */
+       .phys_ram       = AT91_SDRAM_BASE,
+       .phys_io        = AT91_BASE_SYS,
+       .io_pg_offst    = (AT91_VA_BASE_SYS >> 18) & 0xfffc,
+       .boot_params    = AT91_SDRAM_BASE + 0x100,
+       .timer          = &at91rm9200_timer,
+       .map_io         = ek_map_io,
+       .init_irq       = ek_init_irq,
+       .init_machine   = ek_board_init,
+MACHINE_END
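
All four new board files end with a MACHINE_START/MACHINE_END block. Roughly (a hedged sketch, not the verbatim include/asm-arm/mach/arch.h macro of this era), the pair simply emits a struct machine_desc into the .arch.info.init section so that the boot code can match it against the machine number handed over by the boot loader:

/* Approximate expansion of MACHINE_START/MACHINE_END (illustrative only). */
#define MACHINE_START(_type, _name)				\
static const struct machine_desc __mach_desc_##_type		\
	__attribute__((__section__(".arch.info.init"))) = {	\
	.nr	= MACH_TYPE_##_type,				\
	.name	= _name,

#define MACHINE_END						\
};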
index 40684e01e865a77563f2cafe0c4ddc11b20586da..95a1e263f7fa59e488adecdba1f0637005528471 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/semaphore.h>
 #include <asm/hardware/icst525.h>
 #include "clock.h"
 
 static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 
 struct clk *clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return clk;
 }
@@ -107,18 +108,18 @@ static struct clk uart_clk = {
 
 int clk_register(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
        return 0;
 }
 EXPORT_SYMBOL(clk_register);
 
 void clk_unregister(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_del(&clk->node);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 }
 EXPORT_SYMBOL(clk_unregister);
 
index 5b41e3a724e1d34f35aacd1a94372781a2c4710a..622cdc4212dd6f096ca7880f85d42334f0581da1 100644 (file)
@@ -22,20 +22,6 @@ static int lm_match(struct device *dev, struct device_driver *drv)
        return 1;
 }
 
-static struct bus_type lm_bustype = {
-       .name           = "logicmodule",
-       .match          = lm_match,
-//     .suspend        = lm_suspend,
-//     .resume         = lm_resume,
-};
-
-static int __init lm_init(void)
-{
-       return bus_register(&lm_bustype);
-}
-
-postcore_initcall(lm_init);
-
 static int lm_bus_probe(struct device *dev)
 {
        struct lm_device *lmdev = to_lm_device(dev);
@@ -49,16 +35,30 @@ static int lm_bus_remove(struct device *dev)
        struct lm_device *lmdev = to_lm_device(dev);
        struct lm_driver *lmdrv = to_lm_driver(dev->driver);
 
-       lmdrv->remove(lmdev);
+       if (lmdrv->remove)
+               lmdrv->remove(lmdev);
        return 0;
 }
 
+static struct bus_type lm_bustype = {
+       .name           = "logicmodule",
+       .match          = lm_match,
+       .probe          = lm_bus_probe,
+       .remove         = lm_bus_remove,
+//     .suspend        = lm_bus_suspend,
+//     .resume         = lm_bus_resume,
+};
+
+static int __init lm_init(void)
+{
+       return bus_register(&lm_bustype);
+}
+
+postcore_initcall(lm_init);
+
 int lm_driver_register(struct lm_driver *drv)
 {
        drv->drv.bus = &lm_bustype;
-       drv->drv.probe = lm_bus_probe;
-       drv->drv.remove = lm_bus_remove;
-
        return driver_register(&drv->drv);
 }
 
index a68b30eff4d26180c24e4ed44545980f04188d64..93096befd0173a3ed767f1c48550dcedc0b66fa3 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/hardware.h>
@@ -59,7 +60,7 @@ static const struct ssp_info_ ssp_info[PXA_SSP_PORTS] = {
 #endif
 };
 
-static DECLARE_MUTEX(sem);
+static DEFINE_MUTEX(mutex);
 static int use_count[PXA_SSP_PORTS] = {0, 0, 0};
 
 static irqreturn_t ssp_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -239,16 +240,16 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
        if (port > PXA_SSP_PORTS || port == 0)
                return -ENODEV;
 
-       down(&sem);
+       mutex_lock(&mutex);
        if (use_count[port - 1]) {
-               up(&sem);
+               mutex_unlock(&mutex);
                return -EBUSY;
        }
        use_count[port - 1]++;
 
        if (!request_mem_region(__PREG(SSCR0_P(port)), 0x2c, "SSP")) {
                use_count[port - 1]--;
-               up(&sem);
+               mutex_unlock(&mutex);
                return -EBUSY;
        }
        dev->port = port;
@@ -265,13 +266,13 @@ int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
 
        /* turn on SSP port clock */
        pxa_set_cken(ssp_info[port-1].clock, 1);
-       up(&sem);
+       mutex_unlock(&mutex);
        return 0;
 
 out_region:
        release_mem_region(__PREG(SSCR0_P(port)), 0x2c);
        use_count[port - 1]--;
-       up(&sem);
+       mutex_unlock(&mutex);
        return ret;
 }
 
@@ -282,7 +283,7 @@ out_region:
  */
 void ssp_exit(struct ssp_dev *dev)
 {
-       down(&sem);
+       mutex_lock(&mutex);
        SSCR0_P(dev->port) &= ~SSCR0_SSE;
 
        if (dev->port > PXA_SSP_PORTS || dev->port == 0) {
@@ -295,7 +296,7 @@ void ssp_exit(struct ssp_dev *dev)
                free_irq(dev->irq, dev);
        release_mem_region(__PREG(SSCR0_P(dev->port)), 0x2c);
        use_count[dev->port - 1]--;
-       up(&sem);
+       mutex_unlock(&mutex);
 }
 
 EXPORT_SYMBOL(ssp_write_word);
index ec3f7e798623f3b94d1216d8d95828bf47843503..21325a4da9da65bd23a13bc2587a81312ba0bb4e 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/errno.h>
 #include <linux/err.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/semaphore.h>
 #include <asm/hardware/icst307.h>
 #include "clock.h"
 
 static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 
 struct clk *clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return clk;
 }
@@ -109,18 +110,18 @@ static struct clk mmci_clk = {
 
 int clk_register(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
        return 0;
 }
 EXPORT_SYMBOL(clk_register);
 
 void clk_unregister(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_del(&clk->node);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 }
 EXPORT_SYMBOL(clk_unregister);
 
index fc09ba92d66a4673eae8f594ca91ae60d66476b1..af2f3d52b61b8e235c2a3cc37e2db9dcd32783e1 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/hardware.h>
 #include <asm/atomic.h>
@@ -51,7 +52,7 @@
 /* clock information */
 
 static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 
 /* old functions */
 
@@ -102,7 +103,7 @@ struct clk *clk_get(struct device *dev, const char *id)
        else
                idno = to_platform_device(dev)->id;
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
 
        list_for_each_entry(p, &clocks, list) {
                if (p->id == idno &&
@@ -126,7 +127,7 @@ struct clk *clk_get(struct device *dev, const char *id)
                }
        }
 
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
        return clk;
 }
 
@@ -362,9 +363,9 @@ int s3c24xx_register_clock(struct clk *clk)
 
        /* add to the list of available clocks */
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->list, &clocks);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return 0;
 }
index dcf10014f5cd39091c7af28959bbc2a91a72088d..9858c96560e247e2b5cf92a263907b85cb5c9f6d 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/semaphore.h>
 #include <asm/hardware/icst307.h>
 #include "clock.h"
 
 static LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 
 struct clk *clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return clk;
 }
@@ -110,18 +111,18 @@ static struct clk mmci_clk = {
 
 int clk_register(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
        return 0;
 }
 EXPORT_SYMBOL(clk_register);
 
 void clk_unregister(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_del(&clk->node);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 }
 EXPORT_SYMBOL(clk_unregister);
 
index 0513ed1b2fcf8334ffdcf12a78277008e23e07c8..c2ee18d2075e7bd22312835cf9c3d859c0913f5b 100644 (file)
 
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+/* Sanity check size */
+#if (CONSISTENT_DMA_SIZE % SZ_2M)
+#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
+#endif
 
-#define CONSISTENT_BASE        (0xffc00000)
 #define CONSISTENT_END (0xffe00000)
+#define CONSISTENT_BASE        (CONSISTENT_END - CONSISTENT_DMA_SIZE)
+
 #define CONSISTENT_OFFSET(x)   (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+
 
 /*
- * This is the page table (2MB) covering uncached, DMA consistent allocations
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
-static pte_t *consistent_pte;
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
 static DEFINE_SPINLOCK(consistent_lock);
 
 /*
@@ -142,7 +152,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        unsigned long order;
        u64 mask = ISA_DMA_THRESHOLD, limit;
 
-       if (!consistent_pte) {
+       if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
@@ -205,9 +215,12 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
        c = vm_region_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
-               pte_t *pte = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+               pte_t *pte;
                struct page *end = page + (1 << order);
+               int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+               u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
+               pte = consistent_pte[idx] + off;
                c->vm_pages = page;
 
                /*
@@ -226,6 +239,11 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                        set_pte(pte, mk_pte(page, prot));
                        page++;
                        pte++;
+                       off++;
+                       if (off >= PTRS_PER_PTE) {
+                               off = 0;
+                               pte = consistent_pte[++idx];
+                       }
                } while (size -= PAGE_SIZE);
 
                /*
@@ -327,6 +345,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
        struct vm_region *c;
        unsigned long flags, addr;
        pte_t *ptep;
+       int idx;
+       u32 off;
 
        WARN_ON(irqs_disabled());
 
@@ -347,7 +367,9 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
                size = c->vm_end - c->vm_start;
        }
 
-       ptep = consistent_pte + CONSISTENT_OFFSET(c->vm_start);
+       idx = CONSISTENT_PTE_INDEX(c->vm_start);
+       off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+       ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
@@ -355,6 +377,11 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 
                ptep++;
                addr += PAGE_SIZE;
+               off++;
+               if (off >= PTRS_PER_PTE) {
+                       off = 0;
+                       ptep = consistent_pte[++idx];
+               }
 
                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);
@@ -401,11 +428,12 @@ static int __init consistent_init(void)
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
-       int ret = 0;
+       int ret = 0, i = 0;
+       u32 base = CONSISTENT_BASE;
 
        do {
-               pgd = pgd_offset(&init_mm, CONSISTENT_BASE);
-               pmd = pmd_alloc(&init_mm, pgd, CONSISTENT_BASE);
+               pgd = pgd_offset(&init_mm, base);
+               pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
@@ -413,15 +441,16 @@ static int __init consistent_init(void)
                }
                WARN_ON(!pmd_none(*pmd));
 
-               pte = pte_alloc_kernel(pmd, CONSISTENT_BASE);
+               pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
 
-               consistent_pte = pte;
-       } while (0);
+               consistent_pte[i++] = pte;
+               base += (1 << PGDIR_SHIFT);
+       } while (base < CONSISTENT_END);
 
        return ret;
 }
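
The DMA-consistent mapping is now backed by an array of page tables, one per 2 MiB (1 << PGDIR_SHIFT) slice of the region, instead of a single table. A worked example of the index/offset split, assuming a hypothetical CONSISTENT_DMA_SIZE of 8 MiB (the real default lives in asm/memory.h) and the usual ARM values PGDIR_SHIFT = 21, PTRS_PER_PTE = 512:

/* Illustrative arithmetic only. */
static void example_split(void)
{
	unsigned long base  = 0xffe00000UL - 0x00800000UL;	/* 0xff600000 */
	unsigned long vaddr = 0xffc05000UL;			/* some mapping */
	int idx, off;

	/* NUM_CONSISTENT_PTES would be 0x00800000 >> 21 == 4 tables */
	idx = (vaddr - base) >> 21;		/* CONSISTENT_PTE_INDEX  -> 3 */
	off = ((vaddr - base) >> 12) & 511;	/* CONSISTENT_OFFSET & .. -> 5 */

	/* so this page is mapped by the pte at consistent_pte[3] + 5 */
}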
index 84fd65656fcfa359caa7c219a11815b0d83c53ab..7ebc5a29db8d68d6de060d8142fbc4f851e7f3e8 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/err.h>
 #include <linux/string.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 
 #include <asm/io.h>
 #include <asm/semaphore.h>
@@ -27,7 +28,7 @@
 #include <asm/arch/clock.h>
 
 LIST_HEAD(clocks);
-static DECLARE_MUTEX(clocks_sem);
+static DEFINE_MUTEX(clocks_mutex);
 DEFINE_SPINLOCK(clockfw_lock);
 
 static struct clk_functions *arch_clock;
@@ -40,14 +41,14 @@ struct clk * clk_get(struct device *dev, const char *id)
 {
        struct clk *p, *clk = ERR_PTR(-ENOENT);
 
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_for_each_entry(p, &clocks, node) {
                if (strcmp(id, p->name) == 0 && try_module_get(p->owner)) {
                        clk = p;
                        break;
                }
        }
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return clk;
 }
@@ -249,11 +250,11 @@ void propagate_rate(struct clk * tclk)
 
 int clk_register(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_add(&clk->node, &clocks);
        if (clk->init)
                clk->init(clk);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 
        return 0;
 }
@@ -261,9 +262,9 @@ EXPORT_SYMBOL(clk_register);
 
 void clk_unregister(struct clk *clk)
 {
-       down(&clocks_sem);
+       mutex_lock(&clocks_mutex);
        list_del(&clk->node);
-       up(&clocks_sem);
+       mutex_unlock(&clocks_mutex);
 }
 EXPORT_SYMBOL(clk_unregister);
 
index 15833a0057dd1c9ad7360275cd81b4e1716d053b..38630565917142c4b4bb2c5017141e71ecfb06cc 100644 (file)
@@ -277,10 +277,9 @@ int
 copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
            unsigned long unused, struct task_struct *p, struct pt_regs *regs)
 {
-       struct thread_info *thread = p->thread_info;
-       struct pt_regs *childregs;
+       struct thread_info *thread = task_thread_info(p);
+       struct pt_regs *childregs = task_pt_regs(p);
 
-       childregs = __get_user_regs(thread);
        *childregs = *regs;
        childregs->ARM_r0 = 0;
        childregs->ARM_sp = stack_start;
index 4e6b7356a7221a29c20faa79299ba118bd9cfebe..3c3371d4683edd2ed5c6afa300cfcc91ae41fa36 100644 (file)
  */
 #define BREAKINST_ARM  0xef9f0001
 
-/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
-       return __get_user_regs(task->thread_info);
-}
-
 /*
  * this routine will get a word off of the processes privileged stack.
  * the offset is how far from the base addr as stored in the THREAD.
@@ -62,7 +47,7 @@ get_user_regs(struct task_struct *task)
  */
 static inline long get_user_reg(struct task_struct *task, int offset)
 {
-       return get_user_regs(task)->uregs[offset];
+       return task_pt_regs(task)->uregs[offset];
 }
 
 /*
@@ -74,7 +59,7 @@ static inline long get_user_reg(struct task_struct *task, int offset)
 static inline int
 put_user_reg(struct task_struct *task, int offset, long data)
 {
-       struct pt_regs newregs, *regs = get_user_regs(task);
+       struct pt_regs newregs, *regs = task_pt_regs(task);
        int ret = -EINVAL;
 
        newregs = *regs;
@@ -377,7 +362,7 @@ void ptrace_set_bpt(struct task_struct *child)
        u32 insn;
        int res;
 
-       regs = get_user_regs(child);
+       regs = task_pt_regs(child);
        pc = instruction_pointer(regs);
 
        res = read_instr(child, pc, &insn);
@@ -500,7 +485,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  */
 static int ptrace_getregs(struct task_struct *tsk, void *uregs)
 {
-       struct pt_regs *regs = get_user_regs(tsk);
+       struct pt_regs *regs = task_pt_regs(tsk);
 
        return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
@@ -515,7 +500,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
 
        ret = -EFAULT;
        if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
-               struct pt_regs *regs = get_user_regs(tsk);
+               struct pt_regs *regs = task_pt_regs(tsk);
 
                ret = -EINVAL;
                if (valid_user_regs(&newregs)) {
@@ -532,7 +517,7 @@ static int ptrace_setregs(struct task_struct *tsk, void *uregs)
  */
 static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
 {
-       return copy_to_user(ufp, &tsk->thread_info->fpstate,
+       return copy_to_user(ufp, &task_thread_info(tsk)->fpstate,
                            sizeof(struct user_fp)) ? -EFAULT : 0;
 }
 
@@ -542,7 +527,7 @@ static int ptrace_getfpregs(struct task_struct *tsk, void *ufp)
 static int ptrace_setfpregs(struct task_struct *tsk, void *ufp)
 {
        set_stopped_child_used_math(tsk);
-       return copy_from_user(&tsk->thread_info->fpstate, ufp,
+       return copy_from_user(&task_thread_info(tsk)->fpstate, ufp,
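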
                              sizeof(struct user_fp)) ? -EFAULT : 0;
 }
 
index f64f59022392894d473722099553afff21fd9d13..5847ea5d7747f6f73dd86bbd6f18af2eaa930ef9 100644 (file)
@@ -132,7 +132,7 @@ static void dump_instr(struct pt_regs *regs)
 
 /*static*/ void __dump_stack(struct task_struct *tsk, unsigned long sp)
 {
-       dump_mem("Stack: ", sp, 8192+(unsigned long)tsk->thread_info);
+       dump_mem("Stack: ", sp, 8192+(unsigned long)task_stack_page(tsk));
 }
 
 void dump_stack(void)
@@ -158,7 +158,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        } else if (verify_stack(fp)) {
                printk("invalid frame pointer 0x%08x", fp);
                ok = 0;
-       } else if (fp < (unsigned long)(tsk->thread_info + 1))
+       } else if (fp < (unsigned long)end_of_stack(tsk))
                printk("frame pointer underflow");
        printk("\n");
 
@@ -168,7 +168,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 
 /* FIXME - this is probably wrong.. */
 void show_stack(struct task_struct *task, unsigned long *sp) {
-       dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task->thread_info);
+       dump_mem("Stack: ", (unsigned long)sp, 8192+(unsigned long)task_stack_page(task));
 }
 
 DEFINE_SPINLOCK(die_lock);
@@ -187,7 +187,7 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
        printk("CPU: %d\n", smp_processor_id());
        show_regs(regs);
        printk("Process %s (pid: %d, stack limit = 0x%p)\n",
-               current->comm, current->pid, tsk->thread_info + 1);
+               current->comm, current->pid, end_of_stack(tsk));
 
        if (!user_mode(regs) || in_interrupt()) {
                __dump_stack(tsk, (unsigned long)(regs + 1));
index 69e28b4057e8bc00f7ea93f4b756a589f20fbd32..0a675ce9e0992895f809b3898084efbd8b973ea6 100644 (file)
@@ -79,7 +79,7 @@ void hard_reset_now (void)
  */
 unsigned long thread_saved_pc(struct task_struct *t)
 {
-       return (unsigned long)user_regs(t->thread_info)->irp;
+       return task_pt_regs(t)->irp;
 }
 
 static void kernel_thread_helper(void* dummy, int (*fn)(void *), void * arg)
@@ -128,7 +128,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         * remember that the task_struct doubles as the kernel stack for the task
         */
 
-       childregs = user_regs(p->thread_info);        
+       childregs = task_pt_regs(p);
         
        *childregs = *regs;  /* struct copy of pt_regs */
         
index 6cbd34a27b906cdce1805b710eedca1cd6cfa566..f214f74f264e43a65415d490a640ae31364a94de 100644 (file)
@@ -37,7 +37,7 @@ inline long get_reg(struct task_struct *task, unsigned int regno)
        if (regno == PT_USP)
                return task->thread.usp;
        else if (regno < PT_MAX)
-               return ((unsigned long *)user_regs(task->thread_info))[regno];
+               return ((unsigned long *)task_pt_regs(task))[regno];
        else
                return 0;
 }
@@ -51,7 +51,7 @@ inline int put_reg(struct task_struct *task, unsigned int regno,
        if (regno == PT_USP)
                task->thread.usp = data;
        else if (regno < PT_MAX)
-               ((unsigned long *)user_regs(task->thread_info))[regno] = data;
+               ((unsigned long *)task_pt_regs(task))[regno] = data;
        else
                return -1;
        return 0;
index 882be42114f749a625f9a48c9ede2c70009b6f05..843513102d3cfcbb94404b0a56edd08daa6b9178 100644 (file)
@@ -96,7 +96,7 @@ hard_reset_now(void)
  */
 unsigned long thread_saved_pc(struct task_struct *t)
 {
-       return (unsigned long)user_regs(t->thread_info)->erp;
+       return task_pt_regs(t)->erp;
 }
 
 static void
@@ -148,7 +148,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         * fix it up. Note: the task_struct doubles as the kernel stack for the
         * task.
         */
-       childregs = user_regs(p->thread_info);
+       childregs = task_pt_regs(p);
        *childregs = *regs;     /* Struct copy of pt_regs. */
         p->set_child_tid = p->clear_child_tid = NULL;
         childregs->r10 = 0;    /* Child returns 0 after a fork/clone. */
@@ -157,7 +157,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         * The TLS is in $mof because it is the 5th argument to sys_clone.
         */
        if (p->mm && (clone_flags & CLONE_SETTLS)) {
-               p->thread_info->tls = regs->mof;
+               task_thread_info(p)->tls = regs->mof;
        }
 
        /* Put the switch stack right below the pt_regs. */
index 5528b83a622b62e666f2e4241967353724f47f7e..82cf2e3624a41e93a1b748c671406f62043876bc 100644 (file)
@@ -46,7 +46,7 @@ long get_reg(struct task_struct *task, unsigned int regno)
        unsigned long ret;
 
        if (regno <= PT_EDA)
-               ret = ((unsigned long *)user_regs(task->thread_info))[regno];
+               ret = ((unsigned long *)task_pt_regs(task))[regno];
        else if (regno == PT_USP)
                ret = task->thread.usp;
        else if (regno == PT_PPC)
@@ -65,13 +65,13 @@ long get_reg(struct task_struct *task, unsigned int regno)
 int put_reg(struct task_struct *task, unsigned int regno, unsigned long data)
 {
        if (regno <= PT_EDA)
-               ((unsigned long *)user_regs(task->thread_info))[regno] = data;
+               ((unsigned long *)task_pt_regs(task))[regno] = data;
        else if (regno == PT_USP)
                task->thread.usp = data;
        else if (regno == PT_PPC) {
                /* Write pseudo-PC to ERP only if changed. */
                if (data != get_pseudo_pc(task))
-                       ((unsigned long *)user_regs(task->thread_info))[PT_ERP] = data;
+                       task_pt_regs(task)->erp = data;
        } else if (regno <= PT_MAX)
                return put_debugreg(task->pid, regno, data);
        else
index 13867f4fad16a858a13d4305a3869998ef516b78..da40d19a151e28a76b7814a8e1d9ac6805a35c4c 100644 (file)
@@ -113,10 +113,10 @@ smp_boot_one_cpu(int cpuid)
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);
 
-       idle->thread_info->cpu = cpuid;
+       task_thread_info(idle)->cpu = cpuid;
 
        /* Information to the CPU that is about to boot */
-       smp_init_current_idle_thread = idle->thread_info;
+       smp_init_current_idle_thread = task_thread_info(idle);
        cpu_now_booting = cpuid;
 
        /* Wait for CPU to come online */
index b08a28bb58abab021e9dcd263067b6a5abd55a5b..9d75d7692303fca512713743952403f07cab07fe 100644 (file)
@@ -198,9 +198,9 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
        per_cpu(current_pgd, cpu) = next->pgd;
 
        /* Switch context in the MMU. */
-        if (tsk && tsk->thread_info)
+        if (tsk && task_thread_info(tsk))
         {
-          SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | tsk->thread_info->tls);
+          SPEC_REG_WR(SPEC_REG_PID, next->context.page_id | task_thread_info(tsk)->tls);
         }
         else
         {
index c4488379ac3bf022998274d629f78baccb03f513..0fff8a61ef2a8ba2032ebfb50cfb553ff94cc97b 100644 (file)
@@ -204,7 +204,7 @@ int copy_thread(int nr, unsigned long clone_flags,
 
        regs0 = __kernel_frame0_ptr;
        childregs0 = (struct pt_regs *)
-               ((unsigned long) p->thread_info + THREAD_SIZE - USER_CONTEXT_SIZE);
+               (task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
        childregs = childregs0;
 
        /* set up the userspace frame (the only place that the USP is stored) */
@@ -220,7 +220,7 @@ int copy_thread(int nr, unsigned long clone_flags,
                *childregs = *regs;
                childregs->sp = (unsigned long) childregs0;
                childregs->next_frame = childregs0;
-               childregs->gr15 = (unsigned long) p->thread_info;
+               childregs->gr15 = (unsigned long) task_thread_info(p);
                childregs->gr29 = (unsigned long) p;
        }
 
index 585ed5efd0f719fa5d490c6f65a7fb318dbf5550..ed79ae20e88d4a58800da6204648820bd6171666 100644 (file)
@@ -195,7 +195,7 @@ int copy_thread(int nr, unsigned long clone_flags,
 {
        struct pt_regs * childregs;
 
-       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+       childregs = (struct pt_regs *) (THREAD_SIZE + task_stack_page(p)) - 1;
 
        *childregs = *regs;
        childregs->retpc = (unsigned long) ret_from_fork;
index 035928f3f6c1c96bf8869c9388f275fd49db703c..2185377fdde118424aaa36b3b3d8c36498373161 100644 (file)
@@ -424,18 +424,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
        struct task_struct *tsk;
        int err;
 
-       childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
-       /*
-        * The below -8 is to reserve 8 bytes on top of the ring0 stack.
-        * This is necessary to guarantee that the entire "struct pt_regs"
-        * is accessable even if the CPU haven't stored the SS/ESP registers
-        * on the stack (interrupt gate does not save these registers
-        * when switching to the same priv ring).
-        * Therefore beware: accessing the xss/esp fields of the
-        * "struct pt_regs" is possible, but they may contain the
-        * completely wrong values.
-        */
-       childregs = (struct pt_regs *) ((unsigned long) childregs - 8);
+       childregs = task_pt_regs(p);
        *childregs = *regs;
        childregs->eax = 0;
        childregs->esp = esp;
@@ -540,12 +529,7 @@ EXPORT_SYMBOL(dump_thread);
  */
 int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-       struct pt_regs ptregs;
-       
-       ptregs = *(struct pt_regs *)
-               ((unsigned long)tsk->thread_info +
-               /* see comments in copy_thread() about -8 */
-               THREAD_SIZE - sizeof(ptregs) - 8);
+       struct pt_regs ptregs = *task_pt_regs(tsk);
        ptregs.xcs &= 0xffff;
        ptregs.xds &= 0xffff;
        ptregs.xes &= 0xffff;
@@ -601,8 +585,8 @@ static inline void disable_tsc(struct task_struct *prev_p,
         * gcc should eliminate the ->thread_info dereference if
         * has_secure_computing returns 0 at compile time (SECCOMP=n).
         */
-       prev = prev_p->thread_info;
-       next = next_p->thread_info;
+       prev = task_thread_info(prev_p);
+       next = task_thread_info(next_p);
 
        if (has_secure_computing(prev) || has_secure_computing(next)) {
                /* slow path here */
@@ -787,7 +771,7 @@ unsigned long get_wchan(struct task_struct *p)
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
-       stack_page = (unsigned long)p->thread_info;
+       stack_page = (unsigned long)task_stack_page(p);
        esp = p->thread.esp;
        if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
                return 0;
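
[Illustrative sketch, not from this patch] The hunks above, and most of the per-arch hunks in this merge, replace open-coded arithmetic on ->thread_info with the task_thread_info()/task_stack_page()/task_pt_regs() accessors. A hypothetical caller after the conversion could look like the following; task_user_ip() is made up for illustration, and instruction_pointer() is the usual arch-provided pt_regs helper:

    #include <linux/sched.h>
    #include <asm/ptrace.h>

    /* Hypothetical helper: where a stopped task will resume in user space.
     * Before this series each arch open-coded the pt_regs location, e.g.
     * ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
     * task_pt_regs() hides that per-arch stack layout. */
    static unsigned long task_user_ip(struct task_struct *tsk)
    {
            struct pt_regs *regs = task_pt_regs(tsk);

            return instruction_pointer(regs);
    }
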
index b3c2e2c26743381e5f1a8b3dfd592c2c0cb5d2c9..255adb498268bd0f27b0020fd4402682d0432a1a 100644 (file)
@@ -875,8 +875,7 @@ static inline struct task_struct * alloc_idle_task(int cpu)
                /* initialize thread_struct.  we really want to avoid destroy
                 * idle tread
                 */
-               idle->thread.esp = (unsigned long)(((struct pt_regs *)
-                       (THREAD_SIZE + (unsigned long) idle->thread_info)) - 1);
+               idle->thread.esp = (unsigned long)task_pt_regs(idle);
                init_idle(idle, cpu);
                return idle;
        }
@@ -1096,6 +1095,7 @@ static void smp_tune_scheduling (void)
                        cachesize = 16; /* Pentiums, 2x8kB cache */
                        bandwidth = 100;
                }
+               max_cache_size = cachesize * 1024;
        }
 }
 
index cbdb0afed76a0cf8173fdde0d3c34f55d3fbcef5..0c90ae54ddfa0a868f5df45c23bc738b2d7591dc 100644 (file)
@@ -311,7 +311,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
                "movl %1,%%ebp\n\t"
                "jmp resume_userspace"
                : /* no outputs */
-               :"r" (&info->regs), "r" (tsk->thread_info) : "ax");
+               :"r" (&info->regs), "r" (task_thread_info(tsk)) : "ax");
        /* we never return here */
 }
 
index a346e1833bf2335b224a298469911233dab809fa..626cdc83668b1bc17a2823deefeadae287b86ef1 100644 (file)
@@ -108,7 +108,6 @@ static struct async_struct *IRQ_ports[NR_IRQS];
 static struct console *console;
 
 static unsigned char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 extern struct console *console_drivers; /* from kernel/printk.c */
 
@@ -167,15 +166,9 @@ static  void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
                        }
                }
                seen_esc = 0;
-               if (tty->flip.count >= TTY_FLIPBUF_SIZE) break;
 
-               *tty->flip.char_buf_ptr = ch;
-
-               *tty->flip.flag_buf_ptr = 0;
-
-               tty->flip.flag_buf_ptr++;
-               tty->flip.char_buf_ptr++;
-               tty->flip.count++;
+               if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+                       break;
        }
        tty_flip_buffer_push(tty);
 }
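
[Illustrative sketch, not from this patch] After the conversion above, a receive loop relies on tty_insert_flip_char() returning 0 when the flip buffer is full, instead of checking tty->flip.count and advancing the buffer pointers by hand. read_port_char() below is a hypothetical stand-in for however the driver drains its hardware FIFO:

    #include <linux/tty.h>
    #include <linux/tty_flip.h>

    extern int read_port_char(void);        /* hypothetical: returns -1 when the FIFO is empty */

    static void push_rx_chars(struct tty_struct *tty)
    {
            int ch;

            while ((ch = read_port_char()) >= 0) {
                    /* tty_insert_flip_char() returns 0 once the flip buffer is full */
                    if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
                            break;
            }
            tty_flip_buffer_push(tty);      /* hand the batch to the line discipline */
    }
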
index b73b8b6b10c13adc3cd0dd3abf140c5cdbc6fe7a..a47f63b204fbbc8d1cf02b982dab0d2992aeac0d 100644 (file)
@@ -95,8 +95,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
 static inline int elf_core_copy_task_regs(struct task_struct *t,
                                          elf_gregset_t* elfregs)
 {
-       struct pt_regs *pp = ia64_task_regs(t);
-       ELF_CORE_COPY_REGS((*elfregs), pp);
+       ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
        return 1;
 }
 
index aa891c9bc9b67ac268970391f49122f37af4b22a..5856510210fac55247f15294469103c55d49dc6c 100644 (file)
@@ -255,7 +255,7 @@ save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
         */
        fp_tos = (fsr>>11)&0x7;
        fr8_st_map = (8-fp_tos)&0x7;
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
        ia64f2ia32f(fpregp, &ptp->f8);
        copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
@@ -389,7 +389,7 @@ restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
        fr8_st_map = (8-fp_tos)&0x7;
        fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
 
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
        ia32f2ia64f(&ptp->f8, fpregp);
        copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
index 4f630043b3aedeb8933b74d5ca7fb78b1210d300..c187743965a02029df7090843ae30e2059afe4d6 100644 (file)
@@ -58,7 +58,7 @@ load_desc (u16 selector)
 void
 ia32_load_segment_descriptors (struct task_struct *task)
 {
-       struct pt_regs *regs = ia64_task_regs(task);
+       struct pt_regs *regs = task_pt_regs(task);
 
        /* Setup the segment descriptors */
        regs->r24 = load_desc(regs->r16 >> 16);         /* ESD */
@@ -113,7 +113,7 @@ void
 ia32_load_state (struct task_struct *t)
 {
        unsigned long eflag, fsr, fcr, fir, fdr, tssd;
-       struct pt_regs *regs = ia64_task_regs(t);
+       struct pt_regs *regs = task_pt_regs(t);
 
        eflag = t->thread.eflag;
        fsr = t->thread.fsr;
index 0668b2b7714db553fadee5e6bd51ea941c669ed5..3945d378bd7ed913f7c530dc3135828580ff50bc 100644 (file)
@@ -1482,7 +1482,7 @@ getreg (struct task_struct *child, int regno)
 {
        struct pt_regs *child_regs;
 
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: return child_regs->r11;
              case PT_ECX: return child_regs->r9;
@@ -1510,7 +1510,7 @@ putreg (struct task_struct *child, int regno, unsigned int value)
 {
        struct pt_regs *child_regs;
 
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        switch (regno / sizeof(int)) {
              case PT_EBX: child_regs->r11 = value; break;
              case PT_ECX: child_regs->r9 = value; break;
@@ -1626,7 +1626,7 @@ save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                put_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1659,7 +1659,7 @@ restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __us
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
                get_fpreg(i, &save->st_space[i], ptp, swp, tos);
@@ -1690,7 +1690,7 @@ save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user
          *  Stack frames start with 16-bytes of temp space
          */
         swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-        ptp = ia64_task_regs(tsk);
+        ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
         for (i = 0; i < 8; i++)
                put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
@@ -1734,7 +1734,7 @@ restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __u
         *  Stack frames start with 16-bytes of temp space
         */
        swp = (struct switch_stack *)(tsk->thread.ksp + 16);
-       ptp = ia64_task_regs(tsk);
+       ptp = task_pt_regs(tsk);
        tos = (tsk->thread.fsr >> 11) & 7;
        for (i = 0; i < 8; i++)
        get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
index 2ddbac6f49993b4c0026ea6d12803f80b144f620..ce423910ca976553b363ca42ee29f923423c563a 100644 (file)
@@ -903,5 +903,6 @@ fsyscall_table:
        data8 0
        data8 0
        data8 0
+       data8 0                                                 // 1280
 
        .org fsyscall_table + 8*NR_syscalls     // guard against failures to increase NR_syscalls
index 2323377e36950ee236a662bcddada08c2f423d45..5cd6226f44f291c2a9a4fd2fa42374148fc65ab9 100644 (file)
@@ -60,3 +60,30 @@ END(jprobe_break)
 GLOBAL_ENTRY(jprobe_inst_return)
        br.call.sptk.many b0=jprobe_break
 END(jprobe_inst_return)
+
+GLOBAL_ENTRY(invalidate_stacked_regs)
+       movl r16=invalidate_restore_cfm
+       ;;
+       mov b6=r16
+       ;;
+       br.ret.sptk.many b6
+       ;;
+invalidate_restore_cfm:
+       mov r16=ar.rsc
+       ;;
+       mov ar.rsc=r0
+       ;;
+       loadrs
+       ;;
+       mov ar.rsc=r16
+       ;;
+       br.cond.sptk.many rp
+END(invalidate_stacked_regs)
+
+GLOBAL_ENTRY(flush_register_stack)
+       // flush dirty regs to backing store (must be first in insn group)
+       flushrs
+       ;;
+       br.ret.sptk.many rp
+END(flush_register_stack)
+
index 346fedf9ea479a361ea7cdca2173b442b2302adb..50ae8c7d453d5075641a191dcfdf5bf0d4087ece 100644 (file)
@@ -766,11 +766,56 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
+struct param_bsp_cfm {
+       unsigned long ip;
+       unsigned long *bsp;
+       unsigned long cfm;
+};
+
+static void ia64_get_bsp_cfm(struct unw_frame_info *info, void *arg)
+{
+       unsigned long ip;
+       struct param_bsp_cfm *lp = arg;
+
+       do {
+               unw_get_ip(info, &ip);
+               if (ip == 0)
+                       break;
+               if (ip == lp->ip) {
+                       unw_get_bsp(info, (unsigned long*)&lp->bsp);
+                       unw_get_cfm(info, (unsigned long*)&lp->cfm);
+                       return;
+               }
+       } while (unw_unwind(info) >= 0);
+       lp->bsp = 0;
+       lp->cfm = 0;
+       return;
+}
+
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct jprobe *jp = container_of(p, struct jprobe, kp);
        unsigned long addr = ((struct fnptr *)(jp->entry))->ip;
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       struct param_bsp_cfm pa;
+       int bytes;
+
+       /*
+        * Callee owns the argument space and could overwrite it, eg
+        * tail call optimization. So to be absolutely safe
+        * we save the argument space before transfering the control
+        * to instrumented jprobe function which runs in
+        * the process context
+        */
+       pa.ip = regs->cr_iip;
+       unw_init_running(ia64_get_bsp_cfm, &pa);
+       bytes = (char *)ia64_rse_skip_regs(pa.bsp, pa.cfm & 0x3f)
+                               - (char *)pa.bsp;
+       memcpy( kcb->jprobes_saved_stacked_regs,
+               pa.bsp,
+               bytes );
+       kcb->bsp = pa.bsp;
+       kcb->cfm = pa.cfm;
 
        /* save architectural state */
        kcb->jprobe_saved_regs = *regs;
@@ -792,8 +837,20 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 {
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+       int bytes;
 
+       /* restoring architectural state */
        *regs = kcb->jprobe_saved_regs;
+
+       /* restoring the original argument space */
+       flush_register_stack();
+       bytes = (char *)ia64_rse_skip_regs(kcb->bsp, kcb->cfm & 0x3f)
+                               - (char *)kcb->bsp;
+       memcpy( kcb->bsp,
+               kcb->jprobes_saved_stacked_regs,
+               bytes );
+       invalidate_stacked_regs();
+
        preempt_enable_no_resched();
        return 1;
 }
index 355af15287c7eb318f08382cf6d695a6bb677524..ee7eec9ee57648cee03a84aeacea97651577131d 100644 (file)
@@ -766,7 +766,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
                        l = strlen(previous_current->comm);
                snprintf(comm, sizeof(comm), "%s %*s %d",
                        current->comm, l, previous_current->comm,
-                       previous_current->thread_info->cpu);
+                       task_thread_info(previous_current)->cpu);
        }
        memcpy(current->comm, comm, sizeof(current->comm));
 
@@ -1423,7 +1423,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
        struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
        struct thread_info *ti;
        memset(p, 0, KERNEL_STACK_SIZE);
-       ti = (struct thread_info *)((char *)p + IA64_TASK_SIZE);
+       ti = task_thread_info(p);
        ti->flags = _TIF_MCA_INIT;
        ti->preempt_count = 1;
        ti->task = p;
index db32fc1d39356321950de04b0a586df9ac5dff37..403a80a58c13bf9ef89118ae4f4b7063d28df7e0 100644 (file)
@@ -847,7 +847,7 @@ ia64_state_restore:
        ;;
        mov cr.iim=temp3
        mov cr.iha=temp4
-       dep r22=0,r22,62,2      // pal_min_state, physical, uncached
+       dep r22=0,r22,62,1      // pal_min_state, physical, uncached
        mov IA64_KR(CURRENT)=r21
        ld8 r8=[temp1]          // os_status
        ld8 r10=[temp2]         // context
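
[Aside on the one-line fix above, not part of the patch] dep rX=0,rY,pos,len deposits len zero bits into rY starting at bit pos. The old `dep r22=0,r22,62,2` therefore cleared bits 62 and 63, while the corrected `dep r22=0,r22,62,1` clears only bit 62 and leaves bit 63 alone, so the pal_min_state pointer keeps the physical, uncached attribute the comment asks for.
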
index c026ac1142a68733d8c37baf3cb6c33310836c3a..bd87cb6b7a8135b1dbfc7e927b8d464c66379db4 100644 (file)
@@ -1710,7 +1710,7 @@ static void
 pfm_syswide_force_stop(void *info)
 {
        pfm_context_t   *ctx = (pfm_context_t *)info;
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        struct task_struct *owner;
        unsigned long flags;
        int ret;
@@ -1815,7 +1815,7 @@ pfm_flush(struct file *filp)
        is_system = ctx->ctx_fl_system;
 
        task = PFM_CTX_TASK(ctx);
-       regs = ia64_task_regs(task);
+       regs = task_pt_regs(task);
 
        DPRINT(("ctx_state=%d is_current=%d\n",
                state,
@@ -1945,7 +1945,7 @@ pfm_close(struct inode *inode, struct file *filp)
        is_system = ctx->ctx_fl_system;
 
        task = PFM_CTX_TASK(ctx);
-       regs = ia64_task_regs(task);
+       regs = task_pt_regs(task);
 
        DPRINT(("ctx_state=%d is_current=%d\n", 
                state,
@@ -4052,7 +4052,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                 */
                ia64_psr(regs)->up = 0;
        } else {
-               tregs = ia64_task_regs(task);
+               tregs = task_pt_regs(task);
 
                /*
                 * stop monitoring at the user level
@@ -4134,7 +4134,7 @@ pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                ia64_psr(regs)->up = 1;
 
        } else {
-               tregs = ia64_task_regs(ctx->ctx_task);
+               tregs = task_pt_regs(ctx->ctx_task);
 
                /*
                 * start monitoring at the kernel level the next
@@ -4404,7 +4404,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
                /*
                 * when not current, task MUST be stopped, so this is safe
                 */
-               regs = ia64_task_regs(task);
+               regs = task_pt_regs(task);
 
                /* force a full reload */
                ctx->ctx_last_activation = PFM_INVALID_ACTIVATION;
@@ -4530,7 +4530,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
        /*
         * per-task mode
         */
-       tregs = task == current ? regs : ia64_task_regs(task);
+       tregs = task == current ? regs : task_pt_regs(task);
 
        if (task == current) {
                /*
@@ -4593,7 +4593,7 @@ pfm_exit_thread(struct task_struct *task)
 {
        pfm_context_t *ctx;
        unsigned long flags;
-       struct pt_regs *regs = ia64_task_regs(task);
+       struct pt_regs *regs = task_pt_regs(task);
        int ret, state;
        int free_ok = 0;
 
@@ -4926,7 +4926,7 @@ restart_args:
        if (unlikely(ret)) goto abort_locked;
 
 skip_fd:
-       ret = (*func)(ctx, args_k, count, ia64_task_regs(current));
+       ret = (*func)(ctx, args_k, count, task_pt_regs(current));
 
        call_made = 1;
 
@@ -5050,7 +5050,7 @@ pfm_handle_work(void)
 
        pfm_clear_task_notify();
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        /*
         * extract reason for being here and clear
@@ -5794,7 +5794,7 @@ pfm_syst_wide_update_task(struct task_struct *task, unsigned long info, int is_c
         * on every CPU, so we can rely on the pid to identify the idle task.
         */
        if ((info & PFM_CPUINFO_EXCL_IDLE) == 0 || task->pid) {
-               regs = ia64_task_regs(task);
+               regs = task_pt_regs(task);
                ia64_psr(regs)->pp = is_ctxswin ? dcr_pp : 0;
                return;
        }
@@ -5877,7 +5877,7 @@ pfm_save_regs(struct task_struct *task)
        flags = pfm_protect_ctx_ctxsw(ctx);
 
        if (ctx->ctx_state == PFM_CTX_ZOMBIE) {
-               struct pt_regs *regs = ia64_task_regs(task);
+               struct pt_regs *regs = task_pt_regs(task);
 
                pfm_clear_psr_up();
 
@@ -6077,7 +6077,7 @@ pfm_load_regs (struct task_struct *task)
        BUG_ON(psr & IA64_PSR_I);
 
        if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) {
-               struct pt_regs *regs = ia64_task_regs(task);
+               struct pt_regs *regs = task_pt_regs(task);
 
                BUG_ON(ctx->ctx_smpl_hdr);
 
@@ -6446,7 +6446,7 @@ pfm_alt_save_pmu_state(void *data)
 {
        struct pt_regs *regs;
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        DPRINT(("called\n"));
 
@@ -6472,7 +6472,7 @@ pfm_alt_restore_pmu_state(void *data)
 {
        struct pt_regs *regs;
 
-       regs = ia64_task_regs(current);
+       regs = task_pt_regs(current);
 
        DPRINT(("called\n"));
 
@@ -6754,7 +6754,7 @@ dump_pmu_state(const char *from)
        local_irq_save(flags);
 
        this_cpu = smp_processor_id();
-       regs     = ia64_task_regs(current);
+       regs     = task_pt_regs(current);
        info     = PFM_CPUINFO_GET();
        dcr      = ia64_getreg(_IA64_REG_CR_DCR);
 
index e9904c74d2ba617679131802ce1f68858e3b2826..309d59658e5ff9344bbd122c8d364fed8b8a1c09 100644 (file)
@@ -328,7 +328,7 @@ ia64_save_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_save_state(task);
 #endif
 }
@@ -353,7 +353,7 @@ ia64_load_extra (struct task_struct *task)
 #endif
 
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(task)))
+       if (IS_IA32_PROCESS(task_pt_regs(task)))
                ia32_load_state(task);
 #endif
 }
@@ -488,7 +488,7 @@ copy_thread (int nr, unsigned long clone_flags,
         * If we're cloning an IA32 task then save the IA32 extra
         * state from the current task to the new task
         */
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_save_state(p);
                if (clone_flags & CLONE_SETTLS)
                        retval = ia32_clone_tls(p, child_ptregs);
@@ -701,7 +701,7 @@ int
 kernel_thread_helper (int (*fn)(void *), void *arg)
 {
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                /* A kernel thread is always a 64-bit process. */
                current->thread.map_base  = DEFAULT_MAP_BASE;
                current->thread.task_size = DEFAULT_TASK_SIZE;
@@ -722,7 +722,7 @@ flush_thread (void)
        current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
        ia64_drop_fpu(current);
 #ifdef CONFIG_IA32_SUPPORT
-       if (IS_IA32_PROCESS(ia64_task_regs(current))) {
+       if (IS_IA32_PROCESS(task_pt_regs(current))) {
                ia32_drop_partial_page_list(current);
                current->thread.task_size = IA32_PAGE_OFFSET;
                set_fs(USER_DS);
@@ -755,7 +755,7 @@ exit_thread (void)
        if (current->thread.flags & IA64_THREAD_DBG_VALID)
                pfm_release_debug_registers(current);
 #endif
-       if (IS_IA32_PROCESS(ia64_task_regs(current)))
+       if (IS_IA32_PROCESS(task_pt_regs(current)))
                ia32_drop_partial_page_list(current);
 }
 
index 8d88eeea02d12fa762a1d54ba71ce15be73529c9..eaed14aac6aa59592e201b91eff05e0ccb6a8ba6 100644 (file)
@@ -254,7 +254,7 @@ get_rnat (struct task_struct *task, struct switch_stack *sw,
        long num_regs, nbits;
        struct pt_regs *pt;
 
-       pt = ia64_task_regs(task);
+       pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -314,7 +314,7 @@ put_rnat (struct task_struct *task, struct switch_stack *sw,
        struct pt_regs *pt;
        unsigned long cfm, *urbs_kargs;
 
-       pt = ia64_task_regs(task);
+       pt = task_pt_regs(task);
        kbsp = (unsigned long *) sw->ar_bspstore;
        ubspstore = (unsigned long *) pt->ar_bspstore;
 
@@ -407,7 +407,7 @@ ia64_peek (struct task_struct *child, struct switch_stack *child_stack,
 
        urbs_end = (long *) user_rbs_end;
        laddr = (unsigned long *) addr;
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -467,7 +467,7 @@ ia64_poke (struct task_struct *child, struct switch_stack *child_stack,
        struct pt_regs *child_regs;
 
        laddr = (unsigned long *) addr;
-       child_regs = ia64_task_regs(child);
+       child_regs = task_pt_regs(child);
        bspstore = (unsigned long *) child_regs->ar_bspstore;
        krbs = (unsigned long *) child + IA64_RBS_OFFSET/8;
        if (on_kernel_rbs(addr, (unsigned long) bspstore,
@@ -567,7 +567,7 @@ thread_matches (struct task_struct *thread, unsigned long addr)
                 */
                return 0;
 
-       thread_regs = ia64_task_regs(thread);
+       thread_regs = task_pt_regs(thread);
        thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL);
        if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end))
                return 0;
@@ -627,7 +627,7 @@ find_thread_for_addr (struct task_struct *child, unsigned long addr)
 inline void
 ia64_flush_fph (struct task_struct *task)
 {
-       struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+       struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
        /*
         * Prevent migrating this task while
@@ -653,7 +653,7 @@ ia64_flush_fph (struct task_struct *task)
 void
 ia64_sync_fph (struct task_struct *task)
 {
-       struct ia64_psr *psr = ia64_psr(ia64_task_regs(task));
+       struct ia64_psr *psr = ia64_psr(task_pt_regs(task));
 
        ia64_flush_fph(task);
        if (!(task->thread.flags & IA64_THREAD_FPH_VALID)) {
@@ -794,7 +794,7 @@ access_uarea (struct task_struct *child, unsigned long addr,
                                          + offsetof(struct pt_regs, reg)))
 
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
 
        if ((addr & 0x7) != 0) {
@@ -1120,7 +1120,7 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
        if (!access_ok(VERIFY_WRITE, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
@@ -1265,7 +1265,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
        if (!access_ok(VERIFY_READ, ppr, sizeof(struct pt_all_user_regs)))
                return -EIO;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
        unw_init_from_blocked_task(&info, child);
        if (unw_unwind_to_user(&info) < 0) {
@@ -1403,7 +1403,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
 void
 ptrace_disable (struct task_struct *child)
 {
-       struct ia64_psr *child_psr = ia64_psr(ia64_task_regs(child));
+       struct ia64_psr *child_psr = ia64_psr(task_pt_regs(child));
 
        /* make sure the single step/taken-branch trap bits are not set: */
        child_psr->ss = 0;
@@ -1456,7 +1456,7 @@ sys_ptrace (long request, pid_t pid, unsigned long addr, unsigned long data)
        if (ret < 0)
                goto out_tsk;
 
-       pt = ia64_task_regs(child);
+       pt = task_pt_regs(child);
        sw = (struct switch_stack *) (child->thread.ksp + 16);
 
        switch (request) {
index a87a162a30865198b179142a90bd1ac548642814..9d5a823479a3f2dd8d6aef4e56edee4a1d309743 100644 (file)
@@ -3,7 +3,7 @@
  *
  * Creates entries in /proc/sal for various system features.
  *
- * Copyright (c) 2003 Silicon Graphics, Inc.  All rights reserved.
+ * Copyright (c) 2003, 2006 Silicon Graphics, Inc.  All rights reserved.
  * Copyright (c) 2003 Hewlett-Packard Co
  *     Bjorn Helgaas <bjorn.helgaas@hp.com>
  *
  *   mca.c may not pass a buffer, a NULL buffer just indicates that a new
  *   record is available in SAL.
  *   Replace some NR_CPUS by cpus_online, for hotplug cpu.
+ *
+ * Jan  5 2006        kaos@sgi.com
+ *   Handle hotplug cpus coming online.
+ *   Handle hotplug cpus going offline while they still have outstanding records.
+ *   Use the cpu_* macros consistently.
+ *   Replace the counting semaphore with a mutex and a test if the cpumask is non-empty.
+ *   Modify the locking to make the test for "work to do" an atomic operation.
  */
 
 #include <linux/capability.h>
+#include <linux/cpu.h>
 #include <linux/types.h>
 #include <linux/proc_fs.h>
 #include <linux/module.h>
@@ -132,8 +140,8 @@ enum salinfo_state {
 };
 
 struct salinfo_data {
-       volatile cpumask_t      cpu_event;      /* which cpus have outstanding events */
-       struct semaphore        sem;            /* count of cpus with outstanding events (bits set in cpu_event) */
+       cpumask_t               cpu_event;      /* which cpus have outstanding events */
+       struct semaphore        mutex;
        u8                      *log_buffer;
        u64                     log_size;
        u8                      *oemdata;       /* decoded oem data */
@@ -174,6 +182,21 @@ struct salinfo_platform_oemdata_parms {
        int ret;
 };
 
+/* Kick the mutex that tells user space that there is work to do.  Instead of
+ * trying to track the state of the mutex across multiple cpus, in user
+ * context, interrupt context, non-maskable interrupt context and hotplug cpu,
+ * it is far easier just to grab the mutex if it is free then release it.
+ *
+ * This routine must be called with data_saved_lock held, to make the down/up
+ * operation atomic.
+ */
+static void
+salinfo_work_to_do(struct salinfo_data *data)
+{
+       down_trylock(&data->mutex);
+       up(&data->mutex);
+}
+
 static void
 salinfo_platform_oemdata_cpu(void *context)
 {
@@ -212,9 +235,9 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
 
        BUG_ON(type >= ARRAY_SIZE(salinfo_log_name));
 
+       if (irqsafe)
+               spin_lock_irqsave(&data_saved_lock, flags);
        if (buffer) {
-               if (irqsafe)
-                       spin_lock_irqsave(&data_saved_lock, flags);
                for (i = 0, data_saved = data->data_saved; i < saved_size; ++i, ++data_saved) {
                        if (!data_saved->buffer)
                                break;
@@ -232,13 +255,11 @@ salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe)
                        data_saved->size = size;
                        data_saved->buffer = buffer;
                }
-               if (irqsafe)
-                       spin_unlock_irqrestore(&data_saved_lock, flags);
        }
-
-       if (!test_and_set_bit(smp_processor_id(), &data->cpu_event)) {
-               if (irqsafe)
-                       up(&data->sem);
+       cpu_set(smp_processor_id(), data->cpu_event);
+       if (irqsafe) {
+               salinfo_work_to_do(data);
+               spin_unlock_irqrestore(&data_saved_lock, flags);
        }
 }
 
@@ -249,20 +270,17 @@ static struct timer_list salinfo_timer;
 static void
 salinfo_timeout_check(struct salinfo_data *data)
 {
-       int i;
+       unsigned long flags;
        if (!data->open)
                return;
-       for_each_online_cpu(i) {
-               if (test_bit(i, &data->cpu_event)) {
-                       /* double up() is not a problem, user space will see no
-                        * records for the additional "events".
-                        */
-                       up(&data->sem);
-               }
+       if (!cpus_empty(data->cpu_event)) {
+               spin_lock_irqsave(&data_saved_lock, flags);
+               salinfo_work_to_do(data);
+               spin_unlock_irqrestore(&data_saved_lock, flags);
        }
 }
 
-static void 
+static void
 salinfo_timeout (unsigned long arg)
 {
        salinfo_timeout_check(salinfo_data + SAL_INFO_TYPE_MCA);
@@ -290,16 +308,20 @@ salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t
        int i, n, cpu = -1;
 
 retry:
-       if (down_trylock(&data->sem)) {
+       if (cpus_empty(data->cpu_event) && down_trylock(&data->mutex)) {
                if (file->f_flags & O_NONBLOCK)
                        return -EAGAIN;
-               if (down_interruptible(&data->sem))
+               if (down_interruptible(&data->mutex))
                        return -EINTR;
        }
 
        n = data->cpu_check;
        for (i = 0; i < NR_CPUS; i++) {
-               if (test_bit(n, &data->cpu_event) && cpu_online(n)) {
+               if (cpu_isset(n, data->cpu_event)) {
+                       if (!cpu_online(n)) {
+                               cpu_clear(n, data->cpu_event);
+                               continue;
+                       }
                        cpu = n;
                        break;
                }
@@ -310,9 +332,6 @@ retry:
        if (cpu == -1)
                goto retry;
 
-       /* events are sticky until the user says "clear" */
-       up(&data->sem);
-
        /* for next read, start checking at next CPU */
        data->cpu_check = cpu;
        if (++data->cpu_check == NR_CPUS)
@@ -381,10 +400,8 @@ salinfo_log_release(struct inode *inode, struct file *file)
 static void
 call_on_cpu(int cpu, void (*fn)(void *), void *arg)
 {
-       cpumask_t save_cpus_allowed, new_cpus_allowed;
-       memcpy(&save_cpus_allowed, &current->cpus_allowed, sizeof(save_cpus_allowed));
-       memset(&new_cpus_allowed, 0, sizeof(new_cpus_allowed));
-       set_bit(cpu, &new_cpus_allowed);
+       cpumask_t save_cpus_allowed = current->cpus_allowed;
+       cpumask_t new_cpus_allowed = cpumask_of_cpu(cpu);
        set_cpus_allowed(current, new_cpus_allowed);
        (*fn)(arg);
        set_cpus_allowed(current, save_cpus_allowed);
@@ -433,10 +450,10 @@ retry:
        if (!data->saved_num)
                call_on_cpu(cpu, salinfo_log_read_cpu, data);
        if (!data->log_size) {
-               data->state = STATE_NO_DATA;
-               clear_bit(cpu, &data->cpu_event);
+               data->state = STATE_NO_DATA;
+               cpu_clear(cpu, data->cpu_event);
        } else {
-               data->state = STATE_LOG_RECORD;
+               data->state = STATE_LOG_RECORD;
        }
 }
 
@@ -473,27 +490,31 @@ static int
 salinfo_log_clear(struct salinfo_data *data, int cpu)
 {
        sal_log_record_header_t *rh;
+       unsigned long flags;
+       spin_lock_irqsave(&data_saved_lock, flags);
        data->state = STATE_NO_DATA;
-       if (!test_bit(cpu, &data->cpu_event))
+       if (!cpu_isset(cpu, data->cpu_event)) {
+               spin_unlock_irqrestore(&data_saved_lock, flags);
                return 0;
-       down(&data->sem);
-       clear_bit(cpu, &data->cpu_event);
+       }
+       cpu_clear(cpu, data->cpu_event);
        if (data->saved_num) {
-               unsigned long flags;
-               spin_lock_irqsave(&data_saved_lock, flags);
-               shift1_data_saved(data, data->saved_num - 1 );
+               shift1_data_saved(data, data->saved_num - 1);
                data->saved_num = 0;
-               spin_unlock_irqrestore(&data_saved_lock, flags);
        }
+       spin_unlock_irqrestore(&data_saved_lock, flags);
        rh = (sal_log_record_header_t *)(data->log_buffer);
        /* Corrected errors have already been cleared from SAL */
        if (rh->severity != sal_log_severity_corrected)
                call_on_cpu(cpu, salinfo_log_clear_cpu, data);
        /* clearing a record may make a new record visible */
        salinfo_log_new_read(cpu, data);
-       if (data->state == STATE_LOG_RECORD &&
-           !test_and_set_bit(cpu,  &data->cpu_event))
-               up(&data->sem);
+       if (data->state == STATE_LOG_RECORD) {
+               spin_lock_irqsave(&data_saved_lock, flags);
+               cpu_set(cpu, data->cpu_event);
+               salinfo_work_to_do(data);
+               spin_unlock_irqrestore(&data_saved_lock, flags);
+       }
        return 0;
 }
 
@@ -550,6 +571,53 @@ static struct file_operations salinfo_data_fops = {
        .write   = salinfo_log_write,
 };
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int __devinit
+salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
+{
+       unsigned int i, cpu = (unsigned long)hcpu;
+       unsigned long flags;
+       struct salinfo_data *data;
+       switch (action) {
+       case CPU_ONLINE:
+               spin_lock_irqsave(&data_saved_lock, flags);
+               for (i = 0, data = salinfo_data;
+                    i < ARRAY_SIZE(salinfo_data);
+                    ++i, ++data) {
+                       cpu_set(cpu, data->cpu_event);
+                       salinfo_work_to_do(data);
+               }
+               spin_unlock_irqrestore(&data_saved_lock, flags);
+               break;
+       case CPU_DEAD:
+               spin_lock_irqsave(&data_saved_lock, flags);
+               for (i = 0, data = salinfo_data;
+                    i < ARRAY_SIZE(salinfo_data);
+                    ++i, ++data) {
+                       struct salinfo_data_saved *data_saved;
+                       int j;
+                       for (j = ARRAY_SIZE(data->data_saved) - 1, data_saved = data->data_saved + j;
+                            j >= 0;
+                            --j, --data_saved) {
+                               if (data_saved->buffer && data_saved->cpu == cpu) {
+                                       shift1_data_saved(data, j);
+                               }
+                       }
+                       cpu_clear(cpu, data->cpu_event);
+               }
+               spin_unlock_irqrestore(&data_saved_lock, flags);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+static struct notifier_block salinfo_cpu_notifier =
+{
+       .notifier_call = salinfo_cpu_callback,
+       .priority = 0,
+};
+#endif /* CONFIG_HOTPLUG_CPU */
+
 static int __init
 salinfo_init(void)
 {
@@ -557,7 +625,7 @@ salinfo_init(void)
        struct proc_dir_entry **sdir = salinfo_proc_entries; /* keeps track of every entry */
        struct proc_dir_entry *dir, *entry;
        struct salinfo_data *data;
-       int i, j, online;
+       int i, j;
 
        salinfo_dir = proc_mkdir("sal", NULL);
        if (!salinfo_dir)
@@ -572,7 +640,7 @@ salinfo_init(void)
        for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) {
                data = salinfo_data + i;
                data->type = i;
-               sema_init(&data->sem, 0);
+               init_MUTEX(&data->mutex);
                dir = proc_mkdir(salinfo_log_name[i], salinfo_dir);
                if (!dir)
                        continue;
@@ -592,12 +660,8 @@ salinfo_init(void)
                *sdir++ = entry;
 
                /* we missed any events before now */
-               online = 0;
-               for_each_online_cpu(j) {
-                       set_bit(j, &data->cpu_event);
-                       ++online;
-               }
-               sema_init(&data->sem, online);
+               for_each_online_cpu(j)
+                       cpu_set(j, data->cpu_event);
 
                *sdir++ = dir;
        }
@@ -609,6 +673,10 @@ salinfo_init(void)
        salinfo_timer.function = &salinfo_timeout;
        add_timer(&salinfo_timer);
 
+#ifdef CONFIG_HOTPLUG_CPU
+       register_cpu_notifier(&salinfo_cpu_notifier);
+#endif
+
        return 0;
 }
 
index c33305d8e5eb21593e94396303ea9f0d971a409b..c0766575a3a2113a56792623dc16a64752263239 100644 (file)
@@ -60,6 +60,7 @@
 #include <asm/smp.h>
 #include <asm/system.h>
 #include <asm/unistd.h>
+#include <asm/system.h>
 
 #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
 # error "struct cpuinfo_ia64 too big!"
@@ -695,6 +696,7 @@ static void
 get_max_cacheline_size (void)
 {
        unsigned long line_size, max = 1;
+       unsigned int cache_size = 0;
        u64 l, levels, unique_caches;
         pal_cache_config_info_t cci;
         s64 status;
@@ -724,6 +726,8 @@ get_max_cacheline_size (void)
                line_size = 1 << cci.pcci_line_size;
                if (line_size > max)
                        max = line_size;
+               if (cache_size < cci.pcci_cache_size)
+                       cache_size = cci.pcci_cache_size;
                if (!cci.pcci_unified) {
                        status = ia64_pal_cache_config_info(l,
                                                    /* cache_type (instruction)= */ 1,
@@ -740,6 +744,9 @@ get_max_cacheline_size (void)
                        ia64_i_cache_stride_shift = cci.pcci_stride;
        }
   out:
+#ifdef CONFIG_SMP
+       max_cache_size = max(max_cache_size, cache_size);
+#endif
        if (max > ia64_max_cacheline_size)
                ia64_max_cacheline_size = max;
 }
@@ -794,7 +801,7 @@ cpu_init (void)
 #endif
 
        /* Clear the stack memory reserved for pt_regs: */
-       memset(ia64_task_regs(current), 0, sizeof(struct pt_regs));
+       memset(task_pt_regs(current), 0, sizeof(struct pt_regs));
 
        ia64_set_kr(IA64_KR_FPU_OWNER, 0);
 
@@ -870,6 +877,15 @@ cpu_init (void)
        pm_idle = default_idle;
 }
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+void sched_cacheflush(void)
+{
+       ia64_sal_cache_flush(3);
+}
+
 void
 check_bugs (void)
 {
index 58ce07efc56e060bda10d23498592f960e2858bf..463f6bb44d07880993b0f13e282352100dd05cff 100644 (file)
@@ -655,11 +655,11 @@ set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
 
                if (!t)
                        return;
-               t->thread_info->sigdelayed.signo = signo;
-               t->thread_info->sigdelayed.code = code;
-               t->thread_info->sigdelayed.addr = addr;
-               t->thread_info->sigdelayed.start_time = start_time;
-               t->thread_info->sigdelayed.pid = pid;
+               task_thread_info(t)->sigdelayed.signo = signo;
+               task_thread_info(t)->sigdelayed.code = code;
+               task_thread_info(t)->sigdelayed.addr = addr;
+               task_thread_info(t)->sigdelayed.start_time = start_time;
+               task_thread_info(t)->sigdelayed.pid = pid;
                wmb();
                set_tsk_thread_flag(t, TIF_SIGDELAYED);
        }
index f2dbcd1db0d4dc99b3baca0d0b677c3ed788a58c..c7b943f1019981d4c5c16b2a93d7e623b516f263 100644 (file)
@@ -151,7 +151,7 @@ out:
 asmlinkage long
 sys_pipe (void)
 {
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        int fd[2];
        int retval;
 
index d3e0ecb56d627c0a0514f9bfdefefd1467df0a59..55391901b0137f17184a7e8203d5087581ecd14e 100644 (file)
@@ -530,12 +530,15 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
                if (fsys_mode(current, &regs)) {
                        extern char __kernel_syscall_via_break[];
                        /*
-                        * Got a trap in fsys-mode: Taken Branch Trap and Single Step trap
-                        * need special handling; Debug trap is not supposed to happen.
+                        * Got a trap in fsys-mode: Taken Branch Trap
+                        * and Single Step trap need special handling;
+                        * Debug trap is ignored (we disable it here
+                        * and re-enable it in the lower-privilege trap).
                         */
                        if (unlikely(vector == 29)) {
-                               die("Got debug trap in fsys-mode---not supposed to happen!",
-                                   &regs, 0);
+                               set_thread_flag(TIF_DB_DISABLED);
+                               ia64_psr(&regs)->db = 0;
+                               ia64_psr(&regs)->lp = 1;
                                return;
                        }
                        /* re-do the system call via break 0x100000: */
@@ -589,10 +592,19 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
              case 34:
                if (isr & 0x2) {
                        /* Lower-Privilege Transfer Trap */
+
+                       /* If we disabled debug traps during an fsyscall,
+                        * re-enable them here.
+                        */
+                       if (test_thread_flag(TIF_DB_DISABLED)) {
+                               clear_thread_flag(TIF_DB_DISABLED);
+                               ia64_psr(&regs)->db = 1;
+                       }
+
                        /*
-                        * Just clear PSR.lp and then return immediately: all the
-                        * interesting work (e.g., signal delivery is done in the kernel
-                        * exit path).
+                        * Just clear PSR.lp and then return immediately:
+                        * all the interesting work (e.g., signal delivery)
+                        * is done in the kernel exit path.
                         */
                        ia64_psr(&regs)->lp = 0;
                        return;
index 41105d45442366940006aac1945c425fa93ec8bf..6a4eec9113e8383efd029253a9a038637d6819d0 100644 (file)
@@ -90,7 +90,7 @@ ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
 {
        static DEFINE_SPINLOCK(ptcg_lock);
 
-       if (mm != current->active_mm) {
+       if (mm != current->active_mm || !current->mm) {
                flush_tlb_all();
                return;
        }
index 71c2b271b4c687daabd4e886023fe85dacfeeb94..4d417c301201c4a5364a6776d4d965fd1a397571 100644 (file)
 #define IIO_NUM_ITTES   7
 #define HUB_NUM_BIG_WINDOW      (IIO_NUM_ITTES - 1)
 
-struct sn_flush_device_list {
+/* This struct is shared between the PROM and the kernel.
+ * Changes to this struct will require corresponding changes to the kernel.
+ */
+struct sn_flush_device_common {
        int sfdl_bus;
        int sfdl_slot;
        int sfdl_pin;
-       struct bar_list {
+       struct common_bar_list {
                unsigned long start;
                unsigned long end;
        } sfdl_bar_list[6];
@@ -40,14 +43,19 @@ struct sn_flush_device_list {
        uint32_t sfdl_persistent_busnum;
        uint32_t sfdl_persistent_segment;
        struct pcibus_info *sfdl_pcibus_info;
+};
+
+/* This struct is kernel only and is not used by the PROM */
+struct sn_flush_device_kernel {
        spinlock_t sfdl_flush_lock;
+       struct sn_flush_device_common *common;
 };
 
 /*
- * **widget_p - Used as an array[wid_num][device] of sn_flush_device_list.
+ * **widget_p - Used as an array[wid_num][device] of sn_flush_device_kernel.
  */
 struct sn_flush_nasid_entry  {
-       struct sn_flush_device_list **widget_p; /* Used as a array of wid_num */
+       struct sn_flush_device_kernel **widget_p; // Used as an array of wid_num
        uint64_t iio_itte[8];
 };
 
index fcbc748ae4337e7497eafd8c328ef6a4fe376051..f1ec1370b3e37afc87fe9237763b4adc62054e9e 100644 (file)
@@ -33,7 +33,7 @@ void bte_error_handler(unsigned long);
  * Wait until all BTE related CRBs are completed
  * and then reset the interfaces.
  */
-void shub1_bte_error_handler(unsigned long _nodepda)
+int shub1_bte_error_handler(unsigned long _nodepda)
 {
        struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
        struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
@@ -53,7 +53,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
            (err_nodepda->bte_if[1].bh_error == BTE_SUCCESS)) {
                BTE_PRINTK(("eh:%p:%d Nothing to do.\n", err_nodepda,
                            smp_processor_id()));
-               return;
+               return 1;
        }
 
        /* Determine information about our hub */
@@ -81,7 +81,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
                mod_timer(recovery_timer, HZ * 5);
                BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
                            smp_processor_id()));
-               return;
+               return 1;
        }
        if (icmr.ii_icmr_fld_s.i_crb_vld != 0) {
 
@@ -99,7 +99,7 @@ void shub1_bte_error_handler(unsigned long _nodepda)
                                BTE_PRINTK(("eh:%p:%d Valid %d, Giving up\n",
                                            err_nodepda, smp_processor_id(),
                                            i));
-                               return;
+                               return 1;
                        }
                }
        }
@@ -124,6 +124,42 @@ void shub1_bte_error_handler(unsigned long _nodepda)
        REMOTE_HUB_S(nasid, IIO_IBCR, ibcr.ii_ibcr_regval);
 
        del_timer(recovery_timer);
+       return 0;
+}
+
+/*
+ * Wait until all BTE related CRBs are completed
+ * and then reset the interfaces.
+ */
+int shub2_bte_error_handler(unsigned long _nodepda)
+{
+       struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
+       struct timer_list *recovery_timer = &err_nodepda->bte_recovery_timer;
+       struct bteinfo_s *bte;
+       nasid_t nasid;
+       u64 status;
+       int i;
+
+       nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
+
+       /*
+        * Verify that all the BTEs are complete
+        */
+       for (i = 0; i < BTES_PER_NODE; i++) {
+               bte = &err_nodepda->bte_if[i];
+               status = BTE_LNSTAT_LOAD(bte);
+               if ((status & IBLS_ERROR) || !(status & IBLS_BUSY))
+                       continue;
+               mod_timer(recovery_timer, HZ * 5);
+               BTE_PRINTK(("eh:%p:%d Marked Giving up\n", err_nodepda,
+                           smp_processor_id()));
+               return 1;
+       }
+       if (ia64_sn_bte_recovery(nasid))
+               panic("bte_error_handler(): Fatal BTE Error");
+
+       del_timer(recovery_timer);
+       return 0;
 }
 
 /*
@@ -135,7 +171,6 @@ void bte_error_handler(unsigned long _nodepda)
        struct nodepda_s *err_nodepda = (struct nodepda_s *)_nodepda;
        spinlock_t *recovery_lock = &err_nodepda->bte_recovery_lock;
        int i;
-       nasid_t nasid;
        unsigned long irq_flags;
        volatile u64 *notify;
        bte_result_t bh_error;
@@ -160,12 +195,15 @@ void bte_error_handler(unsigned long _nodepda)
        }
 
        if (is_shub1()) {
-               shub1_bte_error_handler(_nodepda);
+               if (shub1_bte_error_handler(_nodepda)) {
+                       spin_unlock_irqrestore(recovery_lock, irq_flags);
+                       return;
+               }
        } else {
-               nasid = cnodeid_to_nasid(err_nodepda->bte_if[0].bte_cnode);
-
-               if (ia64_sn_bte_recovery(nasid))
-                       panic("bte_error_handler(): Fatal BTE Error");
+               if (shub2_bte_error_handler(_nodepda)) {
+                       spin_unlock_irqrestore(recovery_lock, irq_flags);
+                       return;
+               }
        }
 
        for (i = 0; i < BTES_PER_NODE; i++) {
index 5c5eb01c50f02b53f97513eda0e1e65e3daa2c00..56ab6bae00ee2184c0faecdde66956879e5d66e9 100644 (file)
@@ -32,13 +32,14 @@ static irqreturn_t hub_eint_handler(int irq, void *arg, struct pt_regs *ep)
        ret_stuff.v0 = 0;
        hubdev_info = (struct hubdev_info *)arg;
        nasid = hubdev_info->hdi_nasid;
-       SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
+
+       if (is_shub1()) {
+               SAL_CALL_NOLOCK(ret_stuff, SN_SAL_HUB_ERROR_INTERRUPT,
                        (u64) nasid, 0, 0, 0, 0, 0, 0);
 
-       if ((int)ret_stuff.v0)
-               panic("hubii_eint_handler(): Fatal TIO Error");
+               if ((int)ret_stuff.v0)
+                       panic("hubii_eint_handler(): Fatal TIO Error");
 
-       if (is_shub1()) {
                if (!(nasid & 1)) /* Not a TIO, handle CRB errors */
                        (void)hubiio_crb_error_handler(hubdev_info);
        } else 
index 318087e35b66c2ad2cef6114469d4e873d5fe6b1..258d9d7aff98f759d0af721756310d76b4063199 100644 (file)
@@ -76,11 +76,12 @@ static struct sn_pcibus_provider sn_pci_default_provider = {
 };
 
 /*
- * Retrieve the DMA Flush List given nasid.  This list is needed 
- * to implement the WAR - Flush DMA data on PIO Reads.
+ * Retrieve the DMA Flush List given nasid, widget, and device.
+ * This list is needed to implement the WAR - Flush DMA data on PIO Reads.
  */
-static inline uint64_t
-sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
+static inline u64
+sal_get_device_dmaflush_list(u64 nasid, u64 widget_num, u64 device_num,
+                            u64 address)
 {
 
        struct ia64_sal_retval ret_stuff;
@@ -88,17 +89,17 @@ sal_get_widget_dmaflush_list(u64 nasid, u64 widget_num, u64 address)
        ret_stuff.v0 = 0;
 
        SAL_CALL_NOLOCK(ret_stuff,
-                       (u64) SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST,
-                       (u64) nasid, (u64) widget_num, (u64) address, 0, 0, 0,
-                       0);
-       return ret_stuff.v0;
+                       (u64) SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST,
+                       (u64) nasid, (u64) widget_num,
+                       (u64) device_num, (u64) address, 0, 0, 0);
+       return ret_stuff.status;
 
 }
 
 /*
  * Retrieve the hub device info structure for the given nasid.
  */
-static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
+static inline u64 sal_get_hubdev_info(u64 handle, u64 address)
 {
 
        struct ia64_sal_retval ret_stuff;
@@ -114,7 +115,7 @@ static inline uint64_t sal_get_hubdev_info(u64 handle, u64 address)
 /*
  * Retrieve the pci bus information given the bus number.
  */
-static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
+static inline u64 sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 {
 
        struct ia64_sal_retval ret_stuff;
@@ -130,7 +131,7 @@ static inline uint64_t sal_get_pcibus_info(u64 segment, u64 busnum, u64 address)
 /*
  * Retrieve the pci device information given the bus and device|function number.
  */
-static inline uint64_t
+static inline u64
 sal_get_pcidev_info(u64 segment, u64 bus_number, u64 devfn, u64 pci_dev, 
                        u64 sn_irq_info)
 {
@@ -170,12 +171,12 @@ sn_pcidev_info_get(struct pci_dev *dev)
  */
 static void sn_fixup_ionodes(void)
 {
-
-       struct sn_flush_device_list *sn_flush_device_list;
+       struct sn_flush_device_kernel *sn_flush_device_kernel;
+       struct sn_flush_device_kernel *dev_entry;
        struct hubdev_info *hubdev;
-       uint64_t status;
-       uint64_t nasid;
-       int i, widget;
+       u64 status;
+       u64 nasid;
+       int i, widget, device;
 
        /*
         * Get SGI Specific HUB chipset information.
@@ -186,7 +187,7 @@ static void sn_fixup_ionodes(void)
                nasid = cnodeid_to_nasid(i);
                hubdev->max_segment_number = 0xffffffff;
                hubdev->max_pcibus_number = 0xff;
-               status = sal_get_hubdev_info(nasid, (uint64_t) __pa(hubdev));
+               status = sal_get_hubdev_info(nasid, (u64) __pa(hubdev));
                if (status)
                        continue;
 
@@ -213,38 +214,49 @@ static void sn_fixup_ionodes(void)
 
                hubdev->hdi_flush_nasid_list.widget_p =
                    kmalloc((HUB_WIDGET_ID_MAX + 1) *
-                           sizeof(struct sn_flush_device_list *), GFP_KERNEL);
-
+                           sizeof(struct sn_flush_device_kernel *),
+                           GFP_KERNEL);
                memset(hubdev->hdi_flush_nasid_list.widget_p, 0x0,
                       (HUB_WIDGET_ID_MAX + 1) *
-                      sizeof(struct sn_flush_device_list *));
+                      sizeof(struct sn_flush_device_kernel *));
 
                for (widget = 0; widget <= HUB_WIDGET_ID_MAX; widget++) {
-                       sn_flush_device_list = kmalloc(DEV_PER_WIDGET *
-                                                      sizeof(struct
-                                                             sn_flush_device_list),
-                                                      GFP_KERNEL);
-                       memset(sn_flush_device_list, 0x0,
+                       sn_flush_device_kernel = kmalloc(DEV_PER_WIDGET *
+                                                        sizeof(struct
+                                                       sn_flush_device_kernel),
+                                                       GFP_KERNEL);
+                       if (!sn_flush_device_kernel)
+                               BUG();
+                       memset(sn_flush_device_kernel, 0x0,
                               DEV_PER_WIDGET *
-                              sizeof(struct sn_flush_device_list));
-
-                       status =
-                           sal_get_widget_dmaflush_list(nasid, widget,
-                                                        (uint64_t)
-                                                        __pa
-                                                        (sn_flush_device_list));
-                       if (status) {
-                               kfree(sn_flush_device_list);
-                               continue;
+                              sizeof(struct sn_flush_device_kernel));
+
+                       dev_entry = sn_flush_device_kernel;
+                       for (device = 0; device < DEV_PER_WIDGET;
+                            device++,dev_entry++) {
+                               dev_entry->common = kmalloc(sizeof(struct
+                                                       sn_flush_device_common),
+                                                           GFP_KERNEL);
+                               if (!dev_entry->common)
+                                       BUG();
+                               memset(dev_entry->common, 0x0, sizeof(struct
+                                                      sn_flush_device_common));
+
+                               status = sal_get_device_dmaflush_list(nasid,
+                                                                       widget,
+                                                                       device,
+                                                     (u64)(dev_entry->common));
+                               if (status)
+                                       BUG();
+
+                               spin_lock_init(&dev_entry->sfdl_flush_lock);
                        }
 
-                       spin_lock_init(&sn_flush_device_list->sfdl_flush_lock);
-                       hubdev->hdi_flush_nasid_list.widget_p[widget] =
-                           sn_flush_device_list;
-               }
-
+                       if (sn_flush_device_kernel)
+                               hubdev->hdi_flush_nasid_list.widget_p[widget] =
+                                                      sn_flush_device_kernel;
+               }
        }
-
 }
 
 /*
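The allocation pattern introduced in sn_fixup_ionodes() above pairs each kmalloc() with a memset() to zero the new sn_flush_device_kernel array and its sn_flush_device_common entries, and treats allocation or SAL failure as fatal via BUG(). A minimal sketch of the same per-widget, per-device setup, assuming the structures from this patch; kzalloc() (available since 2.6.14) simply folds the kmalloc()/memset() pairs together:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    /* sketch only: one zeroed entry per device behind this widget */
    sn_flush_device_kernel = kzalloc(DEV_PER_WIDGET *
                                     sizeof(struct sn_flush_device_kernel),
                                     GFP_KERNEL);
    if (!sn_flush_device_kernel)
            BUG();

    for (device = 0; device < DEV_PER_WIDGET; device++) {
            dev_entry = &sn_flush_device_kernel[device];
            dev_entry->common = kzalloc(sizeof(struct sn_flush_device_common),
                                        GFP_KERNEL);
            if (!dev_entry->common)
                    BUG();
            spin_lock_init(&dev_entry->sfdl_flush_lock);
    }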
index 493fb3f38dc37af5a7bc919cea2a98a9e06ed326..6a7939b16a1cc1045803e2ad56c2755ce4082616 100644 (file)
@@ -77,12 +77,6 @@ static void tiocx_bus_release(struct device *dev)
        kfree(to_cx_dev(dev));
 }
 
-struct bus_type tiocx_bus_type = {
-       .name = "tiocx",
-       .match = tiocx_match,
-       .uevent = tiocx_uevent,
-};
-
 /**
  * cx_device_match - Find cx_device in the id table.
  * @ids: id table from driver
@@ -149,6 +143,14 @@ static int cx_driver_remove(struct device *dev)
        return 0;
 }
 
+struct bus_type tiocx_bus_type = {
+       .name = "tiocx",
+       .match = tiocx_match,
+       .uevent = tiocx_uevent,
+       .probe = cx_device_probe,
+       .remove = cx_driver_remove,
+};
+
 /**
  * cx_driver_register - Register the driver.
  * @cx_driver: driver table (cx_drv struct) from driver
@@ -162,8 +164,6 @@ int cx_driver_register(struct cx_drv *cx_driver)
 {
        cx_driver->driver.name = cx_driver->name;
        cx_driver->driver.bus = &tiocx_bus_type;
-       cx_driver->driver.probe = cx_device_probe;
-       cx_driver->driver.remove = cx_driver_remove;
 
        return driver_register(&cx_driver->driver);
 }
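Moving .probe and .remove from the individual device_driver into tiocx_bus_type means every driver registered on the tiocx bus now shares the same probe/remove entry points, so cx_driver_register() no longer has to patch them into each driver. A minimal sketch of a caller under that convention, with a hypothetical driver name and assuming the cx_drv structure used in this file:

    /* sketch: with probe/remove on the bus, a cx driver no longer sets them */
    static struct cx_drv my_cx_driver = {
            .name = "my_cx_driver",         /* hypothetical */
    };

    static int __init my_cx_init(void)
    {
            /* cx_driver_register() fills in .driver.name and .driver.bus */
            return cx_driver_register(&my_cx_driver);
    }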
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
deleted file mode 100644 (file)
index 5483a9f..0000000
+++ /dev/null
@@ -1,1273 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
- */
-
-
-/*
- * Cross Partition Communication (XPC) structures and macros.
- */
-
-#ifndef _IA64_SN_KERNEL_XPC_H
-#define _IA64_SN_KERNEL_XPC_H
-
-
-#include <linux/config.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/device.h>
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/sn/bte.h>
-#include <asm/sn/clksupport.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/mspec.h>
-#include <asm/sn/shub_mmr.h>
-#include <asm/sn/xp.h>
-
-
-/*
- * XPC Version numbers consist of a major and minor number. XPC can always
- * talk to versions with same major #, and never talk to versions with a
- * different major #.
- */
-#define _XPC_VERSION(_maj, _min)       (((_maj) << 4) | ((_min) & 0xf))
-#define XPC_VERSION_MAJOR(_v)          ((_v) >> 4)
-#define XPC_VERSION_MINOR(_v)          ((_v) & 0xf)
-
-
-/*
- * The next macros define word or bit representations for given
- * C-brick nasid in either the SAL provided bit array representing
- * nasids in the partition/machine or the AMO_t array used for
- * inter-partition initiation communications.
- *
- * For SN2 machines, C-Bricks are always even numbered NASIDs.  As
- * such, some space will be saved by insisting that nasid information
- * passed from SAL always be packed for C-Bricks and the
- * cross-partition interrupts use the same packing scheme.
- */
-#define XPC_NASID_W_INDEX(_n)  (((_n) / 64) / 2)
-#define XPC_NASID_B_INDEX(_n)  (((_n) / 2) & (64 - 1))
-#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
-                                   (1UL << XPC_NASID_B_INDEX(_n)))
-#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
-
-#define XPC_HB_DEFAULT_INTERVAL                5       /* incr HB every x secs */
-#define XPC_HB_CHECK_DEFAULT_INTERVAL  20      /* check HB every x secs */
-
-/* define the process name of HB checker and the CPU it is pinned to */
-#define XPC_HB_CHECK_THREAD_NAME       "xpc_hb"
-#define XPC_HB_CHECK_CPU               0
-
-/* define the process name of the discovery thread */
-#define XPC_DISCOVERY_THREAD_NAME      "xpc_discovery"
-
-
-/*
- * the reserved page
- *
- *   SAL reserves one page of memory per partition for XPC. Though a full page
- *   in length (16384 bytes), its starting address is not page aligned, but it
- *   is cacheline aligned. The reserved page consists of the following:
- *
- *   reserved page header
- *
- *     The first cacheline of the reserved page contains the header
- *     (struct xpc_rsvd_page). Before SAL initialization has completed,
- *     SAL has set up the following fields of the reserved page header:
- *     SAL_signature, SAL_version, partid, and nasids_size. The other
- *     fields are set up by XPC. (xpc_rsvd_page points to the local
- *     partition's reserved page.)
- *
- *   part_nasids mask
- *   mach_nasids mask
- *
- *     SAL also sets up two bitmaps (or masks), one that reflects the actual
- *     nasids in this partition (part_nasids), and the other that reflects
- *     the actual nasids in the entire machine (mach_nasids). We're only
- *     interested in the even numbered nasids (which contain the processors
- *     and/or memory), so we only need half as many bits to represent the
- *     nasids. The part_nasids mask is located starting at the first cacheline
- *     following the reserved page header. The mach_nasids mask follows right
- *     after the part_nasids mask. The size in bytes of each mask is reflected
- *     by the reserved page header field 'nasids_size'. (Local partition's
- *     mask pointers are xpc_part_nasids and xpc_mach_nasids.)
- *
- *   vars
- *   vars part
- *
- *     Immediately following the mach_nasids mask are the XPC variables
- *     required by other partitions. First are those that are generic to all
- *     partitions (vars), followed on the next available cacheline by those
- *     which are partition specific (vars part). These are setup by XPC.
- *     (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
- *
- * Note: Until vars_pa is set, the partition XPC code has not been initialized.
- */
-struct xpc_rsvd_page {
-       u64 SAL_signature;      /* SAL: unique signature */
-       u64 SAL_version;        /* SAL: version */
-       u8 partid;              /* SAL: partition ID */
-       u8 version;
-       u8 pad1[6];             /* align to next u64 in cacheline */
-       volatile u64 vars_pa;
-       struct timespec stamp;  /* time when reserved page was setup by XPC */
-       u64 pad2[9];            /* align to last u64 in cacheline */
-       u64 nasids_size;        /* SAL: size of each nasid mask in bytes */
-};
-
-#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
-
-#define XPC_SUPPORTS_RP_STAMP(_version) \
-                       (_version >= _XPC_VERSION(1,1))
-
-/*
- * compare stamps - the return value is:
- *
- *     < 0,    if stamp1 < stamp2
- *     = 0,    if stamp1 == stamp2
- *     > 0,    if stamp1 > stamp2
- */
-static inline int
-xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
-{
-       int ret;
-
-
-       if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
-               ret = stamp1->tv_nsec - stamp2->tv_nsec;
-       }
-       return ret;
-}
-
-
-/*
- * Define the structures by which XPC variables can be exported to other
- * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
- */
-
-/*
- * The following structure describes the partition generic variables
- * needed by other partitions in order to properly initialize.
- *
- * struct xpc_vars version number also applies to struct xpc_vars_part.
- * Changes to either structure and/or related functionality should be
- * reflected by incrementing either the major or minor version numbers
- * of struct xpc_vars.
- */
-struct xpc_vars {
-       u8 version;
-       u64 heartbeat;
-       u64 heartbeating_to_mask;
-       u64 heartbeat_offline;  /* if 0, heartbeat should be changing */
-       int act_nasid;
-       int act_phys_cpuid;
-       u64 vars_part_pa;
-       u64 amos_page_pa;       /* paddr of page of AMOs from MSPEC driver */
-       AMO_t *amos_page;       /* vaddr of page of AMOs from MSPEC driver */
-};
-
-#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
-
-#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
-                       (_version >= _XPC_VERSION(3,1))
-
-
-static inline int
-xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
-{
-       return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
-}
-
-static inline void
-xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
-{
-       u64 old_mask, new_mask;
-
-       do {
-               old_mask = vars->heartbeating_to_mask;
-               new_mask = (old_mask | (1UL << partid));
-       } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-                                                       old_mask);
-}
-
-static inline void
-xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
-{
-       u64 old_mask, new_mask;
-
-       do {
-               old_mask = vars->heartbeating_to_mask;
-               new_mask = (old_mask & ~(1UL << partid));
-       } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
-                                                       old_mask);
-}
-
-
-/*
- * The AMOs page consists of a number of AMO variables which are divided into
- * four groups. The first two groups are used to identify an IRQ's sender.
- * These two groups consist of 64 and 128 AMO variables respectively. The last
- * two groups, consisting of just one AMO variable each, are used to identify
- * the remote partitions that are currently engaged (from the viewpoint of
- * the XPC running on the remote partition).
- */
-#define XPC_NOTIFY_IRQ_AMOS       0
-#define XPC_ACTIVATE_IRQ_AMOS     (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
-#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
-#define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
-
-
-/*
- * The following structure describes the per partition specific variables.
- *
- * An array of these structures, one per partition, will be defined. As a
- * partition becomes active XPC will copy the array entry corresponding to
- * itself from that partition. It is desirable that the size of this
- * structure evenly divide into a cacheline, such that none of the entries
- * in this array crosses a cacheline boundary. As it is now, each entry
- * occupies half a cacheline.
- */
-struct xpc_vars_part {
-       volatile u64 magic;
-
-       u64 openclose_args_pa;  /* physical address of open and close args */
-       u64 GPs_pa;             /* physical address of Get/Put values */
-
-       u64 IPI_amo_pa;         /* physical address of IPI AMO_t structure */
-       int IPI_nasid;          /* nasid of where to send IPIs */
-       int IPI_phys_cpuid;     /* physical CPU ID of where to send IPIs */
-
-       u8 nchannels;           /* #of defined channels supported */
-
-       u8 reserved[23];        /* pad to a full 64 bytes */
-};
-
-/*
- * The vars_part MAGIC numbers play a part in the first contact protocol.
- *
- * MAGIC1 indicates that the per partition specific variables for a remote
- * partition have been initialized by this partition.
- *
- * MAGIC2 indicates that this partition has pulled the remote partition's
- * per partition variables that pertain to this partition.
- */
-#define XPC_VP_MAGIC1  0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
-#define XPC_VP_MAGIC2  0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
-
-
-/* the reserved page sizes and offsets */
-
-#define XPC_RP_HEADER_SIZE     L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
-#define XPC_RP_VARS_SIZE       L1_CACHE_ALIGN(sizeof(struct xpc_vars))
-
-#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
-#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS(_rp)       ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
-#define XPC_RP_VARS_PART(_rp)  (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(rp) + XPC_RP_VARS_SIZE)
-
-
-/*
- * Functions registered by add_timer() or called by kernel_thread() only
- * allow for a single 64-bit argument. The following macros can be used to
- * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
- * the passed argument.
- */
-#define XPC_PACK_ARGS(_arg1, _arg2) \
-                       ((((u64) _arg1) & 0xffffffff) | \
-                       ((((u64) _arg2) & 0xffffffff) << 32))
-
-#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
-#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
-
-
-
-/*
- * Define a Get/Put value pair (pointers) used with a message queue.
- */
-struct xpc_gp {
-       volatile s64 get;       /* Get value */
-       volatile s64 put;       /* Put value */
-};
-
-#define XPC_GP_SIZE \
-               L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
-
-
-
-/*
- * Define a structure that contains arguments associated with opening and
- * closing a channel.
- */
-struct xpc_openclose_args {
-       u16 reason;             /* reason why channel is closing */
-       u16 msg_size;           /* sizeof each message entry */
-       u16 remote_nentries;    /* #of message entries in remote msg queue */
-       u16 local_nentries;     /* #of message entries in local msg queue */
-       u64 local_msgqueue_pa;  /* physical address of local message queue */
-};
-
-#define XPC_OPENCLOSE_ARGS_SIZE \
-             L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
-
-
-
-/* struct xpc_msg flags */
-
-#define        XPC_M_DONE              0x01    /* msg has been received/consumed */
-#define        XPC_M_READY             0x02    /* msg is ready to be sent */
-#define        XPC_M_INTERRUPT         0x04    /* send interrupt when msg consumed */
-
-
-#define XPC_MSG_ADDRESS(_payload) \
-               ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
-
-
-
-/*
- * Defines notify entry.
- *
- * This is used to notify a message's sender that their message was received
- * and consumed by the intended recipient.
- */
-struct xpc_notify {
-       struct semaphore sema;          /* notify semaphore */
-       volatile u8 type;                       /* type of notification */
-
-       /* the following two fields are only used if type == XPC_N_CALL */
-       xpc_notify_func func;           /* user's notify function */
-       void *key;                      /* pointer to user's key */
-};
-
-/* struct xpc_notify type of notification */
-
-#define        XPC_N_CALL              0x01    /* notify function provided by user */
-
-
-
-/*
- * Define the structure that manages all the stuff required by a channel. In
- * particular, they are used to manage the messages sent across the channel.
- *
- * This structure is private to a partition, and is NOT shared across the
- * partition boundary.
- *
- * There is an array of these structures for each remote partition. It is
- * allocated at the time a partition becomes active. The array contains one
- * of these structures for each potential channel connection to that partition.
- *
- * Each of these structures manages two message queues (circular buffers).
- * They are allocated at the time a channel connection is made. One of
- * these message queues (local_msgqueue) holds the locally created messages
- * that are destined for the remote partition. The other of these message
- * queues (remote_msgqueue) is a locally cached copy of the remote partition's
- * own local_msgqueue.
- *
- * The following is a description of the Get/Put pointers used to manage these
- * two message queues. Consider the local_msgqueue to be on one partition
- * and the remote_msgqueue to be its cached copy on another partition. A
- * description of what each of the lettered areas contains is included.
- *
- *
- *                     local_msgqueue      remote_msgqueue
- *
- *                        |/////////|      |/////////|
- *    w_remote_GP.get --> +---------+      |/////////|
- *                        |    F    |      |/////////|
- *     remote_GP.get  --> +---------+      +---------+ <-- local_GP->get
- *                        |         |      |         |
- *                        |         |      |    E    |
- *                        |         |      |         |
- *                        |         |      +---------+ <-- w_local_GP.get
- *                        |    B    |      |/////////|
- *                        |         |      |////D////|
- *                        |         |      |/////////|
- *                        |         |      +---------+ <-- w_remote_GP.put
- *                        |         |      |////C////|
- *      local_GP->put --> +---------+      +---------+ <-- remote_GP.put
- *                        |         |      |/////////|
- *                        |    A    |      |/////////|
- *                        |         |      |/////////|
- *     w_local_GP.put --> +---------+      |/////////|
- *                        |/////////|      |/////////|
- *
- *
- *         ( remote_GP.[get|put] are cached copies of the remote
- *           partition's local_GP->[get|put], and thus their values can
- *           lag behind their counterparts on the remote partition. )
- *
- *
- *  A - Messages that have been allocated, but have not yet been sent to the
- *     remote partition.
- *
- *  B - Messages that have been sent, but have not yet been acknowledged by the
- *      remote partition as having been received.
- *
- *  C - Area that needs to be prepared for the copying of sent messages, by
- *     the clearing of the message flags of any previously received messages.
- *
- *  D - Area into which sent messages are to be copied from the remote
- *     partition's local_msgqueue and then delivered to their intended
- *     recipients. [ To allow for a multi-message copy, another pointer
- *     (next_msg_to_pull) has been added to keep track of the next message
- *     number needing to be copied (pulled). It chases after w_remote_GP.put.
- *     Any messages lying between w_local_GP.get and next_msg_to_pull have
- *     been copied and are ready to be delivered. ]
- *
- *  E - Messages that have been copied and delivered, but have not yet been
- *     acknowledged by the recipient as having been received.
- *
- *  F - Messages that have been acknowledged, but XPC has not yet notified the
- *     sender that the message was received by its intended recipient.
- *     This is also an area that needs to be prepared for the allocating of
- *     new messages, by the clearing of the message flags of the acknowledged
- *     messages.
- */
-struct xpc_channel {
-       partid_t partid;                /* ID of remote partition connected */
-       spinlock_t lock;                /* lock for updating this structure */
-       u32 flags;                      /* general flags */
-
-       enum xpc_retval reason;         /* reason why channel is disconnect'g */
-       int reason_line;                /* line# disconnect initiated from */
-
-       u16 number;                     /* channel # */
-
-       u16 msg_size;                   /* sizeof each msg entry */
-       u16 local_nentries;             /* #of msg entries in local msg queue */
-       u16 remote_nentries;            /* #of msg entries in remote msg queue*/
-
-       void *local_msgqueue_base;      /* base address of kmalloc'd space */
-       struct xpc_msg *local_msgqueue; /* local message queue */
-       void *remote_msgqueue_base;     /* base address of kmalloc'd space */
-       struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
-                                       /* local message queue */
-       u64 remote_msgqueue_pa;         /* phys addr of remote partition's */
-                                       /* local message queue */
-
-       atomic_t references;            /* #of external references to queues */
-
-       atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
-       wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
-
-       u8 delayed_IPI_flags;           /* IPI flags received, but delayed */
-                                       /* action until channel disconnected */
-
-       /* queue of msg senders who want to be notified when msg received */
-
-       atomic_t n_to_notify;           /* #of msg senders to notify */
-       struct xpc_notify *notify_queue;/* notify queue for messages sent */
-
-       xpc_channel_func func;          /* user's channel function */
-       void *key;                      /* pointer to user's key */
-
-       struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
-       struct semaphore wdisconnect_sema; /* wait for channel disconnect */
-
-       struct xpc_openclose_args *local_openclose_args; /* args passed on */
-                                       /* opening or closing of channel */
-
-       /* various flavors of local and remote Get/Put values */
-
-       struct xpc_gp *local_GP;        /* local Get/Put values */
-       struct xpc_gp remote_GP;        /* remote Get/Put values */
-       struct xpc_gp w_local_GP;       /* working local Get/Put values */
-       struct xpc_gp w_remote_GP;      /* working remote Get/Put values */
-       s64 next_msg_to_pull;           /* Put value of next msg to pull */
-
-       /* kthread management related fields */
-
-// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
-// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
-// >>> dependent on activity over the last interval of time
-       atomic_t kthreads_assigned;     /* #of kthreads assigned to channel */
-       u32 kthreads_assigned_limit;    /* limit on #of kthreads assigned */
-       atomic_t kthreads_idle;         /* #of kthreads idle waiting for work */
-       u32 kthreads_idle_limit;        /* limit on #of kthreads idle */
-       atomic_t kthreads_active;       /* #of kthreads actively working */
-       // >>> following field is temporary
-       u32 kthreads_created;           /* total #of kthreads created */
-
-       wait_queue_head_t idle_wq;      /* idle kthread wait queue */
-
-} ____cacheline_aligned;
-
-
-/* struct xpc_channel flags */
-
-#define        XPC_C_WASCONNECTED      0x00000001 /* channel was connected */
-
-#define        XPC_C_ROPENREPLY        0x00000002 /* remote open channel reply */
-#define        XPC_C_OPENREPLY         0x00000004 /* local open channel reply */
-#define        XPC_C_ROPENREQUEST      0x00000008 /* remote open channel request */
-#define        XPC_C_OPENREQUEST       0x00000010 /* local open channel request */
-
-#define        XPC_C_SETUP             0x00000020 /* channel's msgqueues are alloc'd */
-#define        XPC_C_CONNECTCALLOUT    0x00000040 /* channel connected callout made */
-#define        XPC_C_CONNECTED         0x00000080 /* local channel is connected */
-#define        XPC_C_CONNECTING        0x00000100 /* channel is being connected */
-
-#define        XPC_C_RCLOSEREPLY       0x00000200 /* remote close channel reply */
-#define        XPC_C_CLOSEREPLY        0x00000400 /* local close channel reply */
-#define        XPC_C_RCLOSEREQUEST     0x00000800 /* remote close channel request */
-#define        XPC_C_CLOSEREQUEST      0x00001000 /* local close channel request */
-
-#define        XPC_C_DISCONNECTED      0x00002000 /* channel is disconnected */
-#define        XPC_C_DISCONNECTING     0x00004000 /* channel is being disconnected */
-#define        XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */
-#define        XPC_C_WDISCONNECT       0x00010000 /* waiting for channel disconnect */
-
-
-
-/*
- * Manages channels on a partition basis. There is one of these structures
- * for each partition (a partition will never utilize the structure that
- * represents itself).
- */
-struct xpc_partition {
-
-       /* XPC HB infrastructure */
-
-       u8 remote_rp_version;           /* version# of partition's rsvd pg */
-       struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
-       u64 remote_rp_pa;               /* phys addr of partition's rsvd pg */
-       u64 remote_vars_pa;             /* phys addr of partition's vars */
-       u64 remote_vars_part_pa;        /* phys addr of partition's vars part */
-       u64 last_heartbeat;             /* HB at last read */
-       u64 remote_amos_page_pa;        /* phys addr of partition's amos page */
-       int remote_act_nasid;           /* active part's act/deact nasid */
-       int remote_act_phys_cpuid;      /* active part's act/deact phys cpuid */
-       u32 act_IRQ_rcvd;               /* IRQs since activation */
-       spinlock_t act_lock;            /* protect updating of act_state */
-       u8 act_state;                   /* from XPC HB viewpoint */
-       u8 remote_vars_version;         /* version# of partition's vars */
-       enum xpc_retval reason;         /* reason partition is deactivating */
-       int reason_line;                /* line# deactivation initiated from */
-       int reactivate_nasid;           /* nasid in partition to reactivate */
-
-       unsigned long disengage_request_timeout; /* timeout in jiffies */
-       struct timer_list disengage_request_timer;
-
-
-       /* XPC infrastructure referencing and teardown control */
-
-       volatile u8 setup_state;        /* infrastructure setup state */
-       wait_queue_head_t teardown_wq;  /* kthread waiting to teardown infra */
-       atomic_t references;            /* #of references to infrastructure */
-
-
-       /*
-        * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
-        * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
-        * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
-        * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
-        */
-
-
-       u8 nchannels;              /* #of defined channels supported */
-       atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
-       atomic_t nchannels_engaged;/* #of channels engaged with remote part */
-       struct xpc_channel *channels;/* array of channel structures */
-
-       void *local_GPs_base;     /* base address of kmalloc'd space */
-       struct xpc_gp *local_GPs; /* local Get/Put values */
-       void *remote_GPs_base;    /* base address of kmalloc'd space */
-       struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
-                                 /* values */
-       u64 remote_GPs_pa;        /* phys address of remote partition's local */
-                                 /* Get/Put values */
-
-
-       /* fields used to pass args when opening or closing a channel */
-
-       void *local_openclose_args_base;  /* base address of kmalloc'd space */
-       struct xpc_openclose_args *local_openclose_args;  /* local's args */
-       void *remote_openclose_args_base; /* base address of kmalloc'd space */
-       struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
-                                         /* args */
-       u64 remote_openclose_args_pa;     /* phys addr of remote's args */
-
-
-       /* IPI sending, receiving and handling related fields */
-
-       int remote_IPI_nasid;       /* nasid of where to send IPIs */
-       int remote_IPI_phys_cpuid;  /* phys CPU ID of where to send IPIs */
-       AMO_t *remote_IPI_amo_va;   /* address of remote IPI AMO_t structure */
-
-       AMO_t *local_IPI_amo_va;    /* address of IPI AMO_t structure */
-       u64 local_IPI_amo;          /* IPI amo flags yet to be handled */
-       char IPI_owner[8];          /* IPI owner's name */
-       struct timer_list dropped_IPI_timer; /* dropped IPI timer */
-
-       spinlock_t IPI_lock;        /* IPI handler lock */
-
-
-       /* channel manager related fields */
-
-       atomic_t channel_mgr_requests;  /* #of requests to activate chan mgr */
-       wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
-
-} ____cacheline_aligned;
-
-
-/* struct xpc_partition act_state values (for XPC HB) */
-
-#define        XPC_P_INACTIVE          0x00    /* partition is not active */
-#define XPC_P_ACTIVATION_REQ   0x01    /* created thread to activate */
-#define XPC_P_ACTIVATING       0x02    /* activation thread started */
-#define XPC_P_ACTIVE           0x03    /* xpc_partition_up() was called */
-#define XPC_P_DEACTIVATING     0x04    /* partition deactivation initiated */
-
-
-#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
-                       xpc_deactivate_partition(__LINE__, (_p), (_reason))
-
-
-/* struct xpc_partition setup_state values */
-
-#define XPC_P_UNSET            0x00    /* infrastructure was never setup */
-#define XPC_P_SETUP            0x01    /* infrastructure is setup */
-#define XPC_P_WTEARDOWN                0x02    /* waiting to teardown infrastructure */
-#define XPC_P_TORNDOWN         0x03    /* infrastructure is torndown */
-
-
-
-/*
- * struct xpc_partition IPI_timer #of seconds to wait before checking for
- * dropped IPIs. These occur whenever an IPI amo write doesn't complete until
- * after the IPI was received.
- */
-#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
-
-
-/* number of seconds to wait for other partitions to disengage */
-#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT        90
-
-/* interval in seconds to print 'waiting disengagement' messages */
-#define XPC_DISENGAGE_PRINTMSG_INTERVAL                10
-
-
-#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
-
-
-
-/* found in xp_main.c */
-extern struct xpc_registration xpc_registrations[];
-
-
-/* found in xpc_main.c */
-extern struct device *xpc_part;
-extern struct device *xpc_chan;
-extern int xpc_disengage_request_timelimit;
-extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
-extern void xpc_dropped_IPI_check(struct xpc_partition *);
-extern void xpc_activate_partition(struct xpc_partition *);
-extern void xpc_activate_kthreads(struct xpc_channel *, int);
-extern void xpc_create_kthreads(struct xpc_channel *, int);
-extern void xpc_disconnect_wait(int);
-
-
-/* found in xpc_partition.c */
-extern int xpc_exiting;
-extern struct xpc_vars *xpc_vars;
-extern struct xpc_rsvd_page *xpc_rsvd_page;
-extern struct xpc_vars_part *xpc_vars_part;
-extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
-extern char xpc_remote_copy_buffer[];
-extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
-extern void xpc_allow_IPI_ops(void);
-extern void xpc_restrict_IPI_ops(void);
-extern int xpc_identify_act_IRQ_sender(void);
-extern int xpc_partition_disengaged(struct xpc_partition *);
-extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
-extern void xpc_mark_partition_inactive(struct xpc_partition *);
-extern void xpc_discovery(void);
-extern void xpc_check_remote_hb(void);
-extern void xpc_deactivate_partition(const int, struct xpc_partition *,
-                                               enum xpc_retval);
-extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
-
-
-/* found in xpc_channel.c */
-extern void xpc_initiate_connect(int);
-extern void xpc_initiate_disconnect(int);
-extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
-extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
-extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
-                                               xpc_notify_func, void *);
-extern void xpc_initiate_received(partid_t, int, void *);
-extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
-extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
-extern void xpc_process_channel_activity(struct xpc_partition *);
-extern void xpc_connected_callout(struct xpc_channel *);
-extern void xpc_deliver_msg(struct xpc_channel *);
-extern void xpc_disconnect_channel(const int, struct xpc_channel *,
-                                       enum xpc_retval, unsigned long *);
-extern void xpc_disconnecting_callout(struct xpc_channel *);
-extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
-extern void xpc_teardown_infrastructure(struct xpc_partition *);
-
-
-
-static inline void
-xpc_wakeup_channel_mgr(struct xpc_partition *part)
-{
-       if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
-               wake_up(&part->channel_mgr_wq);
-       }
-}
-
-
-
-/*
- * These next two inlines are used to keep us from tearing down a channel's
- * msg queues while a thread may be referencing them.
- */
-static inline void
-xpc_msgqueue_ref(struct xpc_channel *ch)
-{
-       atomic_inc(&ch->references);
-}
-
-static inline void
-xpc_msgqueue_deref(struct xpc_channel *ch)
-{
-       s32 refs = atomic_dec_return(&ch->references);
-
-       DBUG_ON(refs < 0);
-       if (refs == 0) {
-               xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
-       }
-}
-
-
-
-#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
-               xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
-
-
-/*
- * These two inlines are used to keep us from tearing down a partition's
- * setup infrastructure while a thread may be referencing it.
- */
-static inline void
-xpc_part_deref(struct xpc_partition *part)
-{
-       s32 refs = atomic_dec_return(&part->references);
-
-
-       DBUG_ON(refs < 0);
-       if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
-               wake_up(&part->teardown_wq);
-       }
-}
-
-static inline int
-xpc_part_ref(struct xpc_partition *part)
-{
-       int setup;
-
-
-       atomic_inc(&part->references);
-       setup = (part->setup_state == XPC_P_SETUP);
-       if (!setup) {
-               xpc_part_deref(part);
-       }
-       return setup;
-}
-
-
-
-/*
- * The following macro is to be used for the setting of the reason and
- * reason_line fields in both the struct xpc_channel and struct xpc_partition
- * structures.
- */
-#define XPC_SET_REASON(_p, _reason, _line) \
-       { \
-               (_p)->reason = _reason; \
-               (_p)->reason_line = _line; \
-       }
-
-
-
-/*
- * This next set of inlines are used to keep track of when a partition is
- * potentially engaged in accessing memory belonging to another partition.
- */
-
-static inline void
-xpc_mark_partition_engaged(struct xpc_partition *part)
-{
-       unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
-
-       local_irq_save(irq_flags);
-
-       /* set bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-                                               (1UL << sn_partition_id));
-       /*
-        * We must always use the nofault function regardless of whether we
-        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-        * didn't, we'd never know that the other partition is down and would
-        * keep sending IPIs and AMOs to it until the heartbeat times out.
-        */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
-
-       local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_mark_partition_disengaged(struct xpc_partition *part)
-{
-       unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
-
-
-       local_irq_save(irq_flags);
-
-       /* clear bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                               ~(1UL << sn_partition_id));
-       /*
-        * We must always use the nofault function regardless of whether we
-        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-        * didn't, we'd never know that the other partition is down and would
-        * keep sending IPIs and AMOs to it until the heartbeat times out.
-        */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
-
-       local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_request_partition_disengage(struct xpc_partition *part)
-{
-       unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-
-       local_irq_save(irq_flags);
-
-       /* set bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
-                                               (1UL << sn_partition_id));
-       /*
-        * We must always use the nofault function regardless of whether we
-        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-        * didn't, we'd never know that the other partition is down and would
-        * keep sending IPIs and AMOs to it until the heartbeat times out.
-        */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
-
-       local_irq_restore(irq_flags);
-}
-
-static inline void
-xpc_cancel_partition_disengage_request(struct xpc_partition *part)
-{
-       unsigned long irq_flags;
-       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
-                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
-
-
-       local_irq_save(irq_flags);
-
-       /* clear bit corresponding to our partid in remote partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                               ~(1UL << sn_partition_id));
-       /*
-        * We must always use the nofault function regardless of whether we
-        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-        * didn't, we'd never know that the other partition is down and would
-        * keep sending IPIs and AMOs to it until the heartbeat times out.
-        */
-       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
-                               variable), xp_nofault_PIOR_target));
-
-       local_irq_restore(irq_flags);
-}
-
-static inline u64
-xpc_partition_engaged(u64 partid_mask)
-{
-       AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
-
-
-       /* return our partition's AMO variable ANDed with partid_mask */
-       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-                                                               partid_mask);
-}
-
-static inline u64
-xpc_partition_disengage_requested(u64 partid_mask)
-{
-       AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
-
-
-       /* return our partition's AMO variable ANDed with partid_mask */
-       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
-                                                               partid_mask);
-}
-
-static inline void
-xpc_clear_partition_engaged(u64 partid_mask)
-{
-       AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
-
-
-       /* clear bit(s) based on partid_mask in our partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                                               ~partid_mask);
-}
-
-static inline void
-xpc_clear_partition_disengage_request(u64 partid_mask)
-{
-       AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
-
-
-       /* clear bit(s) based on partid_mask in our partition's AMO */
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
-                                                               ~partid_mask);
-}
-
-
-
-/*
- * The following set of macros and inlines are used for the sending and
- * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
- * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
- * the other that is associated with channel activity (SGI_XPC_NOTIFY).
- */
-
-static inline u64
-xpc_IPI_receive(AMO_t *amo)
-{
-       return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
-}
-
-
-static inline enum xpc_retval
-xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
-{
-       int ret = 0;
-       unsigned long irq_flags;
-
-
-       local_irq_save(irq_flags);
-
-       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
-       sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
-
-       /*
-        * We must always use the nofault function regardless of whether we
-        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
-        * didn't, we'd never know that the other partition is down and would
-        * keep sending IPIs and AMOs to it until the heartbeat times out.
-        */
-       ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
-                               xp_nofault_PIOR_target));
-
-       local_irq_restore(irq_flags);
-
-       return ((ret == 0) ? xpcSuccess : xpcPioReadError);
-}
-
-
-/*
- * IPIs associated with SGI_XPC_ACTIVATE IRQ.
- */
-
-/*
- * Flag the appropriate AMO variable and send an IPI to the specified node.
- */
-static inline void
-xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
-                       int to_phys_cpuid)
-{
-       int w_index = XPC_NASID_W_INDEX(from_nasid);
-       int b_index = XPC_NASID_B_INDEX(from_nasid);
-       AMO_t *amos = (AMO_t *) __va(amos_page_pa +
-                               (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
-
-
-       (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
-                               to_phys_cpuid, SGI_XPC_ACTIVATE);
-}
-
-static inline void
-xpc_IPI_send_activate(struct xpc_vars *vars)
-{
-       xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
-                               vars->act_nasid, vars->act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_activated(struct xpc_partition *part)
-{
-       xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-                       part->remote_act_nasid, part->remote_act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_reactivate(struct xpc_partition *part)
-{
-       xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
-                               xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
-}
-
-static inline void
-xpc_IPI_send_disengage(struct xpc_partition *part)
-{
-       xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
-                       part->remote_act_nasid, part->remote_act_phys_cpuid);
-}
-
-
-/*
- * IPIs associated with SGI_XPC_NOTIFY IRQ.
- */
-
-/*
- * Send an IPI to the remote partition that is associated with the
- * specified channel.
- */
-#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
-               xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
-
-static inline void
-xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
-                       unsigned long *irq_flags)
-{
-       struct xpc_partition *part = &xpc_partitions[ch->partid];
-       enum xpc_retval ret;
-
-
-       if (likely(part->act_state != XPC_P_DEACTIVATING)) {
-               ret = xpc_IPI_send(part->remote_IPI_amo_va,
-                                       (u64) ipi_flag << (ch->number * 8),
-                                       part->remote_IPI_nasid,
-                                       part->remote_IPI_phys_cpuid,
-                                       SGI_XPC_NOTIFY);
-               dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
-                       ipi_flag_string, ch->partid, ch->number, ret);
-               if (unlikely(ret != xpcSuccess)) {
-                       if (irq_flags != NULL) {
-                               spin_unlock_irqrestore(&ch->lock, *irq_flags);
-                       }
-                       XPC_DEACTIVATE_PARTITION(part, ret);
-                       if (irq_flags != NULL) {
-                               spin_lock_irqsave(&ch->lock, *irq_flags);
-                       }
-               }
-       }
-}
-
-
-/*
- * Make it look like the remote partition, which is associated with the
- * specified channel, sent us an IPI. This faked IPI will be handled
- * by xpc_dropped_IPI_check().
- */
-#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
-               xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
-
-static inline void
-xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
-                               char *ipi_flag_string)
-{
-       struct xpc_partition *part = &xpc_partitions[ch->partid];
-
-
-       FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
-                       FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
-       dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
-               ipi_flag_string, ch->partid, ch->number);
-}
-
-
-/*
- * The sending and receiving of IPIs includes the setting of an AMO variable
- * to indicate the reason the IPI was sent. The 64-bit variable is divided
- * up into eight bytes, ordered from right to left. Byte zero pertains to
- * channel 0, byte one to channel 1, and so on. Each byte is described by
- * the following IPI flags.
- */
-
-#define        XPC_IPI_CLOSEREQUEST    0x01
-#define        XPC_IPI_CLOSEREPLY      0x02
-#define        XPC_IPI_OPENREQUEST     0x04
-#define        XPC_IPI_OPENREPLY       0x08
-#define        XPC_IPI_MSGREQUEST      0x10
-
-
-/* given an AMO variable and a channel#, get its associated IPI flags */
-#define XPC_GET_IPI_FLAGS(_amo, _c)    ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
-#define XPC_SET_IPI_FLAGS(_amo, _c, _f)        (_amo) |= ((u64) (_f) << ((_c) * 8))
-
-#define        XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
-#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
-
-
-static inline void
-xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-       struct xpc_openclose_args *args = ch->local_openclose_args;
-
-
-       args->reason = ch->reason;
-
-       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-       struct xpc_openclose_args *args = ch->local_openclose_args;
-
-
-       args->msg_size = ch->msg_size;
-       args->local_nentries = ch->local_nentries;
-
-       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
-{
-       struct xpc_openclose_args *args = ch->local_openclose_args;
-
-
-       args->remote_nentries = ch->remote_nentries;
-       args->local_nentries = ch->local_nentries;
-       args->local_msgqueue_pa = __pa(ch->local_msgqueue);
-
-       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
-}
-
-static inline void
-xpc_IPI_send_msgrequest(struct xpc_channel *ch)
-{
-       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
-}
-
-static inline void
-xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
-{
-       XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
-}
-
-
-/*
- * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
- * pages are located in the lowest granule. The lowest granule uses 4k pages
- * for cached references and an alternate TLB handler to never provide a
- * cacheable mapping for the entire region. This will prevent speculative
- * reading of cached copies of our lines from being issued which will cause
- * a PI FSB Protocol error to be generated by the SHUB. For XPC, we need 64
- * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
- * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
- * activation and 2 AMO variables for partition deactivation.
- */
-static inline AMO_t *
-xpc_IPI_init(int index)
-{
-       AMO_t *amo = xpc_vars->amos_page + index;
-
-
-       (void) xpc_IPI_receive(amo);    /* clear AMO variable */
-       return amo;
-}
-
-
-
-static inline enum xpc_retval
-xpc_map_bte_errors(bte_result_t error)
-{
-       switch (error) {
-       case BTE_SUCCESS:       return xpcSuccess;
-       case BTEFAIL_DIR:       return xpcBteDirectoryError;
-       case BTEFAIL_POISON:    return xpcBtePoisonError;
-       case BTEFAIL_WERR:      return xpcBteWriteError;
-       case BTEFAIL_ACCESS:    return xpcBteAccessError;
-       case BTEFAIL_PWERR:     return xpcBtePWriteError;
-       case BTEFAIL_PRERR:     return xpcBtePReadError;
-       case BTEFAIL_TOUT:      return xpcBteTimeOutError;
-       case BTEFAIL_XTERR:     return xpcBteXtalkError;
-       case BTEFAIL_NOTAVAIL:  return xpcBteNotAvailable;
-       default:                return xpcBteUnmappedError;
-       }
-}
-
-
-
-static inline void *
-xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
-{
-       /* see if kmalloc will give us cacheline aligned memory by default */
-       *base = kmalloc(size, flags);
-       if (*base == NULL) {
-               return NULL;
-       }
-       if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
-               return *base;
-       }
-       kfree(*base);
-
-       /* nope, we'll have to do it ourselves */
-       *base = kmalloc(size + L1_CACHE_BYTES, flags);
-       if (*base == NULL) {
-               return NULL;
-       }
-       return (void *) L1_CACHE_ALIGN((u64) *base);
-}
-
-
-/*
- * Check to see if there is any channel activity to/from the specified
- * partition.
- */
-static inline void
-xpc_check_for_channel_activity(struct xpc_partition *part)
-{
-       u64 IPI_amo;
-       unsigned long irq_flags;
-
-
-       IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
-       if (IPI_amo == 0) {
-               return;
-       }
-
-       spin_lock_irqsave(&part->IPI_lock, irq_flags);
-       part->local_IPI_amo |= IPI_amo;
-       spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
-
-       dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
-               XPC_PARTID(part), IPI_amo);
-
-       xpc_wakeup_channel_mgr(part);
-}
-
-
-#endif /* _IA64_SN_KERNEL_XPC_H */
-
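The header deleted above is not lost: the #include changes in the following hunks switch its users from the local "xpc.h" to <asm/sn/xpc.h>, i.e. the declarations move under include/asm-ia64/sn/. One small piece worth illustrating is the XPC_PACK_ARGS/XPC_UNPACK_ARG* trio, which squeezes two 32-bit values into the single 64-bit argument that add_timer() and kernel_thread() callbacks receive. A standalone sketch of the same arithmetic, with hypothetical values, buildable as ordinary userspace C:

    #include <stdint.h>
    #include <stdio.h>

    #define XPC_PACK_ARGS(_arg1, _arg2) \
                    ((((uint64_t) (_arg1)) & 0xffffffff) | \
                    ((((uint64_t) (_arg2)) & 0xffffffff) << 32))
    #define XPC_UNPACK_ARG1(_args)  (((uint64_t) (_args)) & 0xffffffff)
    #define XPC_UNPACK_ARG2(_args)  ((((uint64_t) (_args)) >> 32) & 0xffffffff)

    int main(void)
    {
            /* e.g. a partition id in the low word, a channel number in the high word */
            uint64_t args = XPC_PACK_ARGS(3, 7);

            printf("arg1=%llu arg2=%llu\n",
                   (unsigned long long) XPC_UNPACK_ARG1(args),
                   (unsigned long long) XPC_UNPACK_ARG2(args));
            return 0;
    }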
index abf4fc2a87bba20e98014172412be96f84c92f91..0c0a6890240996e97b3b5960302219014b616902 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 
@@ -24,7 +24,7 @@
 #include <linux/slab.h>
 #include <asm/sn/bte.h>
 #include <asm/sn/sn_sal.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /*
@@ -779,6 +779,12 @@ xpc_process_disconnect(struct xpc_channel *ch, unsigned long *irq_flags)
 
        /* both sides are disconnected now */
 
+       if (ch->flags & XPC_C_CONNECTCALLOUT) {
+               spin_unlock_irqrestore(&ch->lock, *irq_flags);
+               xpc_disconnect_callout(ch, xpcDisconnected);
+               spin_lock_irqsave(&ch->lock, *irq_flags);
+       }
+
        /* it's now safe to free the channel's message queues */
        xpc_free_msgqueues(ch);
 
@@ -1645,7 +1651,7 @@ xpc_disconnect_channel(const int line, struct xpc_channel *ch,
 
 
 void
-xpc_disconnecting_callout(struct xpc_channel *ch)
+xpc_disconnect_callout(struct xpc_channel *ch, enum xpc_retval reason)
 {
        /*
         * Let the channel's registerer know that the channel is being
@@ -1654,15 +1660,13 @@ xpc_disconnecting_callout(struct xpc_channel *ch)
         */
 
        if (ch->func != NULL) {
-               dev_dbg(xpc_chan, "ch->func() called, reason=xpcDisconnecting,"
-                       " partid=%d, channel=%d\n", ch->partid, ch->number);
+               dev_dbg(xpc_chan, "ch->func() called, reason=%d, partid=%d, "
+                       "channel=%d\n", reason, ch->partid, ch->number);
 
-               ch->func(xpcDisconnecting, ch->partid, ch->number, NULL,
-                                                               ch->key);
+               ch->func(reason, ch->partid, ch->number, NULL, ch->key);
 
-               dev_dbg(xpc_chan, "ch->func() returned, reason="
-                       "xpcDisconnecting, partid=%d, channel=%d\n",
-                       ch->partid, ch->number);
+               dev_dbg(xpc_chan, "ch->func() returned, reason=%d, partid=%d, "
+                       "channel=%d\n", reason, ch->partid, ch->number);
        }
 }
 
index b617236524c690ea2eb95122e26b9ad133d30819..8930586e0eb4203c64b85201b5b2ab7310bd3166 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 
@@ -59,7 +59,7 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/kdebug.h>
 #include <asm/uaccess.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /* define two XPC debug device structures to be used with dev_dbg() et al */
@@ -82,6 +82,9 @@ struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;
 
 
+static int xpc_kdebug_ignore;
+
+
 /* systune related variables for /proc/sys directories */
 
 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -162,6 +165,8 @@ static ctl_table xpc_sys_dir[] = {
 };
 static struct ctl_table_header *xpc_sysctl;
 
+/* non-zero if any remote partition disengage request was timed out */
+int xpc_disengage_request_timedout;
 
 /* #of IRQs received */
 static atomic_t xpc_act_IRQ_rcvd;
@@ -773,7 +778,7 @@ xpc_daemonize_kthread(void *args)
                        ch->flags |= XPC_C_DISCONNECTCALLOUT;
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
 
-                       xpc_disconnecting_callout(ch);
+                       xpc_disconnect_callout(ch, xpcDisconnecting);
                } else {
                        spin_unlock_irqrestore(&ch->lock, irq_flags);
                }
@@ -921,9 +926,9 @@ static void
 xpc_do_exit(enum xpc_retval reason)
 {
        partid_t partid;
-       int active_part_count;
+       int active_part_count, printed_waiting_msg = 0;
        struct xpc_partition *part;
-       unsigned long printmsg_time;
+       unsigned long printmsg_time, disengage_request_timeout = 0;
 
 
        /* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
@@ -953,7 +958,8 @@ xpc_do_exit(enum xpc_retval reason)
 
        /* wait for all partitions to become inactive */
 
-       printmsg_time = jiffies;
+       printmsg_time = jiffies + (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+       xpc_disengage_request_timedout = 0;
 
        do {
                active_part_count = 0;
@@ -969,20 +975,39 @@ xpc_do_exit(enum xpc_retval reason)
                        active_part_count++;
 
                        XPC_DEACTIVATE_PARTITION(part, reason);
-               }
 
-               if (active_part_count == 0) {
-                       break;
+                       if (part->disengage_request_timeout >
+                                               disengage_request_timeout) {
+                               disengage_request_timeout =
+                                               part->disengage_request_timeout;
+                       }
                }
 
-               if (jiffies >= printmsg_time) {
-                       dev_info(xpc_part, "waiting for partitions to "
-                               "deactivate/disengage, active count=%d, remote "
-                               "engaged=0x%lx\n", active_part_count,
-                               xpc_partition_engaged(1UL << partid));
-
-                       printmsg_time = jiffies +
+               if (xpc_partition_engaged(-1UL)) {
+                       if (time_after(jiffies, printmsg_time)) {
+                               dev_info(xpc_part, "waiting for remote "
+                                       "partitions to disengage, timeout in "
+                                       "%ld seconds\n",
+                                       (disengage_request_timeout - jiffies)
+                                                                       / HZ);
+                               printmsg_time = jiffies +
                                        (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+                               printed_waiting_msg = 1;
+                       }
+
+               } else if (active_part_count > 0) {
+                       if (printed_waiting_msg) {
+                               dev_info(xpc_part, "waiting for local partition"
+                                       " to disengage\n");
+                               printed_waiting_msg = 0;
+                       }
+
+               } else {
+                       if (!xpc_disengage_request_timedout) {
+                               dev_info(xpc_part, "all partitions have "
+                                       "disengaged\n");
+                       }
+                       break;
                }
 
                /* sleep for a 1/3 of a second or so */
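
The rewritten wait loop above distinguishes three cases on each pass: remote partitions are still engaged (print a rate-limited message showing the remaining disengage timeout), only local teardown is still pending, or everything has disengaged (leave the loop). A compressed standalone model of that control flow, with the polling helpers stubbed out and the print interval assumed:

    /* Rough model of the reworked shutdown wait loop; helpers and intervals are stand-ins. */
    #include <stdio.h>

    static int  remote_engaged(void)     { return 0; }  /* any remote partition engaged? */
    static int  local_active_count(void) { return 0; }  /* local partitions still active */
    static long now_seconds(void)        { return 0; }  /* monotonic time stub           */

    static void wait_for_shutdown(long disengage_timeout)
    {
            long printmsg_time = now_seconds() + 10;     /* print interval assumed */
            int printed_waiting_msg = 0;

            for (;;) {
                    if (remote_engaged()) {
                            if (now_seconds() >= printmsg_time) {
                                    printf("waiting for remote partitions to disengage, "
                                           "timeout in %ld seconds\n",
                                           disengage_timeout - now_seconds());
                                    printmsg_time = now_seconds() + 10;
                                    printed_waiting_msg = 1;
                            }
                    } else if (local_active_count() > 0) {
                            if (printed_waiting_msg) {
                                    printf("waiting for local partition to disengage\n");
                                    printed_waiting_msg = 0;
                            }
                    } else {
                            printf("all partitions have disengaged\n");
                            break;
                    }
                    /* the real loop sleeps for roughly a third of a second here */
            }
    }

    int main(void) { wait_for_shutdown(90); return 0; }
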
@@ -1000,11 +1025,13 @@ xpc_do_exit(enum xpc_retval reason)
        del_timer_sync(&xpc_hb_timer);
        DBUG_ON(xpc_vars->heartbeating_to_mask != 0);
 
-       /* take ourselves off of the reboot_notifier_list */
-       (void) unregister_reboot_notifier(&xpc_reboot_notifier);
+       if (reason == xpcUnloading) {
+               /* take ourselves off of the reboot_notifier_list */
+               (void) unregister_reboot_notifier(&xpc_reboot_notifier);
 
-       /* take ourselves off of the die_notifier list */
-       (void) unregister_die_notifier(&xpc_die_notifier);
+               /* take ourselves off of the die_notifier list */
+               (void) unregister_die_notifier(&xpc_die_notifier);
+       }
 
        /* close down protections for IPI operations */
        xpc_restrict_IPI_ops();
@@ -1020,7 +1047,35 @@ xpc_do_exit(enum xpc_retval reason)
 
 
 /*
- * Called when the system is about to be either restarted or halted.
+ * This function is called when the system is being rebooted.
+ */
+static int
+xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
+{
+       enum xpc_retval reason;
+
+
+       switch (event) {
+       case SYS_RESTART:
+               reason = xpcSystemReboot;
+               break;
+       case SYS_HALT:
+               reason = xpcSystemHalt;
+               break;
+       case SYS_POWER_OFF:
+               reason = xpcSystemPoweroff;
+               break;
+       default:
+               reason = xpcSystemGoingDown;
+       }
+
+       xpc_do_exit(reason);
+       return NOTIFY_DONE;
+}
+
+
+/*
+ * Notify other partitions to disengage from all references to our memory.
  */
 static void
 xpc_die_disengage(void)
@@ -1028,7 +1083,7 @@ xpc_die_disengage(void)
        struct xpc_partition *part;
        partid_t partid;
        unsigned long engaged;
-       long time, print_time, disengage_request_timeout;
+       long time, printmsg_time, disengage_request_timeout;
 
 
        /* keep xpc_hb_checker thread from doing anything (just in case) */
@@ -1055,57 +1110,53 @@ xpc_die_disengage(void)
                }
        }
 
-       print_time = rtc_time();
-       disengage_request_timeout = print_time +
+       time = rtc_time();
+       printmsg_time = time +
+               (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+       disengage_request_timeout = time +
                (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
 
        /* wait for all other partitions to disengage from us */
 
-       while ((engaged = xpc_partition_engaged(-1UL)) &&
-                       (time = rtc_time()) < disengage_request_timeout) {
+       while (1) {
+               engaged = xpc_partition_engaged(-1UL);
+               if (!engaged) {
+                       dev_info(xpc_part, "all partitions have disengaged\n");
+                       break;
+               }
 
-               if (time >= print_time) {
+               time = rtc_time();
+               if (time >= disengage_request_timeout) {
+                       for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+                               if (engaged & (1UL << partid)) {
+                                       dev_info(xpc_part, "disengage from "
+                                               "remote partition %d timed "
+                                               "out\n", partid);
+                               }
+                       }
+                       break;
+               }
+
+               if (time >= printmsg_time) {
                        dev_info(xpc_part, "waiting for remote partitions to "
-                               "disengage, engaged=0x%lx\n", engaged);
-                       print_time = time + (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+                               "disengage, timeout in %ld seconds\n",
+                               (disengage_request_timeout - time) /
+                                               sn_rtc_cycles_per_second);
+                       printmsg_time = time +
+                                       (XPC_DISENGAGE_PRINTMSG_INTERVAL *
                                                sn_rtc_cycles_per_second);
                }
        }
-       dev_info(xpc_part, "finished waiting for remote partitions to "
-                               "disengage, engaged=0x%lx\n", engaged);
-}
-
-
-/*
- * This function is called when the system is being rebooted.
- */
-static int
-xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
-{
-       enum xpc_retval reason;
-
-
-       switch (event) {
-       case SYS_RESTART:
-               reason = xpcSystemReboot;
-               break;
-       case SYS_HALT:
-               reason = xpcSystemHalt;
-               break;
-       case SYS_POWER_OFF:
-               reason = xpcSystemPoweroff;
-               break;
-       default:
-               reason = xpcSystemGoingDown;
-       }
-
-       xpc_do_exit(reason);
-       return NOTIFY_DONE;
 }
 
 
 /*
- * This function is called when the system is being rebooted.
+ * This function is called when the system is being restarted or halted due
+ * to some sort of system failure. If this is the case we need to notify the
+ * other partitions to disengage from all references to our memory.
+ * This function can also be called when our heartbeater could be offlined
+ * for a time. In this case we need to notify other partitions to not worry
+ * about the lack of a heartbeat.
  */
 static int
 xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
@@ -1115,11 +1166,25 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
        case DIE_MACHINE_HALT:
                xpc_die_disengage();
                break;
+
+       case DIE_KDEBUG_ENTER:
+               /* Should lack of heartbeat be ignored by other partitions? */
+               if (!xpc_kdebug_ignore) {
+                       break;
+               }
+               /* fall through */
        case DIE_MCA_MONARCH_ENTER:
        case DIE_INIT_MONARCH_ENTER:
                xpc_vars->heartbeat++;
                xpc_vars->heartbeat_offline = 1;
                break;
+
+       case DIE_KDEBUG_LEAVE:
+               /* Is lack of heartbeat being ignored by other partitions? */
+               if (!xpc_kdebug_ignore) {
+                       break;
+               }
+               /* fall through */
        case DIE_MCA_MONARCH_LEAVE:
        case DIE_INIT_MONARCH_LEAVE:
                xpc_vars->heartbeat++;
@@ -1344,3 +1409,7 @@ module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
                "for disengage request to complete.");
 
+module_param(xpc_kdebug_ignore, int, 0);
+MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
+               "other partitions when dropping into kdebug.");
+
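
Two related additions run through this file: the reboot and die notifiers are now only unregistered when the module is actually unloading (reason == xpcUnloading), and the die notifier learns about kernel-debugger entry and exit. The new xpc_kdebug_ignore module parameter decides whether DIE_KDEBUG_ENTER/LEAVE are treated like the MCA/INIT monarch events, i.e. whether remote partitions should tolerate the missing heartbeat while the machine sits in kdebug. A standalone model of that dispatch (the event names mirror the hunk; everything else is a stand-in, not the kernel notifier API):

    /* Model of the kdebug-aware die-event switch; variables are local stand-ins. */
    enum die_event {
            DIE_MACHINE_RESTART, DIE_MACHINE_HALT,
            DIE_KDEBUG_ENTER, DIE_KDEBUG_LEAVE,
            DIE_MCA_MONARCH_ENTER, DIE_MCA_MONARCH_LEAVE,
            DIE_INIT_MONARCH_ENTER, DIE_INIT_MONARCH_LEAVE
    };

    static int  xpc_kdebug_ignore;       /* 0: kdebug entry is not treated specially */
    static long heartbeat;
    static int  heartbeat_offline;       /* tells peers not to expect heartbeats */

    static void die_event_sketch(enum die_event event)
    {
            switch (event) {
            case DIE_MACHINE_RESTART:
            case DIE_MACHINE_HALT:
                    /* ask remote partitions to drop all references to our memory */
                    break;

            case DIE_KDEBUG_ENTER:
                    if (!xpc_kdebug_ignore)
                            break;
                    /* fall through */
            case DIE_MCA_MONARCH_ENTER:
            case DIE_INIT_MONARCH_ENTER:
                    heartbeat++;
                    heartbeat_offline = 1;
                    break;

            case DIE_KDEBUG_LEAVE:
                    if (!xpc_kdebug_ignore)
                            break;
                    /* fall through */
            case DIE_MCA_MONARCH_LEAVE:
            case DIE_INIT_MONARCH_LEAVE:
                    heartbeat++;
                    heartbeat_offline = 0;
                    break;
            }
    }
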
index cdd6431853a1b4cc7cfaafa1d30612ed215b104e..88a730e6cfdbc524e9a867f58c6cddb52e57b086 100644 (file)
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (c) 2004-2005 Silicon Graphics, Inc.  All Rights Reserved.
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
  */
 
 
@@ -28,7 +28,7 @@
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/nodepda.h>
 #include <asm/sn/addrs.h>
-#include "xpc.h"
+#include <asm/sn/xpc.h>
 
 
 /* XPC is exiting flag */
@@ -771,7 +771,8 @@ xpc_identify_act_IRQ_req(int nasid)
                }
        }
 
-       if (!xpc_partition_disengaged(part)) {
+       if (part->disengage_request_timeout > 0 &&
+                                       !xpc_partition_disengaged(part)) {
                /* still waiting on other side to disengage from us */
                return;
        }
@@ -873,6 +874,9 @@ xpc_partition_disengaged(struct xpc_partition *part)
                         * request in a timely fashion, so assume it's dead.
                         */
 
+                       dev_info(xpc_part, "disengage from remote partition %d "
+                               "timed out\n", partid);
+                       xpc_disengage_request_timedout = 1;
                        xpc_clear_partition_engaged(1UL << partid);
                        disengaged = 1;
                }
index 34093476e9652fc03938edeadeddb4596d439d34..e68332d93171cfd17e317fb7a18ecb1f99f7fc5b 100644 (file)
@@ -218,7 +218,9 @@ void sn_dma_flush(uint64_t addr)
        uint64_t flags;
        uint64_t itte;
        struct hubdev_info *hubinfo;
-       volatile struct sn_flush_device_list *p;
+       volatile struct sn_flush_device_kernel *p;
+       volatile struct sn_flush_device_common *common;
+
        struct sn_flush_nasid_entry *flush_nasid_list;
 
        if (!sn_ioif_inited)
@@ -268,17 +270,17 @@ void sn_dma_flush(uint64_t addr)
        p = &flush_nasid_list->widget_p[wid_num][0];
 
        /* find a matching BAR */
-       for (i = 0; i < DEV_PER_WIDGET; i++) {
+       for (i = 0; i < DEV_PER_WIDGET; i++,p++) {
+               common = p->common;
                for (j = 0; j < PCI_ROM_RESOURCE; j++) {
-                       if (p->sfdl_bar_list[j].start == 0)
+                       if (common->sfdl_bar_list[j].start == 0)
                                break;
-                       if (addr >= p->sfdl_bar_list[j].start
-                           && addr <= p->sfdl_bar_list[j].end)
+                       if (addr >= common->sfdl_bar_list[j].start
+                           && addr <= common->sfdl_bar_list[j].end)
                                break;
                }
-               if (j < PCI_ROM_RESOURCE && p->sfdl_bar_list[j].start != 0)
+               if (j < PCI_ROM_RESOURCE && common->sfdl_bar_list[j].start != 0)
                        break;
-               p++;
        }
 
        /* if no matching BAR, return without doing anything. */
@@ -304,24 +306,24 @@ void sn_dma_flush(uint64_t addr)
                if ((1 << XWIDGET_PART_REV_NUM_REV(revnum)) & PV907516) {
                        return;
                } else {
-                       pcireg_wrb_flush_get(p->sfdl_pcibus_info,
-                                            (p->sfdl_slot - 1));
+                       pcireg_wrb_flush_get(common->sfdl_pcibus_info,
+                                            (common->sfdl_slot - 1));
                }
        } else {
-               spin_lock_irqsave(&((struct sn_flush_device_list *)p)->
-                                 sfdl_flush_lock, flags);
-
-               *p->sfdl_flush_addr = 0;
+               spin_lock_irqsave((spinlock_t *)&p->sfdl_flush_lock,
+                                 flags);
+               *common->sfdl_flush_addr = 0;
 
                /* force an interrupt. */
-               *(volatile uint32_t *)(p->sfdl_force_int_addr) = 1;
+               *(volatile uint32_t *)(common->sfdl_force_int_addr) = 1;
 
                /* wait for the interrupt to come back. */
-               while (*(p->sfdl_flush_addr) != 0x10f)
+               while (*(common->sfdl_flush_addr) != 0x10f)
                        cpu_relax();
 
                /* okay, everything is synched up. */
-               spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock, flags);
+               spin_unlock_irqrestore((spinlock_t *)&p->sfdl_flush_lock,
+                                      flags);
        }
        return;
 }
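
This hunk moves sn_dma_flush() from the old flat sn_flush_device_list array to a split layout: a kernel-private sn_flush_device_kernel entry whose ->common pointer leads to the PROM-shared per-device data (BAR ranges, flush address, force-interrupt address). The BAR search therefore walks the kernel array but reads through common; the pcibr_bus_fixup() hunk below makes the matching change. A reduced standalone model of that walk (struct layout, array sizes and field names are simplifications for illustration):

    /* Standalone model of the kernel/common split used by the flush-device walk. */
    #include <stddef.h>

    #define DEV_PER_WIDGET 8               /* illustrative */
    #define BARS_PER_DEV   6

    struct flush_common {                  /* shared with the PROM in the real code */
            struct { unsigned long start, end; } bar[BARS_PER_DEV];
    };

    struct flush_kernel {                  /* kernel-private wrapper */
            struct flush_common *common;
    };

    static struct flush_common *find_bar(struct flush_kernel *p, unsigned long addr)
    {
            int i, j;

            for (i = 0; i < DEV_PER_WIDGET; i++, p++) {
                    struct flush_common *common = p->common;

                    for (j = 0; j < BARS_PER_DEV; j++) {
                            if (common->bar[j].start == 0)
                                    break;                 /* no more BARs on this device */
                            if (addr >= common->bar[j].start && addr <= common->bar[j].end)
                                    return common;         /* matching BAR found */
                    }
            }
            return NULL;                                   /* no match: caller does nothing */
    }
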
index 1f500c81002c88869a47935cad3237eaad10bca3..e328e948175d9e3f69795176318864b883b1fe32 100644 (file)
@@ -92,7 +92,8 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
        cnodeid_t near_cnode;
        struct hubdev_info *hubdev_info;
        struct pcibus_info *soft;
-       struct sn_flush_device_list *sn_flush_device_list;
+       struct sn_flush_device_kernel *sn_flush_device_kernel;
+       struct sn_flush_device_common *common;
 
        if (! IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
                return NULL;
@@ -137,20 +138,19 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
        hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);
 
        if (hubdev_info->hdi_flush_nasid_list.widget_p) {
-               sn_flush_device_list = hubdev_info->hdi_flush_nasid_list.
+               sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
                    widget_p[(int)soft->pbi_buscommon.bs_xid];
-               if (sn_flush_device_list) {
+               if (sn_flush_device_kernel) {
                        for (j = 0; j < DEV_PER_WIDGET;
-                            j++, sn_flush_device_list++) {
-                               if (sn_flush_device_list->sfdl_slot == -1)
+                            j++, sn_flush_device_kernel++) {
+                               common = sn_flush_device_kernel->common;
+                               if (common->sfdl_slot == -1)
                                        continue;
-                               if ((sn_flush_device_list->
-                                    sfdl_persistent_segment ==
+                               if ((common->sfdl_persistent_segment ==
                                     soft->pbi_buscommon.bs_persist_segment) &&
-                                    (sn_flush_device_list->
-                                    sfdl_persistent_busnum ==
+                                    (common->sfdl_persistent_busnum ==
                                     soft->pbi_buscommon.bs_persist_busnum))
-                                       sn_flush_device_list->sfdl_pcibus_info =
+                                       common->sfdl_pcibus_info =
                                            soft;
                        }
                }
index 2a1f250349b7027f057a2386c5ed555f0fafa171..5dfc7ea45cf7a9b19887f7de1049a7d337e5833e 100644 (file)
@@ -242,13 +242,10 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
 int copy_thread(int nr, unsigned long clone_flags, unsigned long spu,
        unsigned long unused, struct task_struct *tsk, struct pt_regs *regs)
 {
-       struct pt_regs *childregs;
-       unsigned long sp = (unsigned long)tsk->thread_info + THREAD_SIZE;
+       struct pt_regs *childregs = task_pt_regs(tsk);
        extern void ret_from_fork(void);
 
        /* Copy registers */
-       sp -= sizeof (struct pt_regs);
-       childregs = (struct pt_regs *)sp;
        *childregs = *regs;
 
        childregs->spu = spu;
index 9b75caaf5cec49ba98a0357312f4a4cdbb35042f..340a3bf59b88d6935fc004ea95669ee1f59cdc61 100644 (file)
 #include <asm/processor.h>
 #include <asm/mmu_context.h>
 
-/*
- * Get the address of the live pt_regs for the specified task.
- * These are saved onto the top kernel stack when the process
- * is not running.
- *
- * Note: if a user thread is execve'd from kernel space, the
- * kernel stack will not be empty on entry to the kernel, so
- * ptracing these tasks will fail.
- */
-static inline struct pt_regs *
-get_user_regs(struct task_struct *task)
-{
-        return (struct pt_regs *)
-                ((unsigned long)task->thread_info + THREAD_SIZE
-                 - sizeof(struct pt_regs));
-}
-
 /*
  * This routine will get a word off of the process kernel stack.
  */
@@ -59,7 +42,7 @@ get_stack_long(struct task_struct *task, int offset)
 {
        unsigned long *stack;
 
-       stack = (unsigned long *)get_user_regs(task);
+       stack = (unsigned long *)task_pt_regs(task);
 
        return stack[offset];
 }
@@ -72,7 +55,7 @@ put_stack_long(struct task_struct *task, int offset, unsigned long data)
 {
        unsigned long *stack;
 
-       stack = (unsigned long *)get_user_regs(task);
+       stack = (unsigned long *)task_pt_regs(task);
        stack[offset] = data;
 
        return 0;
@@ -208,7 +191,7 @@ static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
  */
 static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
 {
-       struct pt_regs *regs = get_user_regs(tsk);
+       struct pt_regs *regs = task_pt_regs(tsk);
 
        return copy_to_user(uregs, regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
 }
@@ -223,7 +206,7 @@ static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
 
        ret = -EFAULT;
        if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) == 0) {
-               struct pt_regs *regs = get_user_regs(tsk);
+               struct pt_regs *regs = task_pt_regs(tsk);
                *regs = newregs;
                ret = 0;
        }
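
The m32r changes above (copy_thread() and the ptrace helpers) drop the open-coded "thread_info + THREAD_SIZE - sizeof(struct pt_regs)" computation, previously wrapped in get_user_regs(), in favour of the task_pt_regs() helper. The arithmetic behind it is roughly the following (a sketch with toy sizes; the real definition lives in each architecture's asm/processor.h):

    /* Standalone model of what task_pt_regs() evaluates to on stack-top architectures. */
    #include <stdint.h>

    #define THREAD_SIZE 8192                    /* illustrative; arch-specific in reality */

    struct pt_regs { long gr[16]; };            /* stand-in register frame */
    struct task    { void *stack_base; };       /* stand-in for the task's kernel stack */

    static inline struct pt_regs *task_pt_regs_sketch(struct task *t)
    {
            /* the saved user registers sit at the very top of the kernel stack */
            return (struct pt_regs *)((uintptr_t)t->stack_base + THREAD_SIZE) - 1;
    }

The m68k copy_thread() change further down uses the same top-of-stack expression directly via task_stack_page().
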
index b90c54169fa5ae22bd82f68988d44c2a14705873..d7ec16e7fb259f50d6ee32ef99a332259d3891cc 100644 (file)
@@ -286,7 +286,7 @@ static void __init do_boot_cpu(int phys_id)
        /* So we see what's up   */
        printk("Booting processor %d/%d\n", phys_id, cpu_id);
        stack_start.spi = (void *)idle->thread.sp;
-       idle->thread_info->cpu = cpu_id;
+       task_thread_info(idle)->cpu = cpu_id;
 
        /*
         * Send Startup IPI
index d9edf2d1a4928d47cc5eaf01e580ebba7a9eb3bc..b0aa61bf8700df5d532e0d64d20ff37f02834f75 100644 (file)
@@ -126,9 +126,9 @@ void __init amiga_init_IRQ(void)
                gayle.inten = GAYLE_IRQ_IDE;
 
        /* turn off all interrupts and enable the master interrupt bit */
-       custom.intena = 0x7fff;
-       custom.intreq = 0x7fff;
-       custom.intena = IF_SETCLR | IF_INTEN;
+       amiga_custom.intena = 0x7fff;
+       amiga_custom.intreq = 0x7fff;
+       amiga_custom.intena = IF_SETCLR | IF_INTEN;
 
        cia_init_IRQ(&ciaa_base);
        cia_init_IRQ(&ciab_base);
@@ -245,7 +245,7 @@ int amiga_request_irq(unsigned int irq,
 
        /* enable the interrupt */
        if (irq < IRQ_AMIGA_PORTS && !ami_ablecount[irq])
-               custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+               amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
 
        return error;
 }
@@ -274,7 +274,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
                amiga_delete_irq(&ami_irq_list[irq], dev_id);
                /* if server list empty, disable the interrupt */
                if (!ami_irq_list[irq] && irq < IRQ_AMIGA_PORTS)
-                       custom.intena = amiga_intena_vals[irq];
+                       amiga_custom.intena = amiga_intena_vals[irq];
        } else {
                if (ami_irq_list[irq]->dev_id != dev_id)
                        printk("%s: removing probably wrong IRQ %d from %s\n",
@@ -283,7 +283,7 @@ void amiga_free_irq(unsigned int irq, void *dev_id)
                ami_irq_list[irq]->flags   = 0;
                ami_irq_list[irq]->dev_id  = NULL;
                ami_irq_list[irq]->devname = NULL;
-               custom.intena = amiga_intena_vals[irq];
+               amiga_custom.intena = amiga_intena_vals[irq];
        }
 }
 
@@ -327,7 +327,7 @@ void amiga_enable_irq(unsigned int irq)
        }
 
        /* enable the interrupt */
-       custom.intena = IF_SETCLR | amiga_intena_vals[irq];
+       amiga_custom.intena = IF_SETCLR | amiga_intena_vals[irq];
 }
 
 void amiga_disable_irq(unsigned int irq)
@@ -358,7 +358,7 @@ void amiga_disable_irq(unsigned int irq)
        }
 
        /* disable the interrupt */
-       custom.intena = amiga_intena_vals[irq];
+       amiga_custom.intena = amiga_intena_vals[irq];
 }
 
 inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -373,7 +373,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
        kstat_cpu(0).irqs[SYS_IRQS + irq]++;
 
-       custom.intreq = amiga_intena_vals[irq];
+       amiga_custom.intreq = amiga_intena_vals[irq];
 
        for (node = ami_irq_list[irq]; node; node = node->next)
                node->handler(irq, node->dev_id, fp);
@@ -385,23 +385,23 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
 static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if serial transmit buffer empty, interrupt */
        if (ints & IF_TBE) {
-               custom.intreq = IF_TBE;
+               amiga_custom.intreq = IF_TBE;
                amiga_do_irq(IRQ_AMIGA_TBE, fp);
        }
 
        /* if floppy disk transfer complete, interrupt */
        if (ints & IF_DSKBLK) {
-               custom.intreq = IF_DSKBLK;
+               amiga_custom.intreq = IF_DSKBLK;
                amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
        }
 
        /* if software interrupt set, interrupt */
        if (ints & IF_SOFT) {
-               custom.intreq = IF_SOFT;
+               amiga_custom.intreq = IF_SOFT;
                amiga_do_irq(IRQ_AMIGA_SOFT, fp);
        }
        return IRQ_HANDLED;
@@ -409,17 +409,17 @@ static irqreturn_t ami_int1(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if a blitter interrupt */
        if (ints & IF_BLIT) {
-               custom.intreq = IF_BLIT;
+               amiga_custom.intreq = IF_BLIT;
                amiga_do_irq(IRQ_AMIGA_BLIT, fp);
        }
 
        /* if a copper interrupt */
        if (ints & IF_COPER) {
-               custom.intreq = IF_COPER;
+               amiga_custom.intreq = IF_COPER;
                amiga_do_irq(IRQ_AMIGA_COPPER, fp);
        }
 
@@ -431,29 +431,29 @@ static irqreturn_t ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if audio 0 interrupt */
        if (ints & IF_AUD0) {
-               custom.intreq = IF_AUD0;
+               amiga_custom.intreq = IF_AUD0;
                amiga_do_irq(IRQ_AMIGA_AUD0, fp);
        }
 
        /* if audio 1 interrupt */
        if (ints & IF_AUD1) {
-               custom.intreq = IF_AUD1;
+               amiga_custom.intreq = IF_AUD1;
                amiga_do_irq(IRQ_AMIGA_AUD1, fp);
        }
 
        /* if audio 2 interrupt */
        if (ints & IF_AUD2) {
-               custom.intreq = IF_AUD2;
+               amiga_custom.intreq = IF_AUD2;
                amiga_do_irq(IRQ_AMIGA_AUD2, fp);
        }
 
        /* if audio 3 interrupt */
        if (ints & IF_AUD3) {
-               custom.intreq = IF_AUD3;
+               amiga_custom.intreq = IF_AUD3;
                amiga_do_irq(IRQ_AMIGA_AUD3, fp);
        }
        return IRQ_HANDLED;
@@ -461,7 +461,7 @@ static irqreturn_t ami_int4(int irq, void *dev_id, struct pt_regs *fp)
 
 static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if serial receive buffer full interrupt */
        if (ints & IF_RBF) {
@@ -471,7 +471,7 @@ static irqreturn_t ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 
        /* if a disk sync interrupt */
        if (ints & IF_DSKSYN) {
-               custom.intreq = IF_DSKSYN;
+               amiga_custom.intreq = IF_DSKSYN;
                amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
        }
        return IRQ_HANDLED;
index bd5d134e9f123856fc052ad5139a7a137b848d6a..ae94db5d93b2208942218a89dba61d354fb7e2ae 100644 (file)
@@ -24,6 +24,8 @@ static const signed char sine_data[] = {
 };
 #define DATA_SIZE      (sizeof(sine_data)/sizeof(sine_data[0]))
 
+#define custom amiga_custom
+
     /*
      * The minimum period for audio may be modified by the frame buffer
      * device since it depends on htotal (for OCS/ECS/AGA)
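
The Amiga interrupt handlers above are switched from the bare "custom" identifier to amiga_custom, and the same rename runs through cia.c, config.c and the m68k asm-offsets below; amisound.c instead keeps its existing body unchanged by aliasing the old name, which is what the one-line hunk here adds. In outline (the register struct and bit value are invented for the example):

    /* Illustration of the local alias used by amisound.c; fields and values are made up. */
    struct CUSTOM { unsigned short intena, intreq; };
    static struct CUSTOM amiga_custom;

    #define custom amiga_custom     /* old references to 'custom' now name amiga_custom */

    static void ack_irq_sketch(void)
    {
            custom.intreq = 0x0004; /* expands to amiga_custom.intreq = 0x0004 */
    }
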
index 7d55682615e3604ffeef19fab1708d4a9bf57ef0..9476eb9440f57436365b8de4f6d70e02bb9500c0 100644 (file)
@@ -60,7 +60,7 @@ unsigned char cia_set_irq(struct ciabase *base, unsigned char mask)
        else
                base->icr_data &= ~mask;
        if (base->icr_data & base->icr_mask)
-               custom.intreq = IF_SETCLR | base->int_mask;
+               amiga_custom.intreq = IF_SETCLR | base->int_mask;
        return old & base->icr_mask;
 }
 
@@ -89,7 +89,7 @@ unsigned char cia_able_irq(struct ciabase *base, unsigned char mask)
                }
        }
        if (base->icr_data & base->icr_mask)
-               custom.intreq = IF_SETCLR | base->int_mask;
+               amiga_custom.intreq = IF_SETCLR | base->int_mask;
        return old;
 }
 
@@ -133,7 +133,7 @@ static irqreturn_t cia_handler(int irq, void *dev_id, struct pt_regs *fp)
        mach_irq = base->cia_irq;
        irq = SYS_IRQS + mach_irq;
        ints = cia_set_irq(base, CIA_ICR_ALL);
-       custom.intreq = base->int_mask;
+       amiga_custom.intreq = base->int_mask;
        for (i = 0; i < CIA_IRQS; i++, irq++, mach_irq++) {
                if (ints & 1) {
                        kstat_cpu(0).irqs[irq]++;
@@ -162,7 +162,7 @@ void __init cia_init_IRQ(struct ciabase *base)
        /* install CIA handler */
        request_irq(base->handler_irq, cia_handler, 0, base->name, base);
 
-       custom.intena = IF_SETCLR | base->int_mask;
+       amiga_custom.intena = IF_SETCLR | base->int_mask;
 }
 
 int cia_get_irq_list(struct ciabase *base, struct seq_file *p)
index 4775e18a78f08a7f262730de6adc05969bb184b8..12e3706fe02cf17839d0910dc9d31f5b960b4c1a 100644 (file)
@@ -105,9 +105,6 @@ static int a2000_hwclk (int, struct rtc_time *);
 static int amiga_set_clock_mmss (unsigned long);
 static unsigned int amiga_get_ss (void);
 extern void amiga_mksound( unsigned int count, unsigned int ticks );
-#ifdef CONFIG_AMIGA_FLOPPY
-extern void amiga_floppy_setup(char *, int *);
-#endif
 static void amiga_reset (void);
 extern void amiga_init_sound(void);
 static void amiga_savekmsg_init(void);
@@ -290,7 +287,7 @@ static void __init amiga_identify(void)
     case CS_OCS:
     case CS_ECS:
     case CS_AGA:
-      switch (custom.deniseid & 0xf) {
+      switch (amiga_custom.deniseid & 0xf) {
       case 0x0c:
        AMIGAHW_SET(DENISE_HR);
        break;
@@ -303,7 +300,7 @@ static void __init amiga_identify(void)
       AMIGAHW_SET(DENISE);
       break;
     }
-    switch ((custom.vposr>>8) & 0x7f) {
+    switch ((amiga_custom.vposr>>8) & 0x7f) {
     case 0x00:
       AMIGAHW_SET(AGNUS_PAL);
       break;
@@ -427,13 +424,7 @@ void __init config_amiga(void)
 
   mach_set_clock_mmss  = amiga_set_clock_mmss;
   mach_get_ss          = amiga_get_ss;
-#ifdef CONFIG_AMIGA_FLOPPY
-  mach_floppy_setup    = amiga_floppy_setup;
-#endif
   mach_reset           = amiga_reset;
-#ifdef CONFIG_DUMMY_CONSOLE
-  conswitchp           = &dummy_con;
-#endif
 #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
   mach_beep            = amiga_mksound;
 #endif
@@ -447,9 +438,9 @@ void __init config_amiga(void)
   amiga_colorclock = 5*amiga_eclock;   /* 3.5 MHz */
 
   /* clear all DMA bits */
-  custom.dmacon = DMAF_ALL;
+  amiga_custom.dmacon = DMAF_ALL;
   /* ensure that the DMA master bit is set */
-  custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+  amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
 
   /* don't use Z2 RAM as system memory on Z3 capable machines */
   if (AMIGAHW_PRESENT(ZORRO3)) {
@@ -830,8 +821,8 @@ static void amiga_savekmsg_init(void)
 
 static void amiga_serial_putc(char c)
 {
-    custom.serdat = (unsigned char)c | 0x100;
-    while (!(custom.serdatr & 0x2000))
+    amiga_custom.serdat = (unsigned char)c | 0x100;
+    while (!(amiga_custom.serdatr & 0x2000))
        ;
 }
 
@@ -855,11 +846,11 @@ int amiga_serial_console_wait_key(struct console *co)
 {
     int ch;
 
-    while (!(custom.intreqr & IF_RBF))
+    while (!(amiga_custom.intreqr & IF_RBF))
        barrier();
-    ch = custom.serdatr & 0xff;
+    ch = amiga_custom.serdatr & 0xff;
     /* clear the interrupt, so that another character can be read */
-    custom.intreq = IF_RBF;
+    amiga_custom.intreq = IF_RBF;
     return ch;
 }
 
index 264929471253b371a86aa5eb36877b64159cb55c..d401962d9b251e53dcef279d5045473056d6f995 100644 (file)
@@ -176,9 +176,6 @@ void config_apollo(void) {
        mach_set_clock_mmss  = dn_dummy_set_clock_mmss; /* */
        mach_process_int     = dn_process_int;
        mach_reset           = dn_dummy_reset;  /* */
-#ifdef CONFIG_DUMMY_CONSOLE
-        conswitchp           = &dummy_con;
-#endif
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat = dn_heartbeat;
 #endif
index 9261d2deeaf5b7629f83f63a857f66fb55fe6d53..1012b08e552200a75dc17a39f64d201f686399fd 100644 (file)
@@ -52,9 +52,6 @@ int atari_rtc_year_offset;
 
 /* local function prototypes */
 static void atari_reset( void );
-#ifdef CONFIG_ATARI_FLOPPY
-extern void atari_floppy_setup(char *, int *);
-#endif
 static void atari_get_model(char *model);
 static int atari_get_hardware_list(char *buffer);
 
@@ -244,12 +241,6 @@ void __init config_atari(void)
     mach_get_irq_list   = show_atari_interrupts;
     mach_gettimeoffset   = atari_gettimeoffset;
     mach_reset           = atari_reset;
-#ifdef CONFIG_ATARI_FLOPPY
-    mach_floppy_setup   = atari_floppy_setup;
-#endif
-#ifdef CONFIG_DUMMY_CONSOLE
-    conswitchp          = &dummy_con;
-#endif
     mach_max_dma_address = 0xffffff;
 #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
     mach_beep          = atari_mksound;
index f7573f2bcb9c5b356a51aa25a7fc028be1bc8462..703cbc6dc9cc3c95a547c0ea11f2e0aa4370df99 100644 (file)
@@ -47,6 +47,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
        unsigned char msr;
        unsigned long flags;
        struct rtc_time wtime;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case RTC_RD_TIME:       /* Read the time/date from RTC  */
@@ -69,7 +70,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                } while (wtime.tm_sec != BCD2BIN(rtc->bcd_sec));
                rtc->msr = msr;
                local_irq_restore(flags);
-               return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+               return copy_to_user(argp, &wtime, sizeof wtime) ?
                                                                -EFAULT : 0;
        }
        case RTC_SET_TIME:      /* Set the RTC */
@@ -81,8 +82,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
 
-               if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
-                                  sizeof(struct rtc_time)))
+               if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
                        return -EFAULT;
 
                yrs = rtc_tm.tm_year;
index a0b854f3f94add74e7c73d5dd569d4b0c9c8bb77..6d129eef370f1df8d5dab71c8127384c93ad3b16 100644 (file)
@@ -260,9 +260,6 @@ void __init config_hp300(void)
        mach_reset           = hp300_reset;
 #ifdef CONFIG_HEARTBEAT
        mach_heartbeat       = hp300_pulse;
-#endif
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp           = &dummy_con;
 #endif
        mach_max_dma_address = 0xffffffff;
 
index c787c5ba951310fe13f53e2e99d949c79f868e37..246a8820c2236b814e636985078b93cf78dd6c3b 100644 (file)
@@ -92,7 +92,7 @@ int main(void)
        DEFINE(TRAP_TRACE, TRAP_TRACE);
 
        /* offsets into the custom struct */
-       DEFINE(CUSTOMBASE, &custom);
+       DEFINE(CUSTOMBASE, &amiga_custom);
        DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
        DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
        DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
index d4336d846df146c8e3f6f771481c1e19c360a144..70002c146eed19c0c5dd76c73092d6a56f11df25 100644 (file)
  * Macintosh console support
  */
 
+#ifdef CONFIG_FRAMEBUFFER_CONSOLE
 #define CONSOLE
 #define CONSOLE_PENGUIN
+#endif
 
 /*
  * Macintosh serial debug support; outputs boot info to the printer
index 13d109328a428166d4a11ba456595ff2d0170a91..3f9cb55d0356bbab9b4cf4301152f0e40550fc17 100644 (file)
@@ -238,10 +238,9 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
        struct pt_regs * childregs;
        struct switch_stack * childstack, *stack;
-       unsigned long stack_offset, *retp;
+       unsigned long *retp;
 
-       stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
-       childregs = (struct pt_regs *) ((unsigned long) (p->thread_info) + stack_offset);
+       childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
 
        *childregs = *regs;
        childregs->d0 = 0;
@@ -386,7 +385,7 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
-       stack_page = (unsigned long)(p->thread_info);
+       stack_page = (unsigned long)task_stack_page(p);
        fp = ((struct switch_stack *)p->thread.ksp)->a6;
        do {
                if (fp < stack_page+sizeof(struct thread_info) ||
index d6ca99242e5aa14a885182add91366b6d0e1448d..750d5b3c971fe001a2d86808a30b9cd237b0eebe 100644 (file)
@@ -84,9 +84,6 @@ void (*mach_reset)( void );
 void (*mach_halt)( void );
 void (*mach_power_off)( void );
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-#endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int);
 EXPORT_SYMBOL(mach_heartbeat);
@@ -100,6 +97,8 @@ void (*mach_beep)(unsigned int, unsigned int);
 #if defined(CONFIG_ISA) && defined(MULTI_ISA)
 int isa_type;
 int isa_sex;
+EXPORT_SYMBOL(isa_type);
+EXPORT_SYMBOL(isa_sex);
 #endif
 
 extern int amiga_parse_bootinfo(const struct bi_record *);
@@ -280,6 +279,10 @@ void __init setup_arch(char **cmdline_p)
            }
        }
 
+#ifdef CONFIG_DUMMY_CONSOLE
+       conswitchp = &dummy_con;
+#endif
+
        switch (m68k_machtype) {
 #ifdef CONFIG_AMIGA
            case MACH_AMIGA:
@@ -521,16 +524,6 @@ int get_hardware_list(char *buffer)
     return(len);
 }
 
-
-#if defined(CONFIG_AMIGA_FLOPPY) || defined(CONFIG_ATARI_FLOPPY)
-void __init floppy_setup(char *str, int *ints)
-{
-       if (mach_floppy_setup)
-               mach_floppy_setup (str, ints);
-}
-
-#endif
-
 void check_bugs(void)
 {
 #ifndef CONFIG_M68KFPU_EMU
index 9c636a4c238d5f0c3f01d46b6938d082517dafe1..866917bfa0280f16898df65c5d7cc632cc63b957 100644 (file)
@@ -96,7 +96,7 @@ asmlinkage int do_sigsuspend(struct pt_regs *regs)
 asmlinkage int
 do_rt_sigsuspend(struct pt_regs *regs)
 {
-       sigset_t *unewset = (sigset_t *)regs->d1;
+       sigset_t __user *unewset = (sigset_t __user *)regs->d1;
        size_t sigsetsize = (size_t)regs->d2;
        sigset_t saveset, newset;
 
@@ -122,8 +122,8 @@ do_rt_sigsuspend(struct pt_regs *regs)
 }
 
 asmlinkage int
-sys_sigaction(int sig, const struct old_sigaction *act,
-             struct old_sigaction *oact)
+sys_sigaction(int sig, const struct old_sigaction __user *act,
+             struct old_sigaction __user *oact)
 {
        struct k_sigaction new_ka, old_ka;
        int ret;
@@ -154,7 +154,7 @@ sys_sigaction(int sig, const struct old_sigaction *act,
 }
 
 asmlinkage int
-sys_sigaltstack(const stack_t *uss, stack_t *uoss)
+sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
 {
        return do_sigaltstack(uss, uoss, rdusp());
 }
@@ -169,10 +169,10 @@ sys_sigaltstack(const stack_t *uss, stack_t *uoss)
 
 struct sigframe
 {
-       char *pretcode;
+       char __user *pretcode;
        int sig;
        int code;
-       struct sigcontext *psc;
+       struct sigcontext __user *psc;
        char retcode[8];
        unsigned long extramask[_NSIG_WORDS-1];
        struct sigcontext sc;
@@ -180,10 +180,10 @@ struct sigframe
 
 struct rt_sigframe
 {
-       char *pretcode;
+       char __user *pretcode;
        int sig;
-       struct siginfo *pinfo;
-       void *puc;
+       struct siginfo __user *pinfo;
+       void __user *puc;
        char retcode[8];
        struct siginfo info;
        struct ucontext uc;
@@ -248,7 +248,7 @@ out:
 #define uc_formatvec   uc_filler[FPCONTEXT_SIZE/4]
 #define uc_extra       uc_filler[FPCONTEXT_SIZE/4+1]
 
-static inline int rt_restore_fpu_state(struct ucontext *uc)
+static inline int rt_restore_fpu_state(struct ucontext __user *uc)
 {
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : 0;
@@ -267,7 +267,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
                return 0;
        }
 
-       if (__get_user(*(long *)fpstate, (long *)&uc->uc_fpstate))
+       if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                goto out;
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                if (!CPU_IS_060)
@@ -306,7 +306,7 @@ static inline int rt_restore_fpu_state(struct ucontext *uc)
                                    "m" (*fpregs.f_fpcntl));
        }
        if (context_size &&
-           __copy_from_user(fpstate + 4, (long *)&uc->uc_fpstate + 1,
+           __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;
        __asm__ volatile (".chip 68k/68881\n\t"
@@ -319,7 +319,7 @@ out:
 }
 
 static inline int
-restore_sigcontext(struct pt_regs *regs, struct sigcontext *usc, void *fp,
+restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp,
                   int *pd0)
 {
        int fsize, formatvec;
@@ -404,10 +404,10 @@ badframe:
 
 static inline int
 rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
-                   struct ucontext *uc, int *pd0)
+                   struct ucontext __user *uc, int *pd0)
 {
        int fsize, temp;
-       greg_t *gregs = uc->uc_mcontext.gregs;
+       greg_t __user *gregs = uc->uc_mcontext.gregs;
        unsigned long usp;
        int err;
 
@@ -506,7 +506,7 @@ asmlinkage int do_sigreturn(unsigned long __unused)
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
-       struct sigframe *frame = (struct sigframe *)(usp - 4);
+       struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;
        int d0;
 
@@ -536,7 +536,7 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
        struct switch_stack *sw = (struct switch_stack *) &__unused;
        struct pt_regs *regs = (struct pt_regs *) (sw + 1);
        unsigned long usp = rdusp();
-       struct rt_sigframe *frame = (struct rt_sigframe *)(usp - 4);
+       struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;
        int d0;
 
@@ -596,7 +596,7 @@ static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
        }
 }
 
-static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
 {
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : 0;
@@ -617,7 +617,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
                          ".chip 68k"
                          : : "m" (*fpstate) : "memory");
 
-       err |= __put_user(*(long *)fpstate, (long *)&uc->uc_fpstate);
+       err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                fpregset_t fpregs;
                if (!CPU_IS_060)
@@ -642,7 +642,7 @@ static inline int rt_save_fpu_state(struct ucontext *uc, struct pt_regs *regs)
                                    sizeof(fpregs));
        }
        if (context_size)
-               err |= copy_to_user((long *)&uc->uc_fpstate + 1, fpstate + 4,
+               err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
                                    context_size);
        return err;
 }
@@ -662,10 +662,10 @@ static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
        save_fpu_state(sc, regs);
 }
 
-static inline int rt_setup_ucontext(struct ucontext *uc, struct pt_regs *regs)
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
 {
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
-       greg_t *gregs = uc->uc_mcontext.gregs;
+       greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;
 
        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
@@ -753,7 +753,7 @@ static inline void push_cache (unsigned long vaddr)
        }
 }
 
-static inline void *
+static inline void __user *
 get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
 {
        unsigned long usp;
@@ -766,13 +766,13 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
                if (!on_sig_stack(usp))
                        usp = current->sas_ss_sp + current->sas_ss_size;
        }
-       return (void *)((usp - frame_size) & -8UL);
+       return (void __user *)((usp - frame_size) & -8UL);
 }
 
 static void setup_frame (int sig, struct k_sigaction *ka,
                         sigset_t *set, struct pt_regs *regs)
 {
-       struct sigframe *frame;
+       struct sigframe __user *frame;
        int fsize = frame_extra_sizes[regs->format];
        struct sigcontext context;
        int err = 0;
@@ -813,7 +813,7 @@ static void setup_frame (int sig, struct k_sigaction *ka,
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; trap #0 */
        err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
-                         (long *)(frame->retcode));
+                         (long __user *)(frame->retcode));
 
        if (err)
                goto give_sigsegv;
@@ -849,7 +849,7 @@ give_sigsegv:
 static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
                            sigset_t *set, struct pt_regs *regs)
 {
-       struct rt_sigframe *frame;
+       struct rt_sigframe __user *frame;
        int fsize = frame_extra_sizes[regs->format];
        int err = 0;
 
@@ -880,8 +880,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
 
        /* Create the ucontext.  */
        err |= __put_user(0, &frame->uc.uc_flags);
-       err |= __put_user(0, &frame->uc.uc_link);
-       err |= __put_user((void *)current->sas_ss_sp,
+       err |= __put_user(NULL, &frame->uc.uc_link);
+       err |= __put_user((void __user *)current->sas_ss_sp,
                          &frame->uc.uc_stack.ss_sp);
        err |= __put_user(sas_ss_flags(rdusp()),
                          &frame->uc.uc_stack.ss_flags);
@@ -893,8 +893,8 @@ static void setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; notb d0; trap #0 */
        err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
-                         (long *)(frame->retcode + 0));
-       err |= __put_user(0x4e40, (short *)(frame->retcode + 4));
+                         (long __user *)(frame->retcode + 0));
+       err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
 
        if (err)
                goto give_sigsegv;
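
Most of the m68k signal changes above are __user annotations on pointers that refer to user-space memory (the signal frames, sigcontext, the retcode trampoline), so that sparse can check that such pointers only flow into copy_to_user()/__put_user() and friends rather than being dereferenced directly. The annotation is invisible to the compiler; roughly (a reduced sketch of the convention, not the kernel's compiler.h):

    /* Minimal sketch of the __user convention; real definitions are in linux/compiler.h. */
    #ifdef __CHECKER__
    # define __user __attribute__((noderef, address_space(1)))   /* sparse-only attribute */
    #else
    # define __user                                              /* no-op for the compiler */
    #endif

    struct old_sigaction;        /* layout irrelevant for the sketch */

    /* user pointers are marked at the syscall boundary ... */
    long sigaction_sketch(int sig, const struct old_sigaction __user *act,
                          struct old_sigaction __user *oact);

    /* ... and sparse then warns if *act is dereferenced without copy_from_user(). */

The sys_m68k.c hunk below benefits from the same change: once sys_ipc() takes a void __user *ptr, the per-call casts to struct msgbuf *, char * and so on can simply be dropped.
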
index 640895b2c51aa2b55deef87256f2bb659f103dd9..143c552d38f36a6ca799bee01e8a2e418934cdb8 100644 (file)
@@ -32,7 +32,7 @@
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
  */
-asmlinkage int sys_pipe(unsigned long * fildes)
+asmlinkage int sys_pipe(unsigned long __user * fildes)
 {
        int fd[2];
        int error;
@@ -94,7 +94,7 @@ struct mmap_arg_struct {
        unsigned long offset;
 };
 
-asmlinkage int old_mmap(struct mmap_arg_struct *arg)
+asmlinkage int old_mmap(struct mmap_arg_struct __user *arg)
 {
        struct mmap_arg_struct a;
        int error = -EFAULT;
@@ -160,11 +160,11 @@ out:
 
 struct sel_arg_struct {
        unsigned long n;
-       fd_set *inp, *outp, *exp;
-       struct timeval *tvp;
+       fd_set __user *inp, *outp, *exp;
+       struct timeval __user *tvp;
 };
 
-asmlinkage int old_select(struct sel_arg_struct *arg)
+asmlinkage int old_select(struct sel_arg_struct __user *arg)
 {
        struct sel_arg_struct a;
 
@@ -180,7 +180,7 @@ asmlinkage int old_select(struct sel_arg_struct *arg)
  * This is really horribly ugly.
  */
 asmlinkage int sys_ipc (uint call, int first, int second,
-                       int third, void *ptr, long fifth)
+                       int third, void __user *ptr, long fifth)
 {
        int version, ret;
 
@@ -190,14 +190,14 @@ asmlinkage int sys_ipc (uint call, int first, int second,
        if (call <= SEMCTL)
                switch (call) {
                case SEMOP:
-                       return sys_semop (first, (struct sembuf *)ptr, second);
+                       return sys_semop (first, ptr, second);
                case SEMGET:
                        return sys_semget (first, second, third);
                case SEMCTL: {
                        union semun fourth;
                        if (!ptr)
                                return -EINVAL;
-                       if (get_user(fourth.__pad, (void **) ptr))
+                       if (get_user(fourth.__pad, (void __user *__user *) ptr))
                                return -EFAULT;
                        return sys_semctl (first, second, third, fourth);
                        }
@@ -207,31 +207,26 @@ asmlinkage int sys_ipc (uint call, int first, int second,
        if (call <= MSGCTL)
                switch (call) {
                case MSGSND:
-                       return sys_msgsnd (first, (struct msgbuf *) ptr,
-                                         second, third);
+                       return sys_msgsnd (first, ptr, second, third);
                case MSGRCV:
                        switch (version) {
                        case 0: {
                                struct ipc_kludge tmp;
                                if (!ptr)
                                        return -EINVAL;
-                               if (copy_from_user (&tmp,
-                                                   (struct ipc_kludge *)ptr,
-                                                   sizeof (tmp)))
+                               if (copy_from_user (&tmp, ptr, sizeof (tmp)))
                                        return -EFAULT;
                                return sys_msgrcv (first, tmp.msgp, second,
                                                   tmp.msgtyp, third);
                                }
                        default:
-                               return sys_msgrcv (first,
-                                                  (struct msgbuf *) ptr,
+                               return sys_msgrcv (first, ptr,
                                                   second, fifth, third);
                        }
                case MSGGET:
                        return sys_msgget ((key_t) first, second);
                case MSGCTL:
-                       return sys_msgctl (first, second,
-                                          (struct msqid_ds *) ptr);
+                       return sys_msgctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }
@@ -241,20 +236,18 @@ asmlinkage int sys_ipc (uint call, int first, int second,
                        switch (version) {
                        default: {
                                ulong raddr;
-                               ret = do_shmat (first, (char *) ptr,
-                                                second, &raddr);
+                               ret = do_shmat (first, ptr, second, &raddr);
                                if (ret)
                                        return ret;
-                               return put_user (raddr, (ulong *) third);
+                               return put_user (raddr, (ulong __user *) third);
                        }
                        }
                case SHMDT:
-                       return sys_shmdt ((char *)ptr);
+                       return sys_shmdt (ptr);
                case SHMGET:
                        return sys_shmget (first, second, third);
                case SHMCTL:
-                       return sys_shmctl (first, second,
-                                          (struct shmid_ds *) ptr);
+                       return sys_shmctl (first, second, ptr);
                default:
                        return -ENOSYS;
                }
index deb36e8b04a269e3a933c98ca09644359f673f31..cdf58fbb3e730f3d2613aaab606e3639cd8e51ad 100644 (file)
@@ -169,25 +169,25 @@ void __init trap_init (void)
 
        if (CPU_IS_060 && !FPU_IS_EMU) {
                /* set up IFPSP entry points */
-               asmlinkage void snan_vec(void) asm ("_060_fpsp_snan");
-               asmlinkage void operr_vec(void) asm ("_060_fpsp_operr");
-               asmlinkage void ovfl_vec(void) asm ("_060_fpsp_ovfl");
-               asmlinkage void unfl_vec(void) asm ("_060_fpsp_unfl");
-               asmlinkage void dz_vec(void) asm ("_060_fpsp_dz");
-               asmlinkage void inex_vec(void) asm ("_060_fpsp_inex");
-               asmlinkage void fline_vec(void) asm ("_060_fpsp_fline");
-               asmlinkage void unsupp_vec(void) asm ("_060_fpsp_unsupp");
-               asmlinkage void effadd_vec(void) asm ("_060_fpsp_effadd");
-
-               vectors[VEC_FPNAN] = snan_vec;
-               vectors[VEC_FPOE] = operr_vec;
-               vectors[VEC_FPOVER] = ovfl_vec;
-               vectors[VEC_FPUNDER] = unfl_vec;
-               vectors[VEC_FPDIVZ] = dz_vec;
-               vectors[VEC_FPIR] = inex_vec;
-               vectors[VEC_LINE11] = fline_vec;
-               vectors[VEC_FPUNSUP] = unsupp_vec;
-               vectors[VEC_UNIMPEA] = effadd_vec;
+               asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
+               asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
+               asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
+               asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
+               asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
+               asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
+               asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
+               asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
+               asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
+
+               vectors[VEC_FPNAN] = snan_vec6;
+               vectors[VEC_FPOE] = operr_vec6;
+               vectors[VEC_FPOVER] = ovfl_vec6;
+               vectors[VEC_FPUNDER] = unfl_vec6;
+               vectors[VEC_FPDIVZ] = dz_vec6;
+               vectors[VEC_FPIR] = inex_vec6;
+               vectors[VEC_LINE11] = fline_vec6;
+               vectors[VEC_FPUNSUP] = unsupp_vec6;
+               vectors[VEC_UNIMPEA] = effadd_vec6;
        }
 
         /* if running on an amiga, make the NMI interrupt do nothing */
index e58654f3f8dd5c5e8444572a564c3eded86dd0c0..69d1d3d30c788cf03e373705597f49ea83ca07e2 100644 (file)
@@ -13,6 +13,7 @@ SECTIONS
   .text : {
        *(.text)
        SCHED_TEXT
+       LOCK_TEXT
        *(.fixup)
        *(.gnu.warning)
        } :text = 0x4e75
index cc37e8d3c1e2ad30a11989e220612d7a34bd37eb..65cc39c24185569f3141d3b9ea96a8ed5b66b21f 100644 (file)
@@ -14,6 +14,7 @@ SECTIONS
        *(.head)
        *(.text)
        SCHED_TEXT
+       LOCK_TEXT
        *(.fixup)
        *(.gnu.warning)
        } :text = 0x4e75
@@ -66,7 +67,7 @@ __init_begin = .;
        __initramfs_end = .;
        . = ALIGN(8192);
        __init_end = .;
-       .init.task : { *(init_task) }
+       .data.init.task : { *(.data.init_task) }
 
 
   .bss : { *(.bss) }           /* BSS */
index 4a5c5445c610fe42d92963ffb1cb7e2c85892b75..cb13c6e3ccaeb6449dd2d99898f9e1ee53eb50d5 100644 (file)
@@ -134,7 +134,7 @@ EXPORT_SYMBOL(csum_partial);
  */
 
 unsigned int
-csum_partial_copy_from_user(const unsigned char *src, unsigned char *dst,
+csum_partial_copy_from_user(const unsigned char __user *src, unsigned char *dst,
                            int len, int sum, int *csum_err)
 {
        /*
index cd19cbb213e811d33f3ee3981f59295ed35b4f4a..14f8d3f4e195f2424fbf18530d29070a10fd534f 100644 (file)
@@ -212,9 +212,6 @@ void __init config_mac(void)
        mach_reset           = mac_reset;
        mach_halt            = mac_poweroff;
        mach_power_off       = mac_poweroff;
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp               = &dummy_con;
-#endif
        mach_max_dma_address = 0xffffffff;
 #if 0
        mach_debug_init  = mac_debug_init;
index d889ba80ccdcdb33232833cba15b11202755f589..9179a37984074593dd01ddb1583814b69dcd628c 100644 (file)
@@ -293,8 +293,8 @@ void __init iop_init(void)
        }
 
        for (i = 0 ; i < NUM_IOP_CHAN ; i++) {
-               iop_send_queue[IOP_NUM_SCC][i] = 0;
-               iop_send_queue[IOP_NUM_ISM][i] = 0;
+               iop_send_queue[IOP_NUM_SCC][i] = NULL;
+               iop_send_queue[IOP_NUM_ISM][i] = NULL;
                iop_listeners[IOP_NUM_SCC][i].devname = NULL;
                iop_listeners[IOP_NUM_SCC][i].handler = NULL;
                iop_listeners[IOP_NUM_ISM][i].devname = NULL;
index 5b80d7cd954a824d573a1ea311ef94b9cde42097..bbb0c3b95e9c141614b0a0d3989e952620b8787d 100644 (file)
 extern struct mac_booter_data mac_bi_data;
 static void (*rom_reset)(void);
 
-#ifdef CONFIG_ADB
-/*
- * Return the current time as the number of seconds since January 1, 1904.
- */
-
-static long adb_read_time(void)
+#ifdef CONFIG_ADB_CUDA
+static long cuda_read_time(void)
 {
-       volatile struct adb_request req;
+       struct adb_request req;
        long time;
 
-       adb_request((struct adb_request *) &req, NULL,
-                       ADBREQ_RAW|ADBREQ_SYNC,
-                       2, CUDA_PACKET, CUDA_GET_TIME);
+       if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
+               return 0;
+       while (!req.complete)
+               cuda_poll();
 
        time = (req.reply[3] << 24) | (req.reply[4] << 16)
                | (req.reply[5] << 8) | req.reply[6];
        return time - RTC_OFFSET;
 }
 
-/*
- * Set the current system time
- */
+static void cuda_write_time(long data)
+{
+       struct adb_request req;
+       data += RTC_OFFSET;
+       if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+                       (data >> 24) & 0xFF, (data >> 16) & 0xFF,
+                       (data >> 8) & 0xFF, data & 0xFF) < 0)
+               return;
+       while (!req.complete)
+               cuda_poll();
+}
 
-static void adb_write_time(long data)
+static __u8 cuda_read_pram(int offset)
 {
-       volatile struct adb_request req;
+       struct adb_request req;
+       if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+               return 0;
+       while (!req.complete)
+               cuda_poll();
+       return req.reply[3];
+}
 
-       data += RTC_OFFSET;
+static void cuda_write_pram(int offset, __u8 data)
+{
+       struct adb_request req;
+       if (cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+               return;
+       while (!req.complete)
+               cuda_poll();
+}
+#else
+#define cuda_read_time() 0
+#define cuda_write_time(n)
+#define cuda_read_pram NULL
+#define cuda_write_pram NULL
+#endif
+
+#ifdef CONFIG_ADB_PMU68K
+static long pmu_read_time(void)
+{
+       struct adb_request req;
+       long time;
+
+       if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
+               return 0;
+       while (!req.complete)
+               pmu_poll();
 
-       adb_request((struct adb_request *) &req, NULL,
-                       ADBREQ_RAW|ADBREQ_SYNC,
-                       6, CUDA_PACKET, CUDA_SET_TIME,
+       time = (req.reply[0] << 24) | (req.reply[1] << 16)
+               | (req.reply[2] << 8) | req.reply[3];
+       return time - RTC_OFFSET;
+}
+
+static void pmu_write_time(long data)
+{
+       struct adb_request req;
+       data += RTC_OFFSET;
+       if (pmu_request(&req, NULL, 5, PMU_SET_RTC,
                        (data >> 24) & 0xFF, (data >> 16) & 0xFF,
-                       (data >> 8) & 0xFF, data & 0xFF);
+                       (data >> 8) & 0xFF, data & 0xFF) < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
 }
 
-/*
- * Get a byte from the NVRAM
- */
+static __u8 pmu_read_pram(int offset)
+{
+       struct adb_request req;
+       if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF) < 0)
+               return 0;
+       while (!req.complete)
+               pmu_poll();
+       return req.reply[3];
+}
 
-static __u8 adb_read_pram(int offset)
+static void pmu_write_pram(int offset, __u8 data)
 {
-       volatile struct adb_request req;
+       struct adb_request req;
+       if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF, data) < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
+}
+#else
+#define pmu_read_time() 0
+#define pmu_write_time(n)
+#define pmu_read_pram NULL
+#define pmu_write_pram NULL
+#endif
 
-       adb_request((struct adb_request *) &req, NULL,
-                       ADBREQ_RAW|ADBREQ_SYNC,
-                       4, CUDA_PACKET, CUDA_GET_PRAM,
-                       (offset >> 8) & 0xFF, offset & 0xFF);
-       return req.reply[3];
+#ifdef CONFIG_ADB_MACIISI
+extern int maciisi_request(struct adb_request *req,
+                       void (*done)(struct adb_request *), int nbytes, ...);
+
+static long maciisi_read_time(void)
+{
+       struct adb_request req;
+       long time;
+
+       if (maciisi_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME))
+               return 0;
+
+       time = (req.reply[3] << 24) | (req.reply[4] << 16)
+               | (req.reply[5] << 8) | req.reply[6];
+       return time - RTC_OFFSET;
 }
 
-/*
- * Write a byte to the NVRAM
- */
+static void maciisi_write_time(long data)
+{
+       struct adb_request req;
+       data += RTC_OFFSET;
+       maciisi_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
+                       (data >> 24) & 0xFF, (data >> 16) & 0xFF,
+                       (data >> 8) & 0xFF, data & 0xFF);
+}
 
-static void adb_write_pram(int offset, __u8 data)
+static __u8 maciisi_read_pram(int offset)
 {
-       volatile struct adb_request req;
+       struct adb_request req;
+       if (maciisi_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF))
+               return 0;
+       return req.reply[3];
+}
 
-       adb_request((struct adb_request *) &req, NULL,
-                       ADBREQ_RAW|ADBREQ_SYNC,
-                       5, CUDA_PACKET, CUDA_SET_PRAM,
-                       (offset >> 8) & 0xFF, offset & 0xFF,
-                       data);
+static void maciisi_write_pram(int offset, __u8 data)
+{
+       struct adb_request req;
+       maciisi_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
+                       (offset >> 8) & 0xFF, offset & 0xFF, data);
 }
-#endif /* CONFIG_ADB */
+#else
+#define maciisi_read_time() 0
+#define maciisi_write_time(n)
+#define maciisi_read_pram NULL
+#define maciisi_write_pram NULL
+#endif
 
 /*
  * VIA PRAM/RTC access routines
@@ -305,42 +396,55 @@ static void oss_shutdown(void)
 
 static void cuda_restart(void)
 {
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       2, CUDA_PACKET, CUDA_RESET_SYSTEM);
+       struct adb_request req;
+       if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM) < 0)
+               return;
+       while (!req.complete)
+               cuda_poll();
 }
 
 static void cuda_shutdown(void)
 {
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       2, CUDA_PACKET, CUDA_POWERDOWN);
+       struct adb_request req;
+       if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0)
+               return;
+       while (!req.complete)
+               cuda_poll();
 }
 
 #endif /* CONFIG_ADB_CUDA */
 
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
 
 void pmu_restart(void)
 {
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       3, PMU_PACKET, PMU_SET_INTR_MASK,
-                       PMU_INT_ADB|PMU_INT_TICK);
-
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       2, PMU_PACKET, PMU_RESET);
+       struct adb_request req;
+       if (pmu_request(&req, NULL,
+                       2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
+       if (pmu_request(&req, NULL, 1, PMU_RESET) < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
 }
 
 void pmu_shutdown(void)
 {
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       3, PMU_PACKET, PMU_SET_INTR_MASK,
-                       PMU_INT_ADB|PMU_INT_TICK);
-
-       adb_request(NULL, NULL, ADBREQ_RAW|ADBREQ_SYNC,
-                       6, PMU_PACKET, PMU_SHUTDOWN,
-                       'M', 'A', 'T', 'T');
+       struct adb_request req;
+       if (pmu_request(&req, NULL,
+                       2, PMU_SET_INTR_MASK, PMU_INT_ADB|PMU_INT_TICK) < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
+       if (pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 'M', 'A', 'T', 'T') < 0)
+               return;
+       while (!req.complete)
+               pmu_poll();
 }
 
-#endif /* CONFIG_ADB_PMU */
+#endif
 
 /*
  *-------------------------------------------------------------------
@@ -351,21 +455,22 @@ void pmu_shutdown(void)
 
 void mac_pram_read(int offset, __u8 *buffer, int len)
 {
-       __u8 (*func)(int) = NULL;
+       __u8 (*func)(int);
        int i;
 
-       if (macintosh_config->adb_type == MAC_ADB_IISI ||
-           macintosh_config->adb_type == MAC_ADB_PB1 ||
-           macintosh_config->adb_type == MAC_ADB_PB2 ||
-           macintosh_config->adb_type == MAC_ADB_CUDA) {
-#ifdef CONFIG_ADB
-               func = adb_read_pram;
-#else
-               return;
-#endif
-       } else {
+       switch(macintosh_config->adb_type) {
+       case MAC_ADB_IISI:
+               func = maciisi_read_pram; break;
+       case MAC_ADB_PB1:
+       case MAC_ADB_PB2:
+               func = pmu_read_pram; break;
+       case MAC_ADB_CUDA:
+               func = cuda_read_pram; break;
+       default:
                func = via_read_pram;
        }
+       if (!func)
+               return;
        for (i = 0 ; i < len ; i++) {
                buffer[i] = (*func)(offset++);
        }
@@ -373,21 +478,22 @@ void mac_pram_read(int offset, __u8 *buffer, int len)
 
 void mac_pram_write(int offset, __u8 *buffer, int len)
 {
-       void (*func)(int, __u8) = NULL;
+       void (*func)(int, __u8);
        int i;
 
-       if (macintosh_config->adb_type == MAC_ADB_IISI ||
-           macintosh_config->adb_type == MAC_ADB_PB1 ||
-           macintosh_config->adb_type == MAC_ADB_PB2 ||
-           macintosh_config->adb_type == MAC_ADB_CUDA) {
-#ifdef CONFIG_ADB
-               func = adb_write_pram;
-#else
-               return;
-#endif
-       } else {
+       switch(macintosh_config->adb_type) {
+       case MAC_ADB_IISI:
+               func = maciisi_write_pram; break;
+       case MAC_ADB_PB1:
+       case MAC_ADB_PB2:
+               func = pmu_write_pram; break;
+       case MAC_ADB_CUDA:
+               func = cuda_write_pram; break;
+       default:
                func = via_write_pram;
        }
+       if (!func)
+               return;
        for (i = 0 ; i < len ; i++) {
                (*func)(offset++, buffer[i]);
        }
@@ -408,7 +514,7 @@ void mac_poweroff(void)
        } else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
                cuda_shutdown();
 #endif
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
        } else if (macintosh_config->adb_type == MAC_ADB_PB1
                || macintosh_config->adb_type == MAC_ADB_PB2) {
                pmu_shutdown();
@@ -448,7 +554,7 @@ void mac_reset(void)
        } else if (macintosh_config->adb_type == MAC_ADB_CUDA) {
                cuda_restart();
 #endif
-#ifdef CONFIG_ADB_PMU
+#ifdef CONFIG_ADB_PMU68K
        } else if (macintosh_config->adb_type == MAC_ADB_PB1
                || macintosh_config->adb_type == MAC_ADB_PB2) {
                pmu_restart();
@@ -466,12 +572,13 @@ void mac_reset(void)
                /* make a 1-to-1 mapping, using the transparent tran. reg. */
                unsigned long virt = (unsigned long) mac_reset;
                unsigned long phys = virt_to_phys(mac_reset);
+               unsigned long addr = (phys&0xFF000000)|0x8777;
                unsigned long offset = phys-virt;
                local_irq_disable(); /* lets not screw this up, ok? */
                __asm__ __volatile__(".chip 68030\n\t"
                                     "pmove %0,%/tt0\n\t"
                                     ".chip 68k"
-                                    : : "m" ((phys&0xFF000000)|0x8777));
+                                    : : "m" (addr));
                /* Now jump to physical address so we can disable MMU */
                __asm__ __volatile__(
                     ".chip 68030\n\t"
@@ -588,20 +695,22 @@ int mac_hwclk(int op, struct rtc_time *t)
        unsigned long now;
 
        if (!op) { /* read */
-               if (macintosh_config->adb_type == MAC_ADB_II) {
+               switch (macintosh_config->adb_type) {
+               case MAC_ADB_II:
+               case MAC_ADB_IOP:
                        now = via_read_time();
-               } else
-#ifdef CONFIG_ADB
-               if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
-                          (macintosh_config->adb_type == MAC_ADB_PB1) ||
-                          (macintosh_config->adb_type == MAC_ADB_PB2) ||
-                          (macintosh_config->adb_type == MAC_ADB_CUDA)) {
-                       now = adb_read_time();
-               } else
-#endif
-               if (macintosh_config->adb_type == MAC_ADB_IOP) {
-                       now = via_read_time();
-               } else {
+                       break;
+               case MAC_ADB_IISI:
+                       now = maciisi_read_time();
+                       break;
+               case MAC_ADB_PB1:
+               case MAC_ADB_PB2:
+                       now = pmu_read_time();
+                       break;
+               case MAC_ADB_CUDA:
+                       now = cuda_read_time();
+                       break;
+               default:
                        now = 0;
                }
 
@@ -619,15 +728,20 @@ int mac_hwclk(int op, struct rtc_time *t)
                now = mktime(t->tm_year + 1900, t->tm_mon + 1, t->tm_mday,
                             t->tm_hour, t->tm_min, t->tm_sec);
 
-               if (macintosh_config->adb_type == MAC_ADB_II) {
-                       via_write_time(now);
-               } else if ((macintosh_config->adb_type == MAC_ADB_IISI) ||
-                          (macintosh_config->adb_type == MAC_ADB_PB1) ||
-                          (macintosh_config->adb_type == MAC_ADB_PB2) ||
-                          (macintosh_config->adb_type == MAC_ADB_CUDA)) {
-                       adb_write_time(now);
-               } else if (macintosh_config->adb_type == MAC_ADB_IOP) {
+               switch (macintosh_config->adb_type) {
+               case MAC_ADB_II:
+               case MAC_ADB_IOP:
                        via_write_time(now);
+                       break;
+               case MAC_ADB_CUDA:
+                       cuda_write_time(now);
+                       break;
+               case MAC_ADB_PB1:
+               case MAC_ADB_PB2:
+                       pmu_write_time(now);
+                       break;
+               case MAC_ADB_IISI:
+                       maciisi_write_time(now);
                }
 #endif
        }
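
Every helper added in this file follows the same submit-then-poll shape: build an adb_request on the stack, hand it to cuda_request() or pmu_request() with a NULL completion callback, and spin on req.complete while polling the interface. A minimal sketch of that pattern, using a hypothetical helper name (cuda_sync_request) and covering only the two-byte command case:

#include <linux/errno.h>
#include <linux/adb.h>
#include <linux/cuda.h>

/* Hypothetical helper, not part of the patch: it just names the
 * submit-then-poll idiom used by the new cuda_xxx/pmu_xxx routines above. */
static int cuda_sync_request(struct adb_request *req, int b0, int b1)
{
	/* NULL "done" callback: the caller waits for completion itself */
	if (cuda_request(req, NULL, 2, b0, b1) < 0)
		return -EIO;
	while (!req->complete)
		cuda_poll();		/* pump the VIA until the reply lands */
	return 0;
}

A caller would then do cuda_sync_request(&req, CUDA_PACKET, CUDA_GET_TIME) and pick the result out of req.reply[], exactly as cuda_read_time() above does by hand.
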
index 02251e5afd891f83bcfc304ed8f34a975f2cb809..4ad0ca918e2e3e08426c5dd012adec1c29781b9e 100644 (file)
@@ -366,7 +366,7 @@ static inline void fp_submant(struct fp_ext *dest, struct fp_ext *src1,
 
 #define fp_mul64(desth, destl, src1, src2) ({                          \
        asm ("mulu.l %2,%1:%0" : "=d" (destl), "=d" (desth)             \
-               : "g" (src1), "0" (src2));                              \
+               : "dm" (src1), "0" (src2));                             \
 })
 #define fp_div64(quot, rem, srch, srcl, div)                           \
        asm ("divu.l %2,%1:%0" : "=d" (quot), "=d" (rem)                \
index fe2383e36b0664d6586c02a3c54533c1d197753b..85ad19a0ac792ddc16e4a74be3854f3f752cd3cc 100644 (file)
@@ -102,7 +102,7 @@ static inline void free_io_area(void *addr)
  */
 /* Rewritten by Andreas Schwab to remove all races. */
 
-void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
 {
        struct vm_struct *area;
        unsigned long virtaddr, retaddr;
@@ -121,7 +121,7 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
        if (MACH_IS_AMIGA) {
                if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
                    && (cacheflag == IOMAP_NOCACHE_SER))
-                       return (void *)physaddr;
+                       return (void __iomem *)physaddr;
        }
 #endif
 
@@ -218,21 +218,21 @@ void *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
 #endif
        flush_tlb_all();
 
-       return (void *)retaddr;
+       return (void __iomem *)retaddr;
 }
 
 /*
  * Unmap a ioremap()ed region again
  */
-void iounmap(void *addr)
+void iounmap(void __iomem *addr)
 {
 #ifdef CONFIG_AMIGA
        if ((!MACH_IS_AMIGA) ||
            (((unsigned long)addr < 0x40000000) ||
             ((unsigned long)addr > 0x60000000)))
-                       free_io_area(addr);
+                       free_io_area((__force void *)addr);
 #else
-       free_io_area(addr);
+       free_io_area((__force void *)addr);
 #endif
 }
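
With the return type annotated __iomem, sparse can now warn when a caller mixes these cookies with ordinary kernel pointers. A generic, driver-style sketch (not tied to m68k; MY_PHYS_BASE and my_regs are invented names) of how such a mapping is meant to be used:

#include <linux/errno.h>
#include <asm/io.h>

#define MY_PHYS_BASE	0x40000000UL	/* hypothetical device address */

static void __iomem *my_regs;

static int my_map(void)
{
	my_regs = ioremap(MY_PHYS_BASE, 0x1000);
	if (!my_regs)
		return -ENOMEM;
	writel(0x1, my_regs + 0x10);	/* accessors take __iomem pointers */
	return 0;
}

static void my_unmap(void)
{
	iounmap(my_regs);		/* matches the new __iomem prototype */
}
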
 
index 30f5921ece9b2c58c21cef4d13370982248ba722..a69fe3048edce030e8d65d6403e75e291bd1176e 100644 (file)
@@ -45,6 +45,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
        volatile MK48T08ptr_t rtc = (MK48T08ptr_t)MVME_RTC_BASE;
        unsigned long flags;
        struct rtc_time wtime;
+       void __user *argp = (void __user *)arg;
 
        switch (cmd) {
        case RTC_RD_TIME:       /* Read the time/date from RTC  */
@@ -64,7 +65,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                wtime.tm_wday = BCD2BIN(rtc->bcd_dow)-1;
                rtc->ctrl = 0;
                local_irq_restore(flags);
-               return copy_to_user((void *)arg, &wtime, sizeof wtime) ?
+               return copy_to_user(argp, &wtime, sizeof wtime) ?
                                                                -EFAULT : 0;
        }
        case RTC_SET_TIME:      /* Set the RTC */
@@ -76,8 +77,7 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
                if (!capable(CAP_SYS_ADMIN))
                        return -EACCES;
 
-               if (copy_from_user(&rtc_tm, (struct rtc_time*)arg,
-                                  sizeof(struct rtc_time)))
+               if (copy_from_user(&rtc_tm, argp, sizeof(struct rtc_time)))
                        return -EFAULT;
 
                yrs = rtc_tm.tm_year;
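
The pattern adopted here: cast the ioctl argument once to void __user * and pass only that to the uaccess helpers, so sparse can check the user/kernel pointer split. A tiny illustrative handler (my_ioctl and my_data are made-up names, not part of this driver):

#include <linux/errno.h>
#include <asm/uaccess.h>

struct my_data { int value; };

static int my_ioctl(unsigned int cmd, unsigned long arg)
{
	void __user *argp = (void __user *)arg;	/* one cast, reused below */
	struct my_data out = { .value = 42 };
	struct my_data in;

	switch (cmd) {
	case 0:	/* "read" direction */
		return copy_to_user(argp, &out, sizeof(out)) ? -EFAULT : 0;
	case 1:	/* "write" direction */
		if (copy_from_user(&in, argp, sizeof(in)))
			return -EFAULT;
		return 0;
	}
	return -EINVAL;
}
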
index 02b626bae4ae9028dcd32029bec9ab9c615cea22..5e0f9b04d45e6adc0f94d2a23050831d124b365d 100644 (file)
@@ -36,8 +36,6 @@
 #include <asm/machdep.h>
 #include <asm/q40_master.h>
 
-extern void floppy_setup(char *str, int *ints);
-
 extern irqreturn_t q40_process_int (int level, struct pt_regs *regs);
 extern irqreturn_t (*q40_default_handler[]) (int, void *, struct pt_regs *);  /* added just for debugging */
 extern void q40_init_IRQ (void);
@@ -194,9 +192,6 @@ void __init config_q40(void)
     mach_heartbeat = q40_heartbeat;
 #endif
     mach_halt = q40_halt;
-#ifdef CONFIG_DUMMY_CONSOLE
-    conswitchp = &dummy_con;
-#endif
 
     /* disable a few things that SMSQ might have left enabled */
     q40_disable_irqs();
index 77d05bcc32214bb7f9fe979cbc88249324a75063..f1ca0dfbaa67f928b55ad62ea38d217b4a0ff982 100644 (file)
@@ -160,9 +160,6 @@ void __init config_sun3(void)
        mach_hwclk           =  sun3_hwclk;
        mach_halt            =  sun3_halt;
        mach_get_hardware_list = sun3_get_hardware_list;
-#if defined(CONFIG_DUMMY_CONSOLE)
-       conswitchp           = &dummy_con;
-#endif
 
        memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
 // PROM seems to want the last couple of physical pages. --m
index 0ef547f5494d85d732e99d61e764a5200cccb721..0920f5d33606d972fe349308dc76cefd93711195 100644 (file)
@@ -71,10 +71,6 @@ void __init config_sun3x(void)
        mach_get_model       = sun3_get_model;
        mach_get_hardware_list = sun3x_get_hardware_list;
 
-#ifdef CONFIG_DUMMY_CONSOLE
-       conswitchp           = &dummy_con;
-#endif
-
        sun3_intreg = (unsigned char *)SUN3X_INTREG;
 
        /* only the serial console is known to work anyway... */
index 8b3cf57ba706f04043a3da3f11e590ea5d7052a0..99bf43824795e0004b2850413ac3c84450f21362 100644 (file)
@@ -198,10 +198,9 @@ int copy_thread(int nr, unsigned long clone_flags,
 {
        struct pt_regs * childregs;
        struct switch_stack * childstack, *stack;
-       unsigned long stack_offset, *retp;
+       unsigned long *retp;
 
-       stack_offset = THREAD_SIZE - sizeof(struct pt_regs);
-       childregs = (struct pt_regs *) ((unsigned long) p->thread_info + stack_offset);
+       childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
 
        *childregs = *regs;
        childregs->d0 = 0;
index 0476a4dce14e47ac53e811f9a5a63feb9d98b605..fa98f10d0132262da989f9110a46c2b9ddbb6bd5 100644 (file)
@@ -140,12 +140,12 @@ void flush_thread(void)
 int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        unsigned long unused, struct task_struct *p, struct pt_regs *regs)
 {
-       struct thread_info *ti = p->thread_info;
+       struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
        long childksp;
        p->set_child_tid = p->clear_child_tid = NULL;
 
-       childksp = (unsigned long)ti + THREAD_SIZE - 32;
+       childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
 
        preempt_disable();
 
@@ -229,9 +229,7 @@ void elf_dump_regs(elf_greg_t *gp, struct pt_regs *regs)
 
 int dump_task_regs (struct task_struct *tsk, elf_gregset_t *regs)
 {
-       struct thread_info *ti = tsk->thread_info;
-       long ksp = (unsigned long)ti + THREAD_SIZE - 32;
-       elf_dump_regs(&(*regs)[0], (struct pt_regs *) ksp - 1);
+       elf_dump_regs(*regs, task_pt_regs(tsk));
        return 1;
 }
 
@@ -409,7 +407,7 @@ unsigned long get_wchan(struct task_struct *p)
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
 
-       stack_page = (unsigned long)p->thread_info;
+       stack_page = (unsigned long)task_stack_page(p);
        if (!stack_page || !mips_frame_info_initialized)
                return 0;
 
index 8d25493353040597a240a631c18b9340d38be30c..f838b36cc765bcbc9ad544701aec14e2ff29c278 100644 (file)
@@ -64,8 +64,7 @@ int ptrace_getregs (struct task_struct *child, __s64 __user *data)
        if (!access_ok(VERIFY_WRITE, data, 38 * 8))
                return -EIO;
 
-       regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-              THREAD_SIZE - 32 - sizeof(struct pt_regs));
+       regs = task_pt_regs(child);
 
        for (i = 0; i < 32; i++)
                __put_user (regs->regs[i], data + i);
@@ -92,8 +91,7 @@ int ptrace_setregs (struct task_struct *child, __s64 __user *data)
        if (!access_ok(VERIFY_READ, data, 38 * 8))
                return -EIO;
 
-       regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-              THREAD_SIZE - 32 - sizeof(struct pt_regs));
+       regs = task_pt_regs(child);
 
        for (i = 0; i < 32; i++)
                __get_user (regs->regs[i], data + i);
@@ -198,8 +196,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                struct pt_regs *regs;
                unsigned long tmp = 0;
 
-               regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-                      THREAD_SIZE - 32 - sizeof(struct pt_regs));
+               regs = task_pt_regs(child);
                ret = 0;  /* Default return value. */
 
                switch (addr) {
@@ -314,8 +311,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_POKEUSR: {
                struct pt_regs *regs;
                ret = 0;
-               regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-                      THREAD_SIZE - 32 - sizeof(struct pt_regs));
+               regs = task_pt_regs(child);
 
                switch (addr) {
                case 0 ... 31:
@@ -442,7 +438,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(child->thread_info->tp_value,
+               ret = put_user(task_thread_info(child)->tp_value,
                                (unsigned long __user *) data);
                break;
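
task_pt_regs(child) replaces the open-coded pointer arithmetic removed in this file and in ptrace32.c below. As a hedged sketch (the authoritative definitions live in the asm-mips headers and may differ in detail), the helpers amount to:

/* Approximate expansions, shown only to make the conversion's intent clear;
 * they compute the same address as the deleted expression. */
#define __KSTK_TOS(tsk) \
	((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
#define task_pt_regs(tsk) \
	((struct pt_regs *)__KSTK_TOS(tsk) - 1)
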
 
index 1f998bfde1656d88702c19e6b6deb34e50685217..0c82b25d8c6d17c087f5c3ec0c704052dbd22bd3 100644 (file)
@@ -126,8 +126,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
                struct pt_regs *regs;
                unsigned int tmp;
 
-               regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-                      THREAD_SIZE - 32 - sizeof(struct pt_regs));
+               regs = task_pt_regs(child);
                ret = 0;  /* Default return value. */
 
                switch (addr) {
@@ -259,8 +258,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
        case PTRACE_POKEUSR: {
                struct pt_regs *regs;
                ret = 0;
-               regs = (struct pt_regs *) ((unsigned long) child->thread_info +
-                      THREAD_SIZE - 32 - sizeof(struct pt_regs));
+               regs = task_pt_regs(child);
 
                switch (addr) {
                case 0 ... 31:
@@ -377,7 +375,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
                break;
 
        case PTRACE_GET_THREAD_AREA:
-               ret = put_user(child->thread_info->tp_value,
+               ret = put_user(task_thread_info(child)->tp_value,
                                (unsigned int __user *) (unsigned long) data);
                break;
 
@@ -391,7 +389,7 @@ asmlinkage int sys32_ptrace(int request, int pid, int addr, int data)
                break;
 
        case PTRACE_GET_THREAD_AREA_3264:
-               ret = put_user(child->thread_info->tp_value,
+               ret = put_user(task_thread_info(child)->tp_value,
                                (unsigned long __user *) (unsigned long) data);
                break;
 
index d429544ba4bcc7ed99e2abeeab5a2906366c9e78..794a1c3de2a493b62ac2f60f38c3665d30763489 100644 (file)
@@ -287,6 +287,7 @@ void prom_prepare_cpus(unsigned int max_cpus)
  */
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
+       struct thread_info *gp = task_thread_info(idle);
        dvpe();
        set_c0_mvpcontrol(MVPCONTROL_VPC);
 
@@ -307,11 +308,9 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
        write_tc_gpr_sp( __KSTK_TOS(idle));
 
        /* global pointer */
-       write_tc_gpr_gp((unsigned long)idle->thread_info);
+       write_tc_gpr_gp((unsigned long)gp);
 
-       flush_icache_range((unsigned long)idle->thread_info,
-                                          (unsigned long)idle->thread_info +
-                                          sizeof(struct thread_info));
+       flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
 
        /* finally out of configuration and into chaos */
        clear_c0_mvpcontrol(MVPCONTROL_VPC);
index 006881942aa287fd63f925605e4195954cc1430b..332358430ff5122a3639ccdb8a7ead2e8db03fd7 100644 (file)
@@ -263,7 +263,7 @@ asmlinkage int sys_olduname(struct oldold_utsname * name)
 
 void sys_set_thread_area(unsigned long addr)
 {
-       struct thread_info *ti = current->thread_info;
+       struct thread_info *ti = task_thread_info(current);
 
        ti->tp_value = addr;
 
index 7058893d5ad2227259746895558f47644e6d66bd..59a187956de028cc4812a8822aea55fcfa5d68e0 100644 (file)
@@ -519,7 +519,7 @@ static inline int simulate_llsc(struct pt_regs *regs)
  */
 static inline int simulate_rdhwr(struct pt_regs *regs)
 {
-       struct thread_info *ti = current->thread_info;
+       struct thread_info *ti = task_thread_info(current);
        unsigned int opcode;
 
        if (unlikely(get_insn_opcode(regs, &opcode)))
index 0527170d6adb46fee7165b22b925c3d0f72c61df..f17f575f58f0a17ddfb700549110c07feb4afad1 100644 (file)
@@ -93,8 +93,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
  */
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
-       unsigned long gp = (unsigned long) idle->thread_info;
-       unsigned long sp = gp + THREAD_SIZE - 32;
+       unsigned long gp = (unsigned long) task_thread_info(idle);
+       unsigned long sp = __KSTK_TOP(idle);
 
        secondary_sp = sp;
        secondary_gp = gp;
index 3a8291b7d26d1aa84122bdbfa7a516cb3eb62cd9..dbef3f6b565022341ce983521a9314b76813c748 100644 (file)
@@ -168,8 +168,8 @@ void __init prom_prepare_cpus(unsigned int max_cpus)
  */
 void __init prom_boot_secondary(int cpu, struct task_struct *idle)
 {
-       unsigned long gp = (unsigned long) idle->thread_info;
-       unsigned long sp = gp + THREAD_SIZE - 32;
+       unsigned long gp = (unsigned long)task_thread_info(idle);
+       unsigned long sp = __KSTK_TOS(idle);
 
        LAUNCH_SLAVE(cputonasid(cpu),cputoslice(cpu),
                (launch_proc_t)MAPPED_KERN_RW_TO_K0(smp_bootstrap),
index e8485124b8fc903fda4b8768825f080ff19c5a5b..4477af3d8074fbd067d38d29b9b8936693c3b527 100644 (file)
@@ -60,7 +60,7 @@ void prom_boot_secondary(int cpu, struct task_struct *idle)
 
        retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
                               __KSTK_TOS(idle),
-                              (unsigned long)idle->thread_info, 0);
+                              (unsigned long)task_thread_info(idle), 0);
        if (retval != 0)
                printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
 }
index 1eaa0d37f677feffe7e2c8fd4f3439f3bbe6ce41..2d804e2d16d11f64d7482f3d78d36a65fbaf6689 100644 (file)
@@ -173,8 +173,6 @@ int register_parisc_driver(struct parisc_driver *driver)
        WARN_ON(driver->drv.probe != NULL);
        WARN_ON(driver->drv.remove != NULL);
 
-       driver->drv.probe = parisc_driver_probe;
-       driver->drv.remove = parisc_driver_remove;
        driver->drv.name = driver->name;
 
        return driver_register(&driver->drv);
@@ -575,6 +573,8 @@ struct bus_type parisc_bus_type = {
        .name = "parisc",
        .match = parisc_generic_match,
        .dev_attrs = parisc_device_attrs,
+       .probe = parisc_driver_probe,
+       .remove = parisc_driver_remove,
 };
 
 /**
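
Rather than copying parisc_driver_probe/parisc_driver_remove into every registered driver, the hooks now sit on parisc_bus_type itself. A hedged sketch of the dispatch the driver core performs once a bus provides these methods (loosely modelled on drivers/base, illustrative only):

#include <linux/device.h>

static int example_probe_dispatch(struct device *dev, struct device_driver *drv)
{
	if (dev->bus->probe)		/* bus-wide hook, the new parisc scheme */
		return dev->bus->probe(dev);
	if (drv->probe)			/* per-driver hook, the old scheme */
		return drv->probe(dev);
	return 0;
}
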
index 4eb70a40ec7eceacc4fd029a7d0b8e6a6f4eaf91..5da41677e70bd36201d9fd729345664d4c4b6781 100644 (file)
@@ -295,7 +295,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
            struct task_struct * p, struct pt_regs * pregs)
 {
        struct pt_regs * cregs = &(p->thread.regs);
-       struct thread_info *ti = p->thread_info;
+       void *stack = task_stack_page(p);
        
        /* We have to use void * instead of a function pointer, because
         * function pointers aren't a pointer to the function on 64-bit.
@@ -322,7 +322,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
         */
        if (usp == 1) {
                /* kernel thread */
-               cregs->ksp = (((unsigned long)(ti)) + THREAD_SZ_ALGN);
+               cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN;
                /* Must exit via ret_from_kernel_thread in order
                 * to call schedule_tail()
                 */
@@ -344,7 +344,7 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                 */
 
                /* Use same stack depth as parent */
-               cregs->ksp = ((unsigned long)(ti))
+               cregs->ksp = (unsigned long)stack
                        + (pregs->gr[21] & (THREAD_SIZE - 1));
                cregs->gr[30] = usp;
                if (p->personality == PER_HPUX) {
index 17f23c26f1ca907efa041bc827ffea23b3a58626..25564b7ca6bbc084f60a25978431c20b5e163f90 100644 (file)
@@ -517,7 +517,7 @@ int __init smp_boot_one_cpu(int cpuid)
        if (IS_ERR(idle))
                panic("SMP: fork failed for CPU:%d", cpuid);
 
-       idle->thread_info->cpu = cpuid;
+       task_thread_info(idle)->cpu = cpuid;
 
        /* Let _start know what logical CPU we're booting
        ** (offset into init_tasks[],cpu_data[])
index d3654a264ef7533a8811f9e170fb4f7b4c5499ab..44dd82b791d17075f2ff44ea1ed2eb47c1024759 100644 (file)
@@ -139,17 +139,14 @@ drivers-$(CONFIG_CPM2)            += arch/ppc/8260_io/
 
 drivers-$(CONFIG_OPROFILE)     += arch/powerpc/oprofile/
 
-defaultimage-$(CONFIG_PPC32)   := zImage
+# Default to zImage, override when needed
+defaultimage-y                 := zImage
 defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
-defaultimage-$(CONFIG_PPC_PSERIES) := zImage
 KBUILD_IMAGE := $(defaultimage-y)
 all: $(KBUILD_IMAGE)
 
 CPPFLAGS_vmlinux.lds   := -Upowerpc
 
-# All the instructions talk about "make bzImage".
-bzImage: zImage
-
 BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage
 
 .PHONY: $(BOOT_TARGETS)
index b53d677f6742d521867e9512be1e86e45041eae4..788dec4c7ef37410b0bca04814ecab189d1e45d9 100644 (file)
@@ -25,8 +25,8 @@ HOSTCC                := gcc
 BOOTCFLAGS     := $(HOSTCFLAGS) -fno-builtin -nostdinc -isystem \
                   $(shell $(CROSS32CC) -print-file-name=include) -fPIC
 BOOTAFLAGS     := -D__ASSEMBLY__ $(BOOTCFLAGS) -traditional -nostdinc
-BOOTLFLAGS     := -T $(srctree)/$(src)/zImage.lds
 OBJCOPYFLAGS    := contents,alloc,load,readonly,data
+OBJCOPY_COFF_ARGS := -O aixcoff-rs6000 --set-start 0x500000
 
 zlib       := infblock.c infcodes.c inffast.c inflate.c inftrees.c infutil.c
 zlibheader := infblock.h infcodes.h inffast.h inftrees.h infutil.h
@@ -35,7 +35,7 @@ zliblinuxheader := zlib.h zconf.h zutil.h
 $(addprefix $(obj)/,$(zlib) main.o): $(addprefix $(obj)/,$(zliblinuxheader)) $(addprefix $(obj)/,$(zlibheader))
 #$(addprefix $(obj)/,main.o): $(addprefix $(obj)/,zlib.h)
 
-src-boot := string.S prom.c main.c div64.S crt0.S
+src-boot := crt0.S string.S prom.c stdio.c main.c div64.S
 src-boot += $(zlib)
 src-boot := $(addprefix $(obj)/, $(src-boot))
 obj-boot := $(addsuffix .o, $(basename $(src-boot)))
@@ -70,7 +70,7 @@ quiet_cmd_bootas = BOOTAS  $@
       cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $<
 
 quiet_cmd_bootld = BOOTLD  $@
-      cmd_bootld = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(2)
+      cmd_bootld = $(CROSS32LD) -T $(srctree)/$(src)/$(3) -o $@ $(2)
 
 $(patsubst %.c,%.o, $(filter %.c, $(src-boot))): %.o: %.c
        $(call if_changed_dep,bootcc)
@@ -87,12 +87,14 @@ obj-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.o, $(section)))
 src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section)))
 gz-sec  = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section)))
 
-hostprogs-y            := addnote addRamDisk
-targets                += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
-                          $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
-                          $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
-                          $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
-                          vmlinux.initrd
+hostprogs-y            := addnote addRamDisk hack-coff
+
+targets += zImage.vmode zImage.initrd.vmode zImage zImage.initrd \
+          zImage.coff zImage.initrd.coff \
+          $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \
+          $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \
+          $(patsubst $(obj)/%,%, $(call gz-sec, $(required) $(initrd))) \
+          vmlinux.initrd
 extra-y                        := initrd.o
 
 quiet_cmd_ramdisk = RAMDISK $@
@@ -114,6 +116,10 @@ quiet_cmd_addsection = ADDSEC  $@
 quiet_cmd_addnote = ADDNOTE $@
       cmd_addnote = $(obj)/addnote $@
 
+quiet_cmd_gencoff = COFF    $@
+      cmd_gencoff = $(OBJCOPY) $(OBJCOPY_COFF_ARGS) $@ && \
+                   $(obj)/hack-coff $@
+
 $(call gz-sec, $(required)): $(obj)/kernel-%.gz: %
        $(call if_changed,gzip)
 
@@ -127,22 +133,35 @@ $(call obj-sec, $(required) $(initrd)): $(obj)/kernel-%.o: $(obj)/kernel-%.c
        $(call if_changed_dep,bootcc)
        $(call cmd,addsection)
 
-$(obj)/zImage.vmode: obj-boot += $(call obj-sec, $(required))
+$(obj)/zImage.vmode $(obj)/zImage.coff: obj-boot += $(call obj-sec, $(required))
 $(obj)/zImage.vmode: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.lds
-       $(call cmd,bootld,$(obj-boot))
+       $(call cmd,bootld,$(obj-boot),zImage.lds)
 
-$(obj)/zImage.initrd.vmode: obj-boot += $(call obj-sec, $(required) $(initrd))
+$(obj)/zImage.initrd.vmode $(obj)/zImage.initrd.coff: obj-boot += $(call obj-sec, $(required) $(initrd))
 $(obj)/zImage.initrd.vmode: $(call obj-sec, $(required) $(initrd)) $(obj-boot) $(srctree)/$(src)/zImage.lds
-       $(call cmd,bootld,$(obj-boot))
+       $(call cmd,bootld,$(obj-boot),zImage.lds)
+
+# For 32-bit powermacs, build the COFF images as well as the ELF images.
+coffimage-$(CONFIG_PPC_PMAC)-$(CONFIG_PPC32) := $(obj)/zImage.coff
+coffrdimg-$(CONFIG_PPC_PMAC)-$(CONFIG_PPC32) := $(obj)/zImage.initrd.coff
 
-$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote
+$(obj)/zImage: $(obj)/zImage.vmode $(obj)/addnote $(coffimage-y-y)
        @cp -f $< $@
        $(call if_changed,addnote)
 
-$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
+$(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote $(coffrdimg-y-y)
        @cp -f $< $@
        $(call if_changed,addnote)
 
+$(obj)/zImage.coff: $(call obj-sec, $(required)) $(obj-boot) $(srctree)/$(src)/zImage.coff.lds $(obj)/hack-coff
+       $(call cmd,bootld,$(obj-boot),zImage.coff.lds)
+       $(call cmd,gencoff)
+
+$(obj)/zImage.initrd.coff: $(call obj-sec, $(required) $(initrd)) $(obj-boot) \
+                          $(srctree)/$(src)/zImage.coff.lds $(obj)/hack-coff
+       $(call cmd,bootld,$(obj-boot),zImage.coff.lds)
+       $(call cmd,gencoff)
+
 #-----------------------------------------------------------
 # build u-boot images
 #-----------------------------------------------------------
index d2f2ace56cd34df5e1203a85ce5248e763e3133a..e0192c26037b918f1e195d41d2f5ec9176606535 100644 (file)
 #include "ppc_asm.h"
 
        .text
+       /* a procedure descriptor used when booting this as a COFF file */
+_zimage_start_opd:
+       .long   _zimage_start, 0, 0, 0
+
        .globl  _zimage_start
 _zimage_start:
+       /* Work out the offset between the address we were linked at
+          and the address where we're running. */
        bl      1f
-
-1:
-       mflr    r0
+1:     mflr    r0
        lis     r9,1b@ha
        addi    r9,r9,1b@l
        subf.   r0,r9,r0
-       beq     3f
+       beq     3f              /* if running at same address as linked */
 
+       /* The .got2 section contains a list of addresses, so add
+          the address offset onto each entry. */
        lis     r9,__got2_start@ha
        addi    r9,r9,__got2_start@l
        lis     r8,__got2_end@ha
@@ -32,15 +38,14 @@ _zimage_start:
        srwi.   r8,r8,2
        mtctr   r8
        add     r9,r0,r9
-2:
-       lwz     r8,0(r9)
+2:     lwz     r8,0(r9)
        add     r8,r8,r0
        stw     r8,0(r9)
        addi    r9,r9,4
        bdnz    2b
 
-3:
-       lis     r9,_start@h
+       /* Do a cache flush for our text, in case OF didn't */
+3:     lis     r9,_start@h
        add     r9,r0,r9
        lis     r8,_etext@ha
        addi    r8,r8,_etext@l
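
The new comments make the startup sequence explicit: compute the load offset, patch the .got2 address table, then flush the caches. The relocation loop, rendered in C purely for readability (this function does not exist in the wrapper):

extern unsigned long __got2_start[], __got2_end[];	/* linker-provided */

static void relocate_got2(unsigned long offset)
{
	unsigned long *p;

	/* same effect as the lwz/add/stw ... bdnz loop above */
	for (p = __got2_start; p < __got2_end; p++)
		*p += offset;
}
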
diff --git a/arch/powerpc/boot/hack-coff.c b/arch/powerpc/boot/hack-coff.c
new file mode 100644 (file)
index 0000000..5e5a657
--- /dev/null
+++ b/arch/powerpc/boot/hack-coff.c
@@ -0,0 +1,84 @@
+/*
+ * hack-coff.c - hack the header of an xcoff file to fill in
+ * a few fields needed by the Open Firmware xcoff loader on
+ * Power Macs but not initialized by objcopy.
+ *
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <string.h>
+#include "rs6000.h"
+
+#define AOUT_MAGIC     0x010b
+
+#define get_16be(x)    ((((unsigned char *)(x))[0] << 8) \
+                        + ((unsigned char *)(x))[1])
+#define put_16be(x, v) (((unsigned char *)(x))[0] = (v) >> 8, \
+                        ((unsigned char *)(x))[1] = (v) & 0xff)
+#define get_32be(x)    ((((unsigned char *)(x))[0] << 24) \
+                        + (((unsigned char *)(x))[1] << 16) \
+                        + (((unsigned char *)(x))[2] << 8) \
+                        + ((unsigned char *)(x))[3])
+
+int
+main(int ac, char **av)
+{
+    int fd;
+    int i, nsect;
+    int aoutsz;
+    struct external_filehdr fhdr;
+    AOUTHDR aout;
+    struct external_scnhdr shdr;
+
+    if (ac != 2) {
+       fprintf(stderr, "Usage: hack-coff coff-file\n");
+       exit(1);
+    }
+    if ((fd = open(av[1], 2)) == -1) {
+       perror(av[2]);
+       exit(1);
+    }
+    if (read(fd, &fhdr, sizeof(fhdr)) != sizeof(fhdr))
+       goto readerr;
+    i = get_16be(fhdr.f_magic);
+    if (i != U802TOCMAGIC && i != U802WRMAGIC && i != U802ROMAGIC) {
+       fprintf(stderr, "%s: not an xcoff file\n", av[1]);
+       exit(1);
+    }
+    aoutsz = get_16be(fhdr.f_opthdr);
+    if (read(fd, &aout, aoutsz) != aoutsz)
+       goto readerr;
+    nsect = get_16be(fhdr.f_nscns);
+    for (i = 0; i < nsect; ++i) {
+       if (read(fd, &shdr, sizeof(shdr)) != sizeof(shdr))
+           goto readerr;
+       if (strcmp(shdr.s_name, ".text") == 0) {
+           put_16be(aout.o_snentry, i+1);
+           put_16be(aout.o_sntext, i+1);
+       } else if (strcmp(shdr.s_name, ".data") == 0) {
+           put_16be(aout.o_sndata, i+1);
+       } else if (strcmp(shdr.s_name, ".bss") == 0) {
+           put_16be(aout.o_snbss, i+1);
+       }
+    }
+    put_16be(aout.magic, AOUT_MAGIC);
+    if (lseek(fd, (long) sizeof(struct external_filehdr), 0) == -1
+       || write(fd, &aout, aoutsz) != aoutsz) {
+       fprintf(stderr, "%s: write error\n", av[1]);
+       exit(1);
+    }
+    close(fd);
+    exit(0);
+
+readerr:
+    fprintf(stderr, "%s: read error or file too short\n", av[1]);
+    exit(1);
+}
index 64ec93116fa6cfb2d1eb6eed1578b80b61965487..55ec5986725079c24c56bef079c5f822700b9f0b 100644 (file)
@@ -21,8 +21,8 @@ extern void flush_cache(void *, unsigned long);
 
 
 /* Value picked to match that used by yaboot */
-#define PROG_START     0x01400000
-#define RAM_END                (512<<20) // Fixme: use OF */
+#define PROG_START     0x01400000      /* only used on 64-bit systems */
+#define RAM_END                (512<<20)       /* Fixme: use OF */
 #define        ONE_MB          0x100000
 
 extern char _start[];
@@ -160,6 +160,17 @@ static int is_elf64(void *hdr)
        elfoffset = (unsigned long)elf64ph->p_offset;
        vmlinux.size = (unsigned long)elf64ph->p_filesz + elfoffset;
        vmlinux.memsize = (unsigned long)elf64ph->p_memsz + elfoffset;
+
+#if defined(PROG_START)
+       /*
+        * Maintain a "magic" minimum address. This keeps some older
+        * firmware platforms running.
+        */
+
+       if (claim_base < PROG_START)
+               claim_base = PROG_START;
+#endif
+
        return 1;
 }
 
@@ -206,12 +217,18 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
                exit();
        if (getprop(chosen_handle, "stdout", &stdout, sizeof(stdout)) != 4)
                exit();
-       stderr = stdout;
-       if (getprop(chosen_handle, "stdin", &stdin, sizeof(stdin)) != 4)
-               exit();
 
        printf("\n\rzImage starting: loaded at 0x%p (sp: 0x%p)\n\r", _start, sp);
 
+       /*
+        * The first available claim_base must be above the end of the
+        * loaded kernel wrapper file (_start to _end includes the
+        * initrd image if it is present) and rounded up to a nice
+        * 1 MB boundary for good measure.
+        */
+
+       claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
+
        vmlinuz.addr = (unsigned long)_vmlinux_start;
        vmlinuz.size = (unsigned long)(_vmlinux_end - _vmlinux_start);
 
@@ -228,25 +245,6 @@ void start(unsigned long a1, unsigned long a2, void *promptr, void *sp)
                exit();
        }
 
-       /*
-        * The first available claim_base must be above the end of the
-        * the loaded kernel wrapper file (_start to _end includes the
-        * initrd image if it is present) and rounded up to a nice
-        * 1 MB boundary for good measure.
-        */
-
-       claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB);
-
-#if defined(PROG_START)
-       /*
-        * Maintain a "magic" minimum address. This keeps some older
-        * firmware platforms running.
-        */
-
-       if (claim_base < PROG_START)
-               claim_base = PROG_START;
-#endif
-
        /* We need to claim the memsize plus the file offset since gzip
         * will expand the header (file offset), then the kernel, then
         * possible rubbish we don't care about. But the kernel bss must
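
The claim_base setup now happens in one obvious place: round the end of the wrapper image up to a megabyte, and only the 64-bit ELF path bumps it further to PROG_START. Below is a standalone illustration of the rounding step; _ALIGN_UP is reproduced the way such helpers are usually written, not copied from the kernel headers:

#include <stdio.h>

#define ONE_MB		0x100000UL
#define _ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* a: power of two */

int main(void)
{
	unsigned long end = 0x00612345UL;	/* pretend end of the wrapper */
	unsigned long claim_base = _ALIGN_UP(end, ONE_MB);

	printf("claim_base = 0x%08lx\n", claim_base);	/* prints 0x00700000 */
	return 0;
}
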
index 4bea2f4dcb067412be7610b23308a0d9c996302b..fa0057736f6b391bfc5fa96bfd5fdbebc217754d 100644 (file)
 #include "prom.h"
 
 int (*prom)(void *);
+phandle chosen_handle;
+ihandle stdout;
 
-void *chosen_handle;
-
-void *stdin;
-void *stdout;
-void *stderr;
-
-
-int
-write(void *handle, void *ptr, int nb)
-{
-       struct prom_args {
-               char *service;
-               int nargs;
-               int nret;
-               void *ihandle;
-               void *addr;
-               int len;
-               int actual;
-       } args;
-
-       args.service = "write";
-       args.nargs = 3;
-       args.nret = 1;
-       args.ihandle = handle;
-       args.addr = ptr;
-       args.len = nb;
-       args.actual = -1;
-       (*prom)(&args);
-       return args.actual;
-}
-
-int
-read(void *handle, void *ptr, int nb)
+int call_prom(const char *service, int nargs, int nret, ...)
 {
+       int i;
        struct prom_args {
-               char *service;
+               const char *service;
                int nargs;
                int nret;
-               void *ihandle;
-               void *addr;
-               int len;
-               int actual;
-       } args;
-
-       args.service = "read";
-       args.nargs = 3;
-       args.nret = 1;
-       args.ihandle = handle;
-       args.addr = ptr;
-       args.len = nb;
-       args.actual = -1;
-       (*prom)(&args);
-       return args.actual;
-}
-
-void
-exit()
-{
-       struct prom_args {
-               char *service;
-       } args;
-
-       for (;;) {
-               args.service = "exit";
-               (*prom)(&args);
-       }
-}
-
-void
-pause(void)
-{
-       struct prom_args {
-               char *service;
+               unsigned int args[12];
        } args;
+       va_list list;
 
-       args.service = "enter";
-       (*prom)(&args);
-}
+       args.service = service;
+       args.nargs = nargs;
+       args.nret = nret;
 
-void *
-finddevice(const char *name)
-{
-       struct prom_args {
-               char *service;
-               int nargs;
-               int nret;
-               const char *devspec;
-               void *phandle;
-       } args;
+       va_start(list, nret);
+       for (i = 0; i < nargs; i++)
+               args.args[i] = va_arg(list, unsigned int);
+       va_end(list);
 
-       args.service = "finddevice";
-       args.nargs = 1;
-       args.nret = 1;
-       args.devspec = name;
-       args.phandle = (void *) -1;
-       (*prom)(&args);
-       return args.phandle;
-}
+       for (i = 0; i < nret; i++)
+               args.args[nargs+i] = 0;
 
-void *
-claim(unsigned long virt, unsigned long size, unsigned long align)
-{
-       struct prom_args {
-               char *service;
-               int nargs;
-               int nret;
-               unsigned int virt;
-               unsigned int size;
-               unsigned int align;
-               void *ret;
-       } args;
+       if (prom(&args) < 0)
+               return -1;
 
-       args.service = "claim";
-       args.nargs = 3;
-       args.nret = 1;
-       args.virt = virt;
-       args.size = size;
-       args.align = align;
-       (*prom)(&args);
-       return args.ret;
+       return (nret > 0)? args.args[nargs]: 0;
 }
 
-int
-getprop(void *phandle, const char *name, void *buf, int buflen)
+int call_prom_ret(const char *service, int nargs, int nret,
+                 unsigned int *rets, ...)
 {
+       int i;
        struct prom_args {
-               char *service;
+               const char *service;
                int nargs;
                int nret;
-               void *phandle;
-               const char *name;
-               void *buf;
-               int buflen;
-               int size;
+               unsigned int args[12];
        } args;
+       va_list list;
 
-       args.service = "getprop";
-       args.nargs = 4;
-       args.nret = 1;
-       args.phandle = phandle;
-       args.name = name;
-       args.buf = buf;
-       args.buflen = buflen;
-       args.size = -1;
-       (*prom)(&args);
-       return args.size;
-}
+       args.service = service;
+       args.nargs = nargs;
+       args.nret = nret;
 
-int
-putc(int c, void *f)
-{
-       char ch = c;
+       va_start(list, rets);
+       for (i = 0; i < nargs; i++)
+               args.args[i] = va_arg(list, unsigned int);
+       va_end(list);
 
-       if (c == '\n')
-               putc('\r', f);
-       return write(f, &ch, 1) == 1? c: -1;
-}
+       for (i = 0; i < nret; i++)
+               args.args[nargs+i] = 0;
 
-int
-putchar(int c)
-{
-       return putc(c, stdout);
-}
+       if (prom(&args) < 0)
+               return -1;
 
-int
-fputs(char *str, void *f)
-{
-       int n = strlen(str);
+       if (rets != (void *) 0)
+               for (i = 1; i < nret; ++i)
+                       rets[i-1] = args.args[nargs+i];
 
-       return write(f, str, n) == n? 0: -1;
+       return (nret > 0)? args.args[nargs]: 0;
 }
 
-size_t strnlen(const char * s, size_t count)
+int write(void *handle, void *ptr, int nb)
 {
-       const char *sc;
-
-       for (sc = s; count-- && *sc != '\0'; ++sc)
-               /* nothing */;
-       return sc - s;
+       return call_prom("write", 3, 1, handle, ptr, nb);
 }
 
-extern unsigned int __div64_32(unsigned long long *dividend,
-                              unsigned int divisor);
-
-/* The unnecessary pointer compare is there
- * to check for type safety (n must be 64bit)
+/*
+ * Older OF's require that when claiming a specific range of addresses,
+ * we claim the physical space in the /memory node and the virtual
+ * space in the chosen mmu node, and then do a map operation to
+ * map virtual to physical.
  */
-# define do_div(n,base) ({                                             \
-       unsigned int __base = (base);                                   \
-       unsigned int __rem;                                             \
-       (void)(((typeof((n)) *)0) == ((unsigned long long *)0));        \
-       if (((n) >> 32) == 0) {                                         \
-               __rem = (unsigned int)(n) % __base;                     \
-               (n) = (unsigned int)(n) / __base;                       \
-       } else                                                          \
-               __rem = __div64_32(&(n), __base);                       \
-       __rem;                                                          \
- })
+static int need_map = -1;
+static ihandle chosen_mmu;
+static phandle memory;
 
-static int skip_atoi(const char **s)
+/* returns true if s2 is a prefix of s1 */
+static int string_match(const char *s1, const char *s2)
 {
-       int i, c;
-
-       for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
-               i = i*10 + c - '0';
-       return i;
+       for (; *s2; ++s2)
+               if (*s1++ != *s2)
+                       return 0;
+       return 1;
 }
 
-#define ZEROPAD        1               /* pad with zero */
-#define SIGN   2               /* unsigned/signed long */
-#define PLUS   4               /* show plus */
-#define SPACE  8               /* space if plus */
-#define LEFT   16              /* left justified */
-#define SPECIAL        32              /* 0x */
-#define LARGE  64              /* use 'ABCDEF' instead of 'abcdef' */
-
-static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+static int check_of_version(void)
 {
-       char c,sign,tmp[66];
-       const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
-       int i;
+       phandle oprom, chosen;
+       char version[64];
 
-       if (type & LARGE)
-               digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
-       if (type & LEFT)
-               type &= ~ZEROPAD;
-       if (base < 2 || base > 36)
+       oprom = finddevice("/openprom");
+       if (oprom == (phandle) -1)
                return 0;
-       c = (type & ZEROPAD) ? '0' : ' ';
-       sign = 0;
-       if (type & SIGN) {
-               if ((signed long long)num < 0) {
-                       sign = '-';
-                       num = - (signed long long)num;
-                       size--;
-               } else if (type & PLUS) {
-                       sign = '+';
-                       size--;
-               } else if (type & SPACE) {
-                       sign = ' ';
-                       size--;
+       if (getprop(oprom, "model", version, sizeof(version)) <= 0)
+               return 0;
+       version[sizeof(version)-1] = 0;
+       printf("OF version = '%s'\r\n", version);
+       if (!string_match(version, "Open Firmware, 1.")
+           && !string_match(version, "FirmWorks,3."))
+               return 0;
+       chosen = finddevice("/chosen");
+       if (chosen == (phandle) -1) {
+               chosen = finddevice("/chosen@0");
+               if (chosen == (phandle) -1) {
+                       printf("no chosen\n");
+                       return 0;
                }
        }
-       if (type & SPECIAL) {
-               if (base == 16)
-                       size -= 2;
-               else if (base == 8)
-                       size--;
-       }
-       i = 0;
-       if (num == 0)
-               tmp[i++]='0';
-       else while (num != 0) {
-               tmp[i++] = digits[do_div(num, base)];
+       if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
+               printf("no mmu\n");
+               return 0;
        }
-       if (i > precision)
-               precision = i;
-       size -= precision;
-       if (!(type&(ZEROPAD+LEFT)))
-               while(size-->0)
-                       *str++ = ' ';
-       if (sign)
-               *str++ = sign;
-       if (type & SPECIAL) {
-               if (base==8)
-                       *str++ = '0';
-               else if (base==16) {
-                       *str++ = '0';
-                       *str++ = digits[33];
+       memory = (ihandle) call_prom("open", 1, 1, "/memory");
+       if (memory == (ihandle) -1) {
+               memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
+               if (memory == (ihandle) -1) {
+                       printf("no memory node\n");
+                       return 0;
                }
        }
-       if (!(type & LEFT))
-               while (size-- > 0)
-                       *str++ = c;
-       while (i < precision--)
-               *str++ = '0';
-       while (i-- > 0)
-               *str++ = tmp[i];
-       while (size-- > 0)
-               *str++ = ' ';
-       return str;
+       printf("old OF detected\r\n");
+       return 1;
 }
 
-int vsprintf(char *buf, const char *fmt, va_list args)
+void *claim(unsigned long virt, unsigned long size, unsigned long align)
 {
-       int len;
-       unsigned long long num;
-       int i, base;
-       char * str;
-       const char *s;
-
-       int flags;              /* flags to number() */
-
-       int field_width;        /* width of output field */
-       int precision;          /* min. # of digits for integers; max
-                                  number of chars for from string */
-       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
-                               /* 'z' support added 23/7/1999 S.H.    */
-                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+       int ret;
+       unsigned int result;
 
+       if (need_map < 0)
+               need_map = check_of_version();
+       if (align || !need_map)
+               return (void *) call_prom("claim", 3, 1, virt, size, align);
        
-       for (str=buf ; *fmt ; ++fmt) {
-               if (*fmt != '%') {
-                       *str++ = *fmt;
-                       continue;
-               }
-                       
-               /* process flags */
-               flags = 0;
-               repeat:
-                       ++fmt;          /* this also skips first '%' */
-                       switch (*fmt) {
-                               case '-': flags |= LEFT; goto repeat;
-                               case '+': flags |= PLUS; goto repeat;
-                               case ' ': flags |= SPACE; goto repeat;
-                               case '#': flags |= SPECIAL; goto repeat;
-                               case '0': flags |= ZEROPAD; goto repeat;
-                               }
-               
-               /* get field width */
-               field_width = -1;
-               if ('0' <= *fmt && *fmt <= '9')
-                       field_width = skip_atoi(&fmt);
-               else if (*fmt == '*') {
-                       ++fmt;
-                       /* it's the next argument */
-                       field_width = va_arg(args, int);
-                       if (field_width < 0) {
-                               field_width = -field_width;
-                               flags |= LEFT;
-                       }
-               }
-
-               /* get the precision */
-               precision = -1;
-               if (*fmt == '.') {
-                       ++fmt;  
-                       if ('0' <= *fmt && *fmt <= '9')
-                               precision = skip_atoi(&fmt);
-                       else if (*fmt == '*') {
-                               ++fmt;
-                               /* it's the next argument */
-                               precision = va_arg(args, int);
-                       }
-                       if (precision < 0)
-                               precision = 0;
-               }
-
-               /* get the conversion qualifier */
-               qualifier = -1;
-               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
-                       qualifier = *fmt;
-                       ++fmt;
-               }
-
-               /* default base */
-               base = 10;
-
-               switch (*fmt) {
-               case 'c':
-                       if (!(flags & LEFT))
-                               while (--field_width > 0)
-                                       *str++ = ' ';
-                       *str++ = (unsigned char) va_arg(args, int);
-                       while (--field_width > 0)
-                               *str++ = ' ';
-                       continue;
-
-               case 's':
-                       s = va_arg(args, char *);
-                       if (!s)
-                               s = "<NULL>";
-
-                       len = strnlen(s, precision);
-
-                       if (!(flags & LEFT))
-                               while (len < field_width--)
-                                       *str++ = ' ';
-                       for (i = 0; i < len; ++i)
-                               *str++ = *s++;
-                       while (len < field_width--)
-                               *str++ = ' ';
-                       continue;
-
-               case 'p':
-                       if (field_width == -1) {
-                               field_width = 2*sizeof(void *);
-                               flags |= ZEROPAD;
-                       }
-                       str = number(str,
-                               (unsigned long) va_arg(args, void *), 16,
-                               field_width, precision, flags);
-                       continue;
-
-
-               case 'n':
-                       if (qualifier == 'l') {
-                               long * ip = va_arg(args, long *);
-                               *ip = (str - buf);
-                       } else if (qualifier == 'Z') {
-                               size_t * ip = va_arg(args, size_t *);
-                               *ip = (str - buf);
-                       } else {
-                               int * ip = va_arg(args, int *);
-                               *ip = (str - buf);
-                       }
-                       continue;
-
-               case '%':
-                       *str++ = '%';
-                       continue;
-
-               /* integer number formats - set up the flags and "break" */
-               case 'o':
-                       base = 8;
-                       break;
-
-               case 'X':
-                       flags |= LARGE;
-               case 'x':
-                       base = 16;
-                       break;
-
-               case 'd':
-               case 'i':
-                       flags |= SIGN;
-               case 'u':
-                       break;
-
-               default:
-                       *str++ = '%';
-                       if (*fmt)
-                               *str++ = *fmt;
-                       else
-                               --fmt;
-                       continue;
-               }
-               if (qualifier == 'l') {
-                       num = va_arg(args, unsigned long);
-                       if (flags & SIGN)
-                               num = (signed long) num;
-               } else if (qualifier == 'Z') {
-                       num = va_arg(args, size_t);
-               } else if (qualifier == 'h') {
-                       num = (unsigned short) va_arg(args, int);
-                       if (flags & SIGN)
-                               num = (signed short) num;
-               } else {
-                       num = va_arg(args, unsigned int);
-                       if (flags & SIGN)
-                               num = (signed int) num;
-               }
-               str = number(str, num, base, field_width, precision, flags);
-       }
-       *str = '\0';
-       return str-buf;
-}
-
-int sprintf(char * buf, const char *fmt, ...)
-{
-       va_list args;
-       int i;
-
-       va_start(args, fmt);
-       i=vsprintf(buf,fmt,args);
-       va_end(args);
-       return i;
-}
-
-static char sprint_buf[1024];
-
-int
-printf(const char *fmt, ...)
-{
-       va_list args;
-       int n;
-
-       va_start(args, fmt);
-       n = vsprintf(sprint_buf, fmt, args);
-       va_end(args);
-       write(stdout, sprint_buf, n);
-       return n;
+       ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
+                           align, size, virt);
+       if (ret != 0 || result == -1)
+               return (void *) -1;
+       ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
+                           align, size, virt);
+       /* 0x12 == coherent + read/write */
+       ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
+                       0x12, size, virt, virt);
+       return (void *) virt;
 }
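
A hedged usage sketch (the claim_scratch() helper and the 0x600000/0x100000 values are illustrative assumptions, not from this patch): with align == 0 on an old OF detected by check_of_version(), the claim() wrapper above both claims the range and issues the "map" call-method before returning the virtual address.

#include "prom.h"
#include "stdio.h"

/* Illustrative only: grab a 1 MB scratch region at the 6 MB mark. */
static void *claim_scratch(void)
{
        void *p = claim(0x600000, 0x100000, 0);

        if (p == (void *) -1)
                printf("claim of scratch region failed\r\n");
        return p;
}
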
index 96ab5aec740c6cb20682736ec83663b134f0dc90..3e2ddd4a5a816ffeee35883847bffbd617fd2411 100644 (file)
@@ -1,18 +1,34 @@
 #ifndef _PPC_BOOT_PROM_H_
 #define _PPC_BOOT_PROM_H_
 
+typedef void *phandle;
+typedef void *ihandle;
+
 extern int (*prom) (void *);
-extern void *chosen_handle;
+extern phandle chosen_handle;
+extern ihandle stdout;
 
-extern void *stdin;
-extern void *stdout;
-extern void *stderr;
+int    call_prom(const char *service, int nargs, int nret, ...);
+int    call_prom_ret(const char *service, int nargs, int nret,
+                     unsigned int *rets, ...);
 
 extern int write(void *handle, void *ptr, int nb);
-extern int read(void *handle, void *ptr, int nb);
-extern void exit(void);
-extern void pause(void);
-extern void *finddevice(const char *);
-extern void *claim(unsigned long virt, unsigned long size, unsigned long align);
-extern int getprop(void *phandle, const char *name, void *buf, int buflen);
+extern void *claim(unsigned long virt, unsigned long size, unsigned long aln);
+
+static inline void exit(void)
+{
+       call_prom("exit", 0, 0);
+}
+
+static inline phandle finddevice(const char *name)
+{
+       return (phandle) call_prom("finddevice", 1, 1, name);
+}
+
+static inline int getprop(void *phandle, const char *name,
+                         void *buf, int buflen)
+{
+       return call_prom("getprop", 4, 1, phandle, name, buf, buflen);
+}
+
 #endif                         /* _PPC_BOOT_PROM_H_ */
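
A usage sketch for the new inline wrappers; read_chosen_prop() and the "linux,stdout-path" property name are illustrative assumptions, only finddevice() and getprop() come from this header.

#include "prom.h"

/* Illustrative only: look up /chosen and read one of its properties. */
static int read_chosen_prop(void *buf, int len)
{
        phandle chosen = finddevice("/chosen");

        if (chosen == (phandle) -1)
                return -1;
        return getprop(chosen, "linux,stdout-path", buf, len);
}
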
diff --git a/arch/powerpc/boot/rs6000.h b/arch/powerpc/boot/rs6000.h
new file mode 100644 (file)
index 0000000..433f450
--- /dev/null
@@ -0,0 +1,243 @@
+/* IBM RS/6000 "XCOFF" file definitions for BFD.
+   Copyright (C) 1990, 1991 Free Software Foundation, Inc.
+   FIXME: Can someone provide a transliteration of this name into ASCII?
+   Using the following chars caused a compiler warning on HIUX (so I replaced
+   them with octal escapes), and isn't useful without an understanding of what
+   character set it is.
+   Written by Mimi Ph\373\364ng-Th\345o V\365 of IBM
+   and John Gilmore of Cygnus Support.  */
+
+/********************** FILE HEADER **********************/
+
+struct external_filehdr {
+       char f_magic[2];        /* magic number                 */
+       char f_nscns[2];        /* number of sections           */
+       char f_timdat[4];       /* time & date stamp            */
+       char f_symptr[4];       /* file pointer to symtab       */
+       char f_nsyms[4];        /* number of symtab entries     */
+       char f_opthdr[2];       /* sizeof(optional hdr)         */
+       char f_flags[2];        /* flags                        */
+};
+
+        /* IBM RS/6000 */
+#define U802WRMAGIC     0730    /* writeable text segments **chh**      */
+#define U802ROMAGIC     0735    /* readonly sharable text segments      */
+#define U802TOCMAGIC    0737    /* readonly text segments and TOC       */
+
+#define BADMAG(x)      \
+       ((x).f_magic != U802ROMAGIC && (x).f_magic != U802WRMAGIC && \
+        (x).f_magic != U802TOCMAGIC)
+
+#define        FILHDR  struct external_filehdr
+#define        FILHSZ  20
+
+
+/********************** AOUT "OPTIONAL HEADER" **********************/
+
+
+typedef struct
+{
+  unsigned char        magic[2];       /* type of file                 */
+  unsigned char        vstamp[2];      /* version stamp                */
+  unsigned char        tsize[4];       /* text size in bytes, padded to FW bdry */
+  unsigned char        dsize[4];       /* initialized data "  "        */
+  unsigned char        bsize[4];       /* uninitialized data "   "     */
+  unsigned char        entry[4];       /* entry pt.                    */
+  unsigned char        text_start[4];  /* base of text used for this file */
+  unsigned char        data_start[4];  /* base of data used for this file */
+  unsigned char        o_toc[4];       /* address of TOC */
+  unsigned char        o_snentry[2];   /* section number of entry point */
+  unsigned char        o_sntext[2];    /* section number of .text section */
+  unsigned char        o_sndata[2];    /* section number of .data section */
+  unsigned char        o_sntoc[2];     /* section number of TOC */
+  unsigned char        o_snloader[2];  /* section number of .loader section */
+  unsigned char        o_snbss[2];     /* section number of .bss section */
+  unsigned char        o_algntext[2];  /* .text alignment */
+  unsigned char        o_algndata[2];  /* .data alignment */
+  unsigned char        o_modtype[2];   /* module type (??) */
+  unsigned char o_cputype[2];  /* cpu type */
+  unsigned char        o_maxstack[4];  /* max stack size (??) */
+  unsigned char o_maxdata[4];  /* max data size (??) */
+  unsigned char        o_resv2[12];    /* reserved */
+}
+AOUTHDR;
+
+#define AOUTSZ 72
+#define SMALL_AOUTSZ (28)
+#define AOUTHDRSZ 72
+
+#define        RS6K_AOUTHDR_OMAGIC     0x0107  /* old: text & data writeable */
+#define        RS6K_AOUTHDR_NMAGIC     0x0108  /* new: text r/o, data r/w */
+#define        RS6K_AOUTHDR_ZMAGIC     0x010B  /* paged: text r/o, both page-aligned */
+
+
+/********************** SECTION HEADER **********************/
+
+
+struct external_scnhdr {
+       char            s_name[8];      /* section name                 */
+       char            s_paddr[4];     /* physical address, aliased s_nlib */
+       char            s_vaddr[4];     /* virtual address              */
+       char            s_size[4];      /* section size                 */
+       char            s_scnptr[4];    /* file ptr to raw data for section */
+       char            s_relptr[4];    /* file ptr to relocation       */
+       char            s_lnnoptr[4];   /* file ptr to line numbers     */
+       char            s_nreloc[2];    /* number of relocation entries */
+       char            s_nlnno[2];     /* number of line number entries*/
+       char            s_flags[4];     /* flags                        */
+};
+
+/*
+ * names of "special" sections
+ */
+#define _TEXT  ".text"
+#define _DATA  ".data"
+#define _BSS   ".bss"
+#define _PAD   ".pad"
+#define _LOADER        ".loader"
+
+#define        SCNHDR  struct external_scnhdr
+#define        SCNHSZ  40
+
+/* XCOFF uses a special .loader section with type STYP_LOADER.  */
+#define STYP_LOADER 0x1000
+
+/* XCOFF uses a special .debug section with type STYP_DEBUG.  */
+#define STYP_DEBUG 0x2000
+
+/* XCOFF handles line number or relocation overflow by creating
+   another section header with STYP_OVRFLO set.  */
+#define STYP_OVRFLO 0x8000
+
+/********************** LINE NUMBERS **********************/
+
+/* 1 line number entry for every "breakpointable" source line in a section.
+ * Line numbers are grouped on a per function basis; first entry in a function
+ * grouping will have l_lnno = 0 and in place of physical address will be the
+ * symbol table index of the function name.
+ */
+struct external_lineno {
+       union {
+               char l_symndx[4];       /* function name symbol index, iff l_lnno == 0*/
+               char l_paddr[4];        /* (physical) address of line number    */
+       } l_addr;
+       char l_lnno[2]; /* line number          */
+};
+
+
+#define        LINENO  struct external_lineno
+#define        LINESZ  6
+
+
+/********************** SYMBOLS **********************/
+
+#define E_SYMNMLEN     8       /* # characters in a symbol name        */
+#define E_FILNMLEN     14      /* # characters in a file name          */
+#define E_DIMNUM       4       /* # array dimensions in auxiliary entry */
+
+struct external_syment
+{
+  union {
+    char e_name[E_SYMNMLEN];
+    struct {
+      char e_zeroes[4];
+      char e_offset[4];
+    } e;
+  } e;
+  char e_value[4];
+  char e_scnum[2];
+  char e_type[2];
+  char e_sclass[1];
+  char e_numaux[1];
+};
+
+
+
+#define N_BTMASK       (017)
+#define N_TMASK                (060)
+#define N_BTSHFT       (4)
+#define N_TSHIFT       (2)
+
+
+union external_auxent {
+       struct {
+               char x_tagndx[4];       /* str, un, or enum tag indx */
+               union {
+                       struct {
+                           char  x_lnno[2]; /* declaration line number */
+                           char  x_size[2]; /* str/union/array size */
+                       } x_lnsz;
+                       char x_fsize[4];        /* size of function */
+               } x_misc;
+               union {
+                       struct {                /* if ISFCN, tag, or .bb */
+                           char x_lnnoptr[4];  /* ptr to fcn line # */
+                           char x_endndx[4];   /* entry ndx past block end */
+                       } x_fcn;
+                       struct {                /* if ISARY, up to 4 dimen. */
+                           char x_dimen[E_DIMNUM][2];
+                       } x_ary;
+               } x_fcnary;
+               char x_tvndx[2];                /* tv index */
+       } x_sym;
+
+       union {
+               char x_fname[E_FILNMLEN];
+               struct {
+                       char x_zeroes[4];
+                       char x_offset[4];
+               } x_n;
+       } x_file;
+
+       struct {
+               char x_scnlen[4];                       /* section length */
+               char x_nreloc[2];       /* # relocation entries */
+               char x_nlinno[2];       /* # line numbers */
+       } x_scn;
+
+        struct {
+               char x_tvfill[4];       /* tv fill value */
+               char x_tvlen[2];        /* length of .tv */
+               char x_tvran[2][2];     /* tv range */
+       } x_tv;         /* info about .tv section (in auxent of symbol .tv) */
+
+       struct {
+               unsigned char x_scnlen[4];
+               unsigned char x_parmhash[4];
+               unsigned char x_snhash[2];
+               unsigned char x_smtyp[1];
+               unsigned char x_smclas[1];
+               unsigned char x_stab[4];
+               unsigned char x_snstab[2];
+       } x_csect;
+
+};
+
+#define        SYMENT  struct external_syment
+#define        SYMESZ  18
+#define        AUXENT  union external_auxent
+#define        AUXESZ  18
+#define DBXMASK 0x80           /* for dbx storage mask */
+#define SYMNAME_IN_DEBUG(symptr) ((symptr)->n_sclass & DBXMASK)
+
+
+
+/********************** RELOCATION DIRECTIVES **********************/
+
+
+struct external_reloc {
+  char r_vaddr[4];
+  char r_symndx[4];
+  char r_size[1];
+  char r_type[1];
+};
+
+
+#define RELOC struct external_reloc
+#define RELSZ 10
+
+#define DEFAULT_DATA_SECTION_ALIGNMENT 4
+#define DEFAULT_BSS_SECTION_ALIGNMENT 4
+#define DEFAULT_TEXT_SECTION_ALIGNMENT 4
+/* For new sections we haven't heard of before */
+#define DEFAULT_SECTION_ALIGNMENT 4
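
A hedged sketch of consuming these on-disk definitions; xcoff_magic_ok() is an assumed helper, decoding f_magic as two big-endian bytes before comparing against the U802* magic values defined above.

/* Illustrative only: sanity-check an XCOFF file header. */
static int xcoff_magic_ok(const struct external_filehdr *hdr)
{
        unsigned int magic = ((unsigned char) hdr->f_magic[0] << 8) |
                             (unsigned char) hdr->f_magic[1];

        return magic == U802ROMAGIC || magic == U802WRMAGIC ||
               magic == U802TOCMAGIC;
}
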
diff --git a/arch/powerpc/boot/stdio.c b/arch/powerpc/boot/stdio.c
new file mode 100644 (file)
index 0000000..b5aa522
--- /dev/null
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+#include "string.h"
+#include "stdio.h"
+#include "prom.h"
+
+size_t strnlen(const char * s, size_t count)
+{
+       const char *sc;
+
+       for (sc = s; count-- && *sc != '\0'; ++sc)
+               /* nothing */;
+       return sc - s;
+}
+
+extern unsigned int __div64_32(unsigned long long *dividend,
+                              unsigned int divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({                                             \
+       unsigned int __base = (base);                                   \
+       unsigned int __rem;                                             \
+       (void)(((typeof((n)) *)0) == ((unsigned long long *)0));        \
+       if (((n) >> 32) == 0) {                                         \
+               __rem = (unsigned int)(n) % __base;                     \
+               (n) = (unsigned int)(n) / __base;                       \
+       } else                                                          \
+               __rem = __div64_32(&(n), __base);                       \
+       __rem;                                                          \
+ })
+
+static int skip_atoi(const char **s)
+{
+       int i, c;
+
+       for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+               i = i*10 + c - '0';
+       return i;
+}
+
+#define ZEROPAD        1               /* pad with zero */
+#define SIGN   2               /* unsigned/signed long */
+#define PLUS   4               /* show plus */
+#define SPACE  8               /* space if plus */
+#define LEFT   16              /* left justified */
+#define SPECIAL        32              /* 0x */
+#define LARGE  64              /* use 'ABCDEF' instead of 'abcdef' */
+
+static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+{
+       char c,sign,tmp[66];
+       const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+       int i;
+
+       if (type & LARGE)
+               digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+       if (type & LEFT)
+               type &= ~ZEROPAD;
+       if (base < 2 || base > 36)
+               return 0;
+       c = (type & ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & SIGN) {
+               if ((signed long long)num < 0) {
+                       sign = '-';
+                       num = - (signed long long)num;
+                       size--;
+               } else if (type & PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++]='0';
+       else while (num != 0) {
+               tmp[i++] = digits[do_div(num, base)];
+       }
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type&(ZEROPAD+LEFT)))
+               while(size-->0)
+                       *str++ = ' ';
+       if (sign)
+               *str++ = sign;
+       if (type & SPECIAL) {
+               if (base==8)
+                       *str++ = '0';
+               else if (base==16) {
+                       *str++ = '0';
+                       *str++ = digits[33];
+               }
+       }
+       if (!(type & LEFT))
+               while (size-- > 0)
+                       *str++ = c;
+       while (i < precision--)
+               *str++ = '0';
+       while (i-- > 0)
+               *str++ = tmp[i];
+       while (size-- > 0)
+               *str++ = ' ';
+       return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+       int len;
+       unsigned long long num;
+       int i, base;
+       char * str;
+       const char *s;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /* min. # of digits for integers; max
+                                  number of chars from a string */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+                               /* 'z' support added 23/7/1999 S.H.    */
+                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+       
+       for (str=buf ; *fmt ; ++fmt) {
+               if (*fmt != '%') {
+                       *str++ = *fmt;
+                       continue;
+               }
+                       
+               /* process flags */
+               flags = 0;
+               repeat:
+                       ++fmt;          /* this also skips first '%' */
+                       switch (*fmt) {
+                               case '-': flags |= LEFT; goto repeat;
+                               case '+': flags |= PLUS; goto repeat;
+                               case ' ': flags |= SPACE; goto repeat;
+                               case '#': flags |= SPECIAL; goto repeat;
+                               case '0': flags |= ZEROPAD; goto repeat;
+                               }
+               
+               /* get field width */
+               field_width = -1;
+               if ('0' <= *fmt && *fmt <= '9')
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;  
+                       if ('0' <= *fmt && *fmt <= '9')
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+               case 'c':
+                       if (!(flags & LEFT))
+                               while (--field_width > 0)
+                                       *str++ = ' ';
+                       *str++ = (unsigned char) va_arg(args, int);
+                       while (--field_width > 0)
+                               *str++ = ' ';
+                       continue;
+
+               case 's':
+                       s = va_arg(args, char *);
+                       if (!s)
+                               s = "<NULL>";
+
+                       len = strnlen(s, precision);
+
+                       if (!(flags & LEFT))
+                               while (len < field_width--)
+                                       *str++ = ' ';
+                       for (i = 0; i < len; ++i)
+                               *str++ = *s++;
+                       while (len < field_width--)
+                               *str++ = ' ';
+                       continue;
+
+               case 'p':
+                       if (field_width == -1) {
+                               field_width = 2*sizeof(void *);
+                               flags |= ZEROPAD;
+                       }
+                       str = number(str,
+                               (unsigned long) va_arg(args, void *), 16,
+                               field_width, precision, flags);
+                       continue;
+
+
+               case 'n':
+                       if (qualifier == 'l') {
+                               long * ip = va_arg(args, long *);
+                               *ip = (str - buf);
+                       } else if (qualifier == 'Z') {
+                               size_t * ip = va_arg(args, size_t *);
+                               *ip = (str - buf);
+                       } else {
+                               int * ip = va_arg(args, int *);
+                               *ip = (str - buf);
+                       }
+                       continue;
+
+               case '%':
+                       *str++ = '%';
+                       continue;
+
+               /* integer number formats - set up the flags and "break" */
+               case 'o':
+                       base = 8;
+                       break;
+
+               case 'X':
+                       flags |= LARGE;
+               case 'x':
+                       base = 16;
+                       break;
+
+               case 'd':
+               case 'i':
+                       flags |= SIGN;
+               case 'u':
+                       break;
+
+               default:
+                       *str++ = '%';
+                       if (*fmt)
+                               *str++ = *fmt;
+                       else
+                               --fmt;
+                       continue;
+               }
+               if (qualifier == 'l') {
+                       num = va_arg(args, unsigned long);
+                       if (flags & SIGN)
+                               num = (signed long) num;
+               } else if (qualifier == 'Z') {
+                       num = va_arg(args, size_t);
+               } else if (qualifier == 'h') {
+                       num = (unsigned short) va_arg(args, int);
+                       if (flags & SIGN)
+                               num = (signed short) num;
+               } else {
+                       num = va_arg(args, unsigned int);
+                       if (flags & SIGN)
+                               num = (signed int) num;
+               }
+               str = number(str, num, base, field_width, precision, flags);
+       }
+       *str = '\0';
+       return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i=vsprintf(buf,fmt,args);
+       va_end(args);
+       return i;
+}
+
+static char sprint_buf[1024];
+
+int
+printf(const char *fmt, ...)
+{
+       va_list args;
+       int n;
+
+       va_start(args, fmt);
+       n = vsprintf(sprint_buf, fmt, args);
+       va_end(args);
+       write(stdout, sprint_buf, n);
+       return n;
+}
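
A hedged caller sketch (report_claim() and its arguments are assumptions): sprintf() fills a caller-supplied buffer, while printf() formats into the static 1024-byte sprint_buf and pushes the result to the firmware stdout handle through write().

#include "stdio.h"
#include "prom.h"

/* Illustrative only: report a claimed region over the OF console. */
static void report_claim(unsigned long addr, unsigned long size)
{
        char buf[64];
        int n;

        n = sprintf(buf, "claimed %lu bytes at 0x%lx\r\n", size, addr);
        write(stdout, buf, n);          /* the same path printf() takes */
}
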
index 24bd3a8dee949e18f019270553ce86963c036740..eb9e16c87aef793c380cea859a19674c2f035032 100644 (file)
@@ -7,10 +7,4 @@ extern int sprintf(char *buf, const char *fmt, ...);
 
 extern int vsprintf(char *buf, const char *fmt, va_list args);
 
-extern int putc(int c, void *f);
-extern int putchar(int c);
-extern int getchar(void);
-
-extern int fputs(char *str, void *f);
-
 #endif                         /* _PPC_BOOT_STDIO_H_ */
index b1eeaed7db177e273c6e014c4d0c882e82cee739..ac3d43b6a324c97549d8c181e2896ee2f41d377f 100644 (file)
@@ -107,10 +107,12 @@ memcpy:
        rlwinm. r7,r5,32-3,3,31         /* r7 = r5 >> 3 */
        addi    r6,r3,-4
        addi    r4,r4,-4
-       beq     2f                      /* if less than 8 bytes to do */
+       beq     3f                      /* if less than 8 bytes to do */
        andi.   r0,r6,3                 /* get dest word aligned */
        mtctr   r7
        bne     5f
+       andi.   r0,r4,3                 /* check src word aligned too */
+       bne     3f
 1:     lwz     r7,4(r4)
        lwzu    r8,8(r4)
        stw     r7,4(r6)
@@ -132,6 +134,11 @@ memcpy:
        bdnz    4b
        blr
 5:     subfic  r0,r0,4
+       cmpw    cr1,r0,r5
+       add     r7,r0,r4
+       andi.   r7,r7,3                 /* will source be word-aligned too? */
+       ble     cr1,3b
+       bne     3b                      /* do byte-by-byte if not */
        mtctr   r0
 6:     lbz     r7,4(r4)
        addi    r4,r4,1
@@ -149,10 +156,12 @@ backwards_memcpy:
        rlwinm. r7,r5,32-3,3,31         /* r7 = r5 >> 3 */
        add     r6,r3,r5
        add     r4,r4,r5
-       beq     2f
+       beq     3f
        andi.   r0,r6,3
        mtctr   r7
        bne     5f
+       andi.   r0,r4,3
+       bne     3f
 1:     lwz     r7,-4(r4)
        lwzu    r8,-8(r4)
        stw     r7,-4(r6)
@@ -171,7 +180,12 @@ backwards_memcpy:
        stbu    r0,-1(r6)
        bdnz    4b
        blr
-5:     mtctr   r0
+5:     cmpw    cr1,r0,r5
+       subf    r7,r0,r4
+       andi.   r7,r7,3
+       ble     cr1,3b
+       bne     3b
+       mtctr   r0
 6:     lbzu    r7,-1(r4)
        stbu    r7,-1(r6)
        bdnz    6b
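
The memcpy and backwards_memcpy changes above only enter the word-copy loops when the source can be word-aligned along with the destination, falling back to byte copies otherwise. A simplified C sketch of that policy (copy_words_or_bytes() is illustrative and not part of the patch):

/* Illustrative only: word copies when src and dst share alignment,
 * otherwise a plain byte loop. */
static void *copy_words_or_bytes(void *dst, const void *src, unsigned long n)
{
        unsigned char *d = dst;
        const unsigned char *s = src;

        if ((((unsigned long) d | (unsigned long) s) & 3) == 0)
                for (; n >= 4; n -= 4, d += 4, s += 4)
                        *(unsigned int *) d = *(const unsigned int *) s;
        while (n--)
                *d++ = *s++;
        return dst;
}
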
diff --git a/arch/powerpc/boot/zImage.coff.lds b/arch/powerpc/boot/zImage.coff.lds
new file mode 100644 (file)
index 0000000..6016251
--- /dev/null
@@ -0,0 +1,46 @@
+OUTPUT_ARCH(powerpc:common)
+ENTRY(_start)
+SECTIONS
+{
+  . = (5*1024*1024);
+  _start = .;
+  .text      :
+  {
+    *(.text)
+    *(.fixup)
+  }
+  _etext = .;
+  . = ALIGN(4096);
+  .data    :
+  {
+    *(.rodata*)
+    *(.data*)
+    *(.sdata*)
+    __got2_start = .;
+    *(.got2)
+    __got2_end = .;
+
+    _vmlinux_start =  .;
+    *(.kernel:vmlinux.strip)
+    _vmlinux_end =  .;
+
+    _initrd_start =  .;
+    *(.kernel:initrd)
+    _initrd_end =  .;
+  }
+
+  . = ALIGN(4096);
+  _edata  =  .;
+  __bss_start = .;
+  .bss       :
+  {
+   *(.sbss)
+   *(.bss)
+  }
+  _end = . ;
+
+  /DISCARD/ :
+  {
+    *(.comment)
+  }
+}
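
The script exports start/end markers around the embedded kernel and initrd images; a hedged sketch of how wrapper code might consume them (vmlinux_size() is an assumed helper, the extern declarations mirror the symbols defined above):

/* Illustrative only: locate the stripped vmlinux the script wraps. */
extern char _vmlinux_start[], _vmlinux_end[];

static unsigned long vmlinux_size(void)
{
        return _vmlinux_end - _vmlinux_start;
}
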
diff --git a/arch/powerpc/configs/mpc834x_sys_defconfig b/arch/powerpc/configs/mpc834x_sys_defconfig
new file mode 100644 (file)
index 0000000..3bff761
--- /dev/null
@@ -0,0 +1,911 @@
+#
+# Automatically generated make config: don't edit
+# Linux kernel version: 2.6.15-g461d4edf-dirty
+# Fri Jan 13 11:01:47 2006
+#
+# CONFIG_PPC64 is not set
+CONFIG_PPC32=y
+CONFIG_PPC_MERGE=y
+CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_RWSEM_XCHGADD_ALGORITHM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_PPC=y
+CONFIG_EARLY_PRINTK=y
+CONFIG_GENERIC_NVRAM=y
+CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_PPC_OF=y
+CONFIG_PPC_UDBG_16550=y
+# CONFIG_GENERIC_TBSYNC is not set
+CONFIG_DEFAULT_UIMAGE=y
+
+#
+# Processor support
+#
+# CONFIG_CLASSIC32 is not set
+# CONFIG_PPC_52xx is not set
+# CONFIG_PPC_82xx is not set
+CONFIG_PPC_83xx=y
+# CONFIG_40x is not set
+# CONFIG_44x is not set
+# CONFIG_8xx is not set
+# CONFIG_E200 is not set
+# CONFIG_E500 is not set
+CONFIG_6xx=y
+CONFIG_83xx=y
+CONFIG_PPC_FPU=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_PPC_STD_MMU_32=y
+# CONFIG_SMP is not set
+
+#
+# Code maturity level options
+#
+CONFIG_EXPERIMENTAL=y
+CONFIG_CLEAN_COMPILE=y
+CONFIG_BROKEN_ON_SMP=y
+CONFIG_INIT_ENV_ARG_LIMIT=32
+
+#
+# General setup
+#
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_BSD_PROCESS_ACCT is not set
+CONFIG_SYSCTL=y
+# CONFIG_AUDIT is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+CONFIG_HOTPLUG=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+# CONFIG_EPOLL is not set
+CONFIG_SHMEM=y
+CONFIG_CC_ALIGN_FUNCTIONS=0
+CONFIG_CC_ALIGN_LABELS=0
+CONFIG_CC_ALIGN_LOOPS=0
+CONFIG_CC_ALIGN_JUMPS=0
+CONFIG_SLAB=y
+# CONFIG_TINY_SHMEM is not set
+CONFIG_BASE_SMALL=0
+# CONFIG_SLOB is not set
+
+#
+# Loadable module support
+#
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+CONFIG_OBSOLETE_MODPARM=y
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_KMOD is not set
+
+#
+# Block layer
+#
+# CONFIG_LBD is not set
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+CONFIG_PPC_GEN550=y
+# CONFIG_WANT_EARLY_SERIAL is not set
+
+#
+# Platform support
+#
+CONFIG_MPC834x_SYS=y
+CONFIG_MPC834x=y
+
+#
+# Kernel options
+#
+# CONFIG_HIGHMEM is not set
+# CONFIG_HZ_100 is not set
+CONFIG_HZ_250=y
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=250
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+CONFIG_BINFMT_ELF=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_SELECT_MEMORY_MODEL=y
+CONFIG_FLATMEM_MANUAL=y
+# CONFIG_DISCONTIGMEM_MANUAL is not set
+# CONFIG_SPARSEMEM_MANUAL is not set
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_PROC_DEVICETREE=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_PM is not set
+# CONFIG_SOFTWARE_SUSPEND is not set
+CONFIG_SECCOMP=y
+CONFIG_ISA_DMA_API=y
+
+#
+# Bus options
+#
+CONFIG_GENERIC_ISA_DMA=y
+# CONFIG_PPC_I8259 is not set
+CONFIG_PPC_INDIRECT_PCI=y
+CONFIG_FSL_SOC=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_LEGACY_PROC is not set
+
+#
+# PCCARD (PCMCIA/CardBus) support
+#
+# CONFIG_PCCARD is not set
+
+#
+# PCI Hotplug Support
+#
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# Advanced setup
+#
+# CONFIG_ADVANCED_OPTIONS is not set
+
+#
+# Default settings for advanced configuration options are used
+#
+CONFIG_HIGHMEM_START=0xfe000000
+CONFIG_LOWMEM_SIZE=0x30000000
+CONFIG_KERNEL_START=0xc0000000
+CONFIG_TASK_SIZE=0x80000000
+CONFIG_BOOT_LOAD=0x00800000
+
+#
+# Networking
+#
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_MMAP is not set
+CONFIG_UNIX=y
+# CONFIG_NET_KEY is not set
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+# CONFIG_IP_ADVANCED_ROUTER is not set
+CONFIG_IP_FIB_HASH=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+# CONFIG_NET_IPGRE is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_INET_AH is not set
+# CONFIG_INET_ESP is not set
+# CONFIG_INET_IPCOMP is not set
+# CONFIG_INET_TUNNEL is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_BIC=y
+# CONFIG_IPV6 is not set
+# CONFIG_NETFILTER is not set
+
+#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
+# SCTP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_SCTP is not set
+# CONFIG_ATM is not set
+# CONFIG_BRIDGE is not set
+# CONFIG_VLAN_8021Q is not set
+# CONFIG_DECNET is not set
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_NET_DIVERT is not set
+# CONFIG_ECONET is not set
+# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
+# CONFIG_NET_SCHED is not set
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+# CONFIG_FW_LOADER is not set
+
+#
+# Connector - unified userspace <-> kernelspace linker
+#
+# CONFIG_CONNECTOR is not set
+
+#
+# Memory Technology Devices (MTD)
+#
+# CONFIG_MTD is not set
+
+#
+# Parallel port support
+#
+# CONFIG_PARPORT is not set
+
+#
+# Plug and Play support
+#
+
+#
+# Block devices
+#
+# CONFIG_BLK_DEV_FD is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+# CONFIG_BLK_DEV_UMEM is not set
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+# CONFIG_BLK_DEV_CRYPTOLOOP is not set
+# CONFIG_BLK_DEV_NBD is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=32768
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+
+#
+# ATA/ATAPI/MFM/RLL support
+#
+# CONFIG_IDE is not set
+
+#
+# SCSI device support
+#
+# CONFIG_RAID_ATTRS is not set
+# CONFIG_SCSI is not set
+
+#
+# Multi-device support (RAID and LVM)
+#
+# CONFIG_MD is not set
+
+#
+# Fusion MPT device support
+#
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_IEEE1394 is not set
+
+#
+# I2O device support
+#
+# CONFIG_I2O is not set
+
+#
+# Macintosh device drivers
+#
+# CONFIG_WINDFARM is not set
+
+#
+# Network device support
+#
+CONFIG_NETDEVICES=y
+# CONFIG_DUMMY is not set
+# CONFIG_BONDING is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_TUN is not set
+
+#
+# ARCnet devices
+#
+# CONFIG_ARCNET is not set
+
+#
+# PHY device support
+#
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+CONFIG_MARVELL_PHY=y
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_CICADA_PHY is not set
+
+#
+# Ethernet (10 or 100Mbit)
+#
+CONFIG_NET_ETHERNET=y
+CONFIG_MII=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NET_VENDOR_3COM is not set
+
+#
+# Tulip family network device support
+#
+# CONFIG_NET_TULIP is not set
+# CONFIG_HP100 is not set
+CONFIG_NET_PCI=y
+# CONFIG_PCNET32 is not set
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_ADAPTEC_STARFIRE is not set
+# CONFIG_B44 is not set
+# CONFIG_FORCEDETH is not set
+# CONFIG_DGRS is not set
+# CONFIG_EEPRO100 is not set
+CONFIG_E100=y
+# CONFIG_FEALNX is not set
+# CONFIG_NATSEMI is not set
+# CONFIG_NE2K_PCI is not set
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_SIS900 is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SUNDANCE is not set
+# CONFIG_TLAN is not set
+# CONFIG_VIA_RHINE is not set
+
+#
+# Ethernet (1000 Mbit)
+#
+# CONFIG_ACENIC is not set
+# CONFIG_DL2K is not set
+# CONFIG_E1000 is not set
+# CONFIG_NS83820 is not set
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+# CONFIG_R8169 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+# CONFIG_SK98LIN is not set
+# CONFIG_VIA_VELOCITY is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2 is not set
+CONFIG_GIANFAR=y
+# CONFIG_GFAR_NAPI is not set
+
+#
+# Ethernet (10000 Mbit)
+#
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_IXGB is not set
+# CONFIG_S2IO is not set
+
+#
+# Token Ring devices
+#
+# CONFIG_TR is not set
+
+#
+# Wireless LAN (non-hamradio)
+#
+# CONFIG_NET_RADIO is not set
+
+#
+# Wan interfaces
+#
+# CONFIG_WAN is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_PPP is not set
+# CONFIG_SLIP is not set
+# CONFIG_SHAPER is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+
+#
+# ISDN subsystem
+#
+# CONFIG_ISDN is not set
+
+#
+# Telephony Support
+#
+# CONFIG_PHONE is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+# CONFIG_INPUT_TSDEV is not set
+# CONFIG_INPUT_EVDEV is not set
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+# CONFIG_VT is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+
+#
+# Non-8250 serial port support
+#
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=256
+
+#
+# IPMI
+#
+# CONFIG_IPMI_HANDLER is not set
+
+#
+# Watchdog Cards
+#
+CONFIG_WATCHDOG=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+
+#
+# Watchdog Device Drivers
+#
+# CONFIG_SOFT_WATCHDOG is not set
+CONFIG_83xx_WDT=y
+
+#
+# PCI-based Watchdog Cards
+#
+# CONFIG_PCIPCWATCHDOG is not set
+# CONFIG_WDTPCI is not set
+# CONFIG_NVRAM is not set
+CONFIG_GEN_RTC=y
+# CONFIG_GEN_RTC_X is not set
+# CONFIG_DTLK is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+
+#
+# Ftape, the floppy tape device driver
+#
+# CONFIG_AGP is not set
+# CONFIG_DRM is not set
+# CONFIG_RAW_DRIVER is not set
+
+#
+# TPM devices
+#
+# CONFIG_TCG_TPM is not set
+# CONFIG_TELCLOCK is not set
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+
+#
+# I2C Algorithms
+#
+# CONFIG_I2C_ALGOBIT is not set
+# CONFIG_I2C_ALGOPCF is not set
+# CONFIG_I2C_ALGOPCA is not set
+
+#
+# I2C Hardware Bus support
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_I810 is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_MPC=y
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_PARPORT_LIGHT is not set
+# CONFIG_I2C_PROSAVAGE is not set
+# CONFIG_I2C_SAVAGE4 is not set
+# CONFIG_SCx200_ACB is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_VOODOO3 is not set
+# CONFIG_I2C_PCA_ISA is not set
+
+#
+# Miscellaneous I2C Chip support
+#
+# CONFIG_SENSORS_DS1337 is not set
+# CONFIG_SENSORS_DS1374 is not set
+# CONFIG_SENSORS_EEPROM is not set
+# CONFIG_SENSORS_PCF8574 is not set
+# CONFIG_SENSORS_PCA9539 is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_SENSORS_RTC8564 is not set
+# CONFIG_SENSORS_M41T00 is not set
+# CONFIG_SENSORS_MAX6875 is not set
+# CONFIG_RTC_X1205_I2C is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# CONFIG_I2C_DEBUG_CHIP is not set
+
+#
+# Dallas's 1-wire bus
+#
+# CONFIG_W1 is not set
+
+#
+# Hardware Monitoring support
+#
+CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ASB100 is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_FSCHER is not set
+# CONFIG_SENSORS_FSCPOS is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Misc devices
+#
+
+#
+# Multimedia Capabilities Port drivers
+#
+
+#
+# Multimedia devices
+#
+# CONFIG_VIDEO_DEV is not set
+
+#
+# Digital Video Broadcasting Devices
+#
+# CONFIG_DVB is not set
+
+#
+# Graphics support
+#
+# CONFIG_FB is not set
+
+#
+# Sound
+#
+# CONFIG_SOUND is not set
+
+#
+# USB support
+#
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+# CONFIG_USB is not set
+
+#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
+# USB Gadget Support
+#
+# CONFIG_USB_GADGET is not set
+
+#
+# MMC/SD Card support
+#
+# CONFIG_MMC is not set
+
+#
+# InfiniBand support
+#
+# CONFIG_INFINIBAND is not set
+
+#
+# SN Devices
+#
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_JBD=y
+# CONFIG_JBD_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_REISERFS_FS is not set
+# CONFIG_JFS_FS is not set
+# CONFIG_FS_POSIX_ACL is not set
+# CONFIG_XFS_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_INOTIFY=y
+# CONFIG_QUOTA is not set
+CONFIG_DNOTIFY=y
+# CONFIG_AUTOFS_FS is not set
+# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+
+#
+# DOS/FAT/NT Filesystems
+#
+# CONFIG_MSDOS_FS is not set
+# CONFIG_VFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
+# CONFIG_CONFIGFS_FS is not set
+
+#
+# Miscellaneous filesystems
+#
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_CRAMFS is not set
+# CONFIG_VXFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+
+#
+# Network File Systems
+#
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+CONFIG_NFS_V4=y
+# CONFIG_NFS_DIRECTIO is not set
+# CONFIG_NFSD is not set
+CONFIG_ROOT_NFS=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_RPCSEC_GSS_SPKM3 is not set
+# CONFIG_SMB_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+
+#
+# Native Language Support
+#
+# CONFIG_NLS is not set
+
+#
+# Library routines
+#
+# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
+CONFIG_CRC32=y
+# CONFIG_LIBCRC32C is not set
+
+#
+# Instrumentation Support
+#
+# CONFIG_PROFILING is not set
+
+#
+# Kernel hacking
+#
+# CONFIG_PRINTK_TIME is not set
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_DEBUG_KERNEL is not set
+CONFIG_LOG_BUF_SHIFT=14
+# CONFIG_BOOTX_TEXT is not set
+# CONFIG_SERIAL_TEXT_DEBUG is not set
+# CONFIG_PPC_EARLY_DEBUG_LPAR is not set
+# CONFIG_PPC_EARLY_DEBUG_G5 is not set
+# CONFIG_PPC_EARLY_DEBUG_RTAS is not set
+# CONFIG_PPC_EARLY_DEBUG_MAPLE is not set
+# CONFIG_PPC_EARLY_DEBUG_ISERIES is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY is not set
+
+#
+# Cryptographic options
+#
+CONFIG_CRYPTO=y
+# CONFIG_CRYPTO_HMAC is not set
+# CONFIG_CRYPTO_NULL is not set
+# CONFIG_CRYPTO_MD4 is not set
+CONFIG_CRYPTO_MD5=y
+# CONFIG_CRYPTO_SHA1 is not set
+# CONFIG_CRYPTO_SHA256 is not set
+# CONFIG_CRYPTO_SHA512 is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_TGR192 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_AES is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
+# CONFIG_CRYPTO_ARC4 is not set
+# CONFIG_CRYPTO_KHAZAD is not set
+# CONFIG_CRYPTO_ANUBIS is not set
+# CONFIG_CRYPTO_DEFLATE is not set
+# CONFIG_CRYPTO_MICHAEL_MIC is not set
+# CONFIG_CRYPTO_CRC32C is not set
+# CONFIG_CRYPTO_TEST is not set
+
+#
+# Hardware crypto devices
+#
+
+#
+# SEC2.x Options
+#
+CONFIG_MPC8349E_SEC2x=y
+
+#
+# SEC2.x Test Options
+#
+CONFIG_MPC8349E_SEC2xTEST=y
index bbfa1bdceb4d1b586132beb2cb0d8c4fc26fe202..a94699d8dc52b4d534264d9e95439080bc13451d 100644 (file)
@@ -11,7 +11,8 @@ CFLAGS_btext.o                += -fPIC
 endif
 
 obj-y                          := semaphore.o cputable.o ptrace.o syscalls.o \
-                                  irq.o align.o signal_32.o pmc.o vdso.o
+                                  irq.o align.o signal_32.o pmc.o vdso.o \
+                                  init_task.o process.o
 obj-y                          += vdso32/
 obj-$(CONFIG_PPC64)            += setup_64.o binfmt_elf32.o sys_ppc32.o \
                                   signal_64.o ptrace32.o systbl.o \
@@ -44,8 +45,7 @@ extra-$(CONFIG_FSL_BOOKE)     := head_fsl_booke.o
 extra-$(CONFIG_8xx)            := head_8xx.o
 extra-y                                += vmlinux.lds
 
-obj-y                          += process.o init_task.o time.o \
-                                  prom.o traps.o setup-common.o udbg.o
+obj-y                          += time.o prom.o traps.o setup-common.o udbg.o
 obj-$(CONFIG_PPC32)            += entry_32.o setup_32.o misc_32.o systbl.o
 obj-$(CONFIG_PPC64)            += misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_MULTIPLATFORM)        += prom_init.o
index 56399c5c931a94ef809a61b432ca82311327dceb..840aad43a98bd218f5c985214e483b5578d6b763 100644 (file)
@@ -135,7 +135,7 @@ int main(void)
        DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
        DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
        DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
-       DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
+       DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
        DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
 
        DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
index cca942fe61154ecb9883cdc3c6e6007eade0f228..b61d86e7ceb6059c7e838927dc66c25244441f1e 100644 (file)
@@ -130,7 +130,7 @@ _GLOBAL(__save_cpu_setup)
        mfcr    r7
 
        /* Get storage ptr */
-       LOADADDR(r5,cpu_state_storage)
+       LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
 
        /* We only deal with 970 for now */
        mfspr   r0,SPRN_PVR
@@ -164,7 +164,7 @@ _GLOBAL(__restore_cpu_setup)
        /* Get storage ptr (FIXME when using anton reloc as we
         * are running with translation disabled here
         */
-       LOADADDR(r5,cpu_state_storage)
+       LOAD_REG_IMMEDIATE(r5,cpu_state_storage)
 
        /* We only deal with 970 for now */
        mfspr   r0,SPRN_PVR
index 43c74a6b07b1502e5039f8d10ea2e9a3d1f2ade5..10696456a4c616d0d3900ec0b6370b9a2cb41402 100644 (file)
@@ -55,7 +55,8 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
 #define COMMON_USER_POWER4     (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
 #define COMMON_USER_POWER5     (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
 #define COMMON_USER_POWER5_PLUS        (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
-
+#define COMMON_USER_BOOKE      (PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU | \
+                                PPC_FEATURE_BOOKE)
 
 /* We only set the spe features if the kernel was compiled with
  * spe support
@@ -79,7 +80,8 @@ struct cpu_spec       cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/power3",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "power3",
        },
        {       /* Power3+ */
                .pvr_mask               = 0xffff0000,
@@ -92,7 +94,8 @@ struct cpu_spec       cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/power3",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "power3",
        },
        {       /* Northstar */
                .pvr_mask               = 0xffff0000,
@@ -105,7 +108,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "rs64",
        },
        {       /* Pulsar */
                .pvr_mask               = 0xffff0000,
@@ -118,7 +122,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "rs64",
        },
        {       /* I-star */
                .pvr_mask               = 0xffff0000,
@@ -131,7 +136,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "rs64",
        },
        {       /* S-star */
                .pvr_mask               = 0xffff0000,
@@ -144,7 +150,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power3,
                .oprofile_cpu_type      = "ppc64/rs64",
-               .oprofile_type          = RS64,
+               .oprofile_type          = PPC_OPROFILE_RS64,
+               .platform               = "rs64",
        },
        {       /* Power4 */
                .pvr_mask               = 0xffff0000,
@@ -157,7 +164,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power4,
                .oprofile_cpu_type      = "ppc64/power4",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "power4",
        },
        {       /* Power4+ */
                .pvr_mask               = 0xffff0000,
@@ -170,7 +178,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_power4,
                .oprofile_cpu_type      = "ppc64/power4",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "power4",
        },
        {       /* PPC970 */
                .pvr_mask               = 0xffff0000,
@@ -184,7 +193,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_ppc970,
                .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "ppc970",
        },
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_PPC64) || defined(CONFIG_POWER4)
@@ -204,7 +214,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 8,
                .cpu_setup              = __setup_cpu_ppc970,
                .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "ppc970",
        },
 #endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */
 #ifdef CONFIG_PPC64
@@ -219,7 +230,8 @@ struct cpu_spec     cpu_specs[] = {
                .dcache_bsize           = 128,
                .cpu_setup              = __setup_cpu_ppc970,
                .oprofile_cpu_type      = "ppc64/970",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "ppc970",
        },
        {       /* Power5 GR */
                .pvr_mask               = 0xffff0000,
@@ -232,7 +244,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_power4,
                .oprofile_cpu_type      = "ppc64/power5",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "power5",
        },
        {       /* Power5 GS */
                .pvr_mask               = 0xffff0000,
@@ -245,7 +258,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_power4,
                .oprofile_cpu_type      = "ppc64/power5+",
-               .oprofile_type          = POWER4,
+               .oprofile_type          = PPC_OPROFILE_POWER4,
+               .platform               = "power5+",
        },
        {       /* Cell Broadband Engine */
                .pvr_mask               = 0xffff0000,
@@ -257,6 +271,7 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 128,
                .dcache_bsize           = 128,
                .cpu_setup              = __setup_cpu_be,
+               .platform               = "ppc-cell-be",
        },
        {       /* default match */
                .pvr_mask               = 0x00000000,
@@ -268,6 +283,7 @@ struct cpu_spec     cpu_specs[] = {
                .dcache_bsize           = 128,
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_power4,
+               .platform               = "power4",
        }
 #endif /* CONFIG_PPC64 */
 #ifdef CONFIG_PPC32
@@ -281,6 +297,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_UNIFIED_CACHE | PPC_FEATURE_NO_TB,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc601",
        },
        {       /* 603 */
                .pvr_mask               = 0xffff0000,
@@ -290,7 +307,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* 603e */
                .pvr_mask               = 0xffff0000,
@@ -300,7 +318,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* 603ev */
                .pvr_mask               = 0xffff0000,
@@ -310,7 +329,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* 604 */
                .pvr_mask               = 0xffff0000,
@@ -321,7 +341,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 2,
-               .cpu_setup              = __setup_cpu_604
+               .cpu_setup              = __setup_cpu_604,
+               .platform               = "ppc604",
        },
        {       /* 604e */
                .pvr_mask               = 0xfffff000,
@@ -332,7 +353,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_604
+               .cpu_setup              = __setup_cpu_604,
+               .platform               = "ppc604",
        },
        {       /* 604r */
                .pvr_mask               = 0xffff0000,
@@ -343,7 +365,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_604
+               .cpu_setup              = __setup_cpu_604,
+               .platform               = "ppc604",
        },
        {       /* 604ev */
                .pvr_mask               = 0xffff0000,
@@ -354,7 +377,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_604
+               .cpu_setup              = __setup_cpu_604,
+               .platform               = "ppc604",
        },
        {       /* 740/750 (0x4202, don't support TAU ?) */
                .pvr_mask               = 0xffffffff,
@@ -365,7 +389,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750
+               .cpu_setup              = __setup_cpu_750,
+               .platform               = "ppc750",
        },
        {       /* 750CX (80100 and 8010x?) */
                .pvr_mask               = 0xfffffff0,
@@ -376,7 +401,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750cx
+               .cpu_setup              = __setup_cpu_750cx,
+               .platform               = "ppc750",
        },
        {       /* 750CX (82201 and 82202) */
                .pvr_mask               = 0xfffffff0,
@@ -387,7 +413,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750cx
+               .cpu_setup              = __setup_cpu_750cx,
+               .platform               = "ppc750",
        },
        {       /* 750CXe (82214) */
                .pvr_mask               = 0xfffffff0,
@@ -398,7 +425,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750cx
+               .cpu_setup              = __setup_cpu_750cx,
+               .platform               = "ppc750",
        },
        {       /* 750CXe "Gekko" (83214) */
                .pvr_mask               = 0xffffffff,
@@ -409,7 +437,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750cx
+               .cpu_setup              = __setup_cpu_750cx,
+               .platform               = "ppc750",
        },
        {       /* 745/755 */
                .pvr_mask               = 0xfffff000,
@@ -420,7 +449,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750
+               .cpu_setup              = __setup_cpu_750,
+               .platform               = "ppc750",
        },
        {       /* 750FX rev 1.x */
                .pvr_mask               = 0xffffff00,
@@ -431,7 +461,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750
+               .cpu_setup              = __setup_cpu_750,
+               .platform               = "ppc750",
        },
        {       /* 750FX rev 2.0 must disable HID0[DPM] */
                .pvr_mask               = 0xffffffff,
@@ -442,7 +473,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750
+               .cpu_setup              = __setup_cpu_750,
+               .platform               = "ppc750",
        },
        {       /* 750FX (All revs except 2.0) */
                .pvr_mask               = 0xffff0000,
@@ -453,7 +485,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750fx
+               .cpu_setup              = __setup_cpu_750fx,
+               .platform               = "ppc750",
        },
        {       /* 750GX */
                .pvr_mask               = 0xffff0000,
@@ -464,7 +497,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750fx
+               .cpu_setup              = __setup_cpu_750fx,
+               .platform               = "ppc750",
        },
        {       /* 740/750 (L2CR bit need fixup for 740) */
                .pvr_mask               = 0xffff0000,
@@ -475,7 +509,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_750
+               .cpu_setup              = __setup_cpu_750,
+               .platform               = "ppc750",
        },
        {       /* 7400 rev 1.1 ? (no TAU) */
                .pvr_mask               = 0xffffffff,
@@ -486,7 +521,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_7400
+               .cpu_setup              = __setup_cpu_7400,
+               .platform               = "ppc7400",
        },
        {       /* 7400 */
                .pvr_mask               = 0xffff0000,
@@ -497,7 +533,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_7400
+               .cpu_setup              = __setup_cpu_7400,
+               .platform               = "ppc7400",
        },
        {       /* 7410 */
                .pvr_mask               = 0xffff0000,
@@ -508,7 +545,8 @@ struct cpu_spec     cpu_specs[] = {
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
-               .cpu_setup              = __setup_cpu_7410
+               .cpu_setup              = __setup_cpu_7410,
+               .platform               = "ppc7400",
        },
        {       /* 7450 2.0 - no doze/nap */
                .pvr_mask               = 0xffffffff,
@@ -521,7 +559,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7450 2.1 */
                .pvr_mask               = 0xffffffff,
@@ -534,7 +573,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7450 2.3 and newer */
                .pvr_mask               = 0xffff0000,
@@ -547,7 +587,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7455 rev 1.x */
                .pvr_mask               = 0xffffff00,
@@ -560,7 +601,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7455 rev 2.0 */
                .pvr_mask               = 0xffffffff,
@@ -573,7 +615,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7455 others */
                .pvr_mask               = 0xffff0000,
@@ -586,7 +629,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7447/7457 Rev 1.0 */
                .pvr_mask               = 0xffffffff,
@@ -599,7 +643,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7447/7457 Rev 1.1 */
                .pvr_mask               = 0xffffffff,
@@ -612,7 +657,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7447/7457 Rev 1.2 and later */
                .pvr_mask               = 0xffff0000,
@@ -625,7 +671,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7447A */
                .pvr_mask               = 0xffff0000,
@@ -638,7 +685,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 7448 */
                .pvr_mask               = 0xffff0000,
@@ -651,7 +699,8 @@ struct cpu_spec     cpu_specs[] = {
                .num_pmcs               = 6,
                .cpu_setup              = __setup_cpu_745x,
                .oprofile_cpu_type      = "ppc/7450",
-               .oprofile_type          = G4,
+               .oprofile_type          = PPC_OPROFILE_G4,
+               .platform               = "ppc7450",
        },
        {       /* 82xx (8240, 8245, 8260 are all 603e cores) */
                .pvr_mask               = 0x7fff0000,
@@ -661,7 +710,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* All G2_LE (603e core, plus some) have the same pvr */
                .pvr_mask               = 0x7fff0000,
@@ -671,7 +721,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* e300 (a 603e core, plus some) on 83xx */
                .pvr_mask               = 0x7fff0000,
@@ -681,7 +732,8 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
-               .cpu_setup              = __setup_cpu_603
+               .cpu_setup              = __setup_cpu_603,
+               .platform               = "ppc603",
        },
        {       /* default match, we assume split I/D cache & TB (non-601)... */
                .pvr_mask               = 0x00000000,
@@ -691,6 +743,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = COMMON_USER,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc603",
        },
 #endif /* CLASSIC_PPC */
 #ifdef CONFIG_8xx
@@ -704,6 +757,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .platform               = "ppc823",
        },
 #endif /* CONFIG_8xx */
 #ifdef CONFIG_40x
@@ -715,6 +769,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .platform               = "ppc403",
        },
        {       /* 403GCX */
                .pvr_mask               = 0xffffff00,
@@ -725,6 +780,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_NO_TB,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .platform               = "ppc403",
        },
        {       /* 403G ?? */
                .pvr_mask               = 0xffff0000,
@@ -734,6 +790,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 16,
                .dcache_bsize           = 16,
+               .platform               = "ppc403",
        },
        {       /* 405GP */
                .pvr_mask               = 0xffff0000,
@@ -744,6 +801,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* STB 03xxx */
                .pvr_mask               = 0xffff0000,
@@ -754,6 +812,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* STB 04xxx */
                .pvr_mask               = 0xffff0000,
@@ -764,6 +823,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* NP405L */
                .pvr_mask               = 0xffff0000,
@@ -774,6 +834,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* NP4GS3 */
                .pvr_mask               = 0xffff0000,
@@ -784,6 +845,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {   /* NP405H */
                .pvr_mask               = 0xffff0000,
@@ -794,6 +856,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* 405GPr */
                .pvr_mask               = 0xffff0000,
@@ -804,6 +867,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {   /* STBx25xx */
                .pvr_mask               = 0xffff0000,
@@ -814,6 +878,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* 405LP */
                .pvr_mask               = 0xffff0000,
@@ -823,6 +888,7 @@ struct cpu_spec     cpu_specs[] = {
                .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* Xilinx Virtex-II Pro  */
                .pvr_mask               = 0xffff0000,
@@ -833,6 +899,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
        {       /* 405EP */
                .pvr_mask               = 0xffff0000,
@@ -843,6 +910,7 @@ struct cpu_spec     cpu_specs[] = {
                        PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_4xxMAC,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc405",
        },
 
 #endif /* CONFIG_40x */
@@ -852,81 +920,90 @@ struct cpu_spec   cpu_specs[] = {
                .pvr_value              = 0x40000850,
                .cpu_name               = "440EP Rev. A",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = COMMON_USER, /* 440EP has an FPU */
+               .cpu_user_features      = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        {
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x400008d3,
                .cpu_name               = "440EP Rev. B",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = COMMON_USER, /* 440EP has an FPU */
+               .cpu_user_features      = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        {       /* 440GP Rev. B */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x40000440,
                .cpu_name               = "440GP Rev. B",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440gp",
        },
        {       /* 440GP Rev. C */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x40000481,
                .cpu_name               = "440GP Rev. C",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440gp",
        },
        { /* 440GX Rev. A */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000850,
                .cpu_name               = "440GX Rev. A",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        { /* 440GX Rev. B */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000851,
                .cpu_name               = "440GX Rev. B",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        { /* 440GX Rev. C */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000892,
                .cpu_name               = "440GX Rev. C",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        { /* 440GX Rev. F */
                .pvr_mask               = 0xf0000fff,
                .pvr_value              = 0x50000894,
                .cpu_name               = "440GX Rev. F",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        { /* 440SP Rev. A */
                .pvr_mask               = 0xff000fff,
                .pvr_value              = 0x53000891,
                .cpu_name               = "440SP Rev. A",
                .cpu_features           = CPU_FTRS_44X,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
        { /* 440SPe Rev. A */
                .pvr_mask               = 0xff000fff,
@@ -934,9 +1011,10 @@ struct cpu_spec   cpu_specs[] = {
                .cpu_name               = "440SPe Rev. A",
                .cpu_features           = CPU_FTR_SPLIT_ID_CACHE |
                        CPU_FTR_USE_TB,
-               .cpu_user_features      = PPC_FEATURE_32 | PPC_FEATURE_HAS_MMU,
+               .cpu_user_features      = COMMON_USER_BOOKE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "ppc440",
        },
 #endif /* CONFIG_44x */
 #ifdef CONFIG_FSL_BOOKE
@@ -946,10 +1024,11 @@ struct cpu_spec  cpu_specs[] = {
                .cpu_name               = "e200z5",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
                .cpu_features           = CPU_FTRS_E200,
-               .cpu_user_features      = PPC_FEATURE_32 |
-                       PPC_FEATURE_HAS_MMU | PPC_FEATURE_HAS_EFP_SINGLE |
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_HAS_EFP_SINGLE |
                        PPC_FEATURE_UNIFIED_CACHE,
                .dcache_bsize           = 32,
+               .platform               = "ppc5554",
        },
        {       /* e200z6 */
                .pvr_mask               = 0xfff00000,
@@ -957,11 +1036,12 @@ struct cpu_spec  cpu_specs[] = {
                .cpu_name               = "e200z6",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
                .cpu_features           = CPU_FTRS_E200,
-               .cpu_user_features      = PPC_FEATURE_32 |
-                       PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_SPE_COMP |
                        PPC_FEATURE_HAS_EFP_SINGLE |
                        PPC_FEATURE_UNIFIED_CACHE,
                .dcache_bsize           = 32,
+               .platform               = "ppc5554",
        },
        {       /* e500 */
                .pvr_mask               = 0xffff0000,
@@ -969,14 +1049,15 @@ struct cpu_spec  cpu_specs[] = {
                .cpu_name               = "e500",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
                .cpu_features           = CPU_FTRS_E500,
-               .cpu_user_features      = PPC_FEATURE_32 |
-                       PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_SPE_COMP |
                        PPC_FEATURE_HAS_EFP_SINGLE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
                .oprofile_cpu_type      = "ppc/e500",
-               .oprofile_type          = BOOKE,
+               .oprofile_type          = PPC_OPROFILE_BOOKE,
+               .platform               = "ppc8540",
        },
        {       /* e500v2 */
                .pvr_mask               = 0xffff0000,
@@ -984,14 +1065,16 @@ struct cpu_spec  cpu_specs[] = {
                .cpu_name               = "e500v2",
                /* xxx - galak: add CPU_FTR_MAYBE_CAN_DOZE */
                .cpu_features           = CPU_FTRS_E500_2,
-               .cpu_user_features      = PPC_FEATURE_32 |
-                       PPC_FEATURE_HAS_MMU | PPC_FEATURE_SPE_COMP |
-                       PPC_FEATURE_HAS_EFP_SINGLE | PPC_FEATURE_HAS_EFP_DOUBLE,
+               .cpu_user_features      = COMMON_USER_BOOKE |
+                       PPC_FEATURE_SPE_COMP |
+                       PPC_FEATURE_HAS_EFP_SINGLE |
+                       PPC_FEATURE_HAS_EFP_DOUBLE,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
                .num_pmcs               = 4,
                .oprofile_cpu_type      = "ppc/e500",
-               .oprofile_type          = BOOKE,
+               .oprofile_type          = PPC_OPROFILE_BOOKE,
+               .platform               = "ppc8548",
        },
 #endif
 #if !CLASSIC_PPC
@@ -1003,6 +1086,7 @@ struct cpu_spec   cpu_specs[] = {
                .cpu_user_features      = PPC_FEATURE_32,
                .icache_bsize           = 32,
                .dcache_bsize           = 32,
+               .platform               = "powerpc",
        }
 #endif /* !CLASSIC_PPC */
 #endif /* CONFIG_PPC32 */
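The cputable.c hunks above do two mechanical things to every entry: the bare oprofile type constants (RS64, POWER4, G4, BOOKE) move into a PPC_OPROFILE_* namespace, and a new .platform string is added per CPU family. A minimal, self-contained sketch of the resulting entry shape follows; the struct and enum here are reduced stand-ins with assumed types and placeholder values, not the kernel's real struct cpu_spec or its constants, and they carry only the fields touched by this patch.

	#include <stdio.h>

	/* Reduced stand-in for the kernel's struct cpu_spec: only the fields
	 * touched by the hunks above, with assumed types. */
	enum ppc_oprofile_sketch {
		PPC_OPROFILE_RS64 = 1,		/* numeric values are placeholders */
		PPC_OPROFILE_POWER4,
		PPC_OPROFILE_G4,
		PPC_OPROFILE_BOOKE,
	};

	struct cpu_spec_sketch {
		unsigned int		 pvr_mask;
		const char		*oprofile_cpu_type;
		enum ppc_oprofile_sketch oprofile_type;
		const char		*platform;	/* new field added by this patch */
	};

	/* Values copied from the "Power5 GR" hunk above. */
	static const struct cpu_spec_sketch power5_gr = {
		.pvr_mask		= 0xffff0000,
		.oprofile_cpu_type	= "ppc64/power5",
		.oprofile_type		= PPC_OPROFILE_POWER4,	/* was plain POWER4 */
		.platform		= "power5",
	};

	int main(void)
	{
		printf("oprofile=%s platform=%s\n",
		       power5_gr.oprofile_cpu_type, power5_gr.platform);
		return 0;
	}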
index 036b71d2adfc191c0dd5e33be04264aae16894e0..d8da2a35c0a4adf62dd6b9cbe5d91f7470fc170e 100644 (file)
@@ -988,7 +988,7 @@ _GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
-       LOADADDR(r4, rtas)
+       LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
index aacebb33e98a1a9224371e943e5c9fb9f5163512..5420363188660db8803f4f771aca1ffb75481cbf 100644 (file)
@@ -511,7 +511,8 @@ restore:
        cmpdi   0,r5,0
        beq     4f
        /* Check for pending interrupts (iSeries) */
-       ld      r3,PACALPPACA+LPPACAANYINT(r13)
+       ld      r3,PACALPPACAPTR(r13)
+       ld      r3,LPPACAANYINT(r3)
        cmpdi   r3,0
        beq+    4f                      /* skip do_IRQ if no interrupts */
 
@@ -689,9 +690,8 @@ _GLOBAL(enter_rtas)
         std    r6,PACASAVEDMSR(r13)
 
        /* Setup our real return addr */        
-       SET_REG_TO_LABEL(r4,.rtas_return_loc)
-       SET_REG_TO_CONST(r9,PAGE_OFFSET)
-       sub     r4,r4,r9
+       LOAD_REG_ADDR(r4,.rtas_return_loc)
+       clrldi  r4,r4,2                 /* convert to realmode address */
                mtlr    r4
 
        li      r0,0
@@ -706,7 +706,7 @@ _GLOBAL(enter_rtas)
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */
 
-       SET_REG_TO_LABEL(r4,rtas)
+       LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */
        
@@ -718,8 +718,7 @@ _GLOBAL(enter_rtas)
 _STATIC(rtas_return_loc)
        /* relocation is off at this point */
        mfspr   r4,SPRN_SPRG3           /* Get PACA */
-       SET_REG_TO_CONST(r5, PAGE_OFFSET)
-        sub     r4,r4,r5                /* RELOC the PACA base pointer */
+       clrldi  r4,r4,2                 /* convert to realmode address */
 
        mfmsr   r6
        li      r0,MSR_RI
@@ -728,7 +727,7 @@ _STATIC(rtas_return_loc)
        mtmsrd  r6
         
         ld     r1,PACAR1(r4)           /* Restore our SP */
-       LOADADDR(r3,.rtas_restore_regs)
+       LOAD_REG_IMMEDIATE(r3,.rtas_restore_regs)
         ld     r4,PACASAVEDMSR(r4)     /* Restore our MSR */
 
        mtspr   SPRN_SRR0,r3
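In the enter_rtas and rtas_return_loc hunks above, the old two-step sequence that loaded PAGE_OFFSET into a scratch register and subtracted it is replaced by a single clrldi reg,reg,2, i.e. clearing the top two bits of the address. A small C sketch of why the two are equivalent for linear-mapped kernel addresses follows; the 0xc000000000000000 base used here is an assumption for illustration and does not appear in the hunks.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Assumed ppc64 linear-map base, for illustration only. */
	#define PAGE_OFFSET_SKETCH	0xc000000000000000ULL

	int main(void)
	{
		uint64_t vaddr = PAGE_OFFSET_SKETCH + 0x123456;

		uint64_t by_sub    = vaddr - PAGE_OFFSET_SKETCH;	/* old: SET_REG_TO_CONST + sub */
		uint64_t by_clrldi = vaddr & ~(3ULL << 62);		/* new: clrldi reg,reg,2 */

		assert(by_sub == by_clrldi);
		printf("realmode address: 0x%llx\n", (unsigned long long)by_clrldi);
		return 0;
	}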
index b780b42c95fc0f57c42dbb4b12ed58876742e4d3..e4362dfa37fba2e1971167c23d1a6c613152062e 100644 (file)
@@ -39,9 +39,9 @@ _GLOBAL(load_up_fpu)
  * to another.  Instead we call giveup_fpu in switch_to.
  */
 #ifndef CONFIG_SMP
-       LOADBASE(r3, last_task_used_math)
+       LOAD_REG_ADDRBASE(r3, last_task_used_math)
        toreal(r3)
-       PPC_LL  r4,OFF(last_task_used_math)(r3)
+       PPC_LL  r4,ADDROFF(last_task_used_math)(r3)
        PPC_LCMPI       0,r4,0
        beq     1f
        toreal(r4)
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
 #ifndef CONFIG_SMP
        subi    r4,r5,THREAD
        fromreal(r4)
-       PPC_STL r4,OFF(last_task_used_math)(r3)
+       PPC_STL r4,ADDROFF(last_task_used_math)(r3)
 #endif /* CONFIG_SMP */
        /* restore registers and return */
        /* we haven't used ctr or xer or lr */
@@ -113,8 +113,8 @@ _GLOBAL(giveup_fpu)
 1:
 #ifndef CONFIG_SMP
        li      r5,0
-       LOADBASE(r4,last_task_used_math)
-       PPC_STL r5,OFF(last_task_used_math)(r4)
+       LOAD_REG_ADDRBASE(r4,last_task_used_math)
+       PPC_STL r5,ADDROFF(last_task_used_math)(r4)
 #endif /* CONFIG_SMP */
        blr
 
index 1c066d1253756ae3b9f008e5246bcbe2fa2bf9b3..3082684663428bffc0483a1995dc78b69cc3cc7d 100644 (file)
@@ -154,12 +154,12 @@ _GLOBAL(__secondary_hold)
        bne     100b
 
 #ifdef CONFIG_HMT
-       LOADADDR(r4, .hmt_init)
+       SET_REG_IMMEDIATE(r4, .hmt_init)
        mtctr   r4
        bctr
 #else
 #ifdef CONFIG_SMP
-       LOADADDR(r4, .pSeries_secondary_smp_init)
+       LOAD_REG_IMMEDIATE(r4, .pSeries_secondary_smp_init)
        mtctr   r4
        mr      r3,r24
        bctr
@@ -205,9 +205,10 @@ exception_marker:
 #define EX_LR          72
 
 /*
- * We're short on space and time in the exception prolog, so we can't use
- * the normal LOADADDR macro. Normally we just need the low halfword of the
- * address, but for Kdump we need the whole low word.
+ * We're short on space and time in the exception prolog, so we can't
+ * use the normal SET_REG_IMMEDIATE macro. Normally we just need the
+ * low halfword of the address, but for Kdump we need the whole low
+ * word.
  */
 #ifdef CONFIG_CRASH_DUMP
 #define LOAD_HANDLER(reg, label)                                       \
@@ -254,8 +255,9 @@ exception_marker:
 
 #define EXCEPTION_PROLOG_ISERIES_2                                     \
        mfmsr   r10;                                                    \
-       ld      r11,PACALPPACA+LPPACASRR0(r13);                         \
-       ld      r12,PACALPPACA+LPPACASRR1(r13);                         \
+       ld      r12,PACALPPACAPTR(r13);                                 \
+       ld      r11,LPPACASRR0(r12);                                    \
+       ld      r12,LPPACASRR1(r12);                                    \
        ori     r10,r10,MSR_RI;                                         \
        mtmsrd  r10,1
 
@@ -634,7 +636,8 @@ data_access_slb_iSeries:
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r12,LPPACASRR1(r12)
        b       .slb_miss_realmode
 
        STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN)
@@ -644,7 +647,8 @@ instruction_access_slb_iSeries:
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
-       ld      r3,PACALPPACA+LPPACASRR0(r13)   /* get SRR0 value */
+       ld      r3,PACALPPACAPTR(r13)
+       ld      r3,LPPACASRR0(r3)       /* get SRR0 value */
        std     r9,PACA_EXSLB+EX_R9(r13)
        mfcr    r9
 #ifdef __DISABLED__
@@ -656,7 +660,8 @@ instruction_access_slb_iSeries:
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13);
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r12,LPPACASRR1(r12)
        b       .slb_miss_realmode
 
 #ifdef __DISABLED__
@@ -713,7 +718,7 @@ system_reset_iSeries:
        lbz     r23,PACAPROCSTART(r13)  /* Test if this processor
                                         * should start */
        sync
-       LOADADDR(r3,current_set)
+       LOAD_REG_IMMEDIATE(r3,current_set)
        sldi    r28,r24,3               /* get current_set[cpu#] */
        ldx     r3,r3,r28
        addi    r1,r3,THREAD_SIZE
@@ -745,17 +750,19 @@ iSeries_secondary_smp_loop:
        .globl decrementer_iSeries_masked
 decrementer_iSeries_masked:
        li      r11,1
-       stb     r11,PACALPPACA+LPPACADECRINT(r13)
-       LOADBASE(r12,tb_ticks_per_jiffy)
-       lwz     r12,OFF(tb_ticks_per_jiffy)(r12)
+       ld      r12,PACALPPACAPTR(r13)
+       stb     r11,LPPACADECRINT(r12)
+       LOAD_REG_ADDRBASE(r12,tb_ticks_per_jiffy)
+       lwz     r12,ADDROFF(tb_ticks_per_jiffy)(r12)
        mtspr   SPRN_DEC,r12
        /* fall through */
 
        .globl hardware_interrupt_iSeries_masked
 hardware_interrupt_iSeries_masked:
        mtcrf   0x80,r9         /* Restore regs */
-       ld      r11,PACALPPACA+LPPACASRR0(r13)
-       ld      r12,PACALPPACA+LPPACASRR1(r13)
+       ld      r12,PACALPPACAPTR(r13)
+       ld      r11,LPPACASRR0(r12)
+       ld      r12,LPPACASRR1(r12)
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXGEN+EX_R9(r13)
@@ -994,7 +1001,8 @@ _GLOBAL(slb_miss_realmode)
        ld      r3,PACA_EXSLB+EX_R3(r13)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
 #ifdef CONFIG_PPC_ISERIES
-       ld      r11,PACALPPACA+LPPACASRR0(r13)  /* get SRR0 value */
+       ld      r11,PACALPPACAPTR(r13)
+       ld      r11,LPPACASRR0(r11)             /* get SRR0 value */
 #endif /* CONFIG_PPC_ISERIES */
 
        mtlr    r10
@@ -1412,7 +1420,7 @@ _GLOBAL(pSeries_secondary_smp_init)
         * physical cpu id in r24, we need to search the pacas to find
         * which logical id maps to our physical one.
         */
-       LOADADDR(r13, paca)             /* Get base vaddr of paca array  */
+       LOAD_REG_IMMEDIATE(r13, paca)   /* Get base vaddr of paca array  */
        li      r5,0                    /* logical cpu id                */
 1:     lhz     r6,PACAHWCPUID(r13)     /* Load HW procid from paca      */
        cmpw    r6,r24                  /* Compare to our id             */
@@ -1446,8 +1454,8 @@ _GLOBAL(pSeries_secondary_smp_init)
 #ifdef CONFIG_PPC_ISERIES
 _STATIC(__start_initialization_iSeries)
        /* Clear out the BSS */
-       LOADADDR(r11,__bss_stop)
-       LOADADDR(r8,__bss_start)
+       LOAD_REG_IMMEDIATE(r11,__bss_stop)
+       LOAD_REG_IMMEDIATE(r8,__bss_start)
        sub     r11,r11,r8              /* bss size                     */
        addi    r11,r11,7               /* round up to an even double word */
        rldicl. r11,r11,61,3            /* shift right by 3             */
@@ -1458,17 +1466,17 @@ _STATIC(__start_initialization_iSeries)
 3:     stdu    r0,8(r8)
        bdnz    3b
 4:
-       LOADADDR(r1,init_thread_union)
+       LOAD_REG_IMMEDIATE(r1,init_thread_union)
        addi    r1,r1,THREAD_SIZE
        li      r0,0
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
-       LOADADDR(r3,cpu_specs)
-       LOADADDR(r4,cur_cpu_spec)
+       LOAD_REG_IMMEDIATE(r3,cpu_specs)
+       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
        li      r5,0
        bl      .identify_cpu
 
-       LOADADDR(r2,__toc_start)
+       LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
 
@@ -1528,7 +1536,7 @@ _GLOBAL(__start_initialization_multiplatform)
        li      r24,0
 
        /* Switch off MMU if not already */
-       LOADADDR(r4, .__after_prom_start - KERNELBASE)
+       LOAD_REG_IMMEDIATE(r4, .__after_prom_start - KERNELBASE)
        add     r4,r4,r30
        bl      .__mmu_off
        b       .__after_prom_start
@@ -1548,7 +1556,7 @@ _STATIC(__boot_from_prom)
        /* put a relocation offset into r3 */
        bl      .reloc_offset
 
-       LOADADDR(r2,__toc_start)
+       LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
 
@@ -1588,9 +1596,9 @@ _STATIC(__after_prom_start)
  */
        bl      .reloc_offset
        mr      r26,r3
-       SET_REG_TO_CONST(r27,KERNELBASE)
+       LOAD_REG_IMMEDIATE(r27, KERNELBASE)
 
-       LOADADDR(r3, PHYSICAL_START)    /* target addr */
+       LOAD_REG_IMMEDIATE(r3, PHYSICAL_START)  /* target addr */
 
        // XXX FIXME: Use phys returned by OF (r30)
        add     r4,r27,r26              /* source addr                   */
@@ -1598,7 +1606,7 @@ _STATIC(__after_prom_start)
                                        /*   i.e. where we are running   */
                                        /*      the source addr          */
 
-       LOADADDR(r5,copy_to_here)       /* # bytes of memory to copy     */
+       LOAD_REG_IMMEDIATE(r5,copy_to_here) /* # bytes of memory to copy */
        sub     r5,r5,r27
 
        li      r6,0x100                /* Start offset, the first 0x100 */
@@ -1608,11 +1616,11 @@ _STATIC(__after_prom_start)
                                        /* this includes the code being  */
                                        /* executed here.                */
 
-       LOADADDR(r0, 4f)                /* Jump to the copy of this code */
+       LOAD_REG_IMMEDIATE(r0, 4f)      /* Jump to the copy of this code */
        mtctr   r0                      /* that we just made/relocated   */
        bctr
 
-4:     LOADADDR(r5,klimit)
+4:     LOAD_REG_IMMEDIATE(r5,klimit)
        add     r5,r5,r26
        ld      r5,0(r5)                /* get the value of klimit */
        sub     r5,r5,r27
@@ -1694,7 +1702,7 @@ _GLOBAL(pmac_secondary_start)
        mtmsrd  r3                      /* RI on */
 
        /* Set up a paca value for this processor. */
-       LOADADDR(r4, paca)               /* Get base vaddr of paca array        */
+       LOAD_REG_IMMEDIATE(r4, paca)    /* Get base vaddr of paca array */
        mulli   r13,r24,PACA_SIZE        /* Calculate vaddr of right paca */
        add     r13,r13,r4              /* for this processor.          */
        mtspr   SPRN_SPRG3,r13           /* Save vaddr of paca in SPRG3 */
@@ -1731,7 +1739,7 @@ _GLOBAL(__secondary_start)
        bl      .early_setup_secondary
 
        /* Initialize the kernel stack.  Just a repeat for iSeries.      */
-       LOADADDR(r3,current_set)
+       LOAD_REG_ADDR(r3, current_set)
        sldi    r28,r24,3               /* get current_set[cpu#]         */
        ldx     r1,r3,r28
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
@@ -1742,8 +1750,8 @@ _GLOBAL(__secondary_start)
        mtlr    r7
 
        /* enable MMU and jump to start_secondary */
-       LOADADDR(r3,.start_secondary_prolog)
-       SET_REG_TO_CONST(r4, MSR_KERNEL)
+       LOAD_REG_ADDR(r3, .start_secondary_prolog)
+       LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
 #ifdef DO_SOFT_DISABLE
        ori     r4,r4,MSR_EE
 #endif
@@ -1792,8 +1800,8 @@ _STATIC(start_here_multiplatform)
         * be detached from the kernel completely. Besides, we need
         * to clear it now for kexec-style entry.
         */
-       LOADADDR(r11,__bss_stop)
-       LOADADDR(r8,__bss_start)
+       LOAD_REG_IMMEDIATE(r11,__bss_stop)
+       LOAD_REG_IMMEDIATE(r8,__bss_start)
        sub     r11,r11,r8              /* bss size                     */
        addi    r11,r11,7               /* round up to an even double word */
        rldicl. r11,r11,61,3            /* shift right by 3             */
@@ -1831,7 +1839,7 @@ _STATIC(start_here_multiplatform)
        /* up the htab.  This is done because we have relocated the  */
        /* kernel but are still running in real mode. */
 
-       LOADADDR(r3,init_thread_union)
+       LOAD_REG_IMMEDIATE(r3,init_thread_union)
        add     r3,r3,r26
 
        /* set up a stack pointer (physical address) */
@@ -1840,14 +1848,14 @@ _STATIC(start_here_multiplatform)
        stdu    r0,-STACK_FRAME_OVERHEAD(r1)
 
        /* set up the TOC (physical address) */
-       LOADADDR(r2,__toc_start)
+       LOAD_REG_IMMEDIATE(r2,__toc_start)
        addi    r2,r2,0x4000
        addi    r2,r2,0x4000
        add     r2,r2,r26
 
-       LOADADDR(r3,cpu_specs)
+       LOAD_REG_IMMEDIATE(r3, cpu_specs)
        add     r3,r3,r26
-       LOADADDR(r4,cur_cpu_spec)
+       LOAD_REG_IMMEDIATE(r4,cur_cpu_spec)
        add     r4,r4,r26
        mr      r5,r26
        bl      .identify_cpu
@@ -1863,11 +1871,11 @@ _STATIC(start_here_multiplatform)
         * nowhere it can be initialized differently before we reach this
         * code
         */
-       LOADADDR(r27, boot_cpuid)
+       LOAD_REG_IMMEDIATE(r27, boot_cpuid)
        add     r27,r27,r26
        lwz     r27,0(r27)
 
-       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
+       LOAD_REG_IMMEDIATE(r24, paca)   /* Get base vaddr of paca array  */
        mulli   r13,r27,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r24             /* for this processor.           */
        add     r13,r13,r26             /* convert to physical addr      */
@@ -1880,8 +1888,8 @@ _STATIC(start_here_multiplatform)
        mr      r3,r31
        bl      .early_setup
 
-       LOADADDR(r3,.start_here_common)
-       SET_REG_TO_CONST(r4, MSR_KERNEL)
+       LOAD_REG_IMMEDIATE(r3, .start_here_common)
+       LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        rfid
@@ -1895,7 +1903,7 @@ _STATIC(start_here_common)
        /* The following code sets up the SP and TOC now that we are */
        /* running with translation enabled. */
 
-       LOADADDR(r3,init_thread_union)
+       LOAD_REG_IMMEDIATE(r3,init_thread_union)
 
        /* set up the stack */
        addi    r1,r3,THREAD_SIZE
@@ -1908,16 +1916,16 @@ _STATIC(start_here_common)
        li      r3,0
        bl      .do_cpu_ftr_fixups
 
-       LOADADDR(r26, boot_cpuid)
+       LOAD_REG_IMMEDIATE(r26, boot_cpuid)
        lwz     r26,0(r26)
 
-       LOADADDR(r24, paca)             /* Get base vaddr of paca array  */
+       LOAD_REG_IMMEDIATE(r24, paca)   /* Get base vaddr of paca array  */
        mulli   r13,r26,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r24             /* for this processor.           */
        mtspr   SPRN_SPRG3,r13
 
        /* ptr to current */
-       LOADADDR(r4,init_task)
+       LOAD_REG_IMMEDIATE(r4, init_task)
        std     r4,PACACURRENT(r13)
 
        /* Load the TOC */
@@ -1940,7 +1948,7 @@ _STATIC(start_here_common)
 
 _GLOBAL(hmt_init)
 #ifdef CONFIG_HMT
-       LOADADDR(r5, hmt_thread_data)
+       LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
        mfspr   r7,SPRN_PVR
        srwi    r7,r7,16
        cmpwi   r7,0x34                 /* Pulsar  */
@@ -1961,7 +1969,7 @@ _GLOBAL(hmt_init)
        b       101f
 
 __hmt_secondary_hold:
-       LOADADDR(r5, hmt_thread_data)
+       LOAD_REG_IMMEDIATE(r5, hmt_thread_data)
        clrldi  r5,r5,4
        li      r7,0
        mfspr   r6,SPRN_PIR
@@ -1989,7 +1997,7 @@ __hmt_secondary_hold:
 
 #ifdef CONFIG_HMT
 _GLOBAL(hmt_start_secondary)
-       LOADADDR(r4,__hmt_secondary_hold)
+       LOAD_REG_IMMEDIATE(r4,__hmt_secondary_hold)
        clrldi  r4,r4,4
        mtspr   SPRN_NIADORM, r4
        mfspr   r4, SPRN_MSRDORM
index 1494e2f177f7e3baa04314d68b327b15d5272dae..c16b4afab582f4ed3e4306045a0b4995d4e1d636 100644 (file)
@@ -38,14 +38,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
        /* We must dynamically check for the NAP feature as it
         * can be cleared by CPU init after the fixups are done
         */
-       LOADBASE(r3,cur_cpu_spec)
-       ld      r4,OFF(cur_cpu_spec)(r3)
+       LOAD_REG_ADDRBASE(r3,cur_cpu_spec)
+       ld      r4,ADDROFF(cur_cpu_spec)(r3)
        ld      r4,CPU_SPEC_FEATURES(r4)
        andi.   r0,r4,CPU_FTR_CAN_NAP
        beqlr
        /* Now check if user or arch enabled NAP mode */
-       LOADBASE(r3,powersave_nap)
-       lwz     r4,OFF(powersave_nap)(r3)
+       LOAD_REG_ADDRBASE(r3,powersave_nap)
+       lwz     r4,ADDROFF(powersave_nap)(r3)
        cmpwi   0,r4,0
        beqlr
 
index 5651032d870620d7a72eaeea7dca93ef5790c251..d1fffce86df920799cad33996d51035b0bc3ebe0 100644 (file)
@@ -238,14 +238,10 @@ void do_IRQ(struct pt_regs *regs)
         irq_exit();
 
 #ifdef CONFIG_PPC_ISERIES
-       {
-               struct paca_struct *lpaca = get_paca();
-
-               if (lpaca->lppaca.int_dword.fields.decr_int) {
-                       lpaca->lppaca.int_dword.fields.decr_int = 0;
-                       /* Signal a fake decrementer interrupt */
-                       timer_interrupt(regs);
-               }
+       if (get_lppaca()->int_dword.fields.decr_int) {
+               get_lppaca()->int_dword.fields.decr_int = 0;
+               /* Signal a fake decrementer interrupt */
+               timer_interrupt(regs);
        }
 #endif
 }
index 9dda16ccde78600f0444f0db8a9579fed4397e41..1ae96a8ed7e21f080327d71764589ec00feeaf2b 100644 (file)
@@ -55,15 +55,13 @@ static unsigned long get_purr(void)
 {
        unsigned long sum_purr = 0;
        int cpu;
-       struct paca_struct *lpaca;
 
        for_each_cpu(cpu) {
-               lpaca = paca + cpu;
-               sum_purr += lpaca->lppaca.emulated_time_base;
+               sum_purr += lppaca[cpu].emulated_time_base;
 
 #ifdef PURR_DEBUG
                printk(KERN_INFO "get_purr for cpu (%d) has value (%ld) \n",
-                       cpu, lpaca->lppaca.emulated_time_base);
+                       cpu, lppaca[cpu].emulated_time_base);
 #endif
        }
        return sum_purr;
@@ -79,12 +77,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
        unsigned long pool_id, lp_index;
        int shared, entitled_capacity, max_entitled_capacity;
        int processors, max_processors;
-       struct paca_struct *lpaca = get_paca();
        unsigned long purr = get_purr();
 
        seq_printf(m, "%s %s \n", MODULE_NAME, MODULE_VERS);
 
-       shared = (int)(lpaca->lppaca_ptr->shared_proc);
+       shared = (int)(get_lppaca()->shared_proc);
        seq_printf(m, "serial_number=%c%c%c%c%c%c%c\n",
                   e2a(xItExtVpdPanel.mfgID[2]),
                   e2a(xItExtVpdPanel.mfgID[3]),
@@ -402,7 +399,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
                           (h_resource >> 0 * 8) & 0xffff);
 
                /* pool related entries are apropriate for shared configs */
-               if (paca[0].lppaca.shared_proc) {
+               if (lppaca[0].shared_proc) {
 
                        h_pic(&pool_idle_time, &pool_procs);
 
@@ -451,7 +448,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
        seq_printf(m, "partition_potential_processors=%d\n",
                   partition_potential_processors);
 
-       seq_printf(m, "shared_processor_mode=%d\n", paca[0].lppaca.shared_proc);
+       seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc);
 
        return 0;
 }
index 01d0d97a16e1e657363e2410de6a3f9918c31a5a..be982023409ec2179833ebceefd4a6ff02aa32c7 100644 (file)
@@ -68,7 +68,7 @@ _GLOBAL(reloc_offset)
        mflr    r0
        bl      1f
 1:     mflr    r3
-       LOADADDR(r4,1b)
+       LOAD_REG_IMMEDIATE(r4,1b)
        subf    r3,r4,r3
        mtlr    r0
        blr
@@ -80,7 +80,7 @@ _GLOBAL(add_reloc_offset)
        mflr    r0
        bl      1f
 1:     mflr    r5
-       LOADADDR(r4,1b)
+       LOAD_REG_IMMEDIATE(r4,1b)
        subf    r5,r4,r5
        add     r3,r3,r5
        mtlr    r0
index ae48a002f81ad1bf94f54b411b7d1e27356a44d2..2778cce058e2af29ff7d66152dfd59d4d7d62fe4 100644 (file)
@@ -39,7 +39,7 @@ _GLOBAL(reloc_offset)
        mflr    r0
        bl      1f
 1:     mflr    r3
-       LOADADDR(r4,1b)
+       LOAD_REG_IMMEDIATE(r4,1b)
        subf    r3,r4,r3
        mtlr    r0
        blr
@@ -51,7 +51,7 @@ _GLOBAL(add_reloc_offset)
        mflr    r0
        bl      1f
 1:     mflr    r5
-       LOADADDR(r4,1b)
+       LOAD_REG_IMMEDIATE(r4,1b)
        subf    r5,r4,r5
        add     r3,r3,r5
        mtlr    r0
@@ -498,15 +498,15 @@ _GLOBAL(identify_cpu)
  */
 _GLOBAL(do_cpu_ftr_fixups)
        /* Get CPU 0 features */
-       LOADADDR(r6,cur_cpu_spec)
+       LOAD_REG_IMMEDIATE(r6,cur_cpu_spec)
        sub     r6,r6,r3
        ld      r4,0(r6)
        sub     r4,r4,r3
        ld      r4,CPU_SPEC_FEATURES(r4)
        /* Get the fixup table */
-       LOADADDR(r6,__start___ftr_fixup)
+       LOAD_REG_IMMEDIATE(r6,__start___ftr_fixup)
        sub     r6,r6,r3
-       LOADADDR(r7,__stop___ftr_fixup)
+       LOAD_REG_IMMEDIATE(r7,__stop___ftr_fixup)
        sub     r7,r7,r3
        /* Do the fixup */
 1:     cmpld   r6,r7
index 7065e40e2f42ac8661fb372c371911aa17c76a23..22d83d4d1af5f6ce33d3ba0b9cce8b26ed30b1e8 100644 (file)
@@ -132,6 +132,8 @@ static int of_device_resume(struct device * dev)
 struct bus_type of_platform_bus_type = {
        .name   = "of_platform",
        .match  = of_platform_bus_match,
+       .probe  = of_device_probe,
+       .remove = of_device_remove,
        .suspend        = of_device_suspend,
        .resume = of_device_resume,
 };
@@ -150,8 +152,6 @@ int of_register_driver(struct of_platform_driver *drv)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &of_platform_bus_type;
-       drv->driver.probe = of_device_probe;
-       drv->driver.remove = of_device_remove;
 
        /* register with core */
        count = driver_register(&drv->driver);
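
Note: this hunk (and the vio bus hunk later in this series) follows the then-new driver-core convention of putting probe/remove on the bus_type instead of on every struct device_driver. An illustrative sketch of the resulting shape, with hypothetical names not taken from this patch:

    /* illustrative only -- mirrors the of_platform/vio change, not code from this diff */
    static struct bus_type example_bus_type = {
            .name   = "example",
            .match  = example_bus_match,
            .probe  = example_device_probe,    /* common probe set once, at the bus level */
            .remove = example_device_remove,   /* likewise for remove                     */
    };

    int example_register_driver(struct example_driver *drv)
    {
            drv->driver.name = drv->name;
            drv->driver.bus  = &example_bus_type;  /* drivers no longer fill probe/remove */
            return driver_register(&drv->driver);
    }
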
index 999bdd816769ba1f63fbf332f2dc0e224630a410..5d1b708086bd33d9a9a26472fbdfedccfd93e6d1 100644 (file)
  * field correctly */
 extern unsigned long __toc_start;
 
+/*
+ * iSeries structure which the hypervisor knows about - this structure
+ * should not cross a page boundary.  The vpa_init/register_vpa call
+ * is now known to fail if the lppaca structure crosses a page
+ * boundary.  The lppaca is also used on POWER5 pSeries boxes.  The
+ * lppaca is 640 bytes long, and cannot readily change since the
+ * hypervisor knows its layout, so a 1kB alignment will suffice to
+ * ensure that it doesn't cross a page boundary.
+ */
+struct lppaca lppaca[] = {
+       [0 ... (NR_CPUS-1)] = {
+               .desc = 0xd397d781,     /* "LpPa" */
+               .size = sizeof(struct lppaca),
+               .dyn_proc_status = 2,
+               .decr_val = 0x00ff0000,
+               .fpregs_in_use = 1,
+               .end_of_quantum = 0xfffffffffffffffful,
+               .slb_count = 64,
+               .vmxregs_in_use = 0,
+       },
+};
+
 /* The Paca is an array with one entry per processor.  Each contains an
  * lppaca, which contains the information shared between the
  * hypervisor and Linux.
@@ -35,27 +57,17 @@ extern unsigned long __toc_start;
  * processor (not thread).
  */
 #define PACA_INIT_COMMON(number, start, asrr, asrv)                        \
+       .lppaca_ptr = &lppaca[number],                                      \
        .lock_token = 0x8000,                                               \
        .paca_index = (number),         /* Paca Index */                    \
        .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL,             \
        .stab_real = (asrr),            /* Real pointer to segment table */ \
        .stab_addr = (asrv),            /* Virt pointer to segment table */ \
        .cpu_start = (start),           /* Processor start */               \
-       .hw_cpu_id = 0xffff,                                                \
-       .lppaca = {                                                         \
-               .desc = 0xd397d781,     /* "LpPa" */                        \
-               .size = sizeof(struct lppaca),                              \
-               .dyn_proc_status = 2,                                       \
-               .decr_val = 0x00ff0000,                                     \
-               .fpregs_in_use = 1,                                         \
-               .end_of_quantum = 0xfffffffffffffffful,                     \
-               .slb_count = 64,                                            \
-               .vmxregs_in_use = 0,                                        \
-       },                                                                  \
+       .hw_cpu_id = 0xffff,
 
 #ifdef CONFIG_PPC_ISERIES
 #define PACA_INIT_ISERIES(number)                                          \
-       .lppaca_ptr = &paca[number].lppaca,                                 \
        .reg_save_ptr = &iseries_reg_save[number],
 
 #define PACA_INIT(number)                                                  \
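
For reference, the hunks above and below rely on a get_lppaca() accessor and on indexing this new global lppaca[] array directly; a minimal sketch of the assumed accessor shape (not part of this diff -- the real definitions live in the asm-powerpc paca headers):

    /* sketch only */
    extern struct lppaca lppaca[];                  /* one entry per possible CPU     */
    #define get_lppaca()  (get_paca()->lppaca_ptr)  /* this CPU's entry, via the paca */

    /* per-CPU access by index, as in the lparcfg and spinlock hunks: */
    /*     sum_purr += lppaca[cpu].emulated_time_base;               */
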
index 24fe70f40b665320c60142131d8e4fb01592df63..c367520bc1c38706c585c8c97e88cb0988964267 100644 (file)
@@ -53,6 +53,7 @@ EXPORT_SYMBOL(io_page_mask);
 #ifdef CONFIG_PPC_MULTIPLATFORM
 static void fixup_resource(struct resource *res, struct pci_dev *dev);
 static void do_bus_setup(struct pci_bus *bus);
+static void phbs_remap_io(void);
 #endif
 
 /* pci_io_base -- the base address from which io bars are offsets.
@@ -251,6 +252,7 @@ void pcibios_free_controller(struct pci_controller *phb)
                kfree(phb);
 }
 
+#ifndef CONFIG_PPC_ISERIES
 void __devinit pcibios_claim_one_bus(struct pci_bus *b)
 {
        struct pci_dev *dev;
@@ -275,7 +277,6 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *b)
 EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
 #endif
 
-#ifndef CONFIG_PPC_ISERIES
 static void __init pcibios_claim_of_setup(void)
 {
        struct pci_bus *b;
@@ -1218,7 +1219,7 @@ int remap_bus_range(struct pci_bus *bus)
 }
 EXPORT_SYMBOL(remap_bus_range);
 
-void phbs_remap_io(void)
+static void phbs_remap_io(void)
 {
        struct pci_controller *hose, *tmp;
 
index 16d9a904f3cbc35b24a6341a1928663829d163ed..d9a459c144d81da76bb3023f3f5d89cddca28d95 100644 (file)
@@ -230,8 +230,7 @@ EXPORT_SYMBOL(__down_interruptible);
 EXPORT_SYMBOL(cpm_install_handler);
 EXPORT_SYMBOL(cpm_free_handler);
 #endif /* CONFIG_8xx */
-#if defined(CONFIG_8xx) || defined(CONFIG_40x) || defined(CONFIG_85xx) ||\
-       defined(CONFIG_83xx)
+#if defined(CONFIG_8xx) || defined(CONFIG_40x)
 EXPORT_SYMBOL(__res);
 #endif
 
index 105d5609ff572dc63c2919808fffd2c89f019dbc..57703994a0635033da0d1d3eefaf2a652bedb490 100644 (file)
@@ -201,13 +201,13 @@ int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
 }
 #endif /* CONFIG_SPE */
 
+#ifndef CONFIG_SMP
 /*
  * If we are doing lazy switching of CPU state (FP, altivec or SPE),
  * and the current task has some state, discard it.
  */
-static inline void discard_lazy_cpu_state(void)
+void discard_lazy_cpu_state(void)
 {
-#ifndef CONFIG_SMP
        preempt_disable();
        if (last_task_used_math == current)
                last_task_used_math = NULL;
@@ -220,9 +220,10 @@ static inline void discard_lazy_cpu_state(void)
                last_task_used_spe = NULL;
 #endif
        preempt_enable();
-#endif /* CONFIG_SMP */
 }
+#endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_MERGE                /* XXX for now */
 int set_dabr(unsigned long dabr)
 {
        if (ppc_md.set_dabr)
@@ -231,6 +232,7 @@ int set_dabr(unsigned long dabr)
        mtspr(SPRN_DABR, dabr);
        return 0;
 }
+#endif
 
 #ifdef CONFIG_PPC64
 DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
@@ -424,7 +426,7 @@ void show_regs(struct pt_regs * regs)
        if (trap == 0x300 || trap == 0x600)
                printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
        printk("TASK = %p[%d] '%s' THREAD: %p",
-              current, current->pid, current->comm, current->thread_info);
+              current, current->pid, current->comm, task_thread_info(current));
 
 #ifdef CONFIG_SMP
        printk(" CPU: %d", smp_processor_id());
@@ -503,7 +505,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 {
        struct pt_regs *childregs, *kregs;
        extern void ret_from_fork(void);
-       unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
+       unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
        CHECK_FULL_REGS(regs);
        /* Copy registers */
@@ -516,7 +518,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 #ifdef CONFIG_PPC32
                childregs->gpr[2] = (unsigned long) p;
 #else
-               clear_ti_thread_flag(p->thread_info, TIF_32BIT);
+               clear_tsk_thread_flag(p, TIF_32BIT);
 #endif
                p->thread.regs = NULL;  /* no user register state */
        } else {
@@ -588,10 +590,8 @@ void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
         * set.  Do it now.
         */
        if (!current->thread.regs) {
-               unsigned long childregs = (unsigned long)current->thread_info +
-                                               THREAD_SIZE;
-               childregs -= sizeof(struct pt_regs);
-               current->thread.regs = (struct pt_regs *)childregs;
+               struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
+               current->thread.regs = regs - 1;
        }
 
        memset(regs->gpr, 0, sizeof(regs->gpr));
@@ -767,7 +767,7 @@ out:
 static int validate_sp(unsigned long sp, struct task_struct *p,
                       unsigned long nbytes)
 {
-       unsigned long stack_page = (unsigned long)p->thread_info;
+       unsigned long stack_page = (unsigned long)task_stack_page(p);
 
        if (sp >= stack_page + sizeof(struct thread_struct)
            && sp <= stack_page + THREAD_SIZE - nbytes)
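
The thread_info conversions above assume the generic task accessors introduced around this time; a sketch of the 2.6.16-era definitions they presumably expand to (shown for reference only):

    /* assumed shape of the accessors used above */
    #define task_thread_info(tsk)   ((tsk)->thread_info)
    #define task_stack_page(tsk)    ((void *)(tsk)->thread_info)
    /* so (unsigned long)task_stack_page(p) + THREAD_SIZE is the top of p's kernel stack */
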
index 34ab0daec3a7d795de54018975bb492f70917ecf..d50c8df0183e524283ea966ccc728b4b1025676e 100644 (file)
@@ -1100,17 +1100,37 @@ static int __init early_init_dt_scan_memory(unsigned long node,
 
 static void __init early_reserve_mem(void)
 {
-       unsigned long base, size;
-       unsigned long *reserve_map;
+       u64 base, size;
+       u64 *reserve_map;
 
-       reserve_map = (unsigned long *)(((unsigned long)initial_boot_params) +
+       reserve_map = (u64 *)(((unsigned long)initial_boot_params) +
                                        initial_boot_params->off_mem_rsvmap);
+#ifdef CONFIG_PPC32
+       /* 
+        * Handle the case where we might be booting from an old kexec
+        * image that set up the mem_rsvmap as pairs of 32-bit values
+        */
+       if (*reserve_map > 0xffffffffull) {
+               u32 base_32, size_32;
+               u32 *reserve_map_32 = (u32 *)reserve_map;
+
+               while (1) {
+                       base_32 = *(reserve_map_32++);
+                       size_32 = *(reserve_map_32++);
+                       if (size_32 == 0)
+                               break;
+                       DBG("reserving: %lx -> %lx\n", base_32, size_32);
+                       lmb_reserve(base_32, size_32);
+               }
+               return;
+       }
+#endif
        while (1) {
                base = *(reserve_map++);
                size = *(reserve_map++);
                if (size == 0)
                        break;
-               DBG("reserving: %lx -> %lx\n", base, size);
+               DBG("reserving: %llx -> %llx\n", base, size);
                lmb_reserve(base, size);
        }
 
@@ -1607,6 +1627,11 @@ static void of_node_release(struct kref *kref)
                kfree(prop->value);
                kfree(prop);
                prop = next;
+
+               if (!prop) {
+                       prop = node->deadprops;
+                       node->deadprops = NULL;
+               }
        }
        kfree(node->intrs);
        kfree(node->full_name);
@@ -1754,22 +1779,32 @@ static int __init prom_reconfig_setup(void)
 __initcall(prom_reconfig_setup);
 #endif
 
-/*
- * Find a property with a given name for a given node
- * and return the value.
- */
-unsigned char *get_property(struct device_node *np, const char *name,
-                           int *lenp)
+struct property *of_find_property(struct device_node *np, const char *name,
+                                 int *lenp)
 {
        struct property *pp;
 
+       read_lock(&devtree_lock);
        for (pp = np->properties; pp != 0; pp = pp->next)
                if (strcmp(pp->name, name) == 0) {
                        if (lenp != 0)
                                *lenp = pp->length;
-                       return pp->value;
+                       break;
                }
-       return NULL;
+       read_unlock(&devtree_lock);
+
+       return pp;
+}
+
+/*
+ * Find a property with a given name for a given node
+ * and return the value.
+ */
+unsigned char *get_property(struct device_node *np, const char *name,
+                           int *lenp)
+{
+       struct property *pp = of_find_property(np,name,lenp);
+       return pp ? pp->value : NULL;
 }
 EXPORT_SYMBOL(get_property);
 
@@ -1803,4 +1838,82 @@ int prom_add_property(struct device_node* np, struct property* prop)
        return 0;
 }
 
+/*
+ * Remove a property from a node.  Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property.  Instead we just move the property
+ * to the "dead properties" list, so it won't be found any more.
+ */
+int prom_remove_property(struct device_node *np, struct property *prop)
+{
+       struct property **next;
+       int found = 0;
 
+       write_lock(&devtree_lock);
+       next = &np->properties;
+       while (*next) {
+               if (*next == prop) {
+                       /* found the node */
+                       *next = prop->next;
+                       prop->next = np->deadprops;
+                       np->deadprops = prop;
+                       found = 1;
+                       break;
+               }
+               next = &(*next)->next;
+       }
+       write_unlock(&devtree_lock);
+
+       if (!found)
+               return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+       /* try to remove the proc node as well */
+       if (np->pde)
+               proc_device_tree_remove_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+       return 0;
+}
+
+/*
+ * Update a property in a node.  Note that we don't actually
+ * remove it, since we have given out who-knows-how-many pointers
+ * to the data using get-property.  Instead we just move the property
+ * to the "dead properties" list, and add the new property to the
+ * property list
+ */
+int prom_update_property(struct device_node *np,
+                        struct property *newprop,
+                        struct property *oldprop)
+{
+       struct property **next;
+       int found = 0;
+
+       write_lock(&devtree_lock);
+       next = &np->properties;
+       while (*next) {
+               if (*next == oldprop) {
+                       /* found the node */
+                       newprop->next = oldprop->next;
+                       *next = newprop;
+                       oldprop->next = np->deadprops;
+                       np->deadprops = oldprop;
+                       found = 1;
+                       break;
+               }
+               next = &(*next)->next;
+       }
+       write_unlock(&devtree_lock);
+
+       if (!found)
+               return -ENODEV;
+
+#ifdef CONFIG_PROC_DEVICETREE
+       /* try to add to proc as well if it was initialized */
+       if (np->pde)
+               proc_device_tree_update_prop(np->pde, newprop, oldprop);
+#endif /* CONFIG_PROC_DEVICETREE */
+
+       return 0;
+}
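
A hedged usage sketch for the reworked property API above (the caller, node type and property name are illustrative, not from this patch):

    /* hypothetical caller, for illustration only */
    struct device_node *np = of_find_node_by_type(NULL, "cpu");
    if (np) {
            int len;
            u32 *freq = (u32 *)get_property(np, "clock-frequency", &len);
            if (freq)
                    printk(KERN_INFO "clock-frequency: %u (len %d)\n", *freq, len);
            of_node_put(np);
    }

of_find_property() now takes devtree_lock around the list walk, get_property() stays a thin wrapper returning pp->value, and removed or replaced properties are parked on the node's deadprops list so pointers handed out earlier remain valid.
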
index e381f2fc121c5782d04b20a270503efded1d0b46..d963a12ec640007269d5be45a63a64a73f04cb1b 100644 (file)
@@ -137,8 +137,8 @@ struct prom_t {
 };
 
 struct mem_map_entry {
-       unsigned long   base;
-       unsigned long   size;
+       u64     base;
+       u64     size;
 };
 
 typedef u32 cell_t;
@@ -897,9 +897,9 @@ static unsigned long __init prom_next_cell(int s, cell_t **cellp)
  * If problems seem to show up, it would be a good start to track
  * them down.
  */
-static void reserve_mem(unsigned long base, unsigned long size)
+static void reserve_mem(u64 base, u64 size)
 {
-       unsigned long top = base + size;
+       u64 top = base + size;
        unsigned long cnt = RELOC(mem_reserve_cnt);
 
        if (size == 0)
index 309ae1d5fa77c0d007d05ff5560a0f45fbc1aed5..a8099c806150618e34dab60d7f7df036aa9eed29 100644 (file)
@@ -113,7 +113,8 @@ static unsigned int of_bus_default_get_flags(u32 *addr)
 
 static int of_bus_pci_match(struct device_node *np)
 {
-       return !strcmp(np->type, "pci");
+       /* "vci" is for the /chaos bridge on 1st-gen PCI powermacs */
+       return !strcmp(np->type, "pci") || !strcmp(np->type, "vci");
 }
 
 static void of_bus_pci_count_cells(struct device_node *np,
index b1babb7296733632baeaa2ea60a21e0399bc60e0..5ccbdbe0d5c96c9e063a99ccbf66e20ecb53f83e 100644 (file)
@@ -62,7 +62,7 @@ static inline void set_single_step(struct task_struct *task)
        struct pt_regs *regs = task->thread.regs;
        if (regs != NULL)
                regs->msr |= MSR_SE;
-       set_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+       set_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
 static inline void clear_single_step(struct task_struct *task)
@@ -70,7 +70,7 @@ static inline void clear_single_step(struct task_struct *task)
        struct pt_regs *regs = task->thread.regs;
        if (regs != NULL)
                regs->msr &= ~MSR_SE;
-       clear_ti_thread_flag(task->thread_info, TIF_SINGLESTEP);
+       clear_tsk_thread_flag(task, TIF_SINGLESTEP);
 }
 
 #ifdef CONFIG_ALTIVEC
index 4b9cfe4637b1fcff56cf698f15e250101d28ffe5..7fe4a5c944c9655e60c126ce1dabe0fe9027d0fb 100644 (file)
@@ -36,6 +36,11 @@ struct rtas_t rtas = {
        .lock = SPIN_LOCK_UNLOCKED
 };
 
+struct rtas_suspend_me_data {
+       long waiting;
+       struct rtas_args *args;
+};
+
 EXPORT_SYMBOL(rtas);
 
 DEFINE_SPINLOCK(rtas_data_buf_lock);
@@ -556,6 +561,80 @@ void rtas_os_term(char *str)
        } while (status == RTAS_BUSY);
 }
 
+static int ibm_suspend_me_token = RTAS_UNKNOWN_SERVICE;
+#ifdef CONFIG_PPC_PSERIES
+static void rtas_percpu_suspend_me(void *info)
+{
+       long rc;
+       long flags;
+       struct rtas_suspend_me_data *data =
+               (struct rtas_suspend_me_data *)info;
+
+       /*
+        * We use "waiting" to indicate our state.  As long
+        * as it is >0, we are still trying to get all CPUs to join up.
+        * If it goes to 0, we have successfully joined up and
+        * one thread got H_Continue.  If any error happens,
+        * we set it to <0.
+        */
+       local_irq_save(flags);
+       do {
+               rc = plpar_hcall_norets(H_JOIN);
+               smp_rmb();
+       } while (rc == H_Success && data->waiting > 0);
+       if (rc == H_Success)
+               goto out;
+
+       if (rc == H_Continue) {
+               data->waiting = 0;
+               rtas_call(ibm_suspend_me_token, 0, 1,
+                         data->args->args);
+       } else {
+               data->waiting = -EBUSY;
+               printk(KERN_ERR "Error on H_Join hypervisor call\n");
+       }
+
+out:
+       /* before we restore interrupts, make sure we don't
+        * generate spurious soft lockup errors
+        */
+       touch_softlockup_watchdog();
+       local_irq_restore(flags);
+       return;
+}
+
+static int rtas_ibm_suspend_me(struct rtas_args *args)
+{
+       int i;
+
+       struct rtas_suspend_me_data data;
+
+       data.waiting = 1;
+       data.args = args;
+
+       /* Call function on all CPUs.  One of us will make the
+        * rtas call
+        */
+       if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0))
+               data.waiting = -EINVAL;
+
+       if (data.waiting != 0)
+               printk(KERN_ERR "Error doing global join\n");
+
+       /* Prod each CPU.  This won't hurt, and will wake
+        * anyone we successfully put to sleep with H_Join
+        */
+       for_each_cpu(i)
+               plpar_hcall_norets(H_PROD, i);
+
+       return data.waiting;
+}
+#else /* CONFIG_PPC_PSERIES */
+static int rtas_ibm_suspend_me(struct rtas_args *args)
+{
+       return -ENOSYS;
+}
+#endif
 
 asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
 {
@@ -563,6 +642,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
        unsigned long flags;
        char *buff_copy, *errbuf = NULL;
        int nargs;
+       int rc;
 
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
@@ -581,6 +661,17 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
                           nargs * sizeof(rtas_arg_t)) != 0)
                return -EFAULT;
 
+       if (args.token == RTAS_UNKNOWN_SERVICE)
+               return -EINVAL;
+
+       /* Need to handle ibm,suspend_me call specially */
+       if (args.token == ibm_suspend_me_token) {
+               rc = rtas_ibm_suspend_me(&args);
+               if (rc)
+                       return rc;
+               goto copy_return;
+       }
+
        buff_copy = get_errorlog_buffer();
 
        spin_lock_irqsave(&rtas.lock, flags);
@@ -604,6 +695,7 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
                kfree(buff_copy);
        }
 
+ copy_return:
        /* Copy out args. */
        if (copy_to_user(uargs->args + nargs,
                         args.args + nargs,
@@ -675,8 +767,10 @@ void __init rtas_initialize(void)
         * the stop-self token if any
         */
 #ifdef CONFIG_PPC64
-       if (_machine == PLATFORM_PSERIES_LPAR)
+       if (_machine == PLATFORM_PSERIES_LPAR) {
                rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
+               ibm_suspend_me_token = rtas_token("ibm,suspend-me");
+       }
 #endif
        rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
 
index d5c52fae023a1b996f71f5e38a1ed0d5964c9d3e..be12041c0fc5404ab8b811050f45de883c225631 100644 (file)
@@ -100,7 +100,8 @@ void machine_shutdown(void)
 void machine_restart(char *cmd)
 {
        machine_shutdown();
-       ppc_md.restart(cmd);
+       if (ppc_md.restart)
+               ppc_md.restart(cmd);
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
@@ -112,7 +113,8 @@ void machine_restart(char *cmd)
 void machine_power_off(void)
 {
        machine_shutdown();
-       ppc_md.power_off();
+       if (ppc_md.power_off)
+               ppc_md.power_off();
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
@@ -129,7 +131,8 @@ EXPORT_SYMBOL_GPL(pm_power_off);
 void machine_halt(void)
 {
        machine_shutdown();
-       ppc_md.halt();
+       if (ppc_md.halt)
+               ppc_md.halt();
 #ifdef CONFIG_SMP
        smp_send_stop();
 #endif
index d3f0b6d452fb71d906a56fd87980f65c229c5bda..177bba78fb0b6974a7f733769ab1d489a7ecfcb1 100644 (file)
@@ -497,6 +497,15 @@ static long restore_user_regs(struct pt_regs *regs,
        if (err)
                return 1;
 
+       /*
+        * Do this before updating the thread state in
+        * current->thread.fpr/vr/evr.  That way, if we get preempted
+        * and another task grabs the FPU/Altivec/SPE, it won't be
+        * tempted to save the current CPU state into the thread_struct
+        * and corrupt what we are writing there.
+        */
+       discard_lazy_cpu_state();
+
        /* force the process to reload the FP registers from
           current->thread when it next does FP instructions */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
@@ -538,18 +547,6 @@ static long restore_user_regs(struct pt_regs *regs,
                return 1;
 #endif /* CONFIG_SPE */
 
-#ifndef CONFIG_SMP
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-#endif
-       preempt_enable();
-#endif
        return 0;
 }
 
index 5462bef898f6101d5b838f52e8380978e75f2c52..7b9d999e2115027d72bec3beca14257345bbb7a4 100644 (file)
@@ -207,10 +207,20 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 
        if (!sig)
                regs->gpr[13] = save_r13;
-       err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
        if (set != NULL)
                err |=  __get_user(set->sig[0], &sc->oldmask);
 
+       /*
+        * Do this before updating the thread state in
+        * current->thread.fpr/vr.  That way, if we get preempted
+        * and another task grabs the FPU/Altivec, it won't be
+        * tempted to save the current CPU state into the thread_struct
+        * and corrupt what we are writing there.
+        */
+       discard_lazy_cpu_state();
+
+       err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
+
 #ifdef CONFIG_ALTIVEC
        err |= __get_user(v_regs, &sc->v_regs);
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
@@ -229,14 +239,6 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */
 
-#ifndef CONFIG_SMP
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-       preempt_enable();
-#endif
        /* Force reload of FP/VEC */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC);
 
index d381ec90b759e27fe5242d9f7c3e66bb4b7ca939..c8458c531b255f3dbb97b94b5023ead71b6f5193 100644 (file)
@@ -338,8 +338,8 @@ static void __init smp_create_idle(unsigned int cpu)
 #ifdef CONFIG_PPC64
        paca[cpu].__current = p;
 #endif
-       current_set[cpu] = p->thread_info;
-       p->thread_info->cpu = cpu;
+       current_set[cpu] = task_thread_info(p);
+       task_thread_info(p)->cpu = cpu;
 }
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
@@ -375,7 +375,7 @@ void __devinit smp_prepare_boot_cpu(void)
 #ifdef CONFIG_PPC64
        paca[boot_cpuid].__current = current;
 #endif
-       current_set[boot_cpuid] = current->thread_info;
+       current_set[boot_cpuid] = task_thread_info(current);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
index 56f50e91bddbcb612201fdc4d34d0a2dd8b7b633..c4a294d657b92c23eee484902e6a963e4e0f3ab5 100644 (file)
@@ -431,7 +431,7 @@ void timer_interrupt(struct pt_regs * regs)
        profile_tick(CPU_PROFILING, regs);
 
 #ifdef CONFIG_PPC_ISERIES
-       get_paca()->lppaca.int_dword.fields.decr_int = 0;
+       get_lppaca()->int_dword.fields.decr_int = 0;
 #endif
 
        while ((ticks = tb_ticks_since(per_cpu(last_jiffy, cpu)))
index 13c41495fe06309e55311166eb8e8dfaa6c2b834..13c655ba2841aac42db44ddb6f2f76862eff8ffe 100644 (file)
@@ -76,7 +76,7 @@ static void vio_bus_shutdown(struct device *dev)
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
 
-       if (viodrv->shutdown)
+       if (dev->driver && viodrv->shutdown)
                viodrv->shutdown(viodev);
 }
 
@@ -91,9 +91,6 @@ int vio_register_driver(struct vio_driver *viodrv)
 
        /* fill in 'struct driver' fields */
        viodrv->driver.bus = &vio_bus_type;
-       viodrv->driver.probe = vio_bus_probe;
-       viodrv->driver.remove = vio_bus_remove;
-       viodrv->driver.shutdown = vio_bus_shutdown;
 
        return driver_register(&viodrv->driver);
 }
@@ -295,4 +292,7 @@ struct bus_type vio_bus_type = {
        .name = "vio",
        .uevent = vio_hotplug,
        .match = vio_bus_match,
+       .probe = vio_bus_probe,
+       .remove = vio_bus_remove,
+       .shutdown = vio_bus_shutdown,
 };
index 35bd03c41dd19727601e15f9169bc8807f50fb75..8362fa272ca58e9699c88c155c535a2e59fb4aeb 100644 (file)
 void __spin_yield(raw_spinlock_t *lock)
 {
        unsigned int lock_value, holder_cpu, yield_count;
-       struct paca_struct *holder_paca;
 
        lock_value = lock->slock;
        if (lock_value == 0)
                return;
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       holder_paca = &paca[holder_cpu];
-       yield_count = holder_paca->lppaca.yield_count;
+       yield_count = lppaca[holder_cpu].yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
@@ -60,15 +58,13 @@ void __rw_yield(raw_rwlock_t *rw)
 {
        int lock_value;
        unsigned int holder_cpu, yield_count;
-       struct paca_struct *holder_paca;
 
        lock_value = rw->lock;
        if (lock_value >= 0)
                return;         /* no write lock at present */
        holder_cpu = lock_value & 0xffff;
        BUG_ON(holder_cpu >= NR_CPUS);
-       holder_paca = &paca[holder_cpu];
-       yield_count = holder_paca->lppaca.yield_count;
+       yield_count = lppaca[holder_cpu].yield_count;
        if ((yield_count & 1) == 0)
                return;         /* virtual cpu is currently running */
        rmb();
index 71615eb70b2be53975e6c83bca0665107ed6bc48..cc2535be3a73641326f49c4bb73233729ce7d314 100644 (file)
@@ -140,19 +140,19 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
 
        switch (cur_cpu_spec->oprofile_type) {
 #ifdef CONFIG_PPC64
-               case RS64:
+               case PPC_OPROFILE_RS64:
                        model = &op_model_rs64;
                        break;
-               case POWER4:
+               case PPC_OPROFILE_POWER4:
                        model = &op_model_power4;
                        break;
 #else
-               case G4:
+               case PPC_OPROFILE_G4:
                        model = &op_model_7450;
                        break;
 #endif
 #ifdef CONFIG_FSL_BOOKE
-               case BOOKE:
+               case PPC_OPROFILE_BOOKE:
                        model = &op_model_fsl_booke;
                        break;
 #endif
index b20812d460e6edee41ee7eedf165dbba1a15cbff..7675e675dce1dab1e76d34bc8acf42dfd3357b64 100644 (file)
@@ -7,6 +7,7 @@ choice
 
 config MPC834x_SYS
        bool "Freescale MPC834x SYS"
+       select DEFAULT_UIMAGE
        help
          This option enables support for the MPC 834x SYS evaluation board.
 
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.c b/arch/powerpc/platforms/83xx/mpc834x_sys.c
new file mode 100644 (file)
index 0000000..2098dd0
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+ * arch/powerpc/platforms/83xx/mpc834x_sys.c
+ *
+ * MPC834x SYS board specific routines
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/reboot.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/major.h>
+#include <linux/console.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/module.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+#include <asm/atomic.h>
+#include <asm/time.h>
+#include <asm/io.h>
+#include <asm/machdep.h>
+#include <asm/ipic.h>
+#include <asm/bootinfo.h>
+#include <asm/pci-bridge.h>
+#include <asm/mpc83xx.h>
+#include <asm/irq.h>
+#include <mm/mmu_decl.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <sysdev/fsl_soc.h>
+
+#include "mpc83xx.h"
+
+#ifndef CONFIG_PCI
+unsigned long isa_io_base = 0;
+unsigned long isa_mem_base = 0;
+#endif
+
+#ifdef CONFIG_PCI
+extern int mpc83xx_pci2_busno;
+
+static int
+mpc83xx_map_irq(struct pci_dev *dev, unsigned char idsel, unsigned char pin)
+{
+       static char pci_irq_table[][4] =
+           /*
+            *      PCI IDSEL/INTPIN->INTLINE
+            *       A      B      C      D
+            */
+       {
+               {PIRQA, PIRQB, PIRQC, PIRQD},   /* idsel 0x11 */
+               {PIRQC, PIRQD, PIRQA, PIRQB},   /* idsel 0x12 */
+               {PIRQD, PIRQA, PIRQB, PIRQC},   /* idsel 0x13 */
+               {0, 0, 0, 0},
+               {PIRQA, PIRQB, PIRQC, PIRQD},   /* idsel 0x15 */
+               {PIRQD, PIRQA, PIRQB, PIRQC},   /* idsel 0x16 */
+               {PIRQC, PIRQD, PIRQA, PIRQB},   /* idsel 0x17 */
+               {PIRQB, PIRQC, PIRQD, PIRQA},   /* idsel 0x18 */
+               {0, 0, 0, 0},                   /* idsel 0x19 */
+               {0, 0, 0, 0},                   /* idsel 0x20 */
+       };
+
+       const long min_idsel = 0x11, max_idsel = 0x20, irqs_per_slot = 4;
+       return PCI_IRQ_TABLE_LOOKUP;
+}
+
+static int
+mpc83xx_exclude_device(u_char bus, u_char devfn)
+{
+       if (bus == 0 && PCI_SLOT(devfn) == 0)
+               return PCIBIOS_DEVICE_NOT_FOUND;
+       if (mpc83xx_pci2_busno)
+               if (bus == (mpc83xx_pci2_busno) && PCI_SLOT(devfn) == 0)
+                       return PCIBIOS_DEVICE_NOT_FOUND;
+       return PCIBIOS_SUCCESSFUL;
+}
+#endif /* CONFIG_PCI */
+
+/* ************************************************************************
+ *
+ * Setup the architecture
+ *
+ */
+static void __init
+mpc834x_sys_setup_arch(void)
+{
+       struct device_node *np;
+
+       if (ppc_md.progress)
+               ppc_md.progress("mpc834x_sys_setup_arch()", 0);
+
+       np = of_find_node_by_type(NULL, "cpu");
+       if (np != 0) {
+               unsigned int *fp = (int *) get_property(np, "clock-frequency", NULL);
+               if (fp != 0)
+                       loops_per_jiffy = *fp / HZ;
+               else
+                       loops_per_jiffy = 50000000 / HZ;
+               of_node_put(np);
+       }
+
+#ifdef CONFIG_PCI
+       for (np = NULL; (np = of_find_node_by_type(np, "pci")) != NULL;)
+               add_bridge(np);
+
+       ppc_md.pci_swizzle = common_swizzle;
+       ppc_md.pci_map_irq = mpc83xx_map_irq;
+       ppc_md.pci_exclude_device = mpc83xx_exclude_device;
+#endif
+
+#ifdef  CONFIG_ROOT_NFS
+               ROOT_DEV = Root_NFS;
+#else
+               ROOT_DEV = Root_HDA1;
+#endif
+}
+
+void __init
+mpc834x_sys_init_IRQ(void)
+{
+       u8 senses[8] = {
+               0,                      /* EXT 0 */
+               IRQ_SENSE_LEVEL,        /* EXT 1 */
+               IRQ_SENSE_LEVEL,        /* EXT 2 */
+               0,                      /* EXT 3 */
+#ifdef CONFIG_PCI
+               IRQ_SENSE_LEVEL,        /* EXT 4 */
+               IRQ_SENSE_LEVEL,        /* EXT 5 */
+               IRQ_SENSE_LEVEL,        /* EXT 6 */
+               IRQ_SENSE_LEVEL,        /* EXT 7 */
+#else
+               0,                      /* EXT 4 */
+               0,                      /* EXT 5 */
+               0,                      /* EXT 6 */
+               0,                      /* EXT 7 */
+#endif
+       };
+
+       ipic_init(get_immrbase() + 0x00700, 0, 0, senses, 8);
+
+       /* Initialize the default interrupt mapping priorities,
+        * in case the boot rom changed something on us.
+        */
+       ipic_set_default_priority();
+}
+
+#if defined(CONFIG_I2C_MPC) && defined(CONFIG_SENSORS_DS1374)
+extern ulong   ds1374_get_rtc_time(void);
+extern int     ds1374_set_rtc_time(ulong);
+
+static int __init
+mpc834x_rtc_hookup(void)
+{
+       struct timespec tv;
+
+       ppc_md.get_rtc_time = ds1374_get_rtc_time;
+       ppc_md.set_rtc_time = ds1374_set_rtc_time;
+
+       tv.tv_nsec = 0;
+       tv.tv_sec = (ppc_md.get_rtc_time)();
+       do_settimeofday(&tv);
+
+       return 0;
+}
+late_initcall(mpc834x_rtc_hookup);
+#endif
+
+static void
+mpc83xx_restart(char *cmd)
+{
+#define RST_OFFSET     0x00000900
+#define RST_PROT_REG   0x00000018
+#define RST_CTRL_REG   0x0000001c
+       __be32 __iomem *reg;
+
+       /* map reset register space */
+       reg = ioremap(get_immrbase() + 0x900, 0xff);
+
+       local_irq_disable();
+
+       /* enable software reset "RSTE" */
+       out_be32(reg + (RST_PROT_REG >> 2), 0x52535445);
+
+       /* set software hard reset */
+       out_be32(reg + (RST_CTRL_REG >> 2), 0x52535445);
+       for(;;);
+}
+
+static long __init
+mpc83xx_time_init(void)
+{
+#define SPCR_OFFSET    0x00000110
+#define SPCR_TBEN      0x00400000
+       __be32 __iomem *spcr = ioremap(get_immrbase() + SPCR_OFFSET, 4);
+       __be32 tmp;
+
+       tmp = in_be32(spcr);
+       out_be32(spcr, tmp|SPCR_TBEN);
+
+       iounmap(spcr);
+
+       return 0;
+}
+void __init
+platform_init(void)
+{
+       /* setup the PowerPC module struct */
+       ppc_md.setup_arch = mpc834x_sys_setup_arch;
+
+       ppc_md.init_IRQ = mpc834x_sys_init_IRQ;
+       ppc_md.get_irq = ipic_get_irq;
+
+       ppc_md.restart = mpc83xx_restart;
+
+       ppc_md.time_init = mpc83xx_time_init;
+       ppc_md.set_rtc_time = NULL;
+       ppc_md.get_rtc_time = NULL;
+       ppc_md.calibrate_decr = generic_calibrate_decr;
+
+       ppc_md.progress = udbg_progress;
+
+       if (ppc_md.progress)
+               ppc_md.progress("mpc834x_sys_init(): exit", 0);
+
+       return;
+}
+
+
diff --git a/arch/powerpc/platforms/83xx/mpc834x_sys.h b/arch/powerpc/platforms/83xx/mpc834x_sys.h
new file mode 100644 (file)
index 0000000..e4ca39f
--- /dev/null
@@ -0,0 +1,23 @@
+/*
+ * arch/powerpc/platforms/83xx/mpc834x_sys.h
+ *
+ * MPC834X SYS common board definitions
+ *
+ * Maintainer: Kumar Gala <galak@kernel.crashing.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MACH_MPC83XX_SYS_H__
+#define __MACH_MPC83XX_SYS_H__
+
+#define PIRQA  MPC83xx_IRQ_EXT4
+#define PIRQB  MPC83xx_IRQ_EXT5
+#define PIRQC  MPC83xx_IRQ_EXT6
+#define PIRQD  MPC83xx_IRQ_EXT7
+
+#endif                /* __MACH_MPC83XX_SYS_H__ */
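
Putting the interrupt routing table in mpc834x_sys.c together with these PIRQ defines, a worked example of how PCI_IRQ_TABLE_LOOKUP resolves (illustrative):

    /* a device at idsel 0x12 asserting INTB# (pin 2) indexes
     *     pci_irq_table[0x12 - 0x11][2 - 1] == PIRQD
     * which these defines map to MPC83xx_IRQ_EXT7.
     */
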
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h
new file mode 100644 (file)
index 0000000..ce9e66a
--- /dev/null
@@ -0,0 +1,14 @@
+#ifndef __MPC83XX_H__
+#define __MPC83XX_H__
+
+#include <linux/init.h>
+#include <linux/device.h>
+
+/*
+ * Declaration for the various functions exported by the
+ * mpc83xx_* files. Mostly for use by mpc83xx_setup
+ */
+
+extern int add_bridge(struct device_node *dev);
+
+#endif /* __MPC83XX_H__ */
diff --git a/arch/powerpc/platforms/83xx/pci.c b/arch/powerpc/platforms/83xx/pci.c
new file mode 100644 (file)
index 0000000..469cdac
--- /dev/null
@@ -0,0 +1,99 @@
+/*
+ * FSL SoC setup code
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/pci-bridge.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+#define DBG(x...) printk(x)
+#else
+#define DBG(x...)
+#endif
+
+int mpc83xx_pci2_busno;
+
+#ifdef CONFIG_PCI
+int __init add_bridge(struct device_node *dev)
+{
+       int len;
+       struct pci_controller *hose;
+       struct resource rsrc;
+       int *bus_range;
+       int primary = 1, has_address = 0;
+       phys_addr_t immr = get_immrbase();
+
+       DBG("Adding PCI host bridge %s\n", dev->full_name);
+
+       /* Fetch host bridge registers address */
+       has_address = (of_address_to_resource(dev, 0, &rsrc) == 0);
+
+       /* Get bus range if any */
+       bus_range = (int *) get_property(dev, "bus-range", &len);
+       if (bus_range == NULL || len < 2 * sizeof(int)) {
+               printk(KERN_WARNING "Can't get bus-range for %s, assume"
+                      " bus 0\n", dev->full_name);
+       }
+
+       hose = pcibios_alloc_controller();
+       if (!hose)
+               return -ENOMEM;
+       hose->arch_data = dev;
+       hose->set_cfg_type = 1;
+
+       hose->first_busno = bus_range ? bus_range[0] : 0;
+       hose->last_busno = bus_range ? bus_range[1] : 0xff;
+
+       /* MPC83xx supports up to two host controllers, one at 0x8500 from immrbar
+        * and the other at 0x8600; we consider the one at 0x8500 the primary controller
+        */
+       /* PCI 1 */
+       if ((rsrc.start & 0xfffff) == 0x8500) {
+               setup_indirect_pci(hose, immr + 0x8300, immr + 0x8304);
+       }
+       /* PCI 2*/
+       if ((rsrc.start & 0xfffff) == 0x8600) {
+               setup_indirect_pci(hose, immr + 0x8380, immr + 0x8384);
+               primary = 0;
+               hose->bus_offset = hose->first_busno;
+               mpc83xx_pci2_busno = hose->first_busno;
+       }
+
+       printk(KERN_INFO "Found MPC83xx PCI host bridge at 0x%08lx. "
+              "Firmware bus number: %d->%d\n",
+               rsrc.start, hose->first_busno, hose->last_busno);
+
+       DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n",
+               hose, hose->cfg_addr, hose->cfg_data);
+
+       /* Interpret the "ranges" property */
+       /* This also maps the I/O region and sets isa_io/mem_base */
+       pci_process_bridge_OF_ranges(hose, dev, primary);
+
+       return 0;
+}
+
+#endif
index 85152544c1530dec87325a2b500d904b69f1dcd3..e0e051c675dd129d1116cb7a43db71c819a805b7 100644 (file)
@@ -142,7 +142,7 @@ static void cbe_idle(void)
        }
 }
 
-int cbe_system_reset_exception(struct pt_regs *regs)
+static int cbe_system_reset_exception(struct pt_regs *regs)
 {
        switch (regs->msr & SRR1_WAKEMASK) {
        case SRR1_WAKEEE:
index 18e25e65c04b23ba2ec87328ba68ba4b97f40eae..b33a4443f5a9368344f6ac3f3507ac6593a86e77 100644 (file)
@@ -57,7 +57,7 @@
 #define DBG(fmt...)
 #endif
 
-void cell_show_cpuinfo(struct seq_file *m)
+static void cell_show_cpuinfo(struct seq_file *m)
 {
        struct device_node *root;
        const char *model = "";
index de96eadf419d03cdc36ddb830c9ba9402179206b..bdf6c5fe58c02e9346c3334041a668f646687e81 100644 (file)
@@ -86,7 +86,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
        pcpu = get_hard_smp_processor_id(lcpu);
 
        /* Fixup atomic count: it exited inside IRQ handler. */
-       paca[lcpu].__current->thread_info->preempt_count        = 0;
+       task_thread_info(paca[lcpu].__current)->preempt_count   = 0;
 
        /*
         * If the RTAS start-cpu token does not exist then presume the
index d549aa7ebea6175eefc3c38eefadd189e949211c..e6565a949ddc727b11299b4374001ff70a67e326 100644 (file)
@@ -29,7 +29,9 @@
  * value of the spu_status register after the SPU has stopped.
  *
  */
-long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus)
+static long do_spu_run(struct file *filp,
+                       __u32 __user *unpc,
+                       __u32 __user *ustatus)
 {
        long ret;
        struct spufs_inode_info *i;
index 82c429d487f3e6247a1079bc3bbb9e591e5e2ad8..00c52f27ef4f87f3d445eee09c6cd8d8fb916c3c 100644 (file)
@@ -135,12 +135,13 @@ int __init
 hydra_init(void)
 {
        struct device_node *np;
+       struct resource r;
 
        np = find_devices("mac-io");
-       if (np == NULL || np->n_addrs == 0)
+       if (np == NULL || of_address_to_resource(np, 0, &r))
                return 0;
-       Hydra = ioremap(np->addrs[0].address, np->addrs[0].size);
-       printk("Hydra Mac I/O at %lx\n", np->addrs[0].address);
+       Hydra = ioremap(r.start, r.end-r.start);
+       printk("Hydra Mac I/O at %lx\n", r.start);
        printk("Hydra Feature_Control was %x",
               in_le32(&Hydra->Feature_Control));
        out_le32(&Hydra->Feature_Control, (HYDRA_FC_SCC_CELL_EN |
@@ -177,18 +178,24 @@ setup_python(struct pci_controller *hose, struct device_node *dev)
 {
        u32 __iomem *reg;
        u32 val;
-       unsigned long addr = dev->addrs[0].address;
+       struct resource r;
 
-       setup_indirect_pci(hose, addr + 0xf8000, addr + 0xf8010);
+       if (of_address_to_resource(dev, 0, &r)) {
+               printk(KERN_ERR "No address for Python PCI controller\n");
+               return;
+       }
 
        /* Clear the magic go-slow bit */
-       reg = ioremap(dev->addrs[0].address + 0xf6000, 0x40);
+       reg = ioremap(r.start + 0xf6000, 0x40);
+       BUG_ON(!reg); 
        val = in_be32(&reg[12]);
        if (val & PRG_CL_RESET_VALID) {
                out_be32(&reg[12], val & ~PRG_CL_RESET_VALID);
                in_be32(&reg[12]);
        }
        iounmap(reg);
+
+       setup_indirect_pci(hose, r.start + 0xf8000, r.start + 0xf8010);
 }
 
 /* Marvell Discovery II based Pegasos 2 */
@@ -218,7 +225,7 @@ chrp_find_bridges(void)
        char *model, *machine;
        int is_longtrail = 0, is_mot = 0, is_pegasos = 0;
        struct device_node *root = find_path_device("/");
-
+       struct resource r;
        /*
         * The PCI host bridge nodes on some machines don't have
         * properties to adequately identify them, so we have to
@@ -238,7 +245,7 @@ chrp_find_bridges(void)
                        continue;
                ++index;
                /* The GG2 bridge on the LongTrail doesn't have an address */
-               if (dev->n_addrs < 1 && !is_longtrail) {
+               if (of_address_to_resource(dev, 0, &r) && !is_longtrail) {
                        printk(KERN_WARNING "Can't use %s: no address\n",
                               dev->full_name);
                        continue;
@@ -255,8 +262,8 @@ chrp_find_bridges(void)
                        printk(KERN_INFO "PCI buses %d..%d",
                               bus_range[0], bus_range[1]);
                printk(" controlled by %s", dev->type);
-               if (dev->n_addrs > 0)
-                       printk(" at %lx", dev->addrs[0].address);
+               if (!is_longtrail)
+                       printk(" at %lx", r.start);
                printk("\n");
 
                hose = pcibios_alloc_controller();
index 4ec8ba737e7d71cf486abf14d6f27d9d9e1ce317..2dc87aa5962fe8e1b264506bc7f00fef59897bd6 100644 (file)
@@ -352,9 +352,10 @@ static void __init chrp_find_openpic(void)
                opaddr = opprop[na-1];  /* assume 32-bit */
                oplen /= na * sizeof(unsigned int);
        } else {
-               if (np->n_addrs == 0)
+               struct resource r;
+               if (of_address_to_resource(np, 0, &r))
                        return;
-               opaddr = np->addrs[0].address;
+               opaddr = r.start;
                oplen = 0;
        }
 
@@ -377,7 +378,7 @@ static void __init chrp_find_openpic(void)
         */
        if (oplen < len) {
                printk(KERN_ERR "Insufficient addresses for distributed"
-                      " OpenPIC (%d < %d)\n", np->n_addrs, len);
+                      " OpenPIC (%d < %d)\n", oplen, len);
                len = oplen;
        }
 
index 737ee5d9f0aad5cdbbfd4746af9d39aae43aafb4..36a0f97bb7b13143dee6bf88cf1818bbb95f446f 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/mc146818rtc.h>
 #include <linux/init.h>
 #include <linux/bcd.h>
+#include <linux/ioport.h>
 
 #include <asm/io.h>
 #include <asm/nvram.h>
@@ -37,14 +38,16 @@ static int nvram_data = NVRAM_DATA;
 long __init chrp_time_init(void)
 {
        struct device_node *rtcs;
+       struct resource r;
        int base;
 
        rtcs = find_compatible_devices("rtc", "pnpPNP,b00");
        if (rtcs == NULL)
                rtcs = find_compatible_devices("rtc", "ds1385-rtc");
-       if (rtcs == NULL || rtcs->addrs == NULL)
+       if (rtcs == NULL || of_address_to_resource(rtcs, 0, &r))
                return 0;
-       base = rtcs->addrs[0].address;
+       
+       base = r.start;
        nvram_as1 = 0;
        nvram_as0 = base;
        nvram_data = base + 1;
index 127b465308be80e86ec72a46c3b9538533777a9c..ce8c0b943fa0ffee6c81be950fc619f1cd4ced26 100644 (file)
@@ -1,8 +1,8 @@
 EXTRA_CFLAGS   += -mno-minimal-toc
 
 obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o mf.o lpevents.o \
-       hvcall.o proc.o htab.o iommu.o misc.o
-obj-$(CONFIG_PCI) += pci.o irq.o vpdinfo.o
+       hvcall.o proc.o htab.o iommu.o misc.o irq.o
+obj-$(CONFIG_PCI) += pci.o vpdinfo.o
 obj-$(CONFIG_IBMVIO) += vio.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_VIOPATH) += viopath.o
index 2b54eeb2c8997c5f6c540c137824aea64513aec7..bea0b703f4095c216c1b290119199ac15bdac047 100644 (file)
@@ -34,6 +34,8 @@
 #include <asm/pci-bridge.h>
 #include <asm/iseries/hv_call_xm.h>
 
+#include "iommu.h"
+
 extern struct list_head iSeries_Global_Device_List;
 
 
diff --git a/arch/powerpc/platforms/iseries/iommu.h b/arch/powerpc/platforms/iseries/iommu.h
new file mode 100644 (file)
index 0000000..cb5658f
--- /dev/null
@@ -0,0 +1,35 @@
+#ifndef _PLATFORMS_ISERIES_IOMMU_H
+#define _PLATFORMS_ISERIES_IOMMU_H
+
+/*
+ * Copyright (C) 2005  Stephen Rothwell, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the:
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330,
+ * Boston, MA  02111-1307  USA
+ */
+
+struct device_node;
+struct iommu_table;
+
+/* Creates table for an individual device node */
+extern void iommu_devnode_init_iSeries(struct device_node *dn);
+
+/* Get table parameters from HV */
+extern void iommu_table_getparms_iSeries(unsigned long busno,
+               unsigned char slotno, unsigned char virtbus,
+               struct iommu_table *tbl);
+
+#endif /* _PLATFORMS_ISERIES_IOMMU_H */
index 42e978e4897a9d24860ed741abca38ecb2b65133..be3fbfc24e6cc2b1ecf947df77fd38f9c4f2b050 100644 (file)
@@ -48,6 +48,8 @@
 extern void iSeries_smp_message_recv(struct pt_regs *);
 #endif
 
+#ifdef CONFIG_PCI
+
 enum pci_event_type {
        pe_bus_created          = 0,    /* PHB has been created */
        pe_bus_error            = 1,    /* PHB has failed */
@@ -147,20 +149,11 @@ static void int_received(struct pci_event *event, struct pt_regs *regs)
 static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs)
 {
        if (event && (event->xType == HvLpEvent_Type_PciIo)) {
-               switch (event->xFlags.xFunction) {
-               case HvLpEvent_Function_Int:
+               if (hvlpevent_is_int(event))
                        int_received((struct pci_event *)event, regs);
-                       break;
-               case HvLpEvent_Function_Ack:
+               else
                        printk(KERN_ERR
                                "pci_event_handler: unexpected ack received\n");
-                       break;
-               default:
-                       printk(KERN_ERR
-                               "pci_event_handler: unexpected event function %d\n",
-                               (int)event->xFlags.xFunction);
-                       break;
-               }
        } else if (event)
                printk(KERN_ERR
                        "pci_event_handler: Unrecognized PCI event type 0x%x\n",
@@ -334,25 +327,26 @@ int __init iSeries_allocate_IRQ(HvBusNumber bus,
        return virtirq;
 }
 
+#endif /* CONFIG_PCI */
+
 /*
  * Get the next pending IRQ.
  */
 int iSeries_get_irq(struct pt_regs *regs)
 {
-       struct paca_struct *lpaca;
        /* -2 means ignore this interrupt */
        int irq = -2;
 
-       lpaca = get_paca();
 #ifdef CONFIG_SMP
-       if (lpaca->lppaca.int_dword.fields.ipi_cnt) {
-               lpaca->lppaca.int_dword.fields.ipi_cnt = 0;
+       if (get_lppaca()->int_dword.fields.ipi_cnt) {
+               get_lppaca()->int_dword.fields.ipi_cnt = 0;
                iSeries_smp_message_recv(regs);
        }
 #endif /* CONFIG_SMP */
        if (hvlpevent_is_pending())
                process_hvlpevents(regs);
 
+#ifdef CONFIG_PCI
        if (num_pending_irqs) {
                spin_lock(&pending_irqs_lock);
                for (irq = 0; irq < NR_IRQS; irq++) {
@@ -366,6 +360,7 @@ int iSeries_get_irq(struct pt_regs *regs)
                if (irq >= NR_IRQS)
                        irq = -2;
        }
+#endif
 
        return irq;
 }
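
The xFlags bitfield accesses in the iSeries hunks above and below are replaced by flag helpers; their assumed shape (sketch only -- the real definitions live in the HvLpEvent header, and the masks are the HV_LP_EVENT_* bits used in the mf.c hunk):

    static inline int hvlpevent_is_valid(struct HvLpEvent *h)
    {
            return h->flags & HV_LP_EVENT_VALID;
    }
    static inline void hvlpevent_invalidate(struct HvLpEvent *h)
    {
            h->flags &= ~HV_LP_EVENT_VALID;
    }
    static inline int hvlpevent_is_int(struct HvLpEvent *h)
    {
            return h->flags & HV_LP_EVENT_INT;
    }
    static inline int hvlpevent_is_ack(struct HvLpEvent *h)
    {
            return !hvlpevent_is_int(h);   /* an event is either an interrupt or an ack */
    }
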
index ea72385aaf0a991791a7d9a8d809e89903fd5bae..438e2dba63b59526f8e0356e6be776b86c850b64 100644 (file)
@@ -93,10 +93,7 @@ struct ItLpNaca itLpNaca = {
        .xPirEnvironMode = 0,           /* Piranha stuff */
        .xPirConsoleMode = 0,
        .xPirDasdMode = 0,
-       .xLparInstalled = 0,
-       .xSysPartitioned = 0,
-       .xHwSyncedTBs = 0,
-       .xIntProcUtilHmt = 0,
+       .flags = 0,
        .xSpVpdFormat = 0,
        .xIntProcRatio = 0,
        .xPlicVrmIndex = 0,             /* VRM index of PLIC */
index e9fb98bf895f46fa9d4008dbdca1e6b23b8835a9..0b885300d1d1a9360a32a2418e5d0e9c024c1faf 100644 (file)
@@ -53,7 +53,7 @@ static struct HvLpEvent * get_next_hvlpevent(void)
        struct HvLpEvent * event;
        event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
 
-       if (event->xFlags.xValid) {
+       if (hvlpevent_is_valid(event)) {
                /* rmb() needed only for weakly consistent machines (regatta) */
                rmb();
                /* Set pointer to next potential event */
@@ -84,7 +84,7 @@ int hvlpevent_is_pending(void)
 
        next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
 
-       return next_event->xFlags.xValid |
+       return hvlpevent_is_valid(next_event) ||
                hvlpevent_queue.xPlicOverflowIntPending;
 }
 
@@ -101,18 +101,18 @@ static void hvlpevent_clear_valid(struct HvLpEvent * event)
        switch (extra) {
        case 3:
                tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
-               tmp->xFlags.xValid = 0;
+               hvlpevent_invalidate(tmp);
        case 2:
                tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
-               tmp->xFlags.xValid = 0;
+               hvlpevent_invalidate(tmp);
        case 1:
                tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
-               tmp->xFlags.xValid = 0;
+               hvlpevent_invalidate(tmp);
        }
 
        mb();
 
-       event->xFlags.xValid = 0;
+       hvlpevent_invalidate(event);
 }
 
 void process_hvlpevents(struct pt_regs *regs)
index 49e7e4b85847558bacba928922d85166615075dc..a41d8b78c0cd6e15b8e83c57321eff84fb26dbe9 100644 (file)
@@ -251,10 +251,7 @@ static struct pending_event *new_pending_event(void)
        }
        memset(ev, 0, sizeof(struct pending_event));
        hev = &ev->event.hp_lp_event;
-       hev->xFlags.xValid = 1;
-       hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
-       hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
-       hev->xFlags.xFunction = HvLpEvent_Function_Int;
+       hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
        hev->xType = HvLpEvent_Type_MachineFac;
        hev->xSourceLp = HvLpConfig_getLpIndex();
        hev->xTargetLp = primary_lp;
@@ -518,17 +515,10 @@ static void handle_ack(struct io_mf_lp_event *event)
 static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
 {
        if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
-               switch(event->xFlags.xFunction) {
-               case HvLpEvent_Function_Ack:
+               if (hvlpevent_is_ack(event))
                        handle_ack((struct io_mf_lp_event *)event);
-                       break;
-               case HvLpEvent_Function_Int:
+               else
                        handle_int((struct io_mf_lp_event *)event);
-                       break;
-               default:
-                       printk(KERN_ERR "mf.c: non ack/int event received\n");
-                       break;
-               }
        } else
                printk(KERN_ERR "mf.c: alien event received\n");
 }
index dfe7aa1ba098facedbd1294dd6d1a26c8616ef1a..7641fc7e550a0ec1603dabab9cc917a8dfecdb72 100644 (file)
@@ -44,7 +44,8 @@ _GLOBAL(local_irq_restore)
        /* Check pending interrupts */
        /*   A decrementer, IPI or PMC interrupt may have occurred
         *   while we were in the hypervisor (which enables) */
-       ld      r4,PACALPPACA+LPPACAANYINT(r13)
+       ld      r4,PACALPPACAPTR(r13)
+       ld      r4,LPPACAANYINT(r4)
        cmpdi   r4,0
        beqlr
 
index dafc518fbb8334ec5db75eb388e34e1c09ca6fcf..a19833b880e4096273c5e5360d01af7638ddf9e2 100644 (file)
@@ -43,6 +43,7 @@
 #include "irq.h"
 #include "pci.h"
 #include "call_pci.h"
+#include "iommu.h"
 
 extern unsigned long io_page_mask;
 
index c6bbe5c25107f2ed2a6452d549a41568d31c8bed..3f8790146b00a7ae73e7f28b93f6c380b730a7d6 100644 (file)
@@ -538,7 +538,7 @@ static unsigned long __init build_iSeries_Memory_Map(void)
  */
 static void __init iSeries_setup_arch(void)
 {
-       if (get_paca()->lppaca.shared_proc) {
+       if (get_lppaca()->shared_proc) {
                ppc_md.idle_loop = iseries_shared_idle;
                printk(KERN_INFO "Using shared processor idle loop\n");
        } else {
@@ -647,7 +647,7 @@ static void yield_shared_processor(void)
         * The decrementer stops during the yield.  Force a fake decrementer
         * here and let the timer_interrupt code sort out the actual time.
         */
-       get_paca()->lppaca.int_dword.fields.decr_int = 1;
+       get_lppaca()->int_dword.fields.decr_int = 1;
        process_iSeries_events();
 }
 
@@ -883,7 +883,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
        pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
        for (i = 0; i < NR_CPUS; i++) {
-               if (paca[i].lppaca.dyn_proc_status >= 2)
+               if (lppaca[i].dyn_proc_status >= 2)
                        continue;
 
                snprintf(p, 32 - (p - buf), "@%d", i);
@@ -891,7 +891,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
 
                dt_prop_str(dt, "device_type", "cpu");
 
-               index = paca[i].lppaca.dyn_hv_phys_proc_index;
+               index = lppaca[i].dyn_hv_phys_proc_index;
                d = &xIoHriProcessorVpd[index];
 
                dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
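[Editor's note] Across this merge, code stops reaching the hypervisor-shared lppaca through an embedded paca member (get_paca()->lppaca, paca[i].lppaca) and instead goes through get_lppaca() and a separate lppaca[] array; the local_irq_restore hunk above makes the matching assembly change by loading a pointer (PACALPPACAPTR) before reading the LPPACAANYINT field. A rough C sketch of that indirection is below; the lppaca_ptr field name and the struct contents are illustrative assumptions rather than something shown in these hunks.

struct lppaca;				/* hypervisor-shared per-cpu area */

struct paca_struct {
	struct lppaca *lppaca_ptr;	/* was: struct lppaca lppaca; (embedded) */
	/* other per-cpu fields ... */
};

/* On 64-bit PowerPC the current CPU's paca is always reachable through r13. */
register struct paca_struct *local_paca asm("r13");

#define get_paca()	local_paca

static inline struct lppaca *get_lppaca(void)
{
	return get_paca()->lppaca_ptr;
}

With that indirection in place, the open-coded get_paca()->lppaca.field and paca[i].lppaca.field accesses become get_lppaca()->field and lppaca[i].field, which is exactly the substitution visible in the iSeries setup, smp and pSeries lpar hunks.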
index fcb094ec6aec6efa082879088bf542399c43b23e..6f9d407a709f21334f1fd3be58cf2685174a2787 100644 (file)
@@ -91,7 +91,7 @@ static void smp_iSeries_kick_cpu(int nr)
        BUG_ON((nr < 0) || (nr >= NR_CPUS));
 
        /* Verify that our partition has a processor nr */
-       if (paca[nr].lppaca.dyn_proc_status >= 2)
+       if (lppaca[nr].dyn_proc_status >= 2)
                return;
 
        /* The processor is currently spinning, waiting
index 384360ee06ec6647510f059db6c2da3a1ba121f2..ad36ab0639f0496df1f1c1cbbe155c2b26d421f1 100644 (file)
@@ -22,6 +22,8 @@
 #include <asm/iseries/hv_lp_config.h>
 #include <asm/iseries/hv_call_xm.h>
 
+#include "iommu.h"
+
 struct device *iSeries_vio_dev = &vio_bus_device.dev;
 EXPORT_SYMBOL(iSeries_vio_dev);
 
index 842672695598e9845f8698e4fe827ec1c20234d8..622a30149b48b700131da58ebd52e098acebf573 100644 (file)
@@ -270,7 +270,7 @@ static void handleMonitorEvent(struct HvLpEvent *event)
         * First see if this is just a normal monitor message from the
         * other partition
         */
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                remoteLp = event->xSourceLp;
                if (!viopathStatus[remoteLp].isActive)
                        sendMonMsg(remoteLp);
@@ -331,13 +331,12 @@ static void handleConfig(struct HvLpEvent *event)
 {
        if (!event)
                return;
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                printk(VIOPATH_KERN_WARN
                       "unexpected config request from partition %d",
                       event->xSourceLp);
 
-               if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
-                   (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+               if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
@@ -377,7 +376,7 @@ static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
        int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
                >> VIOMAJOR_SUBTYPE_SHIFT;
 
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                remoteLp = event->xSourceLp;
                /*
                 * The isActive is checked because if the hosting partition
@@ -436,8 +435,7 @@ static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
                       "unexpected virtual io event subtype %d from partition %d\n",
                       event->xSubtype, remoteLp);
                /* No handler.  Ack if necessary */
-               if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
-                   (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+               if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
index 3b1a9d4fcbc6c7ff1789960baa9b304f833c21e4..89c4c36361610749e401f5cb1cf96712a70ba4ad 100644 (file)
@@ -278,7 +278,7 @@ static void __init l2cr_init(void)
 }
 #endif
 
-void __init pmac_setup_arch(void)
+static void __init pmac_setup_arch(void)
 {
        struct device_node *cpu, *ic;
        int *fp;
index 17cea7f2afd3a5dca0e918474ff83ea40fb05d6e..83578313ee7e7e635fb07663df1e64ab080dff94 100644 (file)
@@ -208,10 +208,11 @@ static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
 {
        while (dn) {
                if (PCI_DN(dn)) {
-                       PCI_DN(dn)->eeh_mode |= mode_flag;
-
                        /* Mark the pci device driver too */
                        struct pci_dev *dev = PCI_DN(dn)->pcidev;
+
+                       PCI_DN(dn)->eeh_mode |= mode_flag;
+
                        if (dev && dev->driver)
                                dev->error_state = pci_channel_io_frozen;
 
index 4d584172055a6e923dc6d69e7a0b83c01b0b2019..22bfb5c89db9fe0cfbde10c3688130e69a3becb0 100644 (file)
@@ -40,7 +40,7 @@ MODULE_VERSION(HVCS_ARCH_VERSION);
  * functions aren't performance sensitive, so this conversion isn't an
  * issue.
  */
-int hvcs_convert(long to_convert)
+static int hvcs_convert(long to_convert)
 {
        switch (to_convert) {
                case H_Success:
@@ -91,7 +91,7 @@ int hvcs_free_partner_info(struct list_head *head)
 EXPORT_SYMBOL(hvcs_free_partner_info);
 
 /* Helper function for hvcs_get_partner_info */
-int hvcs_next_partner(uint32_t unit_address,
+static int hvcs_next_partner(uint32_t unit_address,
                unsigned long last_p_partition_ID,
                unsigned long last_p_unit_address, unsigned long *pi_buff)
 
index 169f9148789c7317e721a887b35164f24fee0868..48cfbfc43f9921df42965ade250cf84887b68304 100644 (file)
@@ -51,8 +51,6 @@
 
 #define DBG(fmt...)
 
-extern int is_python(struct device_node *);
-
 static void tce_build_pSeries(struct iommu_table *tbl, long index, 
                              long npages, unsigned long uaddr, 
                              enum dma_data_direction direction)
index 1fe445ab78a6342f0efed8e96349045d5a33eec2..8952528d31ac61ed7801c77d771dd6c00ec8b507 100644 (file)
@@ -254,11 +254,11 @@ out:
 void vpa_init(int cpu)
 {
        int hwcpu = get_hard_smp_processor_id(cpu);
-       unsigned long vpa = __pa(&paca[cpu].lppaca);
+       unsigned long vpa = __pa(&lppaca[cpu]);
        long ret;
 
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
-               paca[cpu].lppaca.vmxregs_in_use = 1;
+               lppaca[cpu].vmxregs_in_use = 1;
 
        ret = register_vpa(hwcpu, vpa);
 
index d8864164dbe809c85c012fe03957e96d4b398ff4..86cfa6ecdcf3b55ae6e2468400650258d48e4231 100644 (file)
@@ -350,6 +350,100 @@ static int do_remove_node(char *buf)
        return rv;
 }
 
+static char *parse_node(char *buf, size_t bufsize, struct device_node **npp)
+{
+       char *handle_str;
+       phandle handle;
+       *npp = NULL;
+
+       handle_str = buf;
+
+       buf = strchr(buf, ' ');
+       if (!buf)
+               return NULL;
+       *buf = '\0';
+       buf++;
+
+       handle = simple_strtoul(handle_str, NULL, 10);
+
+       *npp = of_find_node_by_phandle(handle);
+       return buf;
+}
+
+static int do_add_property(char *buf, size_t bufsize)
+{
+       struct property *prop = NULL;
+       struct device_node *np;
+       unsigned char *value;
+       char *name, *end;
+       int length;
+       end = buf + bufsize;
+       buf = parse_node(buf, bufsize, &np);
+
+       if (!np)
+               return -ENODEV;
+
+       if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+               return -EINVAL;
+
+       prop = new_property(name, length, value, NULL);
+       if (!prop)
+               return -ENOMEM;
+
+       prom_add_property(np, prop);
+
+       return 0;
+}
+
+static int do_remove_property(char *buf, size_t bufsize)
+{
+       struct device_node *np;
+       char *tmp;
+       struct property *prop;
+       buf = parse_node(buf, bufsize, &np);
+
+       if (!np)
+               return -ENODEV;
+
+       tmp = strchr(buf,' ');
+       if (tmp)
+               *tmp = '\0';
+
+       if (strlen(buf) == 0)
+               return -EINVAL;
+
+       prop = of_find_property(np, buf, NULL);
+
+       return prom_remove_property(np, prop);
+}
+
+static int do_update_property(char *buf, size_t bufsize)
+{
+       struct device_node *np;
+       unsigned char *value;
+       char *name, *end;
+       int length;
+       struct property *newprop, *oldprop;
+       buf = parse_node(buf, bufsize, &np);
+       end = buf + bufsize;
+
+       if (!np)
+               return -ENODEV;
+
+       if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+               return -EINVAL;
+
+       newprop = new_property(name, length, value, NULL);
+       if (!newprop)
+               return -ENOMEM;
+
+       oldprop = of_find_property(np, name,NULL);
+       if (!oldprop)
+               return -ENODEV;
+
+       return prom_update_property(np, newprop, oldprop);
+}
+
 /**
  * ofdt_write - perform operations on the Open Firmware device tree
  *
@@ -392,6 +486,12 @@ static ssize_t ofdt_write(struct file *file, const char __user *buf, size_t coun
                rv = do_add_node(tmp, count - (tmp - kbuf));
        else if (!strcmp(kbuf, "remove_node"))
                rv = do_remove_node(tmp);
+       else if (!strcmp(kbuf, "add_property"))
+               rv = do_add_property(tmp, count - (tmp - kbuf));
+       else if (!strcmp(kbuf, "remove_property"))
+               rv = do_remove_property(tmp, count - (tmp - kbuf));
+       else if (!strcmp(kbuf, "update_property"))
+               rv = do_update_property(tmp, count - (tmp - kbuf));
        else
                rv = -EINVAL;
 out:
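[Editor's note] ofdt_write() now dispatches three more keywords, so device tree properties can be added, removed and updated from user space through the same interface as add_node/remove_node. As a hedged illustration only: judging from parse_node() above, the commands take a decimal phandle followed by the property name (and, for add/update, a value in whatever encoding parse_next_property() expects, which is defined elsewhere in reconfig.c). A hypothetical user-space snippet for the simplest case, remove_property, might look like this; the /proc/ppc64/ofdt path and the phandle/property names here are assumptions.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* "<command> <phandle> <property-name>", phandle in decimal. */
	const char cmd[] = "remove_property 4321 linux,unused-prop";
	int fd = open("/proc/ppc64/ofdt", O_WRONLY);	/* path assumed */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror("write");
	close(fd);
	return 0;
}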
index 2edc947f7c44b2a9908f43ad098633018dc38bf6..50643496eb635f5febf4f8fa621fe1311bc8718d 100644 (file)
@@ -192,7 +192,7 @@ struct file_operations scanlog_fops = {
        .release        = scanlog_release,
 };
 
-int __init scanlog_init(void)
+static int __init scanlog_init(void)
 {
        struct proc_dir_entry *ent;
 
@@ -222,7 +222,7 @@ int __init scanlog_init(void)
        return 0;
 }
 
-void __exit scanlog_cleanup(void)
+static void __exit scanlog_cleanup(void)
 {
        if (proc_ppc64_scan_log_dump) {
                kfree(proc_ppc64_scan_log_dump->data);
index 8903cf63236a273bfd2f77828cc89b5904bf2a62..da6cebaf72cda275fc1b9bb7613b7fe3efa97cbb 100644 (file)
@@ -86,7 +86,7 @@ static void pseries_dedicated_idle(void);
 
 struct mpic *pSeries_mpic;
 
-void pSeries_show_cpuinfo(struct seq_file *m)
+static void pSeries_show_cpuinfo(struct seq_file *m)
 {
        struct device_node *root;
        const char *model = "";
@@ -190,7 +190,7 @@ static void pseries_lpar_enable_pmcs(void)
 
        /* instruct hypervisor to maintain PMCs */
        if (firmware_has_feature(FW_FEATURE_SPLPAR))
-               get_paca()->lppaca.pmcregs_in_use = 1;
+               get_lppaca()->pmcregs_in_use = 1;
 }
 
 static void __init pSeries_setup_arch(void)
@@ -234,7 +234,7 @@ static void __init pSeries_setup_arch(void)
        /* Choose an idle loop */
        if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
                vpa_init(boot_cpuid);
-               if (get_paca()->lppaca.shared_proc) {
+               if (get_lppaca()->shared_proc) {
                        printk(KERN_INFO "Using shared processor idle loop\n");
                        ppc_md.idle_loop = pseries_shared_idle;
                } else {
@@ -444,10 +444,10 @@ DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
 
 static inline void dedicated_idle_sleep(unsigned int cpu)
 {
-       struct paca_struct *ppaca = &paca[cpu ^ 1];
+       struct lppaca *plppaca = &lppaca[cpu ^ 1];
 
        /* Only sleep if the other thread is not idle */
-       if (!(ppaca->lppaca.idle)) {
+       if (!(plppaca->idle)) {
                local_irq_disable();
 
                /*
@@ -480,7 +480,6 @@ static inline void dedicated_idle_sleep(unsigned int cpu)
 
 static void pseries_dedicated_idle(void)
 { 
-       struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
        unsigned long start_snooze;
        unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
@@ -491,7 +490,7 @@ static void pseries_dedicated_idle(void)
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
-               lpaca->lppaca.idle = 1;
+               get_lppaca()->idle = 1;
 
                if (!need_resched()) {
                        start_snooze = get_tb() +
@@ -518,7 +517,7 @@ static void pseries_dedicated_idle(void)
                        HMT_medium();
                }
 
-               lpaca->lppaca.idle = 0;
+               get_lppaca()->idle = 0;
                ppc64_runlatch_on();
 
                preempt_enable_no_resched();
@@ -532,7 +531,6 @@ static void pseries_dedicated_idle(void)
 
 static void pseries_shared_idle(void)
 {
-       struct paca_struct *lpaca = get_paca();
        unsigned int cpu = smp_processor_id();
 
        while (1) {
@@ -540,7 +538,7 @@ static void pseries_shared_idle(void)
                 * Indicate to the HV that we are idle. Now would be
                 * a good time to find other work to dispatch.
                 */
-               lpaca->lppaca.idle = 1;
+               get_lppaca()->idle = 1;
 
                while (!need_resched() && !cpu_is_offline(cpu)) {
                        local_irq_disable();
@@ -564,7 +562,7 @@ static void pseries_shared_idle(void)
                        HMT_medium();
                }
 
-               lpaca->lppaca.idle = 0;
+               get_lppaca()->idle = 0;
                ppc64_runlatch_on();
 
                preempt_enable_no_resched();
@@ -588,7 +586,7 @@ static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        /* Don't risk a hypervisor call if we're crashing */
        if (!crash_shutdown) {
-               unsigned long vpa = __pa(&get_paca()->lppaca);
+               unsigned long vpa = __pa(get_lppaca());
 
                if (unregister_vpa(hard_smp_processor_id(), vpa)) {
                        printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
index 25181c594d737f128c0f8676a30dad24a328108c..8e6b1ed1396e47cdb1aff565c296a003a577ba3e 100644 (file)
@@ -93,7 +93,7 @@ static int query_cpu_stopped(unsigned int pcpu)
        return cpu_status;
 }
 
-int pSeries_cpu_disable(void)
+static int pSeries_cpu_disable(void)
 {
        int cpu = smp_processor_id();
 
@@ -109,7 +109,7 @@ int pSeries_cpu_disable(void)
        return 0;
 }
 
-void pSeries_cpu_die(unsigned int cpu)
+static void pSeries_cpu_die(unsigned int cpu)
 {
        int tries;
        int cpu_status;
@@ -282,7 +282,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
        pcpu = get_hard_smp_processor_id(lcpu);
 
        /* Fixup atomic count: it exited inside IRQ handler. */
-       paca[lcpu].__current->thread_info->preempt_count        = 0;
+       task_thread_info(paca[lcpu].__current)->preempt_count   = 0;
 
        /* 
         * If the RTAS start-cpu token does not exist then presume the
index 0c0cfa32eb58f57344b915ea8137cac8d308d5c4..fd823c7c9ac88e6db77d9d912750819df682ccd3 100644 (file)
@@ -381,7 +381,7 @@ int xics_get_irq(struct pt_regs *regs)
 
 #ifdef CONFIG_SMP
 
-irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs)
 {
        int cpu = smp_processor_id();
 
index 0ae841347a099d4b1714079ed436993914ad8c59..4c2b356774eada42c083c1796c0f7eec93927088 100644 (file)
@@ -7,3 +7,4 @@ obj-$(CONFIG_40x)               += dcr.o
 obj-$(CONFIG_U3_DART)          += dart_iommu.o
 obj-$(CONFIG_MMIO_NVRAM)       += mmio_nvram.o
 obj-$(CONFIG_PPC_83xx)         += ipic.o
+obj-$(CONFIG_FSL_SOC)          += fsl_soc.o
index e00b46b9514edb3159385d87adcd986e569e8874..977de9db87547635f61dffff982684c674a0b531 100644 (file)
@@ -139,7 +139,6 @@ static void dart_build(struct iommu_table *tbl, long index,
 
                *(dp++) = DARTMAP_VALID | (rpn & DARTMAP_RPNMASK);
 
-               rpn++;
                uaddr += DART_PAGE_SIZE;
        }
 
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
new file mode 100644 (file)
index 0000000..064c9de
--- /dev/null
@@ -0,0 +1,317 @@
+/*
+ * FSL SoC setup code
+ *
+ * Maintained by Kumar Gala (see MAINTAINERS for contact information)
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fsl_devices.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/prom.h>
+#include <sysdev/fsl_soc.h>
+#include <mm/mmu_decl.h>
+
+static phys_addr_t immrbase = -1;
+
+phys_addr_t get_immrbase(void)
+{
+       struct device_node *soc;
+
+       if (immrbase != -1)
+               return immrbase;
+
+       soc = of_find_node_by_type(NULL, "soc");
+       if (soc != 0) {
+               unsigned int size;
+               void *prop = get_property(soc, "reg", &size);
+               immrbase = of_translate_address(soc, prop);
+               of_node_put(soc);
+       };
+
+       return immrbase;
+}
+EXPORT_SYMBOL(get_immrbase);
+
+static const char * gfar_tx_intr = "tx";
+static const char * gfar_rx_intr = "rx";
+static const char * gfar_err_intr = "error";
+
+static int __init gfar_of_init(void)
+{
+       struct device_node *np;
+       unsigned int i;
+       struct platform_device *mdio_dev, *gfar_dev;
+       struct resource res;
+       int ret;
+
+       for (np = NULL, i = 0; (np = of_find_compatible_node(np, "mdio", "gianfar")) != NULL; i++) {
+               int k;
+               struct device_node *child = NULL;
+               struct gianfar_mdio_data mdio_data;
+
+               memset(&res, 0, sizeof(res));
+               memset(&mdio_data, 0, sizeof(mdio_data));
+
+               ret = of_address_to_resource(np, 0, &res);
+               if (ret)
+                       goto mdio_err;
+
+               mdio_dev = platform_device_register_simple("fsl-gianfar_mdio", res.start, &res, 1);
+               if (IS_ERR(mdio_dev)) {
+                       ret = PTR_ERR(mdio_dev);
+                       goto mdio_err;
+               }
+
+               for (k = 0; k < 32; k++)
+                       mdio_data.irq[k] = -1;
+
+               while ((child = of_get_next_child(np, child)) != NULL) {
+                       if (child->n_intrs) {
+                               u32 *id = (u32 *) get_property(child, "reg", NULL);
+                               mdio_data.irq[*id] = child->intrs[0].line;
+                       }
+               }
+
+               ret = platform_device_add_data(mdio_dev, &mdio_data, sizeof(struct gianfar_mdio_data));
+               if (ret)
+                       goto mdio_unreg;
+       }
+
+       for (np = NULL, i = 0; (np = of_find_compatible_node(np, "network", "gianfar")) != NULL; i++) {
+               struct resource r[4];
+               struct device_node *phy, *mdio;
+               struct gianfar_platform_data gfar_data;
+               unsigned int *id;
+               char *model;
+               void *mac_addr;
+               phandle *ph;
+
+               memset(r, 0, sizeof(r));
+               memset(&gfar_data, 0, sizeof(gfar_data));
+
+               ret = of_address_to_resource(np, 0, &r[0]);
+               if (ret)
+                       goto gfar_err;
+
+               r[1].start = np->intrs[0].line;
+               r[1].end = np->intrs[0].line;
+               r[1].flags = IORESOURCE_IRQ;
+
+               model = get_property(np, "model", NULL);
+
+               /* If we aren't the FEC we have multiple interrupts */
+               if (model && strcasecmp(model, "FEC")) {
+                       r[1].name = gfar_tx_intr;
+
+                       r[2].name = gfar_rx_intr;
+                       r[2].start = np->intrs[1].line;
+                       r[2].end = np->intrs[1].line;
+                       r[2].flags = IORESOURCE_IRQ;
+
+                       r[3].name = gfar_err_intr;
+                       r[3].start = np->intrs[2].line;
+                       r[3].end = np->intrs[2].line;
+                       r[3].flags = IORESOURCE_IRQ;
+               }
+
+               gfar_dev = platform_device_register_simple("fsl-gianfar", i, &r[0], np->n_intrs + 1);
+
+               if (IS_ERR(gfar_dev)) {
+                       ret = PTR_ERR(gfar_dev);
+                       goto gfar_err;
+               }
+
+               mac_addr = get_property(np, "address", NULL);
+               memcpy(gfar_data.mac_addr, mac_addr, 6);
+
+               if (model && !strcasecmp(model, "TSEC"))
+                       gfar_data.device_flags =
+                               FSL_GIANFAR_DEV_HAS_GIGABIT |
+                               FSL_GIANFAR_DEV_HAS_COALESCE |
+                               FSL_GIANFAR_DEV_HAS_RMON |
+                               FSL_GIANFAR_DEV_HAS_MULTI_INTR;
+               if (model && !strcasecmp(model, "eTSEC"))
+                       gfar_data.device_flags =
+                               FSL_GIANFAR_DEV_HAS_GIGABIT |
+                               FSL_GIANFAR_DEV_HAS_COALESCE |
+                               FSL_GIANFAR_DEV_HAS_RMON |
+                               FSL_GIANFAR_DEV_HAS_MULTI_INTR |
+                               FSL_GIANFAR_DEV_HAS_CSUM |
+                               FSL_GIANFAR_DEV_HAS_VLAN |
+                               FSL_GIANFAR_DEV_HAS_EXTENDED_HASH;
+
+               ph = (phandle *) get_property(np, "phy-handle", NULL);
+               phy = of_find_node_by_phandle(*ph);
+
+               if (phy == NULL) {
+                       ret = -ENODEV;
+                       goto gfar_unreg;
+               }
+
+               mdio = of_get_parent(phy);
+
+               id = (u32 *) get_property(phy, "reg", NULL);
+               ret = of_address_to_resource(mdio, 0, &res);
+               if (ret) {
+                       of_node_put(phy);
+                       of_node_put(mdio);
+                       goto gfar_unreg;
+               }
+
+               gfar_data.phy_id = *id;
+               gfar_data.bus_id = res.start;
+
+               of_node_put(phy);
+               of_node_put(mdio);
+
+               ret = platform_device_add_data(gfar_dev, &gfar_data, sizeof(struct gianfar_platform_data));
+               if (ret)
+                       goto gfar_unreg;
+       }
+
+       return 0;
+
+mdio_unreg:
+       platform_device_unregister(mdio_dev);
+mdio_err:
+       return ret;
+
+gfar_unreg:
+       platform_device_unregister(gfar_dev);
+gfar_err:
+       return ret;
+}
+arch_initcall(gfar_of_init);
+
+static int __init fsl_i2c_of_init(void)
+{
+       struct device_node *np;
+       unsigned int i;
+       struct platform_device *i2c_dev;
+       int ret;
+
+       for (np = NULL, i = 0; (np = of_find_compatible_node(np, "i2c", "fsl-i2c")) != NULL; i++) {
+               struct resource r[2];
+               struct fsl_i2c_platform_data i2c_data;
+               unsigned char * flags = NULL;
+
+               memset(&r, 0, sizeof(r));
+               memset(&i2c_data, 0, sizeof(i2c_data));
+
+               ret = of_address_to_resource(np, 0, &r[0]);
+               if (ret)
+                       goto i2c_err;
+
+               r[1].start = np->intrs[0].line;
+               r[1].end = np->intrs[0].line;
+               r[1].flags = IORESOURCE_IRQ;
+
+               i2c_dev = platform_device_register_simple("fsl-i2c", i, r, 2);
+               if (IS_ERR(i2c_dev)) {
+                       ret = PTR_ERR(i2c_dev);
+                       goto i2c_err;
+               }
+
+               i2c_data.device_flags = 0;
+               flags = get_property(np, "dfsrr", NULL);
+               if (flags)
+                       i2c_data.device_flags |= FSL_I2C_DEV_SEPARATE_DFSRR;
+
+               flags = get_property(np, "fsl5200-clocking", NULL);
+               if (flags)
+                       i2c_data.device_flags |= FSL_I2C_DEV_CLOCK_5200;
+
+               ret = platform_device_add_data(i2c_dev, &i2c_data, sizeof(struct fsl_i2c_platform_data));
+               if (ret)
+                       goto i2c_unreg;
+       }
+
+       return 0;
+
+i2c_unreg:
+       platform_device_unregister(i2c_dev);
+i2c_err:
+       return ret;
+}
+arch_initcall(fsl_i2c_of_init);
+
+#ifdef CONFIG_PPC_83xx
+static int __init mpc83xx_wdt_init(void)
+{
+       struct resource r;
+       struct device_node *soc, *np;
+       struct platform_device *dev;
+       unsigned int *freq;
+       int ret;
+
+       np = of_find_compatible_node(NULL, "watchdog", "mpc83xx_wdt");
+
+       if (!np) {
+               ret = -ENODEV;
+               goto mpc83xx_wdt_nodev;
+       }
+
+       soc = of_find_node_by_type(NULL, "soc");
+
+       if (!soc) {
+               ret = -ENODEV;
+               goto mpc83xx_wdt_nosoc;
+       }
+
+       freq = (unsigned int *)get_property(soc, "bus-frequency", NULL);
+       if (!freq) {
+               ret = -ENODEV;
+               goto mpc83xx_wdt_err;
+       }
+
+       memset(&r, 0, sizeof(r));
+
+       ret = of_address_to_resource(np, 0, &r);
+       if (ret)
+               goto mpc83xx_wdt_err;
+
+       dev = platform_device_register_simple("mpc83xx_wdt", 0, &r, 1);
+       if (IS_ERR(dev)) {
+               ret = PTR_ERR(dev);
+               goto mpc83xx_wdt_err;
+       }
+
+       ret = platform_device_add_data(dev, freq, sizeof(int));
+       if (ret)
+               goto mpc83xx_wdt_unreg;
+
+       of_node_put(soc);
+       of_node_put(np);
+
+       return 0;
+
+mpc83xx_wdt_unreg:
+       platform_device_unregister(dev);
+mpc83xx_wdt_err:
+       of_node_put(soc);
+mpc83xx_wdt_nosoc:
+       of_node_put(np);
+mpc83xx_wdt_nodev:
+       return ret;
+}
+arch_initcall(mpc83xx_wdt_init);
+#endif
diff --git a/arch/powerpc/sysdev/fsl_soc.h b/arch/powerpc/sysdev/fsl_soc.h
new file mode 100644 (file)
index 0000000..c433d3f
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __PPC_FSL_SOC_H
+#define __PPC_FSL_SOC_H
+#ifdef __KERNEL__
+
+extern phys_addr_t get_immrbase(void);
+
+#endif
+#endif
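[Editor's note] fsl_soc.h exports get_immrbase(), which (per fsl_soc.c above) caches the translated "reg" address of the device tree's soc node, i.e. the physical base of the on-chip IMMR register block. As a hypothetical illustration of a caller, not part of this patch, a helper like the one below could map a peripheral at a known offset; the function name and offset handling are made up for the example.

#include <asm/io.h>
#include <sysdev/fsl_soc.h>

/*
 * Hypothetical helper: map 'size' bytes of on-chip registers located
 * 'offset' bytes into the IMMR block.  get_immrbase() returns -1 when
 * no "soc" node exists.
 */
static void __iomem *map_immr_block(unsigned long offset, unsigned long size)
{
	phys_addr_t immr = get_immrbase();

	if (immr == (phys_addr_t)-1)
		return NULL;
	return ioremap(immr + offset, size);
}

A caller would pair this with iounmap() once the registers are no longer needed.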
index 22612ed5379c46d0c0482759f873261298522d91..7d02fa2a899029d3df4023ef804f4b14588b539c 100644 (file)
@@ -311,7 +311,7 @@ static void release_output_lock(void)
 }
 #endif
 
-int xmon_core(struct pt_regs *regs, int fromipi)
+static int xmon_core(struct pt_regs *regs, int fromipi)
 {
        int cmd = 0;
        unsigned long msr;
@@ -528,7 +528,7 @@ xmon_irq(int irq, void *d, struct pt_regs *regs)
        return IRQ_HANDLED;
 }
 
-int xmon_bpt(struct pt_regs *regs)
+static int xmon_bpt(struct pt_regs *regs)
 {
        struct bpt *bp;
        unsigned long offset;
@@ -554,7 +554,7 @@ int xmon_bpt(struct pt_regs *regs)
        return 1;
 }
 
-int xmon_sstep(struct pt_regs *regs)
+static int xmon_sstep(struct pt_regs *regs)
 {
        if (user_mode(regs))
                return 0;
@@ -562,7 +562,7 @@ int xmon_sstep(struct pt_regs *regs)
        return 1;
 }
 
-int xmon_dabr_match(struct pt_regs *regs)
+static int xmon_dabr_match(struct pt_regs *regs)
 {
        if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
                return 0;
@@ -572,7 +572,7 @@ int xmon_dabr_match(struct pt_regs *regs)
        return 1;
 }
 
-int xmon_iabr_match(struct pt_regs *regs)
+static int xmon_iabr_match(struct pt_regs *regs)
 {
        if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF))
                return 0;
@@ -582,7 +582,7 @@ int xmon_iabr_match(struct pt_regs *regs)
        return 1;
 }
 
-int xmon_ipi(struct pt_regs *regs)
+static int xmon_ipi(struct pt_regs *regs)
 {
 #ifdef CONFIG_SMP
        if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon))
@@ -591,7 +591,7 @@ int xmon_ipi(struct pt_regs *regs)
        return 0;
 }
 
-int xmon_fault_handler(struct pt_regs *regs)
+static int xmon_fault_handler(struct pt_regs *regs)
 {
        struct bpt *bp;
        unsigned long offset;
index ebc4db8fcc63de0380e2dacc99b5feb5a9abe263..8ace2a1f3b488f1c3b3dcfbf1b2a22da613e395b 100644 (file)
@@ -215,7 +215,6 @@ static struct tty_driver *siccnormal_driver;
  * memory if large numbers of serial ports are open.
  */
 static u_char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 #define HIGH_BITS_OFFSET    ((sizeof(long)-sizeof(int))*8)
 
index 91195e2ce38d138861315f3006f4a9e53e3ef1cf..5f35cf3986f768c186a269bbc0ec048e67cba824 100644 (file)
@@ -96,8 +96,8 @@ void amiga_init_IRQ(void)
                gayle.inten = GAYLE_IRQ_IDE;
 
        /* turn off all interrupts... */
-       custom.intena = 0x7fff;
-       custom.intreq = 0x7fff;
+       amiga_custom.intena = 0x7fff;
+       amiga_custom.intreq = 0x7fff;
 
 #ifdef CONFIG_APUS
        /* Clear any inter-CPU interrupt requests. Circumvents bug in
@@ -110,7 +110,7 @@ void amiga_init_IRQ(void)
        APUS_WRITE(APUS_IPL_EMU, IPLEMU_SETRESET | IPLEMU_IPLMASK);
 #endif
        /* ... and enable the master interrupt bit */
-       custom.intena = IF_SETCLR | IF_INTEN;
+       amiga_custom.intena = IF_SETCLR | IF_INTEN;
 
        cia_init_IRQ(&ciaa_base);
        cia_init_IRQ(&ciab_base);
@@ -151,7 +151,7 @@ void amiga_enable_irq(unsigned int irq)
        }
 
        /* enable the interrupt */
-       custom.intena = IF_SETCLR | ami_intena_vals[irq];
+       amiga_custom.intena = IF_SETCLR | ami_intena_vals[irq];
 }
 
 void amiga_disable_irq(unsigned int irq)
@@ -177,7 +177,7 @@ void amiga_disable_irq(unsigned int irq)
        }
 
        /* disable the interrupt */
-       custom.intena = ami_intena_vals[irq];
+       amiga_custom.intena = ami_intena_vals[irq];
 }
 
 inline void amiga_do_irq(int irq, struct pt_regs *fp)
@@ -196,7 +196,7 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
        kstat_cpu(0).irqs[irq]++;
 
-       custom.intreq = ami_intena_vals[irq];
+       amiga_custom.intreq = ami_intena_vals[irq];
 
        for (action = desc->action; action; action = action->next)
                action->handler(irq, action->dev_id, fp);
@@ -208,40 +208,40 @@ void amiga_do_irq_list(int irq, struct pt_regs *fp)
 
 static void ami_int1(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if serial transmit buffer empty, interrupt */
        if (ints & IF_TBE) {
-               custom.intreq = IF_TBE;
+               amiga_custom.intreq = IF_TBE;
                amiga_do_irq(IRQ_AMIGA_TBE, fp);
        }
 
        /* if floppy disk transfer complete, interrupt */
        if (ints & IF_DSKBLK) {
-               custom.intreq = IF_DSKBLK;
+               amiga_custom.intreq = IF_DSKBLK;
                amiga_do_irq(IRQ_AMIGA_DSKBLK, fp);
        }
 
        /* if software interrupt set, interrupt */
        if (ints & IF_SOFT) {
-               custom.intreq = IF_SOFT;
+               amiga_custom.intreq = IF_SOFT;
                amiga_do_irq(IRQ_AMIGA_SOFT, fp);
        }
 }
 
 static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if a blitter interrupt */
        if (ints & IF_BLIT) {
-               custom.intreq = IF_BLIT;
+               amiga_custom.intreq = IF_BLIT;
                amiga_do_irq(IRQ_AMIGA_BLIT, fp);
        }
 
        /* if a copper interrupt */
        if (ints & IF_COPER) {
-               custom.intreq = IF_COPER;
+               amiga_custom.intreq = IF_COPER;
                amiga_do_irq(IRQ_AMIGA_COPPER, fp);
        }
 
@@ -252,36 +252,36 @@ static void ami_int3(int irq, void *dev_id, struct pt_regs *fp)
 
 static void ami_int4(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if audio 0 interrupt */
        if (ints & IF_AUD0) {
-               custom.intreq = IF_AUD0;
+               amiga_custom.intreq = IF_AUD0;
                amiga_do_irq(IRQ_AMIGA_AUD0, fp);
        }
 
        /* if audio 1 interrupt */
        if (ints & IF_AUD1) {
-               custom.intreq = IF_AUD1;
+               amiga_custom.intreq = IF_AUD1;
                amiga_do_irq(IRQ_AMIGA_AUD1, fp);
        }
 
        /* if audio 2 interrupt */
        if (ints & IF_AUD2) {
-               custom.intreq = IF_AUD2;
+               amiga_custom.intreq = IF_AUD2;
                amiga_do_irq(IRQ_AMIGA_AUD2, fp);
        }
 
        /* if audio 3 interrupt */
        if (ints & IF_AUD3) {
-               custom.intreq = IF_AUD3;
+               amiga_custom.intreq = IF_AUD3;
                amiga_do_irq(IRQ_AMIGA_AUD3, fp);
        }
 }
 
 static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 {
-       unsigned short ints = custom.intreqr & custom.intenar;
+       unsigned short ints = amiga_custom.intreqr & amiga_custom.intenar;
 
        /* if serial receive buffer full interrupt */
        if (ints & IF_RBF) {
@@ -291,7 +291,7 @@ static void ami_int5(int irq, void *dev_id, struct pt_regs *fp)
 
        /* if a disk sync interrupt */
        if (ints & IF_DSKSYN) {
-               custom.intreq = IF_DSKSYN;
+               amiga_custom.intreq = IF_DSKSYN;
                amiga_do_irq(IRQ_AMIGA_DSKSYN, fp);
        }
 }
index ad961465b6cb62fa04afa78910a65c59bb5c5a04..4431c58f611a95764ea2a6ed6f10f125e4014f60 100644 (file)
@@ -66,7 +66,7 @@ static unsigned char cia_set_irq_private(struct ciabase *base,
        else
                base->icr_data &= ~mask;
        if (base->icr_data & base->icr_mask)
-               custom.intreq = IF_SETCLR | base->int_mask;
+               amiga_custom.intreq = IF_SETCLR | base->int_mask;
        return old & base->icr_mask;
 }
 
@@ -114,7 +114,7 @@ static unsigned char cia_able_irq_private(struct ciabase *base,
        base->icr_mask &= CIA_ICR_ALL;
 
        if (base->icr_data & base->icr_mask)
-               custom.intreq = IF_SETCLR | base->int_mask;
+               amiga_custom.intreq = IF_SETCLR | base->int_mask;
        return old;
 }
 
@@ -145,7 +145,7 @@ static void cia_handler(int irq, void *dev_id, struct pt_regs *fp)
        irq = base->cia_irq;
        desc = irq_desc + irq;
        ints = cia_set_irq_private(base, CIA_ICR_ALL);
-       custom.intreq = base->int_mask;
+       amiga_custom.intreq = base->int_mask;
        for (i = 0; i < CIA_IRQS; i++, irq++) {
                if (ints & 1) {
                        kstat_cpu(0).irqs[irq]++;
@@ -174,5 +174,5 @@ void __init cia_init_IRQ(struct ciabase *base)
        action->name = base->name;
        setup_irq(base->handler_irq, &amiga_sys_irqaction[base->handler_irq-IRQ_AMIGA_AUTO]);
 
-       custom.intena = IF_SETCLR | base->int_mask;
+       amiga_custom.intena = IF_SETCLR | base->int_mask;
 }
index af881d7454ddd1254e8e4a4aedbb80d4c6e8cb18..60e2da1c92c07a79f47c387fe75059b7f1d61657 100644 (file)
@@ -90,9 +90,6 @@ static void a3000_gettod (int *, int *, int *, int *, int *, int *);
 static void a2000_gettod (int *, int *, int *, int *, int *, int *);
 static int amiga_hwclk (int, struct hwclk_time *);
 static int amiga_set_clock_mmss (unsigned long);
-#ifdef CONFIG_AMIGA_FLOPPY
-extern void amiga_floppy_setup(char *, int *);
-#endif
 static void amiga_reset (void);
 extern void amiga_init_sound(void);
 static void amiga_savekmsg_init(void);
@@ -281,7 +278,7 @@ static void __init amiga_identify(void)
     case CS_OCS:
     case CS_ECS:
     case CS_AGA:
-      switch (custom.deniseid & 0xf) {
+      switch (amiga_custom.deniseid & 0xf) {
       case 0x0c:
        AMIGAHW_SET(DENISE_HR);
        break;
@@ -294,7 +291,7 @@ static void __init amiga_identify(void)
       AMIGAHW_SET(DENISE);
       break;
     }
-    switch ((custom.vposr>>8) & 0x7f) {
+    switch ((amiga_custom.vposr>>8) & 0x7f) {
     case 0x00:
       AMIGAHW_SET(AGNUS_PAL);
       break;
@@ -419,9 +416,6 @@ void __init config_amiga(void)
 
   mach_hwclk           = amiga_hwclk;
   mach_set_clock_mmss  = amiga_set_clock_mmss;
-#ifdef CONFIG_AMIGA_FLOPPY
-  mach_floppy_setup    = amiga_floppy_setup;
-#endif
   mach_reset           = amiga_reset;
 #ifdef CONFIG_HEARTBEAT
   mach_heartbeat = amiga_heartbeat;
@@ -432,9 +426,9 @@ void __init config_amiga(void)
   amiga_colorclock = 5*amiga_eclock;   /* 3.5 MHz */
 
   /* clear all DMA bits */
-  custom.dmacon = DMAF_ALL;
+  amiga_custom.dmacon = DMAF_ALL;
   /* ensure that the DMA master bit is set */
-  custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
+  amiga_custom.dmacon = DMAF_SETCLR | DMAF_MASTER;
 
   /* request all RAM */
   for (i = 0; i < m68k_num_memory; i++) {
@@ -753,9 +747,9 @@ static void amiga_savekmsg_init(void)
 
 static void amiga_serial_putc(char c)
 {
-    custom.serdat = (unsigned char)c | 0x100;
+    amiga_custom.serdat = (unsigned char)c | 0x100;
     mb();
-    while (!(custom.serdatr & 0x2000))
+    while (!(amiga_custom.serdatr & 0x2000))
        ;
 }
 
@@ -785,11 +779,11 @@ int amiga_serial_console_wait_key(struct console *co)
 {
     int ch;
 
-    while (!(custom.intreqr & IF_RBF))
+    while (!(amiga_custom.intreqr & IF_RBF))
        barrier();
-    ch = custom.serdatr & 0xff;
+    ch = amiga_custom.serdatr & 0xff;
     /* clear the interrupt, so that another character can be read */
-    custom.intreq = IF_RBF;
+    amiga_custom.intreq = IF_RBF;
     return ch;
 }
 
index e6c1d615bb860879d684da4d40f41a6a66a9608f..ca020130086820b6e21956f4fc10b0d5ebf58d33 100644 (file)
@@ -13,7 +13,6 @@ extra-$(CONFIG_POWER4)                += idle_power4.o
 extra-y                                += vmlinux.lds
 
 obj-y                          := entry.o traps.o idle.o time.o misc.o \
-                                       process.o \
                                        setup.o \
                                        ppc_htab.o
 obj-$(CONFIG_6xx)              += l2cr.o cpu_setup_6xx.o
index de0978742221ae6eb0ddfb7e6a60120c7c48c4fa..3e6ca7f5843ff43575b2f5e5fe834b60bf3f03dd 100644 (file)
@@ -375,6 +375,8 @@ DataStoreTLBMiss:
        lis     r11, swapper_pg_dir@h
        ori     r11, r11, swapper_pg_dir@l
        rlwimi  r10, r11, 0, 2, 19
+       stw     r12, 16(r0)
+       b LoadLargeDTLB
 3:
        lwz     r11, 0(r10)     /* Get the level 1 entry */
        rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
@@ -430,6 +432,81 @@ DataStoreTLBMiss:
 InstructionTLBError:
        b       InstructionAccess
 
+LoadLargeDTLB:
+       li      r12, 0
+       lwz     r11, 0(r10)     /* Get the level 1 entry */
+       rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
+       beq     3f              /* If zero, don't try to find a pte */
+
+       /* We have a pte table, so fetch the pte from the table.
+        */
+       ori     r11, r11, 1     /* Set valid bit in physical L2 page */
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11        /* Load pte table base address */
+       mfspr   r10, SPRN_MD_TWC        /* ....and get the pte address */
+       lwz     r10, 0(r10)     /* Get the pte */
+
+       /* Insert the Guarded flag into the TWC from the Linux PTE.
+        * It is bit 27 of both the Linux PTE and the TWC (at least
+        * I got that right :-).  It will be better when we can put
+        * this into the Linux pgd/pmd and load it in the operation
+        * above.
+        */
+       rlwimi  r11, r10, 0, 27, 27
+
+       rlwimi  r12, r10, 0, 0, 9       /* extract phys. addr */
+       mfspr   r3, SPRN_MD_EPN
+       rlwinm  r3, r3, 0, 0, 9         /* extract virtual address */
+       tophys(r3, r3)
+       cmpw    r3, r12                 /* only use 8M page if it is a direct 
+                                          kernel mapping */
+       bne     1f
+       ori     r11, r11, MD_PS8MEG
+       li      r12, 1
+       b       2f
+1:
+       li      r12, 0          /* can't use 8MB TLB, so zero r12. */
+2:
+       DO_8xx_CPU6(0x3b80, r3)
+       mtspr   SPRN_MD_TWC, r11
+
+       /* The Linux PTE won't go exactly into the MMU TLB.
+        * Software indicator bits 21, 22 and 28 must be clear.
+        * Software indicator bits 24, 25, 26, and 27 must be
+        * set.  All other Linux PTE bits control the behavior
+        * of the MMU.
+        */
+3:     li      r11, 0x00f0
+       rlwimi  r10, r11, 0, 24, 28     /* Set 24-27, clear 28 */
+       cmpwi   r12, 1
+       bne 4f
+       ori     r10, r10, 0x8
+
+       mfspr   r12, SPRN_MD_EPN
+       lis     r3, 0xff80              /* 10-19 must be clear for 8MB TLB */
+       ori     r3, r3, 0x0fff
+       and     r12, r3, r12
+       DO_8xx_CPU6(0x3780, r3)
+       mtspr   SPRN_MD_EPN, r12
+
+       lis     r3, 0xff80              /* 10-19 must be clear for 8MB TLB */
+       ori     r3, r3, 0x0fff
+       and     r10, r3, r10
+4:
+       DO_8xx_CPU6(0x3d80, r3)
+       mtspr   SPRN_MD_RPN, r10        /* Update TLB entry */
+
+       mfspr   r10, SPRN_M_TW  /* Restore registers */
+       lwz     r11, 0(r0)
+       mtcr    r11
+       lwz     r11, 4(r0)
+
+       lwz     r12, 16(r0)
+#ifdef CONFIG_8xx_CPU6
+       lwz     r3, 8(r0)
+#endif
+       rfi
+
 /* This is the data TLB error on the MPC8xx.  This could be due to
  * many reasons, including a dirty update to a pte.  We can catch that
  * one here, but anything else is an error.  First, we track down the
index 95075f99a6d4ea09abfb4c388a019a040d389e1c..fb0da7c209db6d4d35a3ba8e0370f6eff2858acd 100644 (file)
@@ -270,7 +270,6 @@ EXPORT_SYMBOL(__delay);
 EXPORT_SYMBOL(timer_interrupt);
 EXPORT_SYMBOL(irq_desc);
 EXPORT_SYMBOL(tb_ticks_per_jiffy);
-EXPORT_SYMBOL(get_wchan);
 EXPORT_SYMBOL(console_drivers);
 #ifdef CONFIG_XMON
 EXPORT_SYMBOL(xmon);
diff --git a/arch/ppc/kernel/process.c b/arch/ppc/kernel/process.c
deleted file mode 100644 (file)
index 25cbdc8..0000000
+++ /dev/null
@@ -1,851 +0,0 @@
-/*
- *  arch/ppc/kernel/process.c
- *
- *  Derived from "arch/i386/kernel/process.c"
- *    Copyright (C) 1995  Linus Torvalds
- *
- *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
- *  Paul Mackerras (paulus@cs.anu.edu.au)
- *
- *  PowerPC version
- *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- *
- */
-
-#include <linux/config.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/smp_lock.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/ptrace.h>
-#include <linux/slab.h>
-#include <linux/user.h>
-#include <linux/elf.h>
-#include <linux/init.h>
-#include <linux/prctl.h>
-#include <linux/init_task.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/mqueue.h>
-#include <linux/hardirq.h>
-
-#include <asm/pgtable.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/processor.h>
-#include <asm/mmu.h>
-#include <asm/prom.h>
-
-extern unsigned long _get_SP(void);
-
-struct task_struct *last_task_used_math = NULL;
-struct task_struct *last_task_used_altivec = NULL;
-struct task_struct *last_task_used_spe = NULL;
-
-static struct fs_struct init_fs = INIT_FS;
-static struct files_struct init_files = INIT_FILES;
-static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
-static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
-/* this is 8kB-aligned so we can get to the thread_info struct
-   at the base of it from the stack pointer with 1 integer instruction. */
-union thread_union init_thread_union
-       __attribute__((__section__(".data.init_task"))) =
-{ INIT_THREAD_INFO(init_task) };
-
-/* initial task structure */
-struct task_struct init_task = INIT_TASK(init_task);
-EXPORT_SYMBOL(init_task);
-
-/* only used to get secondary processor up */
-struct task_struct *current_set[NR_CPUS] = {&init_task, };
-
-#undef SHOW_TASK_SWITCHES
-#undef CHECK_STACK
-
-#if defined(CHECK_STACK)
-unsigned long
-kernel_stack_top(struct task_struct *tsk)
-{
-       return ((unsigned long)tsk) + sizeof(union task_union);
-}
-
-unsigned long
-task_top(struct task_struct *tsk)
-{
-       return ((unsigned long)tsk) + sizeof(struct thread_info);
-}
-
-/* check to make sure the kernel stack is healthy */
-int check_stack(struct task_struct *tsk)
-{
-       unsigned long stack_top = kernel_stack_top(tsk);
-       unsigned long tsk_top = task_top(tsk);
-       int ret = 0;
-
-#if 0
-       /* check thread magic */
-       if ( tsk->thread.magic != THREAD_MAGIC )
-       {
-               ret |= 1;
-               printk("thread.magic bad: %08x\n", tsk->thread.magic);
-       }
-#endif
-
-       if ( !tsk )
-               printk("check_stack(): tsk bad tsk %p\n",tsk);
-
-       /* check if stored ksp is bad */
-       if ( (tsk->thread.ksp > stack_top) || (tsk->thread.ksp < tsk_top) )
-       {
-               printk("stack out of bounds: %s/%d\n"
-                      " tsk_top %08lx ksp %08lx stack_top %08lx\n",
-                      tsk->comm,tsk->pid,
-                      tsk_top, tsk->thread.ksp, stack_top);
-               ret |= 2;
-       }
-
-       /* check if stack ptr RIGHT NOW is bad */
-       if ( (tsk == current) && ((_get_SP() > stack_top ) || (_get_SP() < tsk_top)) )
-       {
-               printk("current stack ptr out of bounds: %s/%d\n"
-                      " tsk_top %08lx sp %08lx stack_top %08lx\n",
-                      current->comm,current->pid,
-                      tsk_top, _get_SP(), stack_top);
-               ret |= 4;
-       }
-
-#if 0
-       /* check amount of free stack */
-       for ( i = (unsigned long *)task_top(tsk) ; i < kernel_stack_top(tsk) ; i++ )
-       {
-               if ( !i )
-                       printk("check_stack(): i = %p\n", i);
-               if ( *i != 0 )
-               {
-                       /* only notify if it's less than 900 bytes */
-                       if ( (i - (unsigned long *)task_top(tsk))  < 900 )
-                               printk("%d bytes free on stack\n",
-                                      i - task_top(tsk));
-                       break;
-               }
-       }
-#endif
-
-       if (ret)
-       {
-               panic("bad kernel stack");
-       }
-       return(ret);
-}
-#endif /* defined(CHECK_STACK) */
-
-/*
- * Make sure the floating-point register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_fp_to_thread(struct task_struct *tsk)
-{
-       if (tsk->thread.regs) {
-               /*
-                * We need to disable preemption here because if we didn't,
-                * another process could get scheduled after the regs->msr
-                * test but before we have finished saving the FP registers
-                * to the thread_struct.  That process could take over the
-                * FPU, and then when we get scheduled again we would store
-                * bogus values for the remaining FP registers.
-                */
-               preempt_disable();
-               if (tsk->thread.regs->msr & MSR_FP) {
-#ifdef CONFIG_SMP
-                       /*
-                        * This should only ever be called for current or
-                        * for a stopped child process.  Since we save away
-                        * the FP register state on context switch on SMP,
-                        * there is something wrong if a stopped child appears
-                        * to still have its FP state in the CPU registers.
-                        */
-                       BUG_ON(tsk != current);
-#endif
-                       giveup_fpu(current);
-               }
-               preempt_enable();
-       }
-}
-
-void enable_kernel_fp(void)
-{
-       WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
-               giveup_fpu(current);
-       else
-               giveup_fpu(NULL);       /* just enables FP for kernel */
-#else
-       giveup_fpu(last_task_used_math);
-#endif /* CONFIG_SMP */
-}
-EXPORT_SYMBOL(enable_kernel_fp);
-
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-       preempt_disable();
-       if (tsk->thread.regs && (tsk->thread.regs->msr & MSR_FP))
-               giveup_fpu(tsk);
-       preempt_enable();
-       memcpy(fpregs, &tsk->thread.fpr[0], sizeof(*fpregs));
-       return 1;
-}
-
-#ifdef CONFIG_ALTIVEC
-void enable_kernel_altivec(void)
-{
-       WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
-               giveup_altivec(current);
-       else
-               giveup_altivec(NULL);   /* just enable AltiVec for kernel - force */
-#else
-       giveup_altivec(last_task_used_altivec);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_altivec);
-
-/*
- * Make sure the VMX/Altivec register state in the
- * the thread_struct is up to date for task tsk.
- */
-void flush_altivec_to_thread(struct task_struct *tsk)
-{
-       if (tsk->thread.regs) {
-               preempt_disable();
-               if (tsk->thread.regs->msr & MSR_VEC) {
-#ifdef CONFIG_SMP
-                       BUG_ON(tsk != current);
-#endif
-                       giveup_altivec(current);
-               }
-               preempt_enable();
-       }
-}
-
-int dump_altivec(struct pt_regs *regs, elf_vrregset_t *vrregs)
-{
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-       memcpy(vrregs, &current->thread.vr[0], sizeof(*vrregs));
-       return 1;
-}
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_SPE
-void
-enable_kernel_spe(void)
-{
-       WARN_ON(preemptible());
-
-#ifdef CONFIG_SMP
-       if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
-               giveup_spe(current);
-       else
-               giveup_spe(NULL);       /* just enable SPE for kernel - force */
-#else
-       giveup_spe(last_task_used_spe);
-#endif /* __SMP __ */
-}
-EXPORT_SYMBOL(enable_kernel_spe);
-
-void flush_spe_to_thread(struct task_struct *tsk)
-{
-       if (tsk->thread.regs) {
-               preempt_disable();
-               if (tsk->thread.regs->msr & MSR_SPE) {
-#ifdef CONFIG_SMP
-                       BUG_ON(tsk != current);
-#endif
-                       giveup_spe(current);
-               }
-               preempt_enable();
-       }
-}
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
-       if (regs->msr & MSR_SPE)
-               giveup_spe(current);
-       /* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-       memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
-       return 1;
-}
-#endif /* CONFIG_SPE */
-
-struct task_struct *__switch_to(struct task_struct *prev,
-       struct task_struct *new)
-{
-       struct thread_struct *new_thread, *old_thread;
-       unsigned long s;
-       struct task_struct *last;
-
-       local_irq_save(s);
-#ifdef CHECK_STACK
-       check_stack(prev);
-       check_stack(new);
-#endif
-
-#ifdef CONFIG_SMP
-       /* avoid complexity of lazy save/restore of fpu
-        * by just saving it every time we switch out if
-        * this task used the fpu during the last quantum.
-        *
-        * If it tries to use the fpu again, it'll trap and
-        * reload its fp regs.  So we don't have to do a restore
-        * every switch, just a save.
-        *  -- Cort
-        */
-       if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
-               giveup_fpu(prev);
-#ifdef CONFIG_ALTIVEC
-       /*
-        * If the previous thread used altivec in the last quantum
-        * (thus changing altivec regs) then save them.
-        * We used to check the VRSAVE register but not all apps
-        * set it, so we don't rely on it now (and in fact we need
-        * to save & restore VSCR even if VRSAVE == 0).  -- paulus
-        *
-        * On SMP we always save/restore altivec regs just to avoid the
-        * complexity of changing processors.
-        *  -- Cort
-        */
-       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_VEC)))
-               giveup_altivec(prev);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-       /*
-        * If the previous thread used spe in the last quantum
-        * (thus changing spe regs) then save them.
-        *
-        * On SMP we always save/restore spe regs just to avoid the
-        * complexity of changing processors.
-        */
-       if ((prev->thread.regs && (prev->thread.regs->msr & MSR_SPE)))
-               giveup_spe(prev);
-#endif /* CONFIG_SPE */
-#endif /* CONFIG_SMP */
-
-#ifdef CONFIG_ALTIVEC
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_altivec -- Cort
-        */
-       if (new->thread.regs && last_task_used_altivec == new)
-               new->thread.regs->msr |= MSR_VEC;
-#endif
-#ifdef CONFIG_SPE
-       /* Avoid the trap.  On smp this this never happens since
-        * we don't set last_task_used_spe
-        */
-       if (new->thread.regs && last_task_used_spe == new)
-               new->thread.regs->msr |= MSR_SPE;
-#endif /* CONFIG_SPE */
-       new_thread = &new->thread;
-       old_thread = &current->thread;
-       last = _switch(old_thread, new_thread);
-       local_irq_restore(s);
-       return last;
-}
-
-void show_regs(struct pt_regs * regs)
-{
-       int i, trap;
-
-       printk("NIP: %08lX LR: %08lX SP: %08lX REGS: %p TRAP: %04lx    %s\n",
-              regs->nip, regs->link, regs->gpr[1], regs, regs->trap,
-              print_tainted());
-       printk("MSR: %08lx EE: %01x PR: %01x FP: %01x ME: %01x IR/DR: %01x%01x\n",
-              regs->msr, regs->msr&MSR_EE ? 1 : 0, regs->msr&MSR_PR ? 1 : 0,
-              regs->msr & MSR_FP ? 1 : 0,regs->msr&MSR_ME ? 1 : 0,
-              regs->msr&MSR_IR ? 1 : 0,
-              regs->msr&MSR_DR ? 1 : 0);
-       trap = TRAP(regs);
-       if (trap == 0x300 || trap == 0x600)
-               printk("DAR: %08lX, DSISR: %08lX\n", regs->dar, regs->dsisr);
-       printk("TASK = %p[%d] '%s' THREAD: %p\n",
-              current, current->pid, current->comm, current->thread_info);
-       printk("Last syscall: %ld ", current->thread.last_syscall);
-
-#ifdef CONFIG_SMP
-       printk(" CPU: %d", smp_processor_id());
-#endif /* CONFIG_SMP */
-
-       for (i = 0;  i < 32;  i++) {
-               long r;
-               if ((i % 8) == 0)
-                       printk("\n" KERN_INFO "GPR%02d: ", i);
-               if (__get_user(r, &regs->gpr[i]))
-                       break;
-               printk("%08lX ", r);
-               if (i == 12 && !FULL_REGS(regs))
-                       break;
-       }
-       printk("\n");
-#ifdef CONFIG_KALLSYMS
-       /*
-        * Look up the NIP late so we have the best chance of getting the
-        * above info out without failing
-        */
-       printk("NIP [%08lx] ", regs->nip);
-       print_symbol("%s\n", regs->nip);
-       printk("LR [%08lx] ", regs->link);
-       print_symbol("%s\n", regs->link);
-#endif
-       show_stack(current, (unsigned long *) regs->gpr[1]);
-}
-
-void exit_thread(void)
-{
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-#endif
-       preempt_enable();
-}
-
-void flush_thread(void)
-{
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-#endif
-       preempt_enable();
-}
-
-void
-release_thread(struct task_struct *t)
-{
-}
-
-/*
- * This gets called before we allocate a new thread and copy
- * the current task into it.
- */
-void prepare_to_copy(struct task_struct *tsk)
-{
-       struct pt_regs *regs = tsk->thread.regs;
-
-       if (regs == NULL)
-               return;
-       preempt_disable();
-       if (regs->msr & MSR_FP)
-               giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-       if (regs->msr & MSR_SPE)
-               giveup_spe(current);
-#endif /* CONFIG_SPE */
-       preempt_enable();
-}
-
-/*
- * Copy a thread.
- */
-int
-copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
-           unsigned long unused,
-           struct task_struct *p, struct pt_regs *regs)
-{
-       struct pt_regs *childregs, *kregs;
-       extern void ret_from_fork(void);
-       unsigned long sp = (unsigned long)p->thread_info + THREAD_SIZE;
-       unsigned long childframe;
-
-       CHECK_FULL_REGS(regs);
-       /* Copy registers */
-       sp -= sizeof(struct pt_regs);
-       childregs = (struct pt_regs *) sp;
-       *childregs = *regs;
-       if ((childregs->msr & MSR_PR) == 0) {
-               /* for kernel thread, set `current' and stackptr in new task */
-               childregs->gpr[1] = sp + sizeof(struct pt_regs);
-               childregs->gpr[2] = (unsigned long) p;
-               p->thread.regs = NULL;  /* no user register state */
-       } else {
-               childregs->gpr[1] = usp;
-               p->thread.regs = childregs;
-               if (clone_flags & CLONE_SETTLS)
-                       childregs->gpr[2] = childregs->gpr[6];
-       }
-       childregs->gpr[3] = 0;  /* Result from fork() */
-       sp -= STACK_FRAME_OVERHEAD;
-       childframe = sp;
-
-       /*
-        * The way this works is that at some point in the future
-        * some task will call _switch to switch to the new task.
-        * That will pop off the stack frame created below and start
-        * the new task running at ret_from_fork.  The new task will
-        * do some housekeeping and then return from the fork or clone
-        * system call, using the stack frame created above.
-        */
-       sp -= sizeof(struct pt_regs);
-       kregs = (struct pt_regs *) sp;
-       sp -= STACK_FRAME_OVERHEAD;
-       p->thread.ksp = sp;
-       kregs->nip = (unsigned long)ret_from_fork;
-
-       p->thread.last_syscall = -1;
-
-       return 0;
-}
-
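Following the pointer arithmetic in copy_thread() above, the child's kernel stack ends up laid out top-down like this (sizes are symbolic; exact values depend on the configuration):

    (unsigned long)p->thread_info + THREAD_SIZE      top of the kernel stack
      - sizeof(struct pt_regs)       -> childregs   (copy of *regs, gpr[3] = 0)
      - STACK_FRAME_OVERHEAD         -> childframe  (minimal frame below childregs)
      - sizeof(struct pt_regs)       -> kregs       (kregs->nip = ret_from_fork)
      - STACK_FRAME_OVERHEAD         -> p->thread.ksp (frame _switch() will unwind)

When some CPU later calls _switch() to run the child, it loads ksp, pops the bottom frame, and resumes at ret_from_fork, which then returns to user space through the register image in childregs.
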
-/*
- * Set up a thread for executing a new program
- */
-void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp)
-{
-       set_fs(USER_DS);
-       memset(regs->gpr, 0, sizeof(regs->gpr));
-       regs->ctr = 0;
-       regs->link = 0;
-       regs->xer = 0;
-       regs->ccr = 0;
-       regs->mq = 0;
-       regs->nip = nip;
-       regs->gpr[1] = sp;
-       regs->msr = MSR_USER;
-       preempt_disable();
-       if (last_task_used_math == current)
-               last_task_used_math = NULL;
-       if (last_task_used_altivec == current)
-               last_task_used_altivec = NULL;
-#ifdef CONFIG_SPE
-       if (last_task_used_spe == current)
-               last_task_used_spe = NULL;
-#endif
-       preempt_enable();
-       memset(current->thread.fpr, 0, sizeof(current->thread.fpr));
-       current->thread.fpscr.val = 0;
-#ifdef CONFIG_ALTIVEC
-       memset(current->thread.vr, 0, sizeof(current->thread.vr));
-       memset(&current->thread.vscr, 0, sizeof(current->thread.vscr));
-       current->thread.vrsave = 0;
-       current->thread.used_vr = 0;
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-       memset(current->thread.evr, 0, sizeof(current->thread.evr));
-       current->thread.acc = 0;
-       current->thread.spefscr = 0;
-       current->thread.used_spe = 0;
-#endif /* CONFIG_SPE */
-}
-
-#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
-               | PR_FP_EXC_RES | PR_FP_EXC_INV)
-
-int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
-{
-       struct pt_regs *regs = tsk->thread.regs;
-
-       /* This is a bit hairy.  If we are an SPE enabled processor
-        * (have embedded fp) we store the IEEE exception enable flags in
-        * fpexc_mode.  fpexc_mode is also used for setting the FP exception
-        * mode (async, precise, disabled) for 'Classic' FP. */
-       if (val & PR_FP_EXC_SW_ENABLE) {
-#ifdef CONFIG_SPE
-               tsk->thread.fpexc_mode = val &
-                       (PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
-#else
-               return -EINVAL;
-#endif
-       } else {
-               /* On a CONFIG_SPE build this does not hurt us.  The bits that
-                * __pack_fe01 uses do not overlap with bits used for
-                * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
-                * on CONFIG_SPE implementations are reserved so writing to
-                * them does not change anything. */
-               if (val > PR_FP_EXC_PRECISE)
-                       return -EINVAL;
-               tsk->thread.fpexc_mode = __pack_fe01(val);
-               if (regs != NULL && (regs->msr & MSR_FP) != 0)
-                       regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
-                               | tsk->thread.fpexc_mode;
-       }
-       return 0;
-}
-
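For the 'Classic' FP path above, __pack_fe01() folds the prctl mode (0 = disabled, 1 = nonrecoverable, 2 = async/imprecise, 3 = precise) into the MSR[FE0,FE1] bit positions so it can be OR-ed straight into regs->msr. Below is a small stand-alone sketch of that packing; it assumes the usual PPC32 MSR layout (FE0 = 0x800, FE1 = 0x100), and the real macros live in the processor headers.

/* Sketch only: SKETCH_MSR_FE0/FE1 assume the standard PPC32 MSR layout. */
#define SKETCH_MSR_FE0  0x800   /* floating-point exception mode bit 0 */
#define SKETCH_MSR_FE1  0x100   /* floating-point exception mode bit 1 */

static unsigned long pack_fe01_sketch(unsigned int mode)
{
        /* mode bit 1 -> FE0, mode bit 0 -> FE1:
         * 00 disabled, 01 imprecise nonrecoverable, 10 imprecise recoverable,
         * 11 precise -- matching PR_FP_EXC_DISABLED..PR_FP_EXC_PRECISE. */
        return ((mode & 2) << 10) | ((mode & 1) << 8);
}

static unsigned int unpack_fe01_sketch(unsigned long msr_bits)
{
        return ((msr_bits & SKETCH_MSR_FE0) >> 10) |
               ((msr_bits & SKETCH_MSR_FE1) >> 8);
}
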
-int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
-{
-       unsigned int val;
-
-       if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
-#ifdef CONFIG_SPE
-               val = tsk->thread.fpexc_mode;
-#else
-               return -EINVAL;
-#endif
-       else
-               val = __unpack_fe01(tsk->thread.fpexc_mode);
-       return put_user(val, (unsigned int __user *) adr);
-}
-
-int sys_clone(unsigned long clone_flags, unsigned long usp,
-             int __user *parent_tidp, void __user *child_threadptr,
-             int __user *child_tidp, int p6,
-             struct pt_regs *regs)
-{
-       CHECK_FULL_REGS(regs);
-       if (usp == 0)
-               usp = regs->gpr[1];     /* stack pointer for child */
-       return do_fork(clone_flags, usp, regs, 0, parent_tidp, child_tidp);
-}
-
-int sys_fork(unsigned long p1, unsigned long p2, unsigned long p3,
-            unsigned long p4, unsigned long p5, unsigned long p6,
-            struct pt_regs *regs)
-{
-       CHECK_FULL_REGS(regs);
-       return do_fork(SIGCHLD, regs->gpr[1], regs, 0, NULL, NULL);
-}
-
-int sys_vfork(unsigned long p1, unsigned long p2, unsigned long p3,
-             unsigned long p4, unsigned long p5, unsigned long p6,
-             struct pt_regs *regs)
-{
-       CHECK_FULL_REGS(regs);
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->gpr[1],
-                       regs, 0, NULL, NULL);
-}
-
-int sys_execve(unsigned long a0, unsigned long a1, unsigned long a2,
-              unsigned long a3, unsigned long a4, unsigned long a5,
-              struct pt_regs *regs)
-{
-       int error;
-       char * filename;
-
-       filename = getname((char __user *) a0);
-       error = PTR_ERR(filename);
-       if (IS_ERR(filename))
-               goto out;
-       preempt_disable();
-       if (regs->msr & MSR_FP)
-               giveup_fpu(current);
-#ifdef CONFIG_ALTIVEC
-       if (regs->msr & MSR_VEC)
-               giveup_altivec(current);
-#endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_SPE
-       if (regs->msr & MSR_SPE)
-               giveup_spe(current);
-#endif /* CONFIG_SPE */
-       preempt_enable();
-       error = do_execve(filename, (char __user *__user *) a1,
-                         (char __user *__user *) a2, regs);
-       if (error == 0) {
-               task_lock(current);
-               current->ptrace &= ~PT_DTRACE;
-               task_unlock(current);
-       }
-       putname(filename);
-out:
-       return error;
-}
-
-void dump_stack(void)
-{
-       show_stack(current, NULL);
-}
-
-EXPORT_SYMBOL(dump_stack);
-
-void show_stack(struct task_struct *tsk, unsigned long *stack)
-{
-       unsigned long sp, stack_top, prev_sp, ret;
-       int count = 0;
-       unsigned long next_exc = 0;
-       struct pt_regs *regs;
-       extern char ret_from_except, ret_from_except_full, ret_from_syscall;
-
-       sp = (unsigned long) stack;
-       if (tsk == NULL)
-               tsk = current;
-       if (sp == 0) {
-               if (tsk == current)
-                       asm("mr %0,1" : "=r" (sp));
-               else
-                       sp = tsk->thread.ksp;
-       }
-
-       prev_sp = (unsigned long) (tsk->thread_info + 1);
-       stack_top = (unsigned long) tsk->thread_info + THREAD_SIZE;
-       while (count < 16 && sp > prev_sp && sp < stack_top && (sp & 3) == 0) {
-               if (count == 0) {
-                       printk("Call trace:");
-#ifdef CONFIG_KALLSYMS
-                       printk("\n");
-#endif
-               } else {
-                       if (next_exc) {
-                               ret = next_exc;
-                               next_exc = 0;
-                       } else
-                               ret = *(unsigned long *)(sp + 4);
-                       printk(" [%08lx] ", ret);
-#ifdef CONFIG_KALLSYMS
-                       print_symbol("%s", ret);
-                       printk("\n");
-#endif
-                       if (ret == (unsigned long) &ret_from_except
-                           || ret == (unsigned long) &ret_from_except_full
-                           || ret == (unsigned long) &ret_from_syscall) {
-                               /* sp + 16 points to an exception frame */
-                               regs = (struct pt_regs *) (sp + 16);
-                               if (sp + 16 + sizeof(*regs) <= stack_top)
-                                       next_exc = regs->nip;
-                       }
-               }
-               ++count;
-               sp = *(unsigned long *)sp;
-       }
-#ifndef CONFIG_KALLSYMS
-       if (count > 0)
-               printk("\n");
-#endif
-}
-
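show_stack() above leans on the PPC32 stack-frame convention: the word at the stack pointer is the back chain (the caller's SP) and the word 4 bytes above it is the saved LR; when that saved LR is one of the exception-return stubs, a struct pt_regs sits 16 bytes up the frame and its nip is used to continue the trace across the exception boundary. A minimal sketch of the basic walk, without the exception-frame handling; valid_frame() is an illustrative stand-in for the range checks above:

/* Sketch only: walks a PPC32-style back chain, emitting each saved LR. */
static int valid_frame(unsigned long sp, unsigned long low, unsigned long high)
{
        /* same sanity checks as show_stack(): inside the stack, word aligned */
        return sp > low && sp < high && (sp & 3) == 0;
}

static void walk_frames(unsigned long sp, unsigned long low, unsigned long high,
                        void (*emit)(unsigned long lr))
{
        int count = 0;

        while (count++ < 16 && valid_frame(sp, low, high)) {
                emit(*(unsigned long *)(sp + 4));   /* saved LR of this frame */
                sp = *(unsigned long *)sp;          /* follow the back chain */
        }
}
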
-#if 0
-/*
- * Low level print for debugging - Cort
- */
-int __init ll_printk(const char *fmt, ...)
-{
-        va_list args;
-       char buf[256];
-        int i;
-
-        va_start(args, fmt);
-        i=vsprintf(buf,fmt,args);
-       ll_puts(buf);
-        va_end(args);
-        return i;
-}
-
-int lines = 24, cols = 80;
-int orig_x = 0, orig_y = 0;
-
-void puthex(unsigned long val)
-{
-       unsigned char buf[10];
-       int i;
-       for (i = 7;  i >= 0;  i--)
-       {
-               buf[i] = "0123456789ABCDEF"[val & 0x0F];
-               val >>= 4;
-       }
-       buf[8] = '\0';
-       prom_print(buf);
-}
-
-void __init ll_puts(const char *s)
-{
-       int x,y;
-       char *vidmem = (char *)/*(_ISA_MEM_BASE + 0xB8000) */0xD00B8000;
-       char c;
-       extern int mem_init_done;
-
-       if ( mem_init_done ) /* assume this means we can printk */
-       {
-               printk(s);
-               return;
-       }
-
-#if 0
-       if ( have_of )
-       {
-               prom_print(s);
-               return;
-       }
-#endif
-
-       /*
-        * can't ll_puts() on CHRP without Open Firmware yet;
-        * vidmem just needs to be set up for it.
-        * -- Cort
-        */
-       if ( _machine != _MACH_prep )
-               return;
-       x = orig_x;
-       y = orig_y;
-
-       while ( ( c = *s++ ) != '\0' ) {
-               if ( c == '\n' ) {
-                       x = 0;
-                       if ( ++y >= lines ) {
-                               /*scroll();*/
-                               /*y--;*/
-                               y = 0;
-                       }
-               } else {
-                       vidmem [ ( x + cols * y ) * 2 ] = c;
-                       if ( ++x >= cols ) {
-                               x = 0;
-                               if ( ++y >= lines ) {
-                                       /*scroll();*/
-                                       /*y--;*/
-                                       y = 0;
-                               }
-                       }
-               }
-       }
-
-       orig_x = x;
-       orig_y = y;
-}
-#endif
-
-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long ip, sp;
-       unsigned long stack_page = (unsigned long) p->thread_info;
-       int count = 0;
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-       sp = p->thread.ksp;
-       do {
-               sp = *(unsigned long *)sp;
-               if (sp < stack_page || sp >= stack_page + 8188)
-                       return 0;
-               if (count > 0) {
-                       ip = *(unsigned long *)(sp + 4);
-                       if (!in_sched_functions(ip))
-                               return ip;
-               }
-       } while (count++ < 16);
-       return 0;
-}
index becbfa397556a378c87aea3c05e8418073072fb4..e55cdda6149a1f1e189012a1b79c542477a008a9 100644 (file)
@@ -318,7 +318,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                p = fork_idle(cpu);
                if (IS_ERR(p))
                        panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
-               p->thread_info->cpu = cpu;
+               task_thread_info(p)->cpu = cpu;
                idle_tasks[cpu] = p;
        }
 }
@@ -369,7 +369,7 @@ int __cpu_up(unsigned int cpu)
        char buf[32];
        int c;
 
-       secondary_ti = idle_tasks[cpu]->thread_info;
+       secondary_ti = task_thread_info(idle_tasks[cpu]);
        mb();
 
        /*
index 04bdc39bf47b473f762ffbfc8d4a3d58c70671d9..012e1e652c03ed6e039e0c56254fe913f8d1ffbb 100644 (file)
@@ -51,9 +51,6 @@
 
 #include <syslib/ppc83xx_setup.h>
 
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-
 #ifndef CONFIG_PCI
 unsigned long isa_io_base = 0;
 unsigned long isa_mem_base = 0;
@@ -129,20 +126,21 @@ mpc834x_sys_setup_arch(void)
        mdata->irq[1] = MPC83xx_IRQ_EXT2;
        mdata->irq[2] = -1;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 0;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC83xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
index c5cde97c6ef00b2832a9949c884b977d83578ba7..2eceb1e6f4eb956c5266be833f7b810ae42500cd 100644 (file)
 
 #include <syslib/ppc85xx_setup.h>
 
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-static const char *GFAR_PHY_3 = "phy0:3";
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -102,27 +98,29 @@ mpc8540ads_setup_arch(void)
        mdata->irq[2] = -1;
        mdata->irq[3] = MPC85xx_IRQ_EXT5;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 0;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
        if (pdata) {
                pdata->board_flags = 0;
-               pdata->bus_id = GFAR_PHY_3;
+               pdata->bus_id = 0;
+               pdata->phy_id = 3;
                memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
        }
 
index 8e39a551709292fd8935d88a9c8591110cb42068..442c7ff195d3c16e4bc241d48e0ebfc29d4fcd7c 100644 (file)
 #include <syslib/ppc85xx_setup.h>
 
 
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-static const char *GFAR_PHY_3 = "phy0:3";
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -99,20 +95,21 @@ mpc8560ads_setup_arch(void)
        mdata->irq[2] = -1;
        mdata->irq[3] = MPC85xx_IRQ_EXT5;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 0;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
index 2959e3c4083d0ff322c8c0f77462c7efedf6d7ea..b332ebae6bd3018296b047ebbdc0146db653f48f 100644 (file)
@@ -395,9 +395,6 @@ mpc85xx_cds_pcibios_fixup(void)
 
 TODC_ALLOC();
 
-static const char *GFAR_PHY_0 = "phy0:0";
-static const char *GFAR_PHY_1 = "phy0:1";
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -461,34 +458,37 @@ mpc85xx_cds_setup_arch(void)
        mdata->irq[2] = -1;
        mdata->irq[3] = -1;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 0;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 0;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_eTSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
index 45a5b81b4ed1ae4da5e81fa88fe280ea930b3401..e777ba824aa90c00d25453df347f72415aa0f2e1 100644 (file)
@@ -91,9 +91,6 @@ sbc8560_early_serial_map(void)
 }
 #endif
 
-static const char *GFAR_PHY_25 = "phy0:25";
-static const char *GFAR_PHY_26 = "phy0:26";
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -136,20 +133,21 @@ sbc8560_setup_arch(void)
        mdata->irq[25] = MPC85xx_IRQ_EXT6;
        mdata->irq[26] = MPC85xx_IRQ_EXT7;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_25;
+               pdata->bus_id = 0;
+               pdata->phy_id = 25;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_26;
+               pdata->bus_id = 0;
+               pdata->phy_id = 26;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
index 15ce9d070634eb26793e2800365674d5ddc84cb7..061bb7cf2d9a18ca12b9b7c6f4058b8de42c0f7d 100644 (file)
@@ -93,9 +93,6 @@ static u8 gp3_openpic_initsenses[] __initdata = {
        0x0,                            /* External 11: */
 };
 
-static const char *GFAR_PHY_2 = "phy0:2";
-static const char *GFAR_PHY_4 = "phy0:4";
-
 /*
  * Setup the architecture
  */
@@ -130,20 +127,21 @@ gp3_setup_arch(void)
        mdata->irq[2] = MPC85xx_IRQ_EXT5;
        mdata->irq[4] = MPC85xx_IRQ_EXT5;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
        /*      pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
-               pdata->bus_id = GFAR_PHY_2;
+               pdata->bus_id = 0;
+               pdata->phy_id = 2;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
        /*      pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR; */
-               pdata->bus_id = GFAR_PHY_4;
+               pdata->bus_id = 0;
+               pdata->phy_id = 4;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
index c6dfd8f0f9df963f75e6b859b11266c6d775e31b..b436f4d0a3fa753e2a1b54764dbcab0b0150a26f 100644 (file)
@@ -91,12 +91,6 @@ static u_char tqm85xx_openpic_initsenses[] __initdata = {
        0x0,                            /* External 11: */
 };
 
-static const char *GFAR_PHY_0 = "phy0:2";
-static const char *GFAR_PHY_1 = "phy0:1";
-#ifdef CONFIG_MPC8540
-static const char *GFAR_PHY_3 = "phy0:3";
-#endif
-
 /* ************************************************************************
  *
  * Setup the architecture
@@ -149,20 +143,21 @@ tqm85xx_setup_arch(void)
        mdata->irq[2] = -1;
        mdata->irq[3] = MPC85xx_IRQ_EXT8;
        mdata->irq[31] = -1;
-       mdata->paddr += binfo->bi_immr_base;
 
        /* setup the board related information for the enet controllers */
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC1);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_0;
+               pdata->bus_id = 0;
+               pdata->phy_id = 2;
                memcpy(pdata->mac_addr, binfo->bi_enetaddr, 6);
        }
 
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_TSEC2);
        if (pdata) {
                pdata->board_flags = FSL_GIANFAR_BRD_HAS_PHY_INTR;
-               pdata->bus_id = GFAR_PHY_1;
+               pdata->bus_id = 0;
+               pdata->phy_id = 1;
                memcpy(pdata->mac_addr, binfo->bi_enet1addr, 6);
        }
 
@@ -170,7 +165,8 @@ tqm85xx_setup_arch(void)
        pdata = (struct gianfar_platform_data *) ppc_sys_get_pdata(MPC85xx_FEC);
        if (pdata) {
                pdata->board_flags = 0;
-               pdata->bus_id = GFAR_PHY_3;
+               pdata->bus_id = 0;
+               pdata->phy_id = 3;
                memcpy(pdata->mac_addr, binfo->bi_enet2addr, 6);
        }
 #endif
index 2f74fde98ebc68276aacd92f507bbe9ca7362808..c42c50073da54a6e621b16da9a57353299d10d66 100644 (file)
@@ -55,9 +55,6 @@ int (*mach_hwclk) (int, struct hwclk_time*) = NULL;
 int (*mach_set_clock_mmss) (unsigned long) = NULL;
 void (*mach_reset)( void );
 long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
-#if defined(CONFIG_AMIGA_FLOPPY)
-void (*mach_floppy_setup) (char *, int *) __initdata = NULL;
-#endif
 #ifdef CONFIG_HEARTBEAT
 void (*mach_heartbeat) (int) = NULL;
 extern void apus_heartbeat (void);
@@ -76,7 +73,6 @@ struct mem_info m68k_memory[NUM_MEMINFO];/* memory description */
 
 struct mem_info ramdisk;
 
-extern void amiga_floppy_setup(char *, int *);
 extern void config_amiga(void);
 
 static int __60nsram = 0;
@@ -305,16 +301,6 @@ void kbd_reset_setup(char *str, int *ints)
 {
 }
 
-/*********************************************************** FLOPPY */
-#if defined(CONFIG_AMIGA_FLOPPY)
-__init
-void floppy_setup(char *str, int *ints)
-{
-       if (mach_floppy_setup)
-               mach_floppy_setup (str, ints);
-}
-#endif
-
 /*********************************************************** MEMORY */
 #define KMAP_MAX 32
 unsigned long kmap_chunks[KMAP_MAX*3];
@@ -574,9 +560,9 @@ static __inline__ void ser_RTSon(void)
 
 int __debug_ser_out( unsigned char c )
 {
-       custom.serdat = c | 0x100;
+       amiga_custom.serdat = c | 0x100;
        mb();
-       while (!(custom.serdatr & 0x2000))
+       while (!(amiga_custom.serdatr & 0x2000))
                barrier();
        return 1;
 }
@@ -586,11 +572,11 @@ unsigned char __debug_ser_in( void )
        unsigned char c;
 
        /* XXX: is that ok?? derived from amiga_ser.c... */
-       while( !(custom.intreqr & IF_RBF) )
+       while( !(amiga_custom.intreqr & IF_RBF) )
                barrier();
-       c = custom.serdatr;
+       c = amiga_custom.serdatr;
        /* clear the interrupt, so that another character can be read */
-       custom.intreq = IF_RBF;
+       amiga_custom.intreq = IF_RBF;
        return c;
 }
 
@@ -601,10 +587,10 @@ int __debug_serinit( void )
        local_irq_save(flags);
 
        /* turn off Rx and Tx interrupts */
-       custom.intena = IF_RBF | IF_TBE;
+       amiga_custom.intena = IF_RBF | IF_TBE;
 
        /* clear any pending interrupt */
-       custom.intreq = IF_RBF | IF_TBE;
+       amiga_custom.intreq = IF_RBF | IF_TBE;
 
        local_irq_restore(flags);
 
@@ -617,7 +603,7 @@ int __debug_serinit( void )
 
 #ifdef CONFIG_KGDB
        /* turn Rx interrupts on for GDB */
-       custom.intena = IF_SETCLR | IF_RBF;
+       amiga_custom.intena = IF_SETCLR | IF_RBF;
        ser_RTSon();
 #endif
 
index 847df4409982af4298af2852920127ab28c2bfee..f9b95de70e230260a6f4e3b7d9cf0655719b90a8 100644 (file)
@@ -28,7 +28,6 @@
  */
 
 struct gianfar_mdio_data mpc83xx_mdio_pdata = {
-       .paddr = 0x24520,
 };
 
 static struct gianfar_platform_data mpc83xx_tsec1_pdata = {
@@ -226,7 +225,14 @@ struct platform_device ppc_sys_platform_devices[] = {
                .name = "fsl-gianfar_mdio",
                .id = 0,
                .dev.platform_data = &mpc83xx_mdio_pdata,
-               .num_resources = 0,
+               .num_resources = 1,
+               .resource = (struct resource[]) {
+                       {
+                               .start  = 0x24520,
+                               .end    = 0x2453f,
+                               .flags  = IORESOURCE_MEM,
+                       },
+               },
        },
 };
 
index 69949d255658c93b76fb24d3d6e52df6f7479a34..00e9b6ff2f6e39f1931ab3cdfdb93e7bac4ebdd9 100644 (file)
@@ -26,7 +26,6 @@
  * what CCSRBAR is, will get fixed up by mach_mpc85xx_fixup
  */
 struct gianfar_mdio_data mpc85xx_mdio_pdata = {
-       .paddr = MPC85xx_MIIM_OFFSET,
 };
 
 static struct gianfar_platform_data mpc85xx_tsec1_pdata = {
@@ -720,7 +719,14 @@ struct platform_device ppc_sys_platform_devices[] = {
                .name = "fsl-gianfar_mdio",
                .id = 0,
                .dev.platform_data = &mpc85xx_mdio_pdata,
-               .num_resources = 0,
+               .num_resources = 1,
+               .resource = (struct resource[]) {
+                       {
+                               .start  = 0x24520,
+                               .end    = 0x2453f,
+                               .flags  = IORESOURCE_MEM,
+                       },
+               },
        },
 };
 
index 9ccce438bd7af7ffc1dce54b32377a5975140bcd..ab34b1d6072f6e350c0dfd930c848702ce710157 100644 (file)
@@ -189,6 +189,8 @@ ocp_device_resume(struct device *dev)
 struct bus_type ocp_bus_type = {
        .name = "ocp",
        .match = ocp_device_match,
+       .probe = ocp_driver_probe,
+       .remove = ocp_driver_remove,
        .suspend = ocp_device_suspend,
        .resume = ocp_device_resume,
 };
@@ -210,8 +212,6 @@ ocp_register_driver(struct ocp_driver *drv)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &ocp_bus_type;
-       drv->driver.probe = ocp_device_probe;
-       drv->driver.remove = ocp_device_remove;
 
        /* register with core */
        return driver_register(&drv->driver);
index 2b483b4f1602b12a73d2b60ba55fea20faa53e31..9075a7538e26410d83bb33a50f822816b26e76bb 100644 (file)
@@ -99,7 +99,7 @@ static void remove_bpts(void);
 static void insert_bpts(void);
 static struct bpt *at_breakpoint(unsigned pc);
 static void bpt_cmds(void);
-static void cacheflush(void);
+void cacheflush(void);
 #ifdef CONFIG_SMP
 static void cpu_cmd(void);
 #endif /* CONFIG_SMP */
index 03ba5893f17b7fcef06b9112f524da8bc8382552..1f451c2cb071896f7032e990ac78256769a74c58 100644 (file)
@@ -112,7 +112,7 @@ static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
 
 static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
 {
-       struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+       struct pt_regs *ptregs = task_pt_regs(tsk);
        int i;
 
        memcpy(&regs->psw.mask, &ptregs->psw.mask, 4);
index 7dd58f8ac6b59848cec44f18fb5931b6d6ee2a47..2ff90a1a105657afc6586fa86c5689843db66fdc 100644 (file)
@@ -153,7 +153,7 @@ void show_regs(struct pt_regs *regs)
 {
        struct task_struct *tsk = current;
 
-        printk("CPU:    %d    %s\n", tsk->thread_info->cpu, print_tainted());
+        printk("CPU:    %d    %s\n", task_thread_info(tsk)->cpu, print_tainted());
         printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
               current->comm, current->pid, (void *) tsk,
               (void *) tsk->thread.ksp);
@@ -217,8 +217,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
             struct pt_regs childregs;
           } *frame;
 
-        frame = ((struct fake_frame *)
-                (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+        frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
         p->thread.ksp = (unsigned long) frame;
        /* Store access registers to kernel stack of new process. */
         frame->childregs = *regs;
@@ -358,11 +357,10 @@ unsigned long get_wchan(struct task_struct *p)
        unsigned long return_address;
        int count;
 
-       if (!p || p == current || p->state == TASK_RUNNING || !p->thread_info)
+       if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
                return 0;
-       low = (struct stack_frame *) p->thread_info;
-       high = (struct stack_frame *)
-               ((unsigned long) p->thread_info + THREAD_SIZE) - 1;
+       low = task_stack_page(p);
+       high = (struct stack_frame *) task_pt_regs(p);
        sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
        if (sf <= low || sf > high)
                return 0;
index cc02232aa96e93be846ceac801908fc08e070432..37dfe33dab735986e3eb781028884ac1e5c65816 100644 (file)
@@ -52,7 +52,7 @@ FixPerRegisters(struct task_struct *task)
        struct pt_regs *regs;
        per_struct *per_info;
 
-       regs = __KSTK_PTREGS(task);
+       regs = task_pt_regs(task);
        per_info = (per_struct *) &task->thread.per_info;
        per_info->control_regs.bits.em_instruction_fetch =
                per_info->single_step | per_info->instruction_fetch;
@@ -150,7 +150,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * psw and gprs are stored on the stack
                 */
-               tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
+               tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
                if (addr == (addr_t) &dummy->regs.psw.mask)
                        /* Remove per bit from user psw. */
                        tmp &= ~PSW_MASK_PER;
@@ -176,7 +176,7 @@ peek_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
-               tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
+               tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
 
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /* 
@@ -243,7 +243,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
                           high order bit but older gdb's rely on it */
                        data |= PSW_ADDR_AMODE;
 #endif
-               *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
+               *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
 
        } else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
                /*
@@ -267,7 +267,7 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
-               __KSTK_PTREGS(child)->orig_gpr2 = data;
+               task_pt_regs(child)->orig_gpr2 = data;
 
        } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
                /*
@@ -393,15 +393,15 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
                 */
                if (addr == (addr_t) &dummy32->regs.psw.mask) {
                        /* Fake a 31 bit psw mask. */
-                       tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
+                       tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
                        tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Fake a 31 bit psw address. */
-                       tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
+                       tmp = (__u32) task_pt_regs(child)->psw.addr |
                                PSW32_ADDR_AMODE31;
                } else {
                        /* gpr 0-15 */
-                       tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
+                       tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
                                         addr*2 + 4);
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -415,7 +415,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
-               tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
+               tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
 
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
@@ -472,15 +472,15 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
                        if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
                                /* Invalid psw mask. */
                                return -EINVAL;
-                       __KSTK_PTREGS(child)->psw.mask =
+                       task_pt_regs(child)->psw.mask =
                                PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
                } else if (addr == (addr_t) &dummy32->regs.psw.addr) {
                        /* Build a 64 bit psw address from 31 bit address. */
-                       __KSTK_PTREGS(child)->psw.addr = 
+                       task_pt_regs(child)->psw.addr =
                                (__u64) tmp & PSW32_ADDR_INSN;
                } else {
                        /* gpr 0-15 */
-                       *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+                       *(__u32*)((addr_t) &task_pt_regs(child)->psw
                                  + addr*2 + 4) = tmp;
                }
        } else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
@@ -494,7 +494,7 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
                /*
                 * orig_gpr2 is stored on the kernel stack
                 */
-               *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
+               *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
 
        } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
                /*
index e10f4ca00499ede1690e0a650ee935a5b7f27c12..cbfcfd02a43a503447380c55f5ab852d248e545b 100644 (file)
@@ -657,7 +657,7 @@ __cpu_up(unsigned int cpu)
        idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
        cpu_lowcore->kernel_stack = (unsigned long)
-               idle->thread_info + (THREAD_SIZE);
+               task_stack_page(idle) + (THREAD_SIZE);
        sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
                                     - sizeof(struct pt_regs)
                                     - sizeof(struct stack_frame));
index c36353e8c1404b22ae5dfbd8f0b0bd8f1a8214b8..b0d8ca8e5eebd7917a87b88ebfae98a258fa0cc4 100644 (file)
@@ -282,7 +282,7 @@ static inline void start_hz_timer(void)
 {
        if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
                return;
-       account_ticks(__KSTK_PTREGS(current));
+       account_ticks(task_pt_regs(current));
        cpu_clear(smp_processor_id(), nohz_cpu_mask);
 }
 
index 95d1099686198e8deb030b027722e832a990c1e3..5d21e9e6e7b4505fc6647aaa039222a44197de85 100644 (file)
@@ -136,8 +136,8 @@ void show_trace(struct task_struct *task, unsigned long * stack)
        sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
                          S390_lowcore.async_stack);
        if (task)
-               __show_trace(sp, (unsigned long) task->thread_info,
-                            (unsigned long) task->thread_info + THREAD_SIZE);
+               __show_trace(sp, (unsigned long) task_stack_page(task),
+                            (unsigned long) task_stack_page(task) + THREAD_SIZE);
        else
                __show_trace(sp, S390_lowcore.thread_info,
                             S390_lowcore.thread_info + THREAD_SIZE);
@@ -240,7 +240,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
 {
        struct pt_regs *regs;
 
-       regs = __KSTK_PTREGS(task);
+       regs = task_pt_regs(task);
        buffer += sprintf(buffer, "task: %p, ksp: %p\n",
                       task, (void *)task->thread.ksp);
        buffer += sprintf(buffer, "User PSW : %p %p\n",
index d4fee2a7937306db8010a07e48d05c63ef697fad..3278d234bb1b26ba9d2eeb1c3927cbfb17dae900 100644 (file)
@@ -53,21 +53,6 @@ static int sh_bus_resume(struct device *dev)
        return 0;
 }
 
-static struct device sh_bus_devices[SH_NR_BUSES] = {
-       {
-               .bus_id         = SH_BUS_NAME_VIRT,
-       },
-};
-
-struct bus_type sh_bus_types[SH_NR_BUSES] = {
-       {
-               .name           = SH_BUS_NAME_VIRT,
-               .match          = sh_bus_match,
-               .suspend        = sh_bus_suspend,
-               .resume         = sh_bus_resume,
-       },
-};
-
 static int sh_device_probe(struct device *dev)
 {
        struct sh_dev *shdev = to_sh_dev(dev);
@@ -90,6 +75,23 @@ static int sh_device_remove(struct device *dev)
        return 0;
 }
 
+static struct device sh_bus_devices[SH_NR_BUSES] = {
+       {
+               .bus_id         = SH_BUS_NAME_VIRT,
+       },
+};
+
+struct bus_type sh_bus_types[SH_NR_BUSES] = {
+       {
+               .name           = SH_BUS_NAME_VIRT,
+               .match          = sh_bus_match,
+               .probe          = sh_bus_probe,
+               .remove         = sh_bus_remove,
+               .suspend        = sh_bus_suspend,
+               .resume         = sh_bus_resume,
+       },
+};
+
 int sh_device_register(struct sh_dev *dev)
 {
        if (!dev)
@@ -133,8 +135,6 @@ int sh_driver_register(struct sh_driver *drv)
                return -EINVAL;
        }
 
-       drv->drv.probe  = sh_device_probe;
-       drv->drv.remove = sh_device_remove;
        drv->drv.bus    = &sh_bus_types[drv->bus_id];
 
        return driver_register(&drv->drv);
index 8a2bea34ddd2635ca344ea564385483c9919c59d..aac15e42d03bbe7300276c774cf9af20c8fb37b1 100644 (file)
@@ -191,13 +191,8 @@ void flush_thread(void)
 {
 #if defined(CONFIG_SH_FPU)
        struct task_struct *tsk = current;
-       struct pt_regs *regs = (struct pt_regs *)
-                               ((unsigned long)tsk->thread_info
-                                + THREAD_SIZE - sizeof(struct pt_regs)
-                                - sizeof(unsigned long));
-
        /* Forget lazy FPU state */
-       clear_fpu(tsk, regs);
+       clear_fpu(tsk, task_pt_regs(tsk));
        clear_used_math();
 #endif
 }
@@ -232,13 +227,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
        struct pt_regs ptregs;
        
-       ptregs = *(struct pt_regs *)
-               ((unsigned long)tsk->thread_info + THREAD_SIZE
-                - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
-                - sizeof(struct pt_dspregs)
-#endif
-                - sizeof(unsigned long));
+       ptregs = *task_pt_regs(tsk);
        elf_core_copy_regs(regs, &ptregs);
 
        return 1;
@@ -252,11 +241,7 @@ dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
 #if defined(CONFIG_SH_FPU)
        fpvalid = !!tsk_used_math(tsk);
        if (fpvalid) {
-               struct pt_regs *regs = (struct pt_regs *)
-                                       ((unsigned long)tsk->thread_info
-                                        + THREAD_SIZE - sizeof(struct pt_regs)
-                                        - sizeof(unsigned long));
-               unlazy_fpu(tsk, regs);
+               unlazy_fpu(tsk, task_pt_regs(tsk));
                memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
        }
 #endif
@@ -279,18 +264,13 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        copy_to_stopped_child_used_math(p);
 #endif
 
-       childregs = ((struct pt_regs *)
-               (THREAD_SIZE + (unsigned long) p->thread_info)
-#ifdef CONFIG_SH_DSP
-               - sizeof(struct pt_dspregs)
-#endif
-               - sizeof(unsigned long)) - 1;
+       childregs = task_pt_regs(p);
        *childregs = *regs;
 
        if (user_mode(regs)) {
                childregs->regs[15] = usp;
        } else {
-               childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
+               childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        }
         if (clone_flags & CLONE_SETTLS) {
                childregs->gbr = childregs->regs[0];
@@ -333,11 +313,7 @@ ubc_set_tracing(int asid, unsigned long pc)
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
 {
 #if defined(CONFIG_SH_FPU)
-       struct pt_regs *regs = (struct pt_regs *)
-                               ((unsigned long)prev->thread_info
-                                + THREAD_SIZE - sizeof(struct pt_regs)
-                                - sizeof(unsigned long));
-       unlazy_fpu(prev, regs);
+       unlazy_fpu(prev, task_pt_regs(prev));
 #endif
 
 #ifdef CONFIG_PREEMPT
@@ -346,13 +322,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
                struct pt_regs *regs;
 
                local_irq_save(flags);
-               regs = (struct pt_regs *)
-                       ((unsigned long)prev->thread_info
-                        + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
-                        - sizeof(struct pt_dspregs)
-#endif
-                        - sizeof(unsigned long));
+               regs = task_pt_regs(prev);
                if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
                        int offset = (int)regs->regs[15];
 
@@ -372,7 +342,7 @@ struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *ne
         */
        asm volatile("ldc       %0, r7_bank"
                     : /* no output */
-                    : "r" (next->thread_info));
+                    : "r" (task_thread_info(next)));
 
 #ifdef CONFIG_MMU
        /* If no tasks are using the UBC, we're done */
index 1a8be06519ecf4c5de4fc7bcd5ed8dc56bad069b..3887b4f6feb26778cb7bc9fc1962993835356ea5 100644 (file)
@@ -41,12 +41,7 @@ static inline int get_stack_long(struct task_struct *task, int offset)
 {
        unsigned char *stack;
 
-       stack = (unsigned char *)
-               task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
-               - sizeof(struct pt_dspregs)
-#endif
-               - sizeof(unsigned long);
+       stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        return (*((int *)stack));
 }
@@ -59,12 +54,7 @@ static inline int put_stack_long(struct task_struct *task, int offset,
 {
        unsigned char *stack;
 
-       stack = (unsigned char *)
-               task->thread_info + THREAD_SIZE - sizeof(struct pt_regs)
-#ifdef CONFIG_SH_DSP
-               - sizeof(struct pt_dspregs)
-#endif
-               - sizeof(unsigned long);
+       stack = (unsigned char *)task_pt_regs(task);
        stack += offset;
        *(unsigned long *) stack = data;
        return 0;
index 59e49b18252c47ff49e3b4790c0c0a7ab8a9a883..62c7d1c0ad7bd2a0f1070c731e5f63a4156e8384 100644 (file)
@@ -103,7 +103,7 @@ int __cpu_up(unsigned int cpu)
        if (IS_ERR(tsk))
                panic("Failed forking idle task for cpu %d\n", cpu);
        
-       tsk->thread_info->cpu = cpu;
+       task_thread_info(tsk)->cpu = cpu;
 
        cpu_set(cpu, cpu_online_map);
 
index 419b5a71044116c9f6d4d5a396be7e42263286c5..1da9c61d6823558b6d63c0cf82340102dc540a23 100644 (file)
@@ -744,7 +744,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        }
 #endif
        /* Copy from sh version */
-       childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long) p->thread_info )) - 1;
+       childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
 
        *childregs = *regs;
 
@@ -752,7 +752,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                childregs->regs[15] = usp;
                p->thread.uregs = childregs;
        } else {
-               childregs->regs[15] = (unsigned long)p->thread_info + THREAD_SIZE;
+               childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        }
 
        childregs->regs[9] = 0; /* Set return value for child */
index 526fedae6db878e7d1cf19861a157cc62b388952..58087331b8a6acd3da88e41f8cb7912635258d64 100644 (file)
@@ -174,7 +174,7 @@ void evt_debug(int evt, int ret_addr, int event, int tra, struct pt_regs *regs)
        struct ring_node *rr;
 
        pid = current->pid;
-       stack_bottom = (unsigned long) current->thread_info;
+       stack_bottom = (unsigned long) task_stack_page(current);
        asm volatile("ori r15, 0, %0" : "=r" (sp));
        rr = event_ring + event_ptr;
        rr->evt = evt;
index ea86474114627f267d739def93a6c33d2b4c9c4d..fbb05a452e51552590e01d7da2e1af7c617d8a47 100644 (file)
@@ -302,7 +302,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
        int count = 0;
 
        if (tsk != NULL)
-               task_base = (unsigned long) tsk->thread_info;
+               task_base = (unsigned long) task_stack_page(tsk);
        else
                task_base = (unsigned long) current_thread_info();
 
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(dump_stack);
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-       return tsk->thread_info->kpc;
+       return task_thread_info(tsk)->kpc;
 }
 
 /*
@@ -392,7 +392,7 @@ void flush_thread(void)
                /* We must fixup kregs as well. */
                /* XXX This was not fixed for ti for a while, worked. Unused? */
                current->thread.kregs = (struct pt_regs *)
-                   ((char *)current->thread_info + (THREAD_SIZE - TRACEREG_SZ));
+                   (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
        }
 }
 
@@ -459,7 +459,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
 {
-       struct thread_info *ti = p->thread_info;
+       struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs;
        char *new_stack;
 
@@ -482,7 +482,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
         *  V                      V (stk.fr.) V  (pt_regs)  { (stk.fr.) }
         *  +----- - - - - - ------+===========+============={+==========}+
         */
-       new_stack = (char*)ti + THREAD_SIZE;
+       new_stack = task_stack_page(p) + THREAD_SIZE;
        if (regs->psr & PSR_PS)
                new_stack -= STACKFRAME_SZ;
        new_stack -= STACKFRAME_SZ + TRACEREG_SZ;
@@ -724,7 +724,7 @@ unsigned long get_wchan(struct task_struct *task)
             task->state == TASK_RUNNING)
                goto out;
 
-       fp = task->thread_info->ksp + bias;
+       fp = task_thread_info(task)->ksp + bias;
        do {
                /* Bogus frame pointer? */
                if (fp < (task_base + sizeof(struct thread_info)) ||
index fc470c0e9dc6bd453ce6b14143610d70225622d5..1baf13ed5c3a6b6e7186c022e69108ec6d490c46 100644 (file)
@@ -75,7 +75,7 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
                                   struct task_struct *tsk, long __user *addr)
 {
        struct pt_regs *cregs = tsk->thread.kregs;
-       struct thread_info *t = tsk->thread_info;
+       struct thread_info *t = task_thread_info(tsk);
        int v;
        
        if(offset >= 1024)
@@ -170,7 +170,7 @@ static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
                                    struct task_struct *tsk)
 {
        struct pt_regs *cregs = tsk->thread.kregs;
-       struct thread_info *t = tsk->thread_info;
+       struct thread_info *t = task_thread_info(tsk);
        unsigned long value = regs->u_regs[UREG_I3];
 
        if(offset >= 1024)
index cc1fc898495c899b4d39a66be014db5dce428191..40d426cce8244f16969e3a12f876855f0893bd0f 100644 (file)
@@ -200,7 +200,7 @@ void __init smp4d_boot_cpus(void)
                        /* Cook up an idler for this guy. */
                        p = fork_idle(i);
                        cpucount++;
-                       current_set[i] = p->thread_info;
+                       current_set[i] = task_thread_info(p);
                        for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
                                     && mid != i; no++) ;
 
index f113422a372761dabce56f5624f0b2835c9e4a08..a21f27d10e55737881f81522262b4e07751476e9 100644 (file)
@@ -173,7 +173,7 @@ void __init smp4m_boot_cpus(void)
                        /* Cook up an idler for this guy. */
                        p = fork_idle(i);
                        cpucount++;
-                       current_set[i] = p->thread_info;
+                       current_set[i] = task_thread_info(p);
                        /* See trampoline.S for details... */
                        entry += ((i-1) * 3);
 
index 3f451ae664826b827ecd36027d543df74399fa1f..41d45c298fb2426d64a5b293857a4706d880a461 100644 (file)
@@ -291,7 +291,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
 #ifndef CONFIG_SMP
        if(!fpt) {
 #else
-        if(!(fpt->thread_info->flags & _TIF_USEDFPU)) {
+        if(!(task_thread_info(fpt)->flags & _TIF_USEDFPU)) {
 #endif
                fpsave(&fake_regs[0], &fake_fsr, &fake_queue[0], &fake_depth);
                regs->psr &= ~PSR_EF;
@@ -334,7 +334,7 @@ void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
        /* nope, better SIGFPE the offending process... */
               
 #ifdef CONFIG_SMP
-       fpt->thread_info->flags &= ~_TIF_USEDFPU;
+       task_thread_info(fpt)->flags &= ~_TIF_USEDFPU;
 #endif
        if(psr & PSR_PS) {
                /* The first fsr store/load we tried trapped,
index 02f9dec1d459d79731a8d32658db102940cce2cc..1dc3650c5caefaee73a69e9b68b1cf9705f6af03 100644 (file)
@@ -390,7 +390,7 @@ void show_regs32(struct pt_regs32 *regs)
 
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-       struct thread_info *ti = tsk->thread_info;
+       struct thread_info *ti = task_thread_info(tsk);
        unsigned long ret = 0xdeadbeefUL;
        
        if (ti && ti->ksp) {
@@ -616,11 +616,11 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
                unsigned long unused,
                struct task_struct *p, struct pt_regs *regs)
 {
-       struct thread_info *t = p->thread_info;
+       struct thread_info *t = task_thread_info(p);
        char *child_trap_frame;
 
        /* Calculate offset to stack_frame & pt_regs */
-       child_trap_frame = ((char *)t) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
+       child_trap_frame = task_stack_page(p) + (THREAD_SIZE - (TRACEREG_SZ+STACKFRAME_SZ));
        memcpy(child_trap_frame, (((struct sparc_stackf *)regs)-1), (TRACEREG_SZ+STACKFRAME_SZ));
 
        t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
@@ -845,9 +845,9 @@ unsigned long get_wchan(struct task_struct *task)
             task->state == TASK_RUNNING)
                goto out;
 
-       thread_info_base = (unsigned long) task->thread_info;
+       thread_info_base = (unsigned long) task_stack_page(task);
        bias = STACK_BIAS;
-       fp = task->thread_info->ksp + bias;
+       fp = task_thread_info(task)->ksp + bias;
 
        do {
                /* Bogus frame pointer? */
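The hunks above replace open-coded uses of tsk->thread_info with the task_thread_info() and task_stack_page() accessors. On architectures that keep struct thread_info at the base of the task's kernel stack, the accessors presumably reduce to something like the sketch below (illustrative only; the real definitions live in <linux/sched.h> and per-architecture headers):

        /* Hedged sketch, not the in-tree definitions: assumes struct
         * thread_info sits at the bottom of the task's kernel stack
         * allocation, so both accessors start from the same pointer. */
        #define task_thread_info(task)  ((task)->thread_info)
        #define task_stack_page(task)   ((void *)(task)->thread_info)

        /* Callers then write task_thread_info(p)->flags instead of
         * p->thread_info->flags, and task_stack_page(p) wherever they
         * really want the raw stack base, as in the hunks above. */

The point of going through accessors is that only these definitions need to change if the relationship between task_struct, thread_info and the kernel stack is later rearranged.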
index 84d3df2264cb7148e4a5390f05a0ce0baf618844..3f9746f856d28caede6522bbc8a161be74a684a2 100644 (file)
@@ -296,7 +296,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
        case PTRACE_GETREGS: {
                struct pt_regs32 __user *pregs =
                        (struct pt_regs32 __user *) addr;
-               struct pt_regs *cregs = child->thread_info->kregs;
+               struct pt_regs *cregs = task_pt_regs(child);
                int rval;
 
                if (__put_user(tstate_to_psr(cregs->tstate), (&pregs->psr)) ||
@@ -320,11 +320,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 
        case PTRACE_GETREGS64: {
                struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-               struct pt_regs *cregs = child->thread_info->kregs;
+               struct pt_regs *cregs = task_pt_regs(child);
                unsigned long tpc = cregs->tpc;
                int rval;
 
-               if ((child->thread_info->flags & _TIF_32BIT) != 0)
+               if ((task_thread_info(child)->flags & _TIF_32BIT) != 0)
                        tpc &= 0xffffffff;
                if (__put_user(cregs->tstate, (&pregs->tstate)) ||
                    __put_user(tpc, (&pregs->tpc)) ||
@@ -348,7 +348,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
        case PTRACE_SETREGS: {
                struct pt_regs32 __user *pregs =
                        (struct pt_regs32 __user *) addr;
-               struct pt_regs *cregs = child->thread_info->kregs;
+               struct pt_regs *cregs = task_pt_regs(child);
                unsigned int psr, pc, npc, y;
                int i;
 
@@ -381,7 +381,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 
        case PTRACE_SETREGS64: {
                struct pt_regs __user *pregs = (struct pt_regs __user *) addr;
-               struct pt_regs *cregs = child->thread_info->kregs;
+               struct pt_regs *cregs = task_pt_regs(child);
                unsigned long tstate, tpc, tnpc, y;
                int i;
 
@@ -395,7 +395,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        pt_error_return(regs, EFAULT);
                        goto out_tsk;
                }
-               if ((child->thread_info->flags & _TIF_32BIT) != 0) {
+               if ((task_thread_info(child)->flags & _TIF_32BIT) != 0) {
                        tpc &= 0xffffffff;
                        tnpc &= 0xffffffff;
                }
@@ -430,11 +430,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        } fpq[16];
                };
                struct fps __user *fps = (struct fps __user *) addr;
-               unsigned long *fpregs = child->thread_info->fpregs;
+               unsigned long *fpregs = task_thread_info(child)->fpregs;
 
                if (copy_to_user(&fps->regs[0], fpregs,
                                 (32 * sizeof(unsigned int))) ||
-                   __put_user(child->thread_info->xfsr[0], (&fps->fsr)) ||
+                   __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr)) ||
                    __put_user(0, (&fps->fpqd)) ||
                    __put_user(0, (&fps->flags)) ||
                    __put_user(0, (&fps->extra)) ||
@@ -452,11 +452,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        unsigned long fsr;
                };
                struct fps __user *fps = (struct fps __user *) addr;
-               unsigned long *fpregs = child->thread_info->fpregs;
+               unsigned long *fpregs = task_thread_info(child)->fpregs;
 
                if (copy_to_user(&fps->regs[0], fpregs,
                                 (64 * sizeof(unsigned int))) ||
-                   __put_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+                   __put_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
                        pt_error_return(regs, EFAULT);
                        goto out_tsk;
                }
@@ -477,7 +477,7 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        } fpq[16];
                };
                struct fps __user *fps = (struct fps __user *) addr;
-               unsigned long *fpregs = child->thread_info->fpregs;
+               unsigned long *fpregs = task_thread_info(child)->fpregs;
                unsigned fsr;
 
                if (copy_from_user(fpregs, &fps->regs[0],
@@ -486,11 +486,11 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        pt_error_return(regs, EFAULT);
                        goto out_tsk;
                }
-               child->thread_info->xfsr[0] &= 0xffffffff00000000UL;
-               child->thread_info->xfsr[0] |= fsr;
-               if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
-                       child->thread_info->gsr[0] = 0;
-               child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
+               task_thread_info(child)->xfsr[0] &= 0xffffffff00000000UL;
+               task_thread_info(child)->xfsr[0] |= fsr;
+               if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+                       task_thread_info(child)->gsr[0] = 0;
+               task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL);
                pt_succ_return(regs, 0);
                goto out_tsk;
        }
@@ -501,17 +501,17 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
                        unsigned long fsr;
                };
                struct fps __user *fps = (struct fps __user *) addr;
-               unsigned long *fpregs = child->thread_info->fpregs;
+               unsigned long *fpregs = task_thread_info(child)->fpregs;
 
                if (copy_from_user(fpregs, &fps->regs[0],
                                   (64 * sizeof(unsigned int))) ||
-                   __get_user(child->thread_info->xfsr[0], (&fps->fsr))) {
+                   __get_user(task_thread_info(child)->xfsr[0], (&fps->fsr))) {
                        pt_error_return(regs, EFAULT);
                        goto out_tsk;
                }
-               if (!(child->thread_info->fpsaved[0] & FPRS_FEF))
-                       child->thread_info->gsr[0] = 0;
-               child->thread_info->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
+               if (!(task_thread_info(child)->fpsaved[0] & FPRS_FEF))
+                       task_thread_info(child)->gsr[0] = 0;
+               task_thread_info(child)->fpsaved[0] |= (FPRS_FEF | FPRS_DL | FPRS_DU);
                pt_succ_return(regs, 0);
                goto out_tsk;
        }
@@ -562,8 +562,8 @@ asmlinkage void do_ptrace(struct pt_regs *regs)
 #ifdef DEBUG_PTRACE
                printk("CONT: %s [%d]: set exit_code = %x %lx %lx\n", child->comm,
                        child->pid, child->exit_code,
-                       child->thread_info->kregs->tpc,
-                       child->thread_info->kregs->tnpc);
+                       task_pt_regs(child)->tpc,
+                       task_pt_regs(child)->tnpc);
                       
 #endif
                wake_up_process(child);
index 48180531562fbdd4d21559986fb4b81ef4862dc7..250745896aeec4959087f544e8e46914bd6d2fdd 100644 (file)
@@ -520,7 +520,7 @@ void __init setup_arch(char **cmdline_p)
        rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);     
 #endif
 
-       init_task.thread_info->kregs = &fake_swapper_regs;
+       task_thread_info(&init_task)->kregs = &fake_swapper_regs;
 
 #ifdef CONFIG_IP_PNP
        if (!ic_set_manually) {
index 6efc03df51c3b1b72d13b850a512178ddf9dd067..1fb6323e65a4ec88ce0df377d7b1b770d1e856ed 100644 (file)
@@ -335,7 +335,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 
        p = fork_idle(cpu);
        callin_flag = 0;
-       cpu_new_thread = p->thread_info;
+       cpu_new_thread = task_thread_info(p);
        cpu_set(cpu, cpu_callout_map);
 
        cpu_find_by_mid(cpu, &cpu_node);
index 5570e7bb22bb5a34db2bc7c1fd06feeda2ffdb9f..8d44ae5a15e32f11e8ebee392ecb67a350799fa3 100644 (file)
@@ -1808,7 +1808,7 @@ static void user_instruction_dump (unsigned int __user *pc)
 void show_stack(struct task_struct *tsk, unsigned long *_ksp)
 {
        unsigned long pc, fp, thread_base, ksp;
-       struct thread_info *tp = tsk->thread_info;
+       void *tp = task_stack_page(tsk);
        struct reg_window *rw;
        int count = 0;
 
@@ -1862,7 +1862,7 @@ static inline int is_kernel_stack(struct task_struct *task,
                        return 0;
        }
 
-       thread_base = (unsigned long) task->thread_info;
+       thread_base = (unsigned long) task_stack_page(task);
        thread_end = thread_base + sizeof(union thread_union);
        if (rw_addr >= thread_base &&
            rw_addr < thread_end &&
index d2d3f256778cb4371ab8f5a3b060a6885f757271..7f13b85d26564609d2f1c2fca954024a3bf28a4f 100644 (file)
@@ -107,7 +107,7 @@ void set_current(void *t)
 {
        struct task_struct *task = t;
 
-       cpu_tasks[task->thread_info->cpu] = ((struct cpu_task) 
+       cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(task), task });
 }
 
index 09790ccb161ca7ddae9c91136ed805a2dc62a8dc..dc41c6dc2f343cce1fb28723a76534221be30e3b 100644 (file)
@@ -118,7 +118,7 @@ int copy_thread_skas(int nr, unsigned long clone_flags, unsigned long sp,
                handler = new_thread_handler;
        }
 
-       new_thread(p->thread_info, &p->thread.mode.skas.switch_buf,
+       new_thread(task_stack_page(p), &p->thread.mode.skas.switch_buf,
                   &p->thread.mode.skas.fork_buf, handler);
        return(0);
 }
@@ -185,7 +185,7 @@ int start_uml_skas(void)
 
        init_task.thread.request.u.thread.proc = start_kernel_proc;
        init_task.thread.request.u.thread.arg = NULL;
-       return(start_idle_thread(init_task.thread_info,
+       return(start_idle_thread(task_stack_page(&init_task),
                                 &init_task.thread.mode.skas.switch_buf,
                                 &init_task.thread.mode.skas.fork_buf));
 }
index 136e54c47d37fd1052304d5f11164fe53b2c15a8..8f40e4838736988645c1960b8e302d7ca52f48db 100644 (file)
@@ -39,7 +39,7 @@ void flush_thread_tt(void)
                do_exit(SIGKILL);
        }
                
-       new_pid = start_fork_tramp(current->thread_info, stack, 0, exec_tramp);
+       new_pid = start_fork_tramp(task_stack_page(current), stack, 0, exec_tramp);
        if(new_pid < 0){
                printk(KERN_ERR 
                       "flush_thread : new thread failed, errno = %d\n",
index 14d4622a5fb86ffbd9750f9d8ce6adf34dbf03b6..62535303aa277233241650a8c0a8686d1a6fb9c8 100644 (file)
@@ -36,7 +36,7 @@ void switch_to_tt(void *prev, void *next)
        from = prev;
        to = next;
 
-       cpu = from->thread_info->cpu;
+       cpu = task_thread_info(from)->cpu;
        if(cpu == 0)
                forward_interrupts(to->thread.mode.tt.extern_pid);
 #ifdef CONFIG_SMP
@@ -253,7 +253,7 @@ int copy_thread_tt(int nr, unsigned long clone_flags, unsigned long sp,
 
        clone_flags &= CLONE_VM;
        p->thread.temp_stack = stack;
-       new_pid = start_fork_tramp(p->thread_info, stack, clone_flags, tramp);
+       new_pid = start_fork_tramp(task_stack_page(p), stack, clone_flags, tramp);
        if(new_pid < 0){
                printk(KERN_ERR "copy_thread : clone failed - errno = %d\n", 
                       -new_pid);
@@ -343,7 +343,7 @@ int do_proc_op(void *t, int proc_id)
                pid = thread->request.u.exec.pid;
                do_exec(thread->mode.tt.extern_pid, pid);
                thread->mode.tt.extern_pid = pid;
-               cpu_tasks[task->thread_info->cpu].pid = pid;
+               cpu_tasks[task_thread_info(task)->cpu].pid = pid;
                break;
        case OP_FORK:
                attach_process(thread->request.u.fork.pid);
@@ -425,7 +425,7 @@ int start_uml_tt(void)
        int pages;
 
        pages = (1 << CONFIG_KERNEL_STACK_ORDER);
-       sp = (void *) ((unsigned long) init_task.thread_info) +
+       sp = task_stack_page(&init_task) +
                pages * PAGE_SIZE - sizeof(unsigned long);
        return(tracer(start_kernel_proc, sp));
 }
index 062ffa0a9998100205dcf30f0f503c448e604e89..eb909937958bcd5ba44c1a02121910ba63a7cd82 100644 (file)
@@ -114,7 +114,7 @@ int copy_thread (int nr, unsigned long clone_flags,
                 struct task_struct *p, struct pt_regs *regs)
 {
        /* Start pushing stuff from the top of the child's kernel stack.  */
-       unsigned long orig_ksp = (unsigned long)p->thread_info + THREAD_SIZE;
+       unsigned long orig_ksp = task_tos(p);
        unsigned long ksp = orig_ksp;
        /* We push two `state save' stack fames (see entry.S) on the new
           kernel stack:
index 18492d02aaf6dcffb146233fe14450493c9fcbce..67e057509664a89daa48936f1741ad711464f89c 100644 (file)
@@ -58,7 +58,7 @@ static v850_reg_t *reg_save_addr (unsigned reg_offs, struct task_struct *t)
                regs = thread_saved_regs (t);
        else
                /* Register saved during kernel entry (or not available).  */
-               regs = task_regs (t);
+               regs = task_pt_regs (t);
 
        return (v850_reg_t *)((char *)regs + reg_offs);
 }
index 2b760d0d9ce292a515af2977f5423d8f26b75069..029bddab045980ccb089242353ff33b08aaa7166 100644 (file)
@@ -197,8 +197,7 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {      
-       struct pt_regs *pp = (struct pt_regs *)(t->thread.rsp0);
-       --pp;
+       struct pt_regs *pp = task_pt_regs(t);
        ELF_CORE_COPY_REGS((*elfregs), pp);
        /* fix wrong segments */ 
        (*elfregs)[7] = t->thread.ds; 
@@ -217,7 +216,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
        if (!tsk_used_math(tsk))
                return 0;
        if (!regs)
-               regs = ((struct pt_regs *)tsk->thread.rsp0) - 1;
+               regs = task_pt_regs(tsk);
        if (tsk == current)
                unlazy_fpu(tsk);
        set_fs(KERNEL_DS); 
@@ -233,7 +232,7 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
 static inline int 
 elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
 {
-       struct pt_regs *regs = ((struct pt_regs *)(t->thread.rsp0))-1; 
+       struct pt_regs *regs = task_pt_regs(t);
        if (!tsk_used_math(t))
                return 0;
        if (t == current)
index ea4394e021d65b6ed12982eb43d5657375b86a9e..23a4515a73b4024c9fb4254ec769e465e0c6fec8 100644 (file)
@@ -41,7 +41,7 @@
 static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 {
        int i;
-       __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs)); 
+       __u64 *stack = (__u64 *)task_pt_regs(child);
 
        switch (regno) {
        case offsetof(struct user32, regs.fs):
@@ -137,7 +137,7 @@ static int putreg32(struct task_struct *child, unsigned regno, u32 val)
 
 static int getreg32(struct task_struct *child, unsigned regno, u32 *val)
 {
-       __u64 *stack = (__u64 *)(child->thread.rsp0 - sizeof(struct pt_regs)); 
+       __u64 *stack = (__u64 *)task_pt_regs(child);
 
        switch (regno) {
        case offsetof(struct user32, regs.fs):
@@ -238,7 +238,7 @@ asmlinkage long sys32_ptrace(long request, u32 pid, u32 addr, u32 data)
        if (ret < 0)
                goto out;
 
-       childregs = (struct pt_regs *)(child->thread.rsp0 - sizeof(struct pt_regs)); 
+       childregs = task_pt_regs(child);
 
        switch (request) {
        case PTRACE_PEEKDATA:
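Several of the x86-64 hunks here replace the open-coded expression ((struct pt_regs *)tsk->thread.rsp0) - 1 with task_pt_regs(). Judging from the code being removed, the helper amounts to the following on this architecture (a sketch inferred from the diff, not the arch header itself):

        /* Sketch: the user-mode pt_regs frame sits just below the top of
         * the kernel stack that thread.rsp0 points at, so the accessor is
         * simply the old open-coded form behind a name. */
        #define task_pt_regs(tsk) \
                (((struct pt_regs *)(tsk)->thread.rsp0) - 1)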
index d9b22b633e390c0739fdd33ba64b121982b8925f..a5d7e16b928ebc62dc9ba26a9215e30da63b9ad4 100644 (file)
@@ -95,7 +95,7 @@ int save_i387(struct _fpstate __user *buf)
        if (!used_math())
                return 0;
        clear_used_math(); /* trigger finit */
-       if (tsk->thread_info->status & TS_USEDFPU) {
+       if (task_thread_info(tsk)->status & TS_USEDFPU) {
                err = save_i387_checking((struct i387_fxsave_struct __user *)buf);
                if (err) return err;
                stts();
index b61965f0fb34e8d3e2796d260bc3d39876d7cb84..5ecd34ab8c2bba9ae70d15ff9803ead2957c72d9 100644 (file)
@@ -133,7 +133,7 @@ static void end_8259A_irq (unsigned int irq)
 {
        if (irq > 256) { 
                char var;
-               printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, current->thread_info); 
+               printk("return %p stack %p ti %p\n", __builtin_return_address(0), &var, task_thread_info(current));
 
                BUG(); 
        }
index 669cf0ed32661ced790f71fd15bcb4de49795c42..8ded407e4a9419fd5bcd57dea91e5ef42ad0d41a 100644 (file)
@@ -451,7 +451,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
        struct task_struct *me = current;
 
        childregs = ((struct pt_regs *)
-                       (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
+                       (THREAD_SIZE + task_stack_page(p))) - 1;
        *childregs = *regs;
 
        childregs->rax = 0;
@@ -463,7 +463,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp; 
 
-       set_ti_thread_flag(p->thread_info, TIF_FORK);
+       set_tsk_thread_flag(p, TIF_FORK);
 
        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;
@@ -590,7 +590,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        write_pda(oldrsp, next->userrsp); 
        write_pda(pcurrent, next_p); 
        write_pda(kernelstack,
-           (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
+                 task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
 
        /*
         * Now maybe reload the debug registers
@@ -704,7 +704,7 @@ unsigned long get_wchan(struct task_struct *p)
 
        if (!p || p == current || p->state==TASK_RUNNING)
                return 0; 
-       stack = (unsigned long)p->thread_info; 
+       stack = (unsigned long)task_stack_page(p);
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
@@ -822,8 +822,7 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
        struct pt_regs *pp, ptregs;
 
-       pp = (struct pt_regs *)(tsk->thread.rsp0);
-       --pp; 
+       pp = task_pt_regs(tsk);
 
        ptregs = *pp; 
        ptregs.cs &= 0xffff;
index 86248bc9303ea24e81ac19598c86e939e5730921..53205622351c3cc8b1c7110b93239fa8dc84fce4 100644 (file)
@@ -67,12 +67,6 @@ static inline unsigned long get_stack_long(struct task_struct *task, int offset)
        return (*((unsigned long *)stack));
 }
 
-static inline struct pt_regs *get_child_regs(struct task_struct *task)
-{
-       struct pt_regs *regs = (void *)task->thread.rsp0;
-       return regs - 1;
-}
-
 /*
  * this routine will put a word on the processes privileged stack. 
  * the offset is how far from the base addr as stored in the TSS.  
@@ -170,7 +164,7 @@ static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
 
 static void set_singlestep(struct task_struct *child)
 {
-       struct pt_regs *regs = get_child_regs(child);
+       struct pt_regs *regs = task_pt_regs(child);
 
        /*
         * Always set TIF_SINGLESTEP - this guarantees that
@@ -208,7 +202,7 @@ static void clear_singlestep(struct task_struct *child)
 
        /* But touch TF only if it was set by us.. */
        if (child->ptrace & PT_DTRACE) {
-               struct pt_regs *regs = get_child_regs(child);
+               struct pt_regs *regs = task_pt_regs(child);
                regs->eflags &= ~TRAP_FLAG;
                child->ptrace &= ~PT_DTRACE;
        }
index c9e941ae50192cae78e18f8a912ce93d5abcb0f3..a28756ef7cef972dcfd8fff90b1f8a7a19ff5e4a 100644 (file)
@@ -776,7 +776,7 @@ static int __cpuinit do_boot_cpu(int cpu, int apicid)
 
        if (c_idle.idle) {
                c_idle.idle->thread.rsp = (unsigned long) (((struct pt_regs *)
-                       (THREAD_SIZE + (unsigned long) c_idle.idle->thread_info)) - 1);
+                       (THREAD_SIZE +  task_stack_page(c_idle.idle))) - 1);
                init_idle(c_idle.idle, cpu);
                goto do_rest;
        }
@@ -814,7 +814,7 @@ do_rest:
        init_rsp = c_idle.idle->thread.rsp;
        per_cpu(init_tss,cpu).rsp0 = init_rsp;
        initial_code = start_secondary;
-       clear_ti_thread_flag(c_idle.idle->thread_info, TIF_FORK);
+       clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
 
        printk(KERN_INFO "Booting processor %d/%d APIC 0x%x\n", cpu,
                cpus_weight(cpu_present_map),
index 2671fd46ea8568f3db7944e5f89a91f2e1eff0a5..8bb0aeda78b91fcb70c7aa9544b3fc2e03543860 100644 (file)
@@ -308,7 +308,7 @@ void show_registers(struct pt_regs *regs)
        printk("CPU %d ", cpu);
        __show_regs(regs);
        printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
-               cur->comm, cur->pid, cur->thread_info, cur);
+               cur->comm, cur->pid, task_thread_info(cur), cur);
 
        /*
         * When in-kernel, we also print out the stack and code at the
@@ -666,7 +666,7 @@ asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs)
                ;
        /* Exception from user space */
        else if (user_mode(eregs))
-               regs = ((struct pt_regs *)current->thread.rsp0) - 1;
+               regs = task_pt_regs(current);
        /* Exception from kernel and interrupts are enabled. Move to
           kernel process stack. */
        else if (eregs->eflags & X86_EFLAGS_IF)
@@ -912,7 +912,7 @@ asmlinkage void math_state_restore(void)
        if (!used_math())
                init_fpu(me);
        restore_fpu_checking(&me->thread.i387.fxsave);
-       me->thread_info->status |= TS_USEDFPU;
+       task_thread_info(me)->status |= TS_USEDFPU;
 }
 
 void __init trap_init(void)
index 6a44b54ae8173718b8e4a50518511e4777a43392..f1f596644bfcd255c88a4aa9e4dac4dc06ef886c 100644 (file)
@@ -145,7 +145,7 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
        int user_mode = user_mode(regs);
 
        /* Set up new TSS. */
-       tos = (unsigned long)p->thread_info + THREAD_SIZE;
+       tos = (unsigned long)task_stack_page(p) + THREAD_SIZE;
        if (user_mode)
                childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
        else
@@ -217,7 +217,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
 unsigned long get_wchan(struct task_struct *p)
 {
        unsigned long sp, pc;
-       unsigned long stack_page = (unsigned long) p->thread_info;
+       unsigned long stack_page = (unsigned long) task_stack_page(p);
        int count = 0;
 
        if (!p || p == current || p->state == TASK_RUNNING)
index ab5c4c65b5c416848b553efda177b10038243858..4cc85285a70ac7324bb232f4d42274a944ebcaa7 100644 (file)
@@ -72,7 +72,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                struct pt_regs *regs;
                unsigned long tmp;
 
-               regs = xtensa_pt_regs(child);
+               regs = task_pt_regs(child);
                tmp = 0;  /* Default return value. */
 
                switch(addr) {
@@ -149,7 +149,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
        case PTRACE_POKEUSR:
                {
                struct pt_regs *regs;
-               regs = xtensa_pt_regs(child);
+               regs = task_pt_regs(child);
 
                switch (addr) {
                case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
@@ -240,7 +240,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * elf_gregset_t format. */
 
                xtensa_gregset_t format;
-               struct pt_regs *regs = xtensa_pt_regs(child);
+               struct pt_regs *regs = task_pt_regs(child);
 
                do_copy_regs (&format, regs, child);
 
@@ -257,7 +257,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * values in the elf_gregset_t format. */
 
                xtensa_gregset_t format;
-               struct pt_regs *regs = xtensa_pt_regs(child);
+               struct pt_regs *regs = task_pt_regs(child);
 
                if (copy_from_user(&format,(void *)data,sizeof(elf_gregset_t))){
                        ret = -EFAULT;
@@ -281,7 +281,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * elf_fpregset_t format. */
 
                elf_fpregset_t fpregs;
-               struct pt_regs *regs = xtensa_pt_regs(child);
+               struct pt_regs *regs = task_pt_regs(child);
 
                do_save_fpregs (&fpregs, regs, child);
 
@@ -299,7 +299,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                 * values in the elf_fpregset_t format.
                 */
                elf_fpregset_t fpregs;
-               struct pt_regs *regs = xtensa_pt_regs(child);
+               struct pt_regs *regs = task_pt_regs(child);
 
                ret = 0;
                if (copy_from_user(&fpregs, (void *)data, sizeof(elf_fpregset_t))) {
index 99a4d7b2f8ad335079ae1eebeab71136f20acc35..1d0759178e4b49710c6cb90c538fc6d0c897e126 100644 (file)
@@ -610,23 +610,23 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
-               struct request *first_rq = list_entry_rq(q->queue_head.next);
-
                q->in_flight--;
+               if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+                       e->ops->elevator_completed_req_fn(q, rq);
+       }
 
-               /*
-                * Check if the queue is waiting for fs requests to be
-                * drained for flush sequence.
-                */
-               if (q->ordseq && q->in_flight == 0 &&
+       /*
+        * Check if the queue is waiting for fs requests to be
+        * drained for flush sequence.
+        */
+       if (unlikely(q->ordseq)) {
+               struct request *first_rq = list_entry_rq(q->queue_head.next);
+               if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
-
-               if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
-                       e->ops->elevator_completed_req_fn(q, rq);
        }
 }
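Reconstructed from the hunk above, the tail of elv_completed_request() after this change decouples the ordered-flush drain check from per-request accounting:

        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /* The flush-sequence drain check now runs on every completion,
         * not only for accounted requests, and is guarded by the rare
         * q->ordseq case. */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);

                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }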
 
index 48f446d3c671ade5a5198384c729f83118a35033..283c089537bcd3799226a2e87bfb43a5b62fdc31 100644 (file)
@@ -44,6 +44,8 @@ source "drivers/char/Kconfig"
 
 source "drivers/i2c/Kconfig"
 
+source "drivers/spi/Kconfig"
+
 source "drivers/w1/Kconfig"
 
 source "drivers/hwmon/Kconfig"
index 7fc3f0f08b29e76aa8db89f82e6e62378391d61d..7c45050ecd03c1b25d883429820875823a797bf4 100644 (file)
@@ -41,6 +41,7 @@ obj-$(CONFIG_FUSION)          += message/
 obj-$(CONFIG_IEEE1394)         += ieee1394/
 obj-y                          += cdrom/
 obj-$(CONFIG_MTD)              += mtd/
+obj-$(CONFIG_SPI)              += spi/
 obj-$(CONFIG_PCCARD)           += pcmcia/
 obj-$(CONFIG_DIO)              += dio/
 obj-$(CONFIG_SBUS)             += sbus/
index 2b905016664d9779439c4fa047507256b0cb2faf..730a9ce0a14a44d45c45fe34b15f1277157fe1d2 100644 (file)
@@ -78,7 +78,13 @@ int driver_probe_device(struct device_driver * drv, struct device * dev)
        pr_debug("%s: Matched Device %s with Driver %s\n",
                 drv->bus->name, dev->bus_id, drv->name);
        dev->driver = drv;
-       if (drv->probe) {
+       if (dev->bus->probe) {
+               ret = dev->bus->probe(dev);
+               if (ret) {
+                       dev->driver = NULL;
+                       goto ProbeFailed;
+               }
+       } else if (drv->probe) {
                ret = drv->probe(dev);
                if (ret) {
                        dev->driver = NULL;
@@ -203,7 +209,9 @@ static void __device_release_driver(struct device * dev)
                sysfs_remove_link(&dev->kobj, "driver");
                klist_remove(&dev->knode_driver);
 
-               if (drv->remove)
+               if (dev->bus->remove)
+                       dev->bus->remove(dev);
+               else if (drv->remove)
                        drv->remove(dev);
                dev->driver = NULL;
                put_driver(drv);
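These driver-core hunks let a bus supply probe and remove (and, below, shutdown) methods on struct bus_type that take precedence over the per-driver callbacks. A minimal sketch of how a bus might use the new hook; the example bus and its container_of-style helpers are hypothetical:

        /* Hypothetical bus adopting the new bus_type probe method: the bus
         * does any bus-specific setup, then dispatches to its driver's own
         * typed probe routine. */
        static int example_bus_probe(struct device *dev)
        {
                struct example_driver *edrv = to_example_driver(dev->driver);
                struct example_device *edev = to_example_device(dev);

                return edrv->probe(edev);       /* nonzero error fails the bind */
        }

        static struct bus_type example_bus_type = {
                .name  = "example",
                .probe = example_bus_probe,
        };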
index 161f3a390d90651973a8a738446b3b7330950a09..b400314e1c62d46d9d2dc047cb76a08292b7f9ff 100644 (file)
@@ -171,6 +171,11 @@ static void klist_devices_put(struct klist_node *n)
  */
 int driver_register(struct device_driver * drv)
 {
+       if ((drv->bus->probe && drv->probe) ||
+           (drv->bus->remove && drv->remove) ||
+           (drv->bus->shutdown && drv->shutdown)) {
+               printk(KERN_WARNING "Driver '%s' needs updating - please use bus_type methods\n", drv->name);
+       }
        klist_init(&drv->klist_devices, klist_devices_get, klist_devices_put);
        init_completion(&drv->unloaded);
        return bus_add_driver(drv);
index 0f81731bdfa8122aeccfc439907533cfd3fc98ba..461554a025172d329b714e9bc46a389991eabe28 100644 (file)
@@ -327,7 +327,7 @@ EXPORT_SYMBOL_GPL(platform_device_register);
  *     @pdev:  platform device we're unregistering
  *
  *     Unregistration is done in 2 steps. Fisrt we release all resources
- *     and remove it from the sybsystem, then we drop reference count by
+ *     and remove it from the subsystem, then we drop reference count by
  *     calling platform_device_put().
  */
 void platform_device_unregister(struct platform_device * pdev)
index f50a08be424b1c524122b19ba769342f38caf02b..c2475f3134eae05e8d625af83a79afb210d07712 100644 (file)
@@ -35,12 +35,15 @@ extern int sysdev_shutdown(void);
  */
 void device_shutdown(void)
 {
-       struct device * dev;
+       struct device * dev, *devn;
 
        down_write(&devices_subsys.rwsem);
-       list_for_each_entry_reverse(dev, &devices_subsys.kset.list,
+       list_for_each_entry_safe_reverse(dev, devn, &devices_subsys.kset.list,
                                kobj.entry) {
-               if (dev->driver && dev->driver->shutdown) {
+               if (dev->bus && dev->bus->shutdown) {
+                       dev_dbg(dev, "shutdown\n");
+                       dev->bus->shutdown(dev);
+               } else if (dev->driver && dev->driver->shutdown) {
                        dev_dbg(dev, "shutdown\n");
                        dev->driver->shutdown(dev);
                }
index 3c679d30b69849f46e1cc0489de8a0b91891225a..b6e29095621401c6a1305037f681a2801c4547ab 100644 (file)
@@ -194,6 +194,8 @@ static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
  */
 #define MAX_ERRORS 12
 
+#define custom amiga_custom
+
 /* Prevent "aliased" accesses. */
 static int fd_ref[4] = { 0,0,0,0 };
 static int fd_device[4] = { 0, 0, 0, 0 };
@@ -1439,6 +1441,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
 {
        int drive = iminor(inode) & 3;
        static struct floppy_struct getprm;
+       void __user *argp = (void __user *)param;
 
        switch(cmd){
        case FDFMTBEG:
@@ -1484,9 +1487,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
                getprm.head=unit[drive].type->heads;
                getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
                getprm.size=unit[drive].blocks;
-               if (copy_to_user((void *)param,
-                                (void *)&getprm,
-                                sizeof(struct floppy_struct)))
+               if (copy_to_user(argp, &getprm, sizeof(struct floppy_struct)))
                        return -EFAULT;
                break;
        case FDSETPRM:
@@ -1498,8 +1499,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
                break;
 #ifdef RAW_IOCTL
        case IOCTL_RAW_TRACK:
-               if (copy_to_user((void *)param, raw_buf,
-                                unit[drive].type->read_size))
+               if (copy_to_user(argp, raw_buf, unit[drive].type->read_size))
                        return -EFAULT;
                else
                        return unit[drive].type->read_size;
@@ -1654,12 +1654,6 @@ static struct block_device_operations floppy_fops = {
        .media_changed  = amiga_floppy_change,
 };
 
-void __init amiga_floppy_setup (char *str, int *ints)
-{
-       printk (KERN_INFO "amiflop: Setting default df0 to %x\n", ints[1]);
-       fd_def_df0 = ints[1];
-}
-
 static int __init fd_probe_drives(void)
 {
        int drive,drives,nomem;
@@ -1845,4 +1839,18 @@ void cleanup_module(void)
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 }
 #endif
+
+#else
+static int __init amiga_floppy_setup (char *str)
+{
+       int n;
+       if (!MACH_IS_AMIGA)
+               return 0;
+       if (!get_option(&str, &n))
+               return 0;
+       printk (KERN_INFO "amiflop: Setting default df0 to %x\n", n);
+       fd_def_df0 = n;
+}
+
+__setup("floppy=", amiga_floppy_setup);
 #endif
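This converts the Amiga floppy option from an arch-called amiga_floppy_setup(str, ints) hook into a standard __setup() handler that parses its own argument string. A minimal sketch of the pattern (the example_* names are hypothetical; get_option() advances the string and returns nonzero when it parsed an integer):

        static int example_default;

        static int __init example_floppy_setup(char *str)
        {
                int n;

                if (!get_option(&str, &n))
                        return 0;       /* no integer parsed: not handled */
                printk(KERN_INFO "example: default set to %d\n", n);
                example_default = n;
                return 1;               /* option consumed */
        }
        __setup("floppy=", example_floppy_setup);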
index 3aa68a5447d69a65b405b7796706c89db506a648..f8ce235ccfc3b4e4ba65169e6ec3046c5fd0b67e 100644 (file)
@@ -1361,7 +1361,7 @@ static int floppy_revalidate(struct gendisk *disk)
                   formats, for 'permanent user-defined' parameter:
                   restore default_params[] here if flagged valid! */
                if (default_params[drive].blocks == 0)
-                       UDT = 0;
+                       UDT = NULL;
                else
                        UDT = &default_params[drive];
        }
@@ -1495,6 +1495,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
        struct floppy_struct getprm;
        int settype;
        struct floppy_struct setprm;
+       void __user *argp = (void __user *)param;
 
        switch (cmd) {
        case FDGETPRM:
@@ -1521,7 +1522,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
                getprm.head = 2;
                getprm.track = dtp->blocks/dtp->spt/2;
                getprm.stretch = dtp->stretch;
-               if (copy_to_user((void *)param, &getprm, sizeof(getprm)))
+               if (copy_to_user(argp, &getprm, sizeof(getprm)))
                        return -EFAULT;
                return 0;
        }
@@ -1540,7 +1541,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
                /* get the parameters from user space */
                if (floppy->ref != 1 && floppy->ref != -1)
                        return -EBUSY;
-               if (copy_from_user(&setprm, (void *) param, sizeof(setprm)))
+               if (copy_from_user(&setprm, argp, sizeof(setprm)))
                        return -EFAULT;
                /* 
                 * first of all: check for floppy change and revalidate, 
@@ -1647,7 +1648,7 @@ static int fd_ioctl(struct inode *inode, struct file *filp,
        case FDFMTTRK:
                if (floppy->ref != 1 && floppy->ref != -1)
                        return -EBUSY;
-               if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc)))
+               if (copy_from_user(&fmt_desc, argp, sizeof(fmt_desc)))
                        return -EFAULT;
                return do_format(drive, type, &fmt_desc);
        case FDCLRPRM:
@@ -1950,14 +1951,20 @@ Enomem:
        return -ENOMEM;
 }
 
-
-void __init atari_floppy_setup( char *str, int *ints )
+#ifndef MODULE
+static int __init atari_floppy_setup(char *str)
 {
+       int ints[3 + FD_MAX_UNITS];
        int i;
+
+       if (!MACH_IS_ATARI)
+               return 0;
+
+       str = get_options(str, 3 + FD_MAX_UNITS, ints);
        
        if (ints[0] < 1) {
                printk(KERN_ERR "ataflop_setup: no arguments!\n" );
-               return;
+               return 0;
        }
        else if (ints[0] > 2+FD_MAX_UNITS) {
                printk(KERN_ERR "ataflop_setup: too many arguments\n" );
@@ -1977,9 +1984,13 @@ void __init atari_floppy_setup( char *str, int *ints )
                else
                        UserSteprate[i-3] = ints[i];
        }
+       return 1;
 }
 
-static void atari_floppy_exit(void)
+__setup("floppy=", atari_floppy_setup);
+#endif
+
+static void __exit atari_floppy_exit(void)
 {
        int i;
        blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
index d1aaf31bd97e85e762c33e2aa50b65748c45f0af..f63e07bd9f9c4e3b23f20c626c95e260451f1639 100644 (file)
@@ -293,6 +293,7 @@ static int send_request(struct request *req)
        u16 viocmd;
        HvLpEvent_Rc hvrc;
        struct vioblocklpevent *bevent;
+       struct HvLpEvent *hev;
        struct scatterlist sg[VIOMAXBLOCKDMA];
        int sgindex;
        int statindex;
@@ -347,22 +348,19 @@ static int send_request(struct request *req)
                 * token so we can match the response up later
                 */
                memset(bevent, 0, sizeof(struct vioblocklpevent));
-               bevent->event.xFlags.xValid = 1;
-               bevent->event.xFlags.xFunction = HvLpEvent_Function_Int;
-               bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
-               bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
-               bevent->event.xType = HvLpEvent_Type_VirtualIo;
-               bevent->event.xSubtype = viocmd;
-               bevent->event.xSourceLp = HvLpConfig_getLpIndex();
-               bevent->event.xTargetLp = viopath_hostLp;
-               bevent->event.xSizeMinus1 =
+               hev = &bevent->event;
+               hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK |
+                       HV_LP_EVENT_INT;
+               hev->xType = HvLpEvent_Type_VirtualIo;
+               hev->xSubtype = viocmd;
+               hev->xSourceLp = HvLpConfig_getLpIndex();
+               hev->xTargetLp = viopath_hostLp;
+               hev->xSizeMinus1 =
                        offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
                        (sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
-               bevent->event.xSourceInstanceId =
-                       viopath_sourceinst(viopath_hostLp);
-               bevent->event.xTargetInstanceId =
-                       viopath_targetinst(viopath_hostLp);
-               bevent->event.xCorrelationToken = (u64)req;
+               hev->xSourceInstanceId = viopath_sourceinst(viopath_hostLp);
+               hev->xTargetInstanceId = viopath_targetinst(viopath_hostLp);
+               hev->xCorrelationToken = (u64)req;
                bevent->version = VIOVERSION;
                bevent->disk = DEVICE_NO(d);
                bevent->u.rw_data.offset = start;
@@ -649,10 +647,10 @@ static void handle_block_event(struct HvLpEvent *event)
                /* Notification that a partition went away! */
                return;
        /* First, we should NEVER get an int here...only acks */
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                printk(VIOD_KERN_WARNING
                       "Yikes! got an int in viodasd event handler!\n");
-               if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+               if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
@@ -695,7 +693,7 @@ static void handle_block_event(struct HvLpEvent *event)
 
        default:
                printk(VIOD_KERN_WARNING "invalid subtype!");
-               if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+               if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
index b5191780ecca44ae9abf782a6a7c04f57916d3f2..193446e6a08a80f4232cd13fcd50526c37de1914 100644 (file)
@@ -542,10 +542,10 @@ static void vio_handle_cd_event(struct HvLpEvent *event)
                /* Notification that a partition went away! */
                return;
        /* First, we should NEVER get an int here...only acks */
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                printk(VIOCD_KERN_WARNING
                                "Yikes! got an int in viocd event handler!\n");
-               if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+               if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
@@ -616,7 +616,7 @@ return_complete:
                printk(VIOCD_KERN_WARNING
                                "message with invalid subtype %0x04X!\n",
                                event->xSubtype & VIOMINOR_SUBTYPE_MASK);
-               if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+               if (hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
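Both virtual I/O drivers stop poking at the event->xFlags bit-fields and use the hvlpevent_is_int()/hvlpevent_need_ack() helpers instead. Judging from the HV_LP_EVENT_* flag bits used when events are built earlier in this diff, the helpers presumably test the packed flags byte along these lines (a hedged sketch, not the iSeries header definitions):

        /* Sketch only: assumes the flags byte carries the HV_LP_EVENT_INT
         * and HV_LP_EVENT_DO_ACK bits shown above; the authoritative
         * definitions live in the iSeries HvLpEvent headers. */
        static inline int hvlpevent_is_int(struct HvLpEvent *event)
        {
                return (event->flags & HV_LP_EVENT_INT) != 0;
        }

        static inline int hvlpevent_need_ack(struct HvLpEvent *event)
        {
                return (event->flags & HV_LP_EVENT_DO_ACK) != 0;
        }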
index 869518e4035fab624464b646807980415afa5dc5..7ac365b5d9ece1af03bbcaae6438282e0a7c7706 100644 (file)
@@ -99,6 +99,7 @@ static char *serial_version = "4.30";
 #define _INLINE_ inline
 #endif
 
+#define custom amiga_custom
 static char *serial_name = "Amiga-builtin serial driver";
 
 static struct tty_driver *serial_driver;
@@ -128,7 +129,6 @@ static struct serial_state rs_table[1];
  * memory if large numbers of serial ports are open.
  */
 static unsigned char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 #include <asm/uaccess.h>
 
@@ -1088,7 +1088,7 @@ static void rs_unthrottle(struct tty_struct * tty)
  */
 
 static int get_serial_info(struct async_struct * info,
-                          struct serial_struct * retinfo)
+                          struct serial_struct __user * retinfo)
 {
        struct serial_struct tmp;
        struct serial_state *state = info->state;
@@ -1112,7 +1112,7 @@ static int get_serial_info(struct async_struct * info,
 }
 
 static int set_serial_info(struct async_struct * info,
-                          struct serial_struct * new_info)
+                          struct serial_struct __user * new_info)
 {
        struct serial_struct new_serial;
        struct serial_state old_state, *state;
@@ -1193,7 +1193,7 @@ check_and_exit:
  *         transmit holding register is empty.  This functionality
  *         allows an RS485 driver to be written in user space. 
  */
-static int get_lsr_info(struct async_struct * info, unsigned int *value)
+static int get_lsr_info(struct async_struct * info, unsigned int __user *value)
 {
        unsigned char status;
        unsigned int result;
@@ -1284,6 +1284,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
        struct async_struct * info = (struct async_struct *)tty->driver_data;
        struct async_icount cprev, cnow;        /* kernel counter temps */
        struct serial_icounter_struct icount;
+       void __user *argp = (void __user *)arg;
        unsigned long flags;
 
        if (serial_paranoia_check(info, tty->name, "rs_ioctl"))
@@ -1298,19 +1299,17 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
 
        switch (cmd) {
                case TIOCGSERIAL:
-                       return get_serial_info(info,
-                                              (struct serial_struct *) arg);
+                       return get_serial_info(info, argp);
                case TIOCSSERIAL:
-                       return set_serial_info(info,
-                                              (struct serial_struct *) arg);
+                       return set_serial_info(info, argp);
                case TIOCSERCONFIG:
                        return 0;
 
                case TIOCSERGETLSR: /* Get line status register */
-                       return get_lsr_info(info, (unsigned int *) arg);
+                       return get_lsr_info(info, argp);
 
                case TIOCSERGSTRUCT:
-                       if (copy_to_user((struct async_struct *) arg,
+                       if (copy_to_user(argp,
                                         info, sizeof(struct async_struct)))
                                return -EFAULT;
                        return 0;
@@ -1369,7 +1368,7 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file,
                        icount.brk = cnow.brk;
                        icount.buf_overrun = cnow.buf_overrun;
 
-                       if (copy_to_user((void *)arg, &icount, sizeof(icount)))
+                       if (copy_to_user(argp, &icount, sizeof(icount)))
                                return -EFAULT;
                        return 0;
                case TIOCSERGWILD:
index e41060c76226cd66038d9abfe31b025245b25c72..9d180c42816cb592c4c83b7331d2977116cf263d 100644 (file)
@@ -3,7 +3,7 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-               drm_drv.o drm_fops.o drm_init.o drm_ioctl.o drm_irq.o \
+               drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
                drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
                drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
                drm_sysfs.o
@@ -18,7 +18,7 @@ radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
 ffb-objs    := ffb_drv.o ffb_context.o
 sis-objs    := sis_drv.o sis_ds.o sis_mm.o
 savage-objs := savage_drv.o savage_bci.o savage_state.o
-via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
+via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o via_dmablit.o
 
 ifeq ($(CONFIG_COMPAT),y)
 drm-objs    += drm_ioc32.o
index efff0eec618c23c28fc78c1eb55dfe409cb7730c..5485382cadec8cc361293569bba79a985687f97e 100644 (file)
@@ -52,7 +52,7 @@
 # define ATI_MAX_PCIGART_PAGES         8192    /**< 32 MB aperture, 4K pages */
 # define ATI_PCIGART_PAGE_SIZE         4096    /**< PCI GART page size */
 
-static unsigned long drm_ati_alloc_pcigart_table(void)
+static void *drm_ati_alloc_pcigart_table(void)
 {
        unsigned long address;
        struct page *page;
@@ -72,27 +72,26 @@ static unsigned long drm_ati_alloc_pcigart_table(void)
        }
 
        DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
-       return address;
+       return (void *)address;
 }
 
-static void drm_ati_free_pcigart_table(unsigned long address)
+static void drm_ati_free_pcigart_table(void *address)
 {
        struct page *page;
        int i;
        DRM_DEBUG("%s\n", __FUNCTION__);
 
-       page = virt_to_page(address);
+       page = virt_to_page((unsigned long)address);
 
        for (i = 0; i < ATI_PCIGART_TABLE_PAGES; i++, page++) {
                __put_page(page);
                ClearPageReserved(page);
        }
 
-       free_pages(address, ATI_PCIGART_TABLE_ORDER);
+       free_pages((unsigned long)address, ATI_PCIGART_TABLE_ORDER);
 }
 
-int drm_ati_pcigart_cleanup(drm_device_t * dev,
-                           drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
 {
        drm_sg_mem_t *entry = dev->sg;
        unsigned long pages;
@@ -136,10 +135,10 @@ int drm_ati_pcigart_cleanup(drm_device_t * dev,
 
 EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
 
-int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
+int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
 {
        drm_sg_mem_t *entry = dev->sg;
-       unsigned long address = 0;
+       void *address = NULL;
        unsigned long pages;
        u32 *pci_gart, page_base, bus_address = 0;
        int i, j, ret = 0;
@@ -163,7 +162,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
                        goto done;
                }
 
-               bus_address = pci_map_single(dev->pdev, (void *)address,
+               bus_address = pci_map_single(dev->pdev, address,
                                             ATI_PCIGART_TABLE_PAGES *
                                             PAGE_SIZE, PCI_DMA_TODEVICE);
                if (bus_address == 0) {
@@ -176,7 +175,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
                address = gart_info->addr;
                bus_address = gart_info->bus_addr;
                DRM_DEBUG("PCI: Gart Table: VRAM %08X mapped at %08lX\n",
-                         bus_address, address);
+                         bus_address, (unsigned long)address);
        }
 
        pci_gart = (u32 *) address;
@@ -195,7 +194,7 @@ int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info * gart_info)
                if (entry->busaddr[i] == 0) {
                        DRM_ERROR("unable to map PCIGART pages!\n");
                        drm_ati_pcigart_cleanup(dev, gart_info);
-                       address = 0;
+                       address = NULL;
                        bus_address = 0;
                        goto done;
                }
index 64d6237fdd0baa5ce661472ba12ddd7eddacf304..9da0ddb892b58fd3a0db271797ed80d378d27c6f 100644 (file)
@@ -90,8 +90,8 @@
 #define DRM_MAX_ORDER  22        /**< Up to 2^22 bytes = 4MB */
 #define DRM_RAM_PERCENT 10       /**< How much system ram can we lock? */
 
-#define _DRM_LOCK_HELD 0x80000000 /**< Hardware lock is held */
-#define _DRM_LOCK_CONT 0x40000000 /**< Hardware lock is contended */
+#define _DRM_LOCK_HELD 0x80000000U /**< Hardware lock is held */
+#define _DRM_LOCK_CONT 0x40000000U /**< Hardware lock is contended */
 #define _DRM_LOCK_IS_HELD(lock)           ((lock) & _DRM_LOCK_HELD)
 #define _DRM_LOCK_IS_CONT(lock)           ((lock) & _DRM_LOCK_CONT)
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
index 3dc3c9d79ae423d69a7c0fd9c59cc8d02b25b830..54b561e694862ef6924ef08f61ba2ad520c18a8f 100644 (file)
 /** \name Backward compatibility section */
 /*@{*/
 
-#ifndef MODULE_LICENSE
-#define MODULE_LICENSE(x)
-#endif
-
-#ifndef preempt_disable
-#define preempt_disable()
-#define preempt_enable()
-#endif
-
-#ifndef pte_offset_map
-#define pte_offset_map pte_offset
-#define pte_unmap(pte)
-#endif
-
 #define DRM_RPR_ARG(vma) vma,
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
@@ -286,10 +272,13 @@ typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
 typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
                               unsigned long arg);
 
+#define DRM_AUTH       0x1
+#define        DRM_MASTER      0x2
+#define DRM_ROOT_ONLY  0x4
+
 typedef struct drm_ioctl_desc {
        drm_ioctl_t *func;
-       int auth_needed;
-       int root_only;
+       int flags;
 } drm_ioctl_desc_t;
 
 typedef struct drm_devstate {
@@ -384,6 +373,7 @@ typedef struct drm_buf_entry {
 /** File private data */
 typedef struct drm_file {
        int authenticated;
+       int master;
        int minor;
        pid_t pid;
        uid_t uid;
@@ -532,8 +522,9 @@ typedef struct drm_vbl_sig {
 typedef struct ati_pcigart_info {
        int gart_table_location;
        int is_pcie;
-       unsigned long addr;
+       void *addr;
        dma_addr_t bus_addr;
+       drm_local_map_t mapping;
 } drm_ati_pcigart_info;
 
 /**
@@ -544,16 +535,14 @@ typedef struct ati_pcigart_info {
 struct drm_device;
 
 struct drm_driver {
-       int (*preinit) (struct drm_device *, unsigned long flags);
-       void (*prerelease) (struct drm_device *, struct file * filp);
-       void (*pretakedown) (struct drm_device *);
-       int (*postcleanup) (struct drm_device *);
-       int (*presetup) (struct drm_device *);
-       int (*postsetup) (struct drm_device *);
+       int (*load) (struct drm_device *, unsigned long flags);
+       int (*firstopen) (struct drm_device *);
+       int (*open) (struct drm_device *, drm_file_t *);
+       void (*preclose) (struct drm_device *, struct file * filp);
+       void (*postclose) (struct drm_device *, drm_file_t *);
+       void (*lastclose) (struct drm_device *);
+       int (*unload) (struct drm_device *);
        int (*dma_ioctl) (DRM_IOCTL_ARGS);
-       int (*open_helper) (struct drm_device *, drm_file_t *);
-       void (*free_filp_priv) (struct drm_device *, drm_file_t *);
-       void (*release) (struct drm_device *, struct file * filp);
        void (*dma_ready) (struct drm_device *);
        int (*dma_quiescent) (struct drm_device *);
        int (*context_ctor) (struct drm_device * dev, int context);
@@ -561,8 +550,9 @@ struct drm_driver {
        int (*kernel_context_switch) (struct drm_device * dev, int old,
                                      int new);
        void (*kernel_context_switch_unlock) (struct drm_device * dev,
-                                             drm_lock_t * lock);
+                                             drm_lock_t *lock);
        int (*vblank_wait) (struct drm_device * dev, unsigned int *sequence);
+       int (*dri_library_name) (struct drm_device *dev, char *buf);
 
        /**
         * Called by \c drm_device_is_agp.  Typically used to determine if a
@@ -579,16 +569,24 @@ struct drm_driver {
 
        /* these have to be filled in */
 
-       int (*postinit) (struct drm_device *, unsigned long flags);
-        irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
+       irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
        void (*irq_preinstall) (struct drm_device * dev);
        void (*irq_postinstall) (struct drm_device * dev);
        void (*irq_uninstall) (struct drm_device * dev);
        void (*reclaim_buffers) (struct drm_device * dev, struct file * filp);
+       void (*reclaim_buffers_locked) (struct drm_device *dev,
+                                       struct file *filp);
        unsigned long (*get_map_ofs) (drm_map_t * map);
        unsigned long (*get_reg_ofs) (struct drm_device * dev);
        void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
-       int (*version) (drm_version_t * version);
+
+       int major;
+       int minor;
+       int patchlevel;
+       char *name;
+       char *desc;
+       char *date;
+
        u32 driver_features;
        int dev_priv_size;
        drm_ioctl_desc_t *ioctls;
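The struct drm_driver rework drops the old preinit/presetup/prerelease/pretakedown/postcleanup-style callbacks in favour of load/firstopen/open/preclose/postclose/lastclose/unload, and moves the driver's version and identity strings out of a version() callback into static fields. A hedged sketch of what a converted driver's initializer might look like under the new layout (all example_* symbols are hypothetical):

        /* Illustrative only: a driver filling in the reworked hooks and
         * the new static version/identity fields. */
        static struct drm_driver example_driver = {
                .driver_features = DRIVER_USE_MTRR,
                .load            = example_load,
                .unload          = example_unload,
                .preclose        = example_preclose,
                .lastclose       = example_lastclose,
                .irq_handler     = example_irq_handler,
                .reclaim_buffers = drm_core_reclaim_buffers,
                .ioctls          = example_ioctls,
                .major      = 1,
                .minor      = 0,
                .patchlevel = 0,
                .name       = "example",
                .desc       = "Example DRM driver",
                .date       = "20060114",
        };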
@@ -752,19 +750,43 @@ static inline int drm_core_has_MTRR(struct drm_device *dev)
 {
        return drm_core_check_feature(dev, DRIVER_USE_MTRR);
 }
+
+#define DRM_MTRR_WC            MTRR_TYPE_WRCOMB
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+                              unsigned int flags)
+{
+       return mtrr_add(offset, size, flags, 1);
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+                              unsigned long size, unsigned int flags)
+{
+       return mtrr_del(handle, offset, size);
+}
+
 #else
 #define drm_core_has_MTRR(dev) (0)
+
+#define DRM_MTRR_WC            0
+
+static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
+                              unsigned int flags)
+{
+       return 0;
+}
+
+static inline int drm_mtrr_del(int handle, unsigned long offset,
+                              unsigned long size, unsigned int flags)
+{
+       return 0;
+}
 #endif
 
 /******************************************************************/
 /** \name Internal function definitions */
 /*@{*/
 
-                               /* Misc. support (drm_init.h) */
-extern int drm_flags;
-extern void drm_parse_options(char *s);
-extern int drm_cpu_valid(void);
-
                                /* Driver support (drm_drv.h) */
 extern int drm_init(struct drm_driver *driver);
 extern void drm_exit(struct drm_driver *driver);
@@ -772,12 +794,11 @@ extern int drm_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg);
 extern long drm_compat_ioctl(struct file *filp,
                             unsigned int cmd, unsigned long arg);
-extern int drm_takedown(drm_device_t * dev);
+extern int drm_lastclose(drm_device_t *dev);
 
                                /* Device support (drm_fops.h) */
 extern int drm_open(struct inode *inode, struct file *filp);
 extern int drm_stub_open(struct inode *inode, struct file *filp);
-extern int drm_flush(struct file *filp);
 extern int drm_fasync(int fd, struct file *filp, int on);
 extern int drm_release(struct inode *inode, struct file *filp);
 
@@ -819,6 +840,8 @@ extern int drm_getstats(struct inode *inode, struct file *filp,
                        unsigned int cmd, unsigned long arg);
 extern int drm_setversion(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg);
+extern int drm_noop(struct inode *inode, struct file *filp,
+                   unsigned int cmd, unsigned long arg);
 
                                /* Context IOCTL support (drm_context.h) */
 extern int drm_resctx(struct inode *inode, struct file *filp,
@@ -857,10 +880,6 @@ extern int drm_getmagic(struct inode *inode, struct file *filp,
 extern int drm_authmagic(struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg);
 
-                               /* Placeholder for ioctls past */
-extern int drm_noop(struct inode *inode, struct file *filp,
-                   unsigned int cmd, unsigned long arg);
-
                                /* Locking IOCTL support (drm_lock.h) */
 extern int drm_lock(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg);
@@ -873,6 +892,7 @@ extern int drm_lock_free(drm_device_t * dev,
                                /* Buffer management support (drm_bufs.h) */
 extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
 extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
+extern int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request);
 extern int drm_addmap(drm_device_t * dev, unsigned int offset,
                      unsigned int size, drm_map_type_t type,
                      drm_map_flags_t flags, drm_local_map_t ** map_ptr);
@@ -908,8 +928,8 @@ extern void drm_core_reclaim_buffers(drm_device_t * dev, struct file *filp);
                                /* IRQ support (drm_irq.h) */
 extern int drm_control(struct inode *inode, struct file *filp,
                       unsigned int cmd, unsigned long arg);
-extern int drm_irq_uninstall(drm_device_t * dev);
 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
+extern int drm_irq_uninstall(drm_device_t * dev);
 extern void drm_driver_irq_preinstall(drm_device_t * dev);
 extern void drm_driver_irq_postinstall(drm_device_t * dev);
 extern void drm_driver_irq_uninstall(drm_device_t * dev);
@@ -933,13 +953,17 @@ extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
 extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t * info);
 extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
                              unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(struct inode *inode, struct file *filp,
+extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
                         unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(struct inode *inode, struct file *filp,
+extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
+extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
                        unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(struct inode *inode, struct file *filp,
+extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
                          unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(struct inode *inode, struct file *filp,
+extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
+extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
                        unsigned int cmd, unsigned long arg);
 extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge,
                                            size_t pages, u32 type);
@@ -991,10 +1015,8 @@ extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
                                                char *name);
 extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
 extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
-                                                dev_t dev,
-                                                struct device *device,
-                                                const char *fmt, ...);
-extern void drm_sysfs_device_remove(dev_t dev);
+                                                drm_head_t *head);
+extern void drm_sysfs_device_remove(struct class_device *class_dev);
 
 /* Inline replacements for DRM_IOREMAP macros */
 static __inline__ void drm_core_ioremap(struct drm_map *map,
index 2b6453a9ffce6e20c5a932f7ed00895acd8375d1..fabc930c67a23f72c04231b169856578733e13de 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * \file drm_agpsupport.h
+ * \file drm_agpsupport.c
  * DRM support for AGP/GART backend
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -91,7 +91,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
 /**
  * Acquire the AGP device.
  *
- * \param dev DRM device that is to acquire AGP
+ * \param dev DRM device that is to acquire AGP.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device hasn't been acquired before and calls
@@ -134,7 +134,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
 /**
  * Release the AGP device.
  *
- * \param dev DRM device that is to release AGP
+ * \param dev DRM device that is to release AGP.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired and calls \c agp_backend_release.
@@ -147,7 +147,6 @@ int drm_agp_release(drm_device_t * dev)
        dev->agp->acquired = 0;
        return 0;
 }
-
 EXPORT_SYMBOL(drm_agp_release);
 
 int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
@@ -208,30 +207,22 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
  * Verifies the AGP device is present and has been acquired, allocates the
  * memory via alloc_agp() and creates a drm_agp_mem entry for it.
  */
-int drm_agp_alloc(struct inode *inode, struct file *filp,
-                 unsigned int cmd, unsigned long arg)
+int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       drm_agp_buffer_t request;
        drm_agp_mem_t *entry;
        DRM_AGP_MEM *memory;
        unsigned long pages;
        u32 type;
-       drm_agp_buffer_t __user *argp = (void __user *)arg;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
-       if (copy_from_user(&request, argp, sizeof(request)))
-               return -EFAULT;
        if (!(entry = drm_alloc(sizeof(*entry), DRM_MEM_AGPLISTS)))
                return -ENOMEM;
 
        memset(entry, 0, sizeof(*entry));
 
-       pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
-       type = (u32) request.type;
-
+       pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+       type = (u32) request->type;
        if (!(memory = drm_alloc_agp(dev, pages, type))) {
                drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                return -ENOMEM;
@@ -247,16 +238,39 @@ int drm_agp_alloc(struct inode *inode, struct file *filp,
                dev->agp->memory->prev = entry;
        dev->agp->memory = entry;
 
-       request.handle = entry->handle;
-       request.physical = memory->physical;
+       request->handle = entry->handle;
+       request->physical = memory->physical;
+
+       return 0;
+}
+EXPORT_SYMBOL(drm_agp_alloc);
+
+int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
+                       unsigned int cmd, unsigned long arg)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->head->dev;
+       drm_agp_buffer_t request;
+       drm_agp_buffer_t __user *argp = (void __user *)arg;
+       int err;
+
+       if (copy_from_user(&request, argp, sizeof(request)))
+               return -EFAULT;
+
+       err = drm_agp_alloc(dev, &request);
+       if (err)
+               return err;
 
        if (copy_to_user(argp, &request, sizeof(request))) {
+               drm_agp_mem_t *entry = dev->agp->memory;
+
                dev->agp->memory = entry->next;
                dev->agp->memory->prev = NULL;
-               drm_free_agp(memory, pages);
+               drm_free_agp(entry->memory, entry->pages);
                drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
                return -EFAULT;
        }
+
        return 0;
 }
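Splitting drm_agp_alloc() out of the ioctl handler makes it callable from inside the kernel. A hedged sketch of such a caller (the surrounding function and its values are hypothetical; the drm_agp_buffer_t fields are the ones used above):

static int example_alloc_ring(drm_device_t *dev, unsigned long size)
{
	drm_agp_buffer_t req;
	int ret;

	memset(&req, 0, sizeof(req));
	req.size = size;	/* rounded up to whole pages by drm_agp_alloc() */
	req.type = 0;		/* plain AGP memory */

	ret = drm_agp_alloc(dev, &req);
	if (ret)
		return ret;

	/* req.handle and req.physical now describe the allocation */
	return 0;
}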
 
@@ -293,21 +307,14 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
  * Verifies the AGP device is present and acquired, looks-up the AGP memory
  * entry and passes it to the unbind_agp() function.
  */
-int drm_agp_unbind(struct inode *inode, struct file *filp,
-                  unsigned int cmd, unsigned long arg)
+int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       drm_agp_binding_t request;
        drm_agp_mem_t *entry;
        int ret;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
-       if (copy_from_user
-           (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
-               return -EFAULT;
-       if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
                return -EINVAL;
        if (!entry->bound)
                return -EINVAL;
@@ -316,6 +323,21 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
                entry->bound = 0;
        return ret;
 }
+EXPORT_SYMBOL(drm_agp_unbind);
+
+int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
+                        unsigned int cmd, unsigned long arg)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->head->dev;
+       drm_agp_binding_t request;
+
+       if (copy_from_user
+           (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+               return -EFAULT;
+
+       return drm_agp_unbind(dev, &request);
+}
 
 /**
  * Bind AGP memory into the GATT (ioctl)
@@ -330,26 +352,19 @@ int drm_agp_unbind(struct inode *inode, struct file *filp,
  * is currently bound into the GATT. Looks-up the AGP memory entry and passes
  * it to bind_agp() function.
  */
-int drm_agp_bind(struct inode *inode, struct file *filp,
-                unsigned int cmd, unsigned long arg)
+int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       drm_agp_binding_t request;
        drm_agp_mem_t *entry;
        int retcode;
        int page;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
-       if (copy_from_user
-           (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
-               return -EFAULT;
-       if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
                return -EINVAL;
        if (entry->bound)
                return -EINVAL;
-       page = (request.offset + PAGE_SIZE - 1) / PAGE_SIZE;
+       page = (request->offset + PAGE_SIZE - 1) / PAGE_SIZE;
        if ((retcode = drm_bind_agp(entry->memory, page)))
                return retcode;
        entry->bound = dev->agp->base + (page << PAGE_SHIFT);
@@ -357,6 +372,21 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
                  dev->agp->base, entry->bound);
        return 0;
 }
+EXPORT_SYMBOL(drm_agp_bind);
+
+int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
+                      unsigned int cmd, unsigned long arg)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->head->dev;
+       drm_agp_binding_t request;
+
+       if (copy_from_user
+           (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
+               return -EFAULT;
+
+       return drm_agp_bind(dev, &request);
+}
 
 /**
  * Free AGP memory (ioctl).
@@ -372,20 +402,13 @@ int drm_agp_bind(struct inode *inode, struct file *filp,
  * unbind_agp(). Frees it via free_agp() as well as the entry itself
  * and unlinks from the doubly linked list it's inserted in.
  */
-int drm_agp_free(struct inode *inode, struct file *filp,
-                unsigned int cmd, unsigned long arg)
+int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       drm_agp_buffer_t request;
        drm_agp_mem_t *entry;
 
        if (!dev->agp || !dev->agp->acquired)
                return -EINVAL;
-       if (copy_from_user
-           (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
-               return -EFAULT;
-       if (!(entry = drm_agp_lookup_entry(dev, request.handle)))
+       if (!(entry = drm_agp_lookup_entry(dev, request->handle)))
                return -EINVAL;
        if (entry->bound)
                drm_unbind_agp(entry->memory);
@@ -402,12 +425,30 @@ int drm_agp_free(struct inode *inode, struct file *filp,
        drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
        return 0;
 }
+EXPORT_SYMBOL(drm_agp_free);
+
+int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
+                      unsigned int cmd, unsigned long arg)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->head->dev;
+       drm_agp_buffer_t request;
+
+       if (copy_from_user
+           (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
+               return -EFAULT;
+
+       return drm_agp_free(dev, &request);
+}
 
 /**
  * Initialize the AGP resources.
  *
  * \return pointer to a drm_agp_head structure.
  *
+ * Gets the drm_agp_t structure which is made available by the agpgart module
+ * via the inter_module_* functions. Creates and initializes a drm_agp_head
+ * structure.
  */
 drm_agp_head_t *drm_agp_init(drm_device_t * dev)
 {
index 319bdea8de8a63c7db21caa72ac1481e3f1330bb..1db12dcb6802f8e52b7622a415d6072d6c4f00f6 100644 (file)
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-unsigned long drm_get_resource_start(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
        return pci_resource_start(dev->pdev, resource);
 }
-
 EXPORT_SYMBOL(drm_get_resource_start);
 
-unsigned long drm_get_resource_len(drm_device_t * dev, unsigned int resource)
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
 {
        return pci_resource_len(dev->pdev, resource);
 }
 
 EXPORT_SYMBOL(drm_get_resource_len);
 
-static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
-                                            drm_local_map_t * map)
+static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
+                                            drm_local_map_t *map)
 {
        struct list_head *list;
 
@@ -74,7 +73,7 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t * dev,
 
 #ifdef _LP64
 static __inline__ unsigned int HandleID(unsigned long lhandle,
-                                       drm_device_t * dev)
+                                       drm_device_t *dev)
 {
        static unsigned int map32_handle = START_RANGE;
        unsigned int hash;
@@ -155,7 +154,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
 #if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
-               if (map->offset + map->size < map->offset ||
+               if (map->offset + (map->size-1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
@@ -301,6 +300,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                return -EFAULT;
        }
 
+       if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+               return -EPERM;
+
        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);
 
@@ -332,7 +334,7 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
  *
  * \sa drm_addmap
  */
-int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
@@ -384,10 +386,9 @@ int drm_rmmap_locked(drm_device_t * dev, drm_local_map_t * map)
 
        return 0;
 }
-
 EXPORT_SYMBOL(drm_rmmap_locked);
 
-int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
 {
        int ret;
 
@@ -397,7 +398,6 @@ int drm_rmmap(drm_device_t * dev, drm_local_map_t * map)
 
        return ret;
 }
-
 EXPORT_SYMBOL(drm_rmmap);
 
 /* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
@@ -548,7 +548,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
-       DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+       DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);
@@ -649,6 +649,8 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
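The two added lines bring the AGP path's device-wide DMA accounting in line with the PCI path. Rough arithmetic for an illustrative request (values made up):

/* For request->count = 32 buffers of request->size = 4096 on a
 * 4 KiB-page machine:
 *
 *   byte_count       = 32 * 4096            = 131072
 *   dma->page_count += 131072 >> PAGE_SHIFT = 32
 *
 * so the per-device totals now count AGP (and, in the hunks below, SG
 * and FB) buffers the same way the PCI path already did. */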
@@ -664,7 +666,6 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_dec(&dev->buf_alloc);
        return 0;
 }
-
 EXPORT_SYMBOL(drm_addbufs_agp);
 #endif                         /* __OS_HAS_AGP */
 
@@ -689,9 +690,13 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
 
        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;
+
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -882,7 +887,6 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
        return 0;
 
 }
-
 EXPORT_SYMBOL(drm_addbufs_pci);
 
 static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
@@ -908,6 +912,9 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -1026,6 +1033,8 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1042,7 +1051,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
        return 0;
 }
 
-static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
 {
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
@@ -1065,6 +1074,9 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        if (!dma)
                return -EINVAL;
 
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
@@ -1181,6 +1193,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        }
 
        dma->buf_count += entry->buf_count;
+       dma->seg_count += entry->seg_count;
+       dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;
 
        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
@@ -1196,6 +1210,8 @@ static int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
        atomic_dec(&dev->buf_alloc);
        return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_fb);
+
 
 /**
  * Add buffers for DMA transfers (ioctl).
@@ -1577,5 +1593,6 @@ int drm_order(unsigned long size)
 
        return order;
 }
-
 EXPORT_SYMBOL(drm_order);
+
+
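For reference, drm_order(), whose tail is shown in this hunk, returns the smallest n with (1 << n) >= size, which is why the addbufs paths above compute size = 1 << order. A few sample values (a sketch, not from the patch):

/* drm_order(1)    == 0
 * drm_order(4096) == 12    (exact power of two)
 * drm_order(4097) == 13    (rounded up to the next power of two) */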
index bd958d69a2ac2aeccb166739c83de1cb674fa4cc..f8425452694957d6c74a2955eac8e3abc8310f03 100644 (file)
@@ -433,7 +433,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
        if (ctx.handle != DRM_KERNEL_CONTEXT) {
                if (dev->driver->context_ctor)
                        if (!dev->driver->context_ctor(dev, ctx.handle)) {
-                               DRM_DEBUG( "Running out of ctxs or memory.\n");
+                               DRM_DEBUG("Running out of ctxs or memory.\n");
                                return -ENOMEM;
                        }
        }
index cc97bb906dda21faa3a75506f6e7fee40cf309de..f4f9db6c7ed4c29f534f10c107a0ec7b22cba5f6 100644 (file)
 
 #define CORE_NAME              "drm"
 #define CORE_DESC              "DRM shared core routines"
-#define CORE_DATE              "20040925"
+#define CORE_DATE              "20051102"
 
 #define DRM_IF_MAJOR   1
 #define DRM_IF_MINOR   2
 
 #define CORE_MAJOR     1
 #define CORE_MINOR     0
-#define CORE_PATCHLEVEL 0
+#define CORE_PATCHLEVEL 1
index 4dff7554eb083e5f4b5a41b19db41ba3014db3e9..c4fa5a29582b515fadbbd4aa5ec6ab1d98089722 100644 (file)
@@ -56,66 +56,66 @@ static int drm_version(struct inode *inode, struct file *filp,
 
 /** Ioctl table */
 static drm_ioctl_desc_t drm_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, 0, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, 0, 1},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, 1, 1},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, 1, 0},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, 1, 0},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, 1, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, 1, 0},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, 1, 1},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, 1, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, 1, 0},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, 1, 0},
-
-       [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, 1, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},
+
+       [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
        /* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
+       [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},
 
-       [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, 1, 1},
+       [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
 #if __OS_HAS_AGP
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, 1, 0},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind, 1, 1},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 #endif
 
-       [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, 1, 1},
-       [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, 1, 1},
+       [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 
-       [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0, 0},
+       [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
 };
 
 #define DRIVER_IOCTL_COUNT     DRM_ARRAY_SIZE( drm_ioctls )
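The table now encodes permissions as flag bits instead of the old {auth_needed, root_only} pair. The mapping, plus what a hypothetical driver-private entry would look like (DRM_IOCTL_EXAMPLE and example_ioctl are made-up names):

/* old {fn, 0, 0} -> {fn, 0}                                   any client
 * old {fn, 1, 0} -> {fn, DRM_AUTH}                            authenticated clients
 * old {fn, 0, 1} -> {fn, DRM_MASTER|DRM_ROOT_ONLY}            root-owned master fd
 * old {fn, 1, 1} -> {fn, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}   authenticated root master
 *
 * DRM_MASTER is the new bit: drm_ioctl() checks priv->master, which is
 * set only for the first opener (see drm_open_helper() further down). */
[DRM_IOCTL_NR(DRM_IOCTL_EXAMPLE)] = {example_ioctl, DRM_AUTH},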
@@ -129,7 +129,7 @@ static drm_ioctl_desc_t drm_ioctls[] = {
  *
  * \sa drm_device
  */
-int drm_takedown(drm_device_t * dev)
+int drm_lastclose(drm_device_t * dev)
 {
        drm_magic_entry_t *pt, *next;
        drm_map_list_t *r_list;
@@ -138,9 +138,9 @@ int drm_takedown(drm_device_t * dev)
 
        DRM_DEBUG("\n");
 
-       if (dev->driver->pretakedown)
-               dev->driver->pretakedown(dev);
-       DRM_DEBUG("driver pretakedown completed\n");
+       if (dev->driver->lastclose)
+               dev->driver->lastclose(dev);
+       DRM_DEBUG("driver lastclose completed\n");
 
        if (dev->unique) {
                drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -233,7 +233,7 @@ int drm_takedown(drm_device_t * dev)
        }
        up(&dev->struct_sem);
 
-       DRM_DEBUG("takedown completed\n");
+       DRM_DEBUG("lastclose completed\n");
        return 0;
 }
 
@@ -281,7 +281,7 @@ EXPORT_SYMBOL(drm_init);
 /**
  * Called via cleanup_module() at module unload time.
  *
- * Cleans up all DRM device, calling takedown().
+ * Cleans up all DRM device, calling drm_lastclose().
  *
  * \sa drm_init
  */
@@ -294,7 +294,7 @@ static void drm_cleanup(drm_device_t * dev)
                return;
        }
 
-       drm_takedown(dev);
+       drm_lastclose(dev);
 
        if (dev->maplist) {
                drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
@@ -317,8 +317,8 @@ static void drm_cleanup(drm_device_t * dev)
                dev->agp = NULL;
        }
 
-       if (dev->driver->postcleanup)
-               dev->driver->postcleanup(dev);
+       if (dev->driver->unload)
+               dev->driver->unload(dev);
 
        drm_put_head(&dev->primary);
        if (drm_put_dev(dev))
@@ -342,12 +342,12 @@ void drm_exit(struct drm_driver *driver)
                if (head->dev->driver != driver)
                        continue;
                dev = head->dev;
-       }
-       if (dev) {
-               /* release the pci driver */
-               if (dev->pdev)
-                       pci_dev_put(dev->pdev);
-               drm_cleanup(dev);
+               if (dev) {
+                       /* release the pci driver */
+                       if (dev->pdev)
+                               pci_dev_put(dev->pdev);
+                       drm_cleanup(dev);
+               }
        }
        DRM_INFO("Module unloaded\n");
 }
@@ -432,14 +432,17 @@ static int drm_version(struct inode *inode, struct file *filp,
        drm_device_t *dev = priv->head->dev;
        drm_version_t __user *argp = (void __user *)arg;
        drm_version_t version;
-       int ret;
+       int len;
 
        if (copy_from_user(&version, argp, sizeof(version)))
                return -EFAULT;
 
-       /* version is a required function to return the personality module version */
-       if ((ret = dev->driver->version(&version)))
-               return ret;
+       version.version_major = dev->driver->major;
+       version.version_minor = dev->driver->minor;
+       version.version_patchlevel = dev->driver->patchlevel;
+       DRM_COPY(version.name, dev->driver->name);
+       DRM_COPY(version.date, dev->driver->date);
+       DRM_COPY(version.desc, dev->driver->desc);
 
        if (copy_to_user(argp, &version, sizeof(version)))
                return -EFAULT;
@@ -493,8 +496,9 @@ int drm_ioctl(struct inode *inode, struct file *filp,
        if (!func) {
                DRM_DEBUG("no function\n");
                retcode = -EINVAL;
-       } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) ||
-                  (ioctl->auth_needed && !priv->authenticated)) {
+       } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
+                  ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
+                  ((ioctl->flags & DRM_MASTER) && !priv->master)) {
                retcode = -EACCES;
        } else {
                retcode = func(inode, filp, cmd, arg);
index bf0a740122bf815cd3fbc9a7ed3710cb98af6848..403f44a1bf016cebe684f8d35719bf3b8d6dbf06 100644 (file)
@@ -35,6 +35,7 @@
  */
 
 #include "drmP.h"
+#include "drm_sarea.h"
 #include <linux/poll.h>
 
 static int drm_open_helper(struct inode *inode, struct file *filp,
@@ -42,15 +43,21 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 static int drm_setup(drm_device_t * dev)
 {
+       drm_local_map_t *map;
        int i;
        int ret;
 
-       if (dev->driver->presetup) {
-               ret = dev->driver->presetup(dev);
+       if (dev->driver->firstopen) {
+               ret = dev->driver->firstopen(dev);
                if (ret != 0)
                        return ret;
        }
 
+       /* prebuild the SAREA */
+       i = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, _DRM_CONTAINS_LOCK, &map);
+       if (i != 0)
+               return i;
+
        atomic_set(&dev->ioctl_count, 0);
        atomic_set(&dev->vma_count, 0);
        dev->buf_use = 0;
@@ -109,8 +116,6 @@ static int drm_setup(drm_device_t * dev)
         * drm_select_queue fails between the time the interrupt is
         * initialized and the time the queues are initialized.
         */
-       if (dev->driver->postsetup)
-               dev->driver->postsetup(dev);
 
        return 0;
 }
@@ -154,9 +159,167 @@ int drm_open(struct inode *inode, struct file *filp)
 
        return retcode;
 }
-
 EXPORT_SYMBOL(drm_open);
 
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, call the \c open method, and restore the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+       drm_device_t *dev = NULL;
+       int minor = iminor(inode);
+       int err = -ENODEV;
+       struct file_operations *old_fops;
+
+       DRM_DEBUG("\n");
+
+       if (!((minor >= 0) && (minor < drm_cards_limit)))
+               return -ENODEV;
+
+       if (!drm_heads[minor])
+               return -ENODEV;
+
+       if (!(dev = drm_heads[minor]->dev))
+               return -ENODEV;
+
+       old_fops = filp->f_op;
+       filp->f_op = fops_get(&dev->driver->fops);
+       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+               fops_put(filp->f_op);
+               filp->f_op = fops_get(old_fops);
+       }
+       fops_put(old_fops);
+
+       return err;
+}
+
+/**
+ * Check whether DRI will run on this CPU.
+ *
+ * \return non-zero if the DRI will run on this CPU, or zero otherwise.
+ */
+static int drm_cpu_valid(void)
+{
+#if defined(__i386__)
+       if (boot_cpu_data.x86 == 3)
+               return 0;       /* No cmpxchg on a 386 */
+#endif
+#if defined(__sparc__) && !defined(__sparc_v9__)
+       return 0;               /* No cmpxchg before v9 sparc. */
+#endif
+       return 1;
+}
+
+/**
+ * Called whenever a process opens /dev/drm.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ * \param dev device.
+ * \return zero on success or a negative number on failure.
+ *
+ * Creates and initializes a drm_file structure for the file private data in \p
+ * filp and add it into the double linked list in \p dev.
+ */
+static int drm_open_helper(struct inode *inode, struct file *filp,
+                          drm_device_t * dev)
+{
+       int minor = iminor(inode);
+       drm_file_t *priv;
+       int ret;
+
+       if (filp->f_flags & O_EXCL)
+               return -EBUSY;  /* No exclusive opens */
+       if (!drm_cpu_valid())
+               return -EINVAL;
+
+       DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
+
+       priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
+       if (!priv)
+               return -ENOMEM;
+
+       memset(priv, 0, sizeof(*priv));
+       filp->private_data = priv;
+       priv->uid = current->euid;
+       priv->pid = current->pid;
+       priv->minor = minor;
+       priv->head = drm_heads[minor];
+       priv->ioctl_count = 0;
+       /* for compatibility root is always authenticated */
+       priv->authenticated = capable(CAP_SYS_ADMIN);
+       priv->lock_count = 0;
+
+       if (dev->driver->open) {
+               ret = dev->driver->open(dev, priv);
+               if (ret < 0)
+                       goto out_free;
+       }
+
+       down(&dev->struct_sem);
+       if (!dev->file_last) {
+               priv->next = NULL;
+               priv->prev = NULL;
+               dev->file_first = priv;
+               dev->file_last = priv;
+               /* first opener automatically becomes master */
+               priv->master = 1;
+       } else {
+               priv->next = NULL;
+               priv->prev = dev->file_last;
+               dev->file_last->next = priv;
+               dev->file_last = priv;
+       }
+       up(&dev->struct_sem);
+
+#ifdef __alpha__
+       /*
+        * Default the hose
+        */
+       if (!dev->hose) {
+               struct pci_dev *pci_dev;
+               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
+               if (pci_dev) {
+                       dev->hose = pci_dev->sysdata;
+                       pci_dev_put(pci_dev);
+               }
+               if (!dev->hose) {
+                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
+                       if (b)
+                               dev->hose = b->sysdata;
+               }
+       }
+#endif
+
+       return 0;
+      out_free:
+       drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+       filp->private_data = NULL;
+       return ret;
+}
+
+/** No-op. */
+int drm_fasync(int fd, struct file *filp, int on)
+{
+       drm_file_t *priv = filp->private_data;
+       drm_device_t *dev = priv->head->dev;
+       int retcode;
+
+       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
+                 (long)old_encode_dev(priv->head->device));
+       retcode = fasync_helper(fd, filp, on, &dev->buf_async);
+       if (retcode < 0)
+               return retcode;
+       return 0;
+}
+EXPORT_SYMBOL(drm_fasync);
+
 /**
  * Release file.
  *
@@ -167,7 +330,7 @@ EXPORT_SYMBOL(drm_open);
  * If the hardware lock is held then free it, and take it again for the kernel
  * context since it's necessary to reclaim buffers. Unlink the file private
  * data from its list and free it. Decreases the open count and if it reaches
- * zero calls takedown().
+ * zero calls drm_lastclose().
  */
 int drm_release(struct inode *inode, struct file *filp)
 {
@@ -180,8 +343,8 @@ int drm_release(struct inode *inode, struct file *filp)
 
        DRM_DEBUG("open_count = %d\n", dev->open_count);
 
-       if (dev->driver->prerelease)
-               dev->driver->prerelease(dev, filp);
+       if (dev->driver->preclose)
+               dev->driver->preclose(dev, filp);
 
        /* ========================================================
         * Begin inline drm_release
@@ -197,8 +360,8 @@ int drm_release(struct inode *inode, struct file *filp)
                DRM_DEBUG("File %p released, freeing lock for context %d\n",
                          filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
 
-               if (dev->driver->release)
-                       dev->driver->release(dev, filp);
+               if (dev->driver->reclaim_buffers_locked)
+                       dev->driver->reclaim_buffers_locked(dev, filp);
 
                drm_lock_free(dev, &dev->lock.hw_lock->lock,
                              _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -207,7 +370,7 @@ int drm_release(struct inode *inode, struct file *filp)
                   hardware at this point, possibly
                   processed via a callback to the X
                   server. */
-       } else if (dev->driver->release && priv->lock_count
+       } else if (dev->driver->reclaim_buffers_locked && priv->lock_count
                   && dev->lock.hw_lock) {
                /* The lock is required to reclaim buffers */
                DECLARE_WAITQUEUE(entry, current);
@@ -237,15 +400,14 @@ int drm_release(struct inode *inode, struct file *filp)
                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&dev->lock.lock_queue, &entry);
                if (!retcode) {
-                       if (dev->driver->release)
-                               dev->driver->release(dev, filp);
+                       dev->driver->reclaim_buffers_locked(dev, filp);
                        drm_lock_free(dev, &dev->lock.hw_lock->lock,
                                      DRM_KERNEL_CONTEXT);
                }
        }
 
-       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)
-           && !dev->driver->release) {
+       if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
+           !dev->driver->reclaim_buffers_locked) {
                dev->driver->reclaim_buffers(dev, filp);
        }
 
@@ -292,9 +454,8 @@ int drm_release(struct inode *inode, struct file *filp)
        }
        up(&dev->struct_sem);
 
-       if (dev->driver->free_filp_priv)
-               dev->driver->free_filp_priv(dev, priv);
-
+       if (dev->driver->postclose)
+               dev->driver->postclose(dev, priv);
        drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
 
        /* ========================================================
@@ -313,7 +474,7 @@ int drm_release(struct inode *inode, struct file *filp)
                }
                spin_unlock(&dev->count_lock);
                unlock_kernel();
-               return drm_takedown(dev);
+               return drm_lastclose(dev);
        }
        spin_unlock(&dev->count_lock);
 
@@ -321,129 +482,11 @@ int drm_release(struct inode *inode, struct file *filp)
 
        return retcode;
 }
-
 EXPORT_SYMBOL(drm_release);
 
-/**
- * Called whenever a process opens /dev/drm.
- *
- * \param inode device inode.
- * \param filp file pointer.
- * \param dev device.
- * \return zero on success or a negative number on failure.
- *
- * Creates and initializes a drm_file structure for the file private data in \p
- * filp and add it into the double linked list in \p dev.
- */
-static int drm_open_helper(struct inode *inode, struct file *filp,
-                          drm_device_t * dev)
-{
-       int minor = iminor(inode);
-       drm_file_t *priv;
-       int ret;
-
-       if (filp->f_flags & O_EXCL)
-               return -EBUSY;  /* No exclusive opens */
-       if (!drm_cpu_valid())
-               return -EINVAL;
-
-       DRM_DEBUG("pid = %d, minor = %d\n", current->pid, minor);
-
-       priv = drm_alloc(sizeof(*priv), DRM_MEM_FILES);
-       if (!priv)
-               return -ENOMEM;
-
-       memset(priv, 0, sizeof(*priv));
-       filp->private_data = priv;
-       priv->uid = current->euid;
-       priv->pid = current->pid;
-       priv->minor = minor;
-       priv->head = drm_heads[minor];
-       priv->ioctl_count = 0;
-       priv->authenticated = capable(CAP_SYS_ADMIN);
-       priv->lock_count = 0;
-
-       if (dev->driver->open_helper) {
-               ret = dev->driver->open_helper(dev, priv);
-               if (ret < 0)
-                       goto out_free;
-       }
-
-       down(&dev->struct_sem);
-       if (!dev->file_last) {
-               priv->next = NULL;
-               priv->prev = NULL;
-               dev->file_first = priv;
-               dev->file_last = priv;
-       } else {
-               priv->next = NULL;
-               priv->prev = dev->file_last;
-               dev->file_last->next = priv;
-               dev->file_last = priv;
-       }
-       up(&dev->struct_sem);
-
-#ifdef __alpha__
-       /*
-        * Default the hose
-        */
-       if (!dev->hose) {
-               struct pci_dev *pci_dev;
-               pci_dev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, NULL);
-               if (pci_dev) {
-                       dev->hose = pci_dev->sysdata;
-                       pci_dev_put(pci_dev);
-               }
-               if (!dev->hose) {
-                       struct pci_bus *b = pci_bus_b(pci_root_buses.next);
-                       if (b)
-                               dev->hose = b->sysdata;
-               }
-       }
-#endif
-
-       return 0;
-      out_free:
-       drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
-       filp->private_data = NULL;
-       return ret;
-}
-
-/** No-op. */
-int drm_flush(struct file *filp)
-{
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-
-       DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
-                 current->pid, (long)old_encode_dev(priv->head->device),
-                 dev->open_count);
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_flush);
-
-/** No-op. */
-int drm_fasync(int fd, struct file *filp, int on)
-{
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
-       int retcode;
-
-       DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
-                 (long)old_encode_dev(priv->head->device));
-       retcode = fasync_helper(fd, filp, on, &dev->buf_async);
-       if (retcode < 0)
-               return retcode;
-       return 0;
-}
-
-EXPORT_SYMBOL(drm_fasync);
-
 /** No-op. */
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
        return 0;
 }
-
 EXPORT_SYMBOL(drm_poll);
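One practical consequence of the master tracking added in drm_open_helper() above: a second client can no longer issue DRM_MASTER-protected ioctls, since drm_ioctl() now checks priv->master independently of CAP_SYS_ADMIN. An illustrative sequence (device path and values are only a sketch):

/*   fd1 = open("/dev/dri/card0", O_RDWR);   // first opener: master = 1
 *   fd2 = open("/dev/dri/card0", O_RDWR);   // master = 0
 *
 *   ioctl(fd2, DRM_IOCTL_ADD_MAP, &map);    // -EACCES from drm_ioctl(),
 *                                           // ADD_MAP carries DRM_MASTER */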
diff --git a/drivers/char/drm/drm_init.c b/drivers/char/drm/drm_init.c
deleted file mode 100644 (file)
index 754b934..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * \file drm_init.c
- * Setup/Cleanup for DRM
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- */
-
-/*
- * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Check whether DRI will run on this CPU.
- *
- * \return non-zero if the DRI will run on this CPU, or zero otherwise.
- */
-int drm_cpu_valid(void)
-{
-#if defined(__i386__)
-       if (boot_cpu_data.x86 == 3)
-               return 0;       /* No cmpxchg on a 386 */
-#endif
-#if defined(__sparc__) && !defined(__sparc_v9__)
-       return 0;               /* No cmpxchg before v9 sparc. */
-#endif
-       return 1;
-}
index 9b0feba6b063e4ba949119f0981a6e600835feaa..bcd4e604d3ecd8f9c1c2467218f363e15fcfd331 100644 (file)
@@ -137,17 +137,22 @@ int drm_setunique(struct inode *inode, struct file *filp,
 
 static int drm_set_busid(drm_device_t * dev)
 {
+       int len;
+
        if (dev->unique != NULL)
                return EBUSY;
 
-       dev->unique_len = 20;
+       dev->unique_len = 40;
        dev->unique = drm_alloc(dev->unique_len + 1, DRM_MEM_DRIVER);
        if (dev->unique == NULL)
                return ENOMEM;
 
-       snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
+       len = snprintf(dev->unique, dev->unique_len, "pci:%04x:%02x:%02x.%d",
                 dev->pci_domain, dev->pci_bus, dev->pci_slot, dev->pci_func);
 
+       if (len > dev->unique_len)
+               DRM_ERROR("Unique buffer overflowed\n");
+
        dev->devname =
            drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
                      2, DRM_MEM_DRIVER);
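The busid buffer grows from 20 to 40 bytes and the snprintf() result is now checked. Illustrative values (a sketch; snprintf() returns the length the untruncated string would have needed, so a result larger than the buffer signals truncation):

/* For domain 0, bus 1, slot 0, function 0:
 *
 *   len = snprintf(buf, 40, "pci:%04x:%02x:%02x.%d", 0, 1, 0, 0);
 *   // buf == "pci:0000:01:00.0", len == 16  -> fits comfortably
 *
 * Only an oversized domain or bus number could push len past 40, and
 * that case is now reported via DRM_ERROR() rather than stored silently
 * clipped. */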
@@ -239,7 +244,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
 {
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
-       drm_client_t __user *argp = (void __user *)arg;
+       drm_client_t __user *argp = (drm_client_t __user *)arg;
        drm_client_t client;
        drm_file_t *pt;
        int idx;
@@ -262,7 +267,7 @@ int drm_getclient(struct inode *inode, struct file *filp,
        client.iocs = pt->ioctl_count;
        up(&dev->struct_sem);
 
-       if (copy_to_user((drm_client_t __user *) arg, &client, sizeof(client)))
+       if (copy_to_user(argp, &client, sizeof(client)))
                return -EFAULT;
        return 0;
 }
@@ -325,17 +330,13 @@ int drm_setversion(DRM_IOCTL_ARGS)
        drm_set_version_t retv;
        int if_version;
        drm_set_version_t __user *argp = (void __user *)data;
-       drm_version_t version;
 
        DRM_COPY_FROM_USER_IOCTL(sv, argp, sizeof(sv));
 
-       memset(&version, 0, sizeof(version));
-
-       dev->driver->version(&version);
        retv.drm_di_major = DRM_IF_MAJOR;
        retv.drm_di_minor = DRM_IF_MINOR;
-       retv.drm_dd_major = version.version_major;
-       retv.drm_dd_minor = version.version_minor;
+       retv.drm_dd_major = dev->driver->major;
+       retv.drm_dd_minor = dev->driver->minor;
 
        DRM_COPY_TO_USER_IOCTL(argp, retv, sizeof(sv));
 
@@ -343,7 +344,7 @@ int drm_setversion(DRM_IOCTL_ARGS)
                if (sv.drm_di_major != DRM_IF_MAJOR ||
                    sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
                        return EINVAL;
-               if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
+               if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
                dev->if_version = DRM_MAX(if_version, dev->if_version);
                if (sv.drm_di_minor >= 1) {
                        /*
@@ -354,9 +355,9 @@ int drm_setversion(DRM_IOCTL_ARGS)
        }
 
        if (sv.drm_dd_major != -1) {
-               if (sv.drm_dd_major != version.version_major ||
+               if (sv.drm_dd_major != dev->driver->major ||
                    sv.drm_dd_minor < 0
-                   || sv.drm_dd_minor > version.version_minor)
+                   || sv.drm_dd_minor > dev->driver->minor)
                        return EINVAL;
 
                if (dev->driver->set_version)
index b48a595d54eca0e9c13b0cb269b88fba6dc49f56..f9e45303498dd66164106d7af210200b7b6c86ba 100644 (file)
@@ -130,7 +130,6 @@ int drm_lock(struct inode *inode, struct file *filp,
        /* dev->driver->kernel_context_switch isn't used by any of the x86
         *  drivers but is used by the Sparc driver.
         */
-
        if (dev->driver->kernel_context_switch &&
            dev->last_context != lock.context) {
                dev->driver->kernel_context_switch(dev, dev->last_context,
index abef2acf99f596d67d34b29031c85cfadcd3afc3..8074771e348fd384598b259c748a67b23c99650b 100644 (file)
@@ -145,30 +145,22 @@ DRM_AGP_MEM *drm_alloc_agp(drm_device_t * dev, int pages, u32 type)
        return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
 }
 
-EXPORT_SYMBOL(drm_alloc_agp);
-
 /** Wrapper around agp_free_memory() */
 int drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
        return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
 
-EXPORT_SYMBOL(drm_free_agp);
-
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
 {
        return drm_agp_bind_memory(handle, start);
 }
 
-EXPORT_SYMBOL(drm_bind_agp);
-
 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
        return drm_agp_unbind_memory(handle);
 }
-
-EXPORT_SYMBOL(drm_unbind_agp);
 #endif                         /* agp */
 #endif                         /* debug_memory */
index b370aca718d278602217bbbcb58fb5812218c96d..e84605fc54af09e8073cd3a99c399fc658b1cfd4 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * \file drm_memory.h
+ * \file drm_memory_debug.h
  * Memory management wrappers for DRM.
  *
  * \author Rickard E. (Rik) Faith <faith@valinux.com>
@@ -43,42 +43,41 @@ typedef struct drm_mem_stats {
        unsigned long bytes_freed;
 } drm_mem_stats_t;
 
-static DEFINE_SPINLOCK(DRM(mem_lock));
-static unsigned long DRM(ram_available) = 0;   /* In pages */
-static unsigned long DRM(ram_used) = 0;
-static drm_mem_stats_t DRM(mem_stats)[] =
+static spinlock_t drm_mem_lock = SPIN_LOCK_UNLOCKED;
+static unsigned long drm_ram_available = 0;    /* In pages */
+static unsigned long drm_ram_used = 0;
+static drm_mem_stats_t drm_mem_stats[] =
 {
-       [DRM_MEM_DMA] = {
-       "dmabufs"},[DRM_MEM_SAREA] = {
-       "sareas"},[DRM_MEM_DRIVER] = {
-       "driver"},[DRM_MEM_MAGIC] = {
-       "magic"},[DRM_MEM_IOCTLS] = {
-       "ioctltab"},[DRM_MEM_MAPS] = {
-       "maplist"},[DRM_MEM_VMAS] = {
-       "vmalist"},[DRM_MEM_BUFS] = {
-       "buflist"},[DRM_MEM_SEGS] = {
-       "seglist"},[DRM_MEM_PAGES] = {
-       "pagelist"},[DRM_MEM_FILES] = {
-       "files"},[DRM_MEM_QUEUES] = {
-       "queues"},[DRM_MEM_CMDS] = {
-       "commands"},[DRM_MEM_MAPPINGS] = {
-       "mappings"},[DRM_MEM_BUFLISTS] = {
-       "buflists"},[DRM_MEM_AGPLISTS] = {
-       "agplist"},[DRM_MEM_SGLISTS] = {
-       "sglist"},[DRM_MEM_TOTALAGP] = {
-       "totalagp"},[DRM_MEM_BOUNDAGP] = {
-       "boundagp"},[DRM_MEM_CTXBITMAP] = {
-       "ctxbitmap"},[DRM_MEM_CTXLIST] = {
-       "ctxlist"},[DRM_MEM_STUB] = {
-       "stub"}, {
-       NULL, 0,}               /* Last entry must be null */
+       [DRM_MEM_DMA] = {"dmabufs"},
+       [DRM_MEM_SAREA] = {"sareas"},
+       [DRM_MEM_DRIVER] = {"driver"},
+       [DRM_MEM_MAGIC] = {"magic"},
+       [DRM_MEM_IOCTLS] = {"ioctltab"},
+       [DRM_MEM_MAPS] = {"maplist"},
+       [DRM_MEM_VMAS] = {"vmalist"},
+       [DRM_MEM_BUFS] = {"buflist"},
+       [DRM_MEM_SEGS] = {"seglist"},
+       [DRM_MEM_PAGES] = {"pagelist"},
+       [DRM_MEM_FILES] = {"files"},
+       [DRM_MEM_QUEUES] = {"queues"},
+       [DRM_MEM_CMDS] = {"commands"},
+       [DRM_MEM_MAPPINGS] = {"mappings"},
+       [DRM_MEM_BUFLISTS] = {"buflists"},
+       [DRM_MEM_AGPLISTS] = {"agplist"},
+       [DRM_MEM_SGLISTS] = {"sglist"},
+       [DRM_MEM_TOTALAGP] = {"totalagp"},
+       [DRM_MEM_BOUNDAGP] = {"boundagp"},
+       [DRM_MEM_CTXBITMAP] = {"ctxbitmap"},
+       [DRM_MEM_CTXLIST] = {"ctxlist"},
+       [DRM_MEM_STUB] = {"stub"},
+       {NULL, 0,}              /* Last entry must be null */
 };
 
-void DRM(mem_init) (void) {
+void drm_mem_init (void) {
        drm_mem_stats_t *mem;
        struct sysinfo si;
 
-       for (mem = DRM(mem_stats); mem->name; ++mem) {
+       for (mem = drm_mem_stats; mem->name; ++mem) {
                mem->succeed_count = 0;
                mem->free_count = 0;
                mem->fail_count = 0;
@@ -87,13 +86,13 @@ void DRM(mem_init) (void) {
        }
 
        si_meminfo(&si);
-       DRM(ram_available) = si.totalram;
-       DRM(ram_used) = 0;
+       drm_ram_available = si.totalram;
+       drm_ram_used = 0;
 }
 
 /* drm_mem_info is called whenever a process reads /dev/drm/mem. */
 
-static int DRM(_mem_info) (char *buf, char **start, off_t offset,
+static int drm__mem_info (char *buf, char **start, off_t offset,
                           int request, int *eof, void *data) {
        drm_mem_stats_t *pt;
        int len = 0;
@@ -112,11 +111,11 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
                       " | allocs      bytes\n\n");
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
                       "system", 0, 0, 0,
-                      DRM(ram_available) << (PAGE_SHIFT - 10));
+                      drm_ram_available << (PAGE_SHIFT - 10));
        DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu kB         |\n",
-                      "locked", 0, 0, 0, DRM(ram_used) >> 10);
+                      "locked", 0, 0, 0, drm_ram_used >> 10);
        DRM_PROC_PRINT("\n");
-       for (pt = DRM(mem_stats); pt->name; pt++) {
+       for (pt = drm_mem_stats; pt->name; pt++) {
                DRM_PROC_PRINT("%-9.9s %5d %5d %4d %10lu %10lu | %6d %10ld\n",
                               pt->name,
                               pt->succeed_count,
@@ -135,17 +134,17 @@ static int DRM(_mem_info) (char *buf, char **start, off_t offset,
        return len - offset;
 }
 
-int DRM(mem_info) (char *buf, char **start, off_t offset,
+int drm_mem_info (char *buf, char **start, off_t offset,
                   int len, int *eof, void *data) {
        int ret;
 
-       spin_lock(&DRM(mem_lock));
-       ret = DRM(_mem_info) (buf, start, offset, len, eof, data);
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ret = drm__mem_info (buf, start, offset, len, eof, data);
+       spin_unlock(&drm_mem_lock);
        return ret;
 }
 
-void *DRM(alloc) (size_t size, int area) {
+void *drm_alloc (size_t size, int area) {
        void *pt;
 
        if (!size) {
@@ -154,41 +153,41 @@ void *DRM(alloc) (size_t size, int area) {
        }
 
        if (!(pt = kmalloc(size, GFP_KERNEL))) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[area].fail_count;
-               spin_unlock(&DRM(mem_lock));
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
                return NULL;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[area].succeed_count;
-       DRM(mem_stats)[area].bytes_allocated += size;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += size;
+       spin_unlock(&drm_mem_lock);
        return pt;
 }
 
-void *DRM(calloc) (size_t nmemb, size_t size, int area) {
+void *drm_calloc (size_t nmemb, size_t size, int area) {
        void *addr;
 
-       addr = DRM(alloc) (nmemb * size, area);
+       addr = drm_alloc (nmemb * size, area);
        if (addr != NULL)
                memset((void *)addr, 0, size * nmemb);
 
        return addr;
 }
 
-void *DRM(realloc) (void *oldpt, size_t oldsize, size_t size, int area) {
+void *drm_realloc (void *oldpt, size_t oldsize, size_t size, int area) {
        void *pt;
 
-       if (!(pt = DRM(alloc) (size, area)))
+       if (!(pt = drm_alloc (size, area)))
                return NULL;
        if (oldpt && oldsize) {
                memcpy(pt, oldpt, oldsize);
-               DRM(free) (oldpt, oldsize, area);
+               drm_free (oldpt, oldsize, area);
        }
        return pt;
 }
 
-void DRM(free) (void *pt, size_t size, int area) {
+void drm_free (void *pt, size_t size, int area) {
        int alloc_count;
        int free_count;
 
@@ -196,43 +195,43 @@ void DRM(free) (void *pt, size_t size, int area) {
                DRM_MEM_ERROR(area, "Attempt to free NULL pointer\n");
        else
                kfree(pt);
-       spin_lock(&DRM(mem_lock));
-       DRM(mem_stats)[area].bytes_freed += size;
-       free_count = ++DRM(mem_stats)[area].free_count;
-       alloc_count = DRM(mem_stats)[area].succeed_count;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       drm_mem_stats[area].bytes_freed += size;
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       spin_unlock(&drm_mem_lock);
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area, "Excess frees: %d frees, %d allocs\n",
                              free_count, alloc_count);
        }
 }
 
-unsigned long DRM(alloc_pages) (int order, int area) {
+unsigned long drm_alloc_pages (int order, int area) {
        unsigned long address;
        unsigned long bytes = PAGE_SIZE << order;
        unsigned long addr;
        unsigned int sz;
 
-       spin_lock(&DRM(mem_lock));
-       if ((DRM(ram_used) >> PAGE_SHIFT)
-           > (DRM_RAM_PERCENT * DRM(ram_available)) / 100) {
-               spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       if ((drm_ram_used >> PAGE_SHIFT)
+           > (DRM_RAM_PERCENT * drm_ram_available) / 100) {
+               spin_unlock(&drm_mem_lock);
                return 0;
        }
-       spin_unlock(&DRM(mem_lock));
+       spin_unlock(&drm_mem_lock);
 
        address = __get_free_pages(GFP_KERNEL|__GFP_COMP, order);
        if (!address) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[area].fail_count;
-               spin_unlock(&DRM(mem_lock));
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[area].fail_count;
+               spin_unlock(&drm_mem_lock);
                return 0;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[area].succeed_count;
-       DRM(mem_stats)[area].bytes_allocated += bytes;
-       DRM(ram_used) += bytes;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_allocated += bytes;
+       drm_ram_used += bytes;
+       spin_unlock(&drm_mem_lock);
 
        /* Zero outside the lock */
        memset((void *)address, 0, bytes);
@@ -246,7 +245,7 @@ unsigned long DRM(alloc_pages) (int order, int area) {
        return address;
 }
 
-void DRM(free_pages) (unsigned long address, int order, int area) {
+void drm_free_pages (unsigned long address, int order, int area) {
        unsigned long bytes = PAGE_SIZE << order;
        int alloc_count;
        int free_count;
@@ -264,12 +263,12 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
                free_pages(address, order);
        }
 
-       spin_lock(&DRM(mem_lock));
-       free_count = ++DRM(mem_stats)[area].free_count;
-       alloc_count = DRM(mem_stats)[area].succeed_count;
-       DRM(mem_stats)[area].bytes_freed += bytes;
-       DRM(ram_used) -= bytes;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[area].free_count;
+       alloc_count = drm_mem_stats[area].succeed_count;
+       drm_mem_stats[area].bytes_freed += bytes;
+       drm_ram_used -= bytes;
+       spin_unlock(&drm_mem_lock);
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(area,
                              "Excess frees: %d frees, %d allocs\n",
@@ -277,7 +276,7 @@ void DRM(free_pages) (unsigned long address, int order, int area) {
        }
 }
 
-void *DRM(ioremap) (unsigned long offset, unsigned long size,
+void *drm_ioremap (unsigned long offset, unsigned long size,
                    drm_device_t * dev) {
        void *pt;
 
@@ -288,19 +287,19 @@ void *DRM(ioremap) (unsigned long offset, unsigned long size,
        }
 
        if (!(pt = drm_ioremap(offset, size, dev))) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-               spin_unlock(&DRM(mem_lock));
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+               spin_unlock(&drm_mem_lock);
                return NULL;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-       DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+       drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+       spin_unlock(&drm_mem_lock);
        return pt;
 }
 
-void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
+void *drm_ioremap_nocache (unsigned long offset, unsigned long size,
                            drm_device_t * dev) {
        void *pt;
 
@@ -311,19 +310,19 @@ void *DRM(ioremap_nocache) (unsigned long offset, unsigned long size,
        }
 
        if (!(pt = drm_ioremap_nocache(offset, size, dev))) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[DRM_MEM_MAPPINGS].fail_count;
-               spin_unlock(&DRM(mem_lock));
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count;
+               spin_unlock(&drm_mem_lock);
                return NULL;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-       DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_allocated += size;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+       drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size;
+       spin_unlock(&drm_mem_lock);
        return pt;
 }
 
-void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
+void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) {
        int alloc_count;
        int free_count;
 
@@ -333,11 +332,11 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
        else
                drm_ioremapfree(pt, size, dev);
 
-       spin_lock(&DRM(mem_lock));
-       DRM(mem_stats)[DRM_MEM_MAPPINGS].bytes_freed += size;
-       free_count = ++DRM(mem_stats)[DRM_MEM_MAPPINGS].free_count;
-       alloc_count = DRM(mem_stats)[DRM_MEM_MAPPINGS].succeed_count;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size;
+       free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count;
+       alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count;
+       spin_unlock(&drm_mem_lock);
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_MAPPINGS,
                              "Excess frees: %d frees, %d allocs\n",
@@ -347,7 +346,7 @@ void DRM(ioremapfree) (void *pt, unsigned long size, drm_device_t * dev) {
 
 #if __OS_HAS_AGP
 
-DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
+DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
        DRM_AGP_MEM *handle;
 
        if (!pages) {
@@ -355,21 +354,21 @@ DRM_AGP_MEM *DRM(alloc_agp) (int pages, u32 type) {
                return NULL;
        }
 
-       if ((handle = DRM(agp_allocate_memory) (pages, type))) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-               DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_allocated
+       if ((handle = drm_agp_allocate_memory (pages, type))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_allocated
                    += pages << PAGE_SHIFT;
-               spin_unlock(&DRM(mem_lock));
+               spin_unlock(&drm_mem_lock);
                return handle;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[DRM_MEM_TOTALAGP].fail_count;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_TOTALAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
        return NULL;
 }
 
-int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
+int drm_free_agp (DRM_AGP_MEM * handle, int pages) {
        int alloc_count;
        int free_count;
        int retval = -EINVAL;
@@ -380,13 +379,13 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
                return retval;
        }
 
-       if (DRM(agp_free_memory) (handle)) {
-               spin_lock(&DRM(mem_lock));
-               free_count = ++DRM(mem_stats)[DRM_MEM_TOTALAGP].free_count;
-               alloc_count = DRM(mem_stats)[DRM_MEM_TOTALAGP].succeed_count;
-               DRM(mem_stats)[DRM_MEM_TOTALAGP].bytes_freed
+       if (drm_agp_free_memory (handle)) {
+               spin_lock(&drm_mem_lock);
+               free_count = ++drm_mem_stats[DRM_MEM_TOTALAGP].free_count;
+               alloc_count = drm_mem_stats[DRM_MEM_TOTALAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_TOTALAGP].bytes_freed
                    += pages << PAGE_SHIFT;
-               spin_unlock(&DRM(mem_lock));
+               spin_unlock(&drm_mem_lock);
                if (free_count > alloc_count) {
                        DRM_MEM_ERROR(DRM_MEM_TOTALAGP,
                                      "Excess frees: %d frees, %d allocs\n",
@@ -397,7 +396,7 @@ int DRM(free_agp) (DRM_AGP_MEM * handle, int pages) {
        return retval;
 }
 
-int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
+int drm_bind_agp (DRM_AGP_MEM * handle, unsigned int start) {
        int retcode = -EINVAL;
 
        if (!handle) {
@@ -406,21 +405,21 @@ int DRM(bind_agp) (DRM_AGP_MEM * handle, unsigned int start) {
                return retcode;
        }
 
-       if (!(retcode = DRM(agp_bind_memory) (handle, start))) {
-               spin_lock(&DRM(mem_lock));
-               ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-               DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_allocated
+       if (!(retcode = drm_agp_bind_memory (handle, start))) {
+               spin_lock(&drm_mem_lock);
+               ++drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+               drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_allocated
                    += handle->page_count << PAGE_SHIFT;
-               spin_unlock(&DRM(mem_lock));
+               spin_unlock(&drm_mem_lock);
                return retcode;
        }
-       spin_lock(&DRM(mem_lock));
-       ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].fail_count;
-       spin_unlock(&DRM(mem_lock));
+       spin_lock(&drm_mem_lock);
+       ++drm_mem_stats[DRM_MEM_BOUNDAGP].fail_count;
+       spin_unlock(&drm_mem_lock);
        return retcode;
 }
 
-int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
+int drm_unbind_agp (DRM_AGP_MEM * handle) {
        int alloc_count;
        int free_count;
        int retcode = -EINVAL;
@@ -431,14 +430,14 @@ int DRM(unbind_agp) (DRM_AGP_MEM * handle) {
                return retcode;
        }
 
-       if ((retcode = DRM(agp_unbind_memory) (handle)))
+       if ((retcode = drm_agp_unbind_memory (handle)))
                return retcode;
-       spin_lock(&DRM(mem_lock));
-       free_count = ++DRM(mem_stats)[DRM_MEM_BOUNDAGP].free_count;
-       alloc_count = DRM(mem_stats)[DRM_MEM_BOUNDAGP].succeed_count;
-       DRM(mem_stats)[DRM_MEM_BOUNDAGP].bytes_freed
+       spin_lock(&drm_mem_lock);
+       free_count = ++drm_mem_stats[DRM_MEM_BOUNDAGP].free_count;
+       alloc_count = drm_mem_stats[DRM_MEM_BOUNDAGP].succeed_count;
+       drm_mem_stats[DRM_MEM_BOUNDAGP].bytes_freed
            += handle->page_count << PAGE_SHIFT;
-       spin_unlock(&DRM(mem_lock));
+       spin_unlock(&drm_mem_lock);
        if (free_count > alloc_count) {
                DRM_MEM_ERROR(DRM_MEM_BOUNDAGP,
                              "Excess frees: %d frees, %d allocs\n",
index d51aeb4966f48fcc23c24ef4dd6eb42a03a40e52..695115d703822e704a447fcfeb64ea66f6ac713f 100644 (file)
@@ -13,6 +13,7 @@
 #define DRM_ERR(d)                     -(d)
 /** Current process ID */
 #define DRM_CURRENTPID                 current->pid
+#define DRM_SUSER(p)                   capable(CAP_SYS_ADMIN)
 #define DRM_UDELAY(d)                  udelay(d)
 /** Read a byte from a MMIO region */
 #define DRM_READ8(map, offset)         readb(((void __iomem *)(map)->handle) + (offset))
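The new DRM_SUSER() macro gives the OS-abstraction layer a single spelling for "is this caller privileged": on Linux it maps to capable(CAP_SYS_ADMIN) and ignores its argument. An illustrative (not verbatim) call site:

/* Illustrative only: the argument is ignored by the Linux definition above,
 * which checks CAP_SYS_ADMIN on the calling task. */
if (!DRM_SUSER(current))
        return -EACCES;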
index d66dc55e29a07a428afa1b49078cf8d3a14665fa..5b1d3a04458d06bd5af8c81e0c8ef105a30426e1 100644 (file)
@@ -46,6 +46,7 @@
        {0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
        {0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
        {0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+       {0x1002, 0x4E56, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
        {0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
        {0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
        {0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -69,6 +70,7 @@
        {0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
        {0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
        {0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+       {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
        {0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
        {0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
        {0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
        {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \
        {0x1002, 0x596A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
        {0x1002, 0x596B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
        {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
        {0x1002, 0x5c62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
        {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|CHIP_IS_MOBILITY}, \
        {0x1002, 0x5c64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \
+       {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+       {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
        {0, 0, 0}
 
 #define r128_PCI_IDS \
 
 #define viadrv_PCI_IDS \
        {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-       {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \
        {0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0, 0, 0}
 
+#define gamma_PCI_IDS \
+       {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+       {0, 0, 0}
+
 #define savage_PCI_IDS \
        {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
        {0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
        {0x8086, 0x2592, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
        {0, 0, 0}
+
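Each *_PCI_IDS macro in drm_pciids.h expands to the body of a struct pci_device_id table terminated by the {0, 0, 0} sentinel, with the driver_data slot carrying per-chip flags such as CHIP_RV350 or VIA_PRO_GROUP_A. Drivers paste the macro into their own table, as the i810/i830/i915 hunks later in this patch do:

static struct pci_device_id pciidlist[] = {
        i810_PCI_IDS
};
/* Commonly paired with MODULE_DEVICE_TABLE(pci, pciidlist) so module
 * autoloading can match the listed devices (not shown in this diff). */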
index 3f452f763f0fa27d00b7119e170f7b0501534e8f..6f943e3309ef911e7e4d6e3f968d754697fbaf42 100644 (file)
@@ -61,16 +61,14 @@ static struct drm_proc_list {
        const char *name;       /**< file name */
        int (*f) (char *, char **, off_t, int, int *, void *);          /**< proc callback*/
 } drm_proc_list[] = {
-       {
-       "name", drm_name_info}, {
-       "mem", drm_mem_info}, {
-       "vm", drm_vm_info}, {
-       "clients", drm_clients_info}, {
-       "queues", drm_queues_info}, {
-       "bufs", drm_bufs_info},
+       {"name", drm_name_info},
+       {"mem", drm_mem_info},
+       {"vm", drm_vm_info},
+       {"clients", drm_clients_info},
+       {"queues", drm_queues_info},
+       {"bufs", drm_bufs_info},
 #if DRM_DEBUG_CODE
-       {
-       "vma", drm_vma_info},
+       {"vma", drm_vma_info},
 #endif
 };
 
index 60b6f8e8bf693ca441b7da046068a5d517522661..42d766359caa919d6600f7691dc870ef2f8d7ed4 100644 (file)
@@ -93,8 +93,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
 
        dev->driver = driver;
 
-       if (dev->driver->preinit)
-               if ((retcode = dev->driver->preinit(dev, ent->driver_data)))
+       if (dev->driver->load)
+               if ((retcode = dev->driver->load(dev, ent->driver_data)))
                        goto error_out_unreg;
 
        if (drm_core_has_AGP(dev)) {
@@ -124,47 +124,10 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
        return 0;
 
       error_out_unreg:
-       drm_takedown(dev);
+       drm_lastclose(dev);
        return retcode;
 }
 
-/**
- * File \c open operation.
- *
- * \param inode device inode.
- * \param filp file pointer.
- *
- * Puts the dev->fops corresponding to the device minor number into
- * \p filp, call the \c open method, and restore the file operations.
- */
-int drm_stub_open(struct inode *inode, struct file *filp)
-{
-       drm_device_t *dev = NULL;
-       int minor = iminor(inode);
-       int err = -ENODEV;
-       struct file_operations *old_fops;
-
-       DRM_DEBUG("\n");
-
-       if (!((minor >= 0) && (minor < drm_cards_limit)))
-               return -ENODEV;
-
-       if (!drm_heads[minor])
-               return -ENODEV;
-
-       if (!(dev = drm_heads[minor]->dev))
-               return -ENODEV;
-
-       old_fops = filp->f_op;
-       filp->f_op = fops_get(&dev->driver->fops);
-       if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
-               fops_put(filp->f_op);
-               filp->f_op = fops_get(old_fops);
-       }
-       fops_put(old_fops);
-
-       return err;
-}
 
 /**
  * Get a secondary minor number.
@@ -200,11 +163,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
                                goto err_g1;
                        }
 
-                       head->dev_class = drm_sysfs_device_add(drm_class,
-                                                              MKDEV(DRM_MAJOR,
-                                                                    minor),
-                                                              &dev->pdev->dev,
-                                                              "card%d", minor);
+                       head->dev_class = drm_sysfs_device_add(drm_class, head);
                        if (IS_ERR(head->dev_class)) {
                                printk(KERN_ERR
                                       "DRM: Error sysfs_device_add.\n");
@@ -258,11 +217,10 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
        }
        if ((ret = drm_get_head(dev, &dev->primary)))
                goto err_g1;
-
-       /* postinit is a required function to display the signon banner */
-       /* drivers add secondary heads here if needed */
-       if ((ret = dev->driver->postinit(dev, ent->driver_data)))
-               goto err_g1;
+       
+       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+                driver->name, driver->major, driver->minor, driver->patchlevel,
+                driver->date, dev->primary.minor);
 
        return 0;
 
@@ -318,10 +276,9 @@ int drm_put_head(drm_head_t * head)
        DRM_DEBUG("release secondary minor %d\n", minor);
 
        drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
-       drm_sysfs_device_remove(MKDEV(DRM_MAJOR, head->minor));
+       drm_sysfs_device_remove(head->dev_class);
 
-       *head = (drm_head_t) {
-       .dev = NULL};
+       *head = (drm_head_t) {.dev = NULL};
 
        drm_heads[minor] = NULL;
 
index 6d3449761914cc3099298d3456c86906ec8cddb3..68e43ddc16aea44fd927cbc9ae6eb532f7d7eb2a 100644 (file)
@@ -15,8 +15,6 @@
 #include <linux/device.h>
 #include <linux/kdev_t.h>
 #include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/string.h>
 
 #include "drm_core.h"
 #include "drmP.h"
@@ -28,15 +26,11 @@ struct drm_sysfs_class {
 #define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class)
 
 struct simple_dev {
-       struct list_head node;
        dev_t dev;
        struct class_device class_dev;
 };
 #define to_simple_dev(d) container_of(d, struct simple_dev, class_dev)
 
-static LIST_HEAD(simple_dev_list);
-static DEFINE_SPINLOCK(simple_dev_list_lock);
-
 static void release_simple_dev(struct class_device *class_dev)
 {
        struct simple_dev *s_dev = to_simple_dev(class_dev);
@@ -124,6 +118,18 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
        class_unregister(&cs->class);
 }
 
+static ssize_t show_dri(struct class_device *class_device, char *buf)
+{
+       drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+       if (dev->driver->dri_library_name)
+               return dev->driver->dri_library_name(dev, buf);
+       return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
+}
+
+static struct class_device_attribute class_device_attrs[] = {
+       __ATTR(dri_library_name, S_IRUGO, show_dri, NULL),
+};
+
 /**
  * drm_sysfs_device_add - adds a class device to sysfs for a character driver
  * @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
@@ -138,13 +144,11 @@ void drm_sysfs_destroy(struct drm_sysfs_class *cs)
  * Note: the struct drm_sysfs_class passed to this function must have previously been
  * created with a call to drm_sysfs_create().
  */
-struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
-                                         struct device *device,
-                                         const char *fmt, ...)
+struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
+                                         drm_head_t *head)
 {
-       va_list args;
        struct simple_dev *s_dev = NULL;
-       int retval;
+       int i, retval;
 
        if ((cs == NULL) || (IS_ERR(cs))) {
                retval = -ENODEV;
@@ -158,26 +162,23 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
        }
        memset(s_dev, 0x00, sizeof(*s_dev));
 
-       s_dev->dev = dev;
-       s_dev->class_dev.dev = device;
+       s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
+       s_dev->class_dev.dev = &(head->dev->pdev)->dev;
        s_dev->class_dev.class = &cs->class;
 
-       va_start(args, fmt);
-       vsnprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, fmt, args);
-       va_end(args);
+       snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
        retval = class_device_register(&s_dev->class_dev);
        if (retval)
                goto error;
 
        class_device_create_file(&s_dev->class_dev, &cs->attr);
+       class_set_devdata(&s_dev->class_dev, head);
 
-       spin_lock(&simple_dev_list_lock);
-       list_add(&s_dev->node, &simple_dev_list);
-       spin_unlock(&simple_dev_list_lock);
-
+       for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+               class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
        return &s_dev->class_dev;
 
-      error:
+error:
        kfree(s_dev);
        return ERR_PTR(retval);
 }
@@ -189,23 +190,12 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, dev_t dev,
  * This call unregisters and cleans up a class device that was created with a
  * call to drm_sysfs_device_add()
  */
-void drm_sysfs_device_remove(dev_t dev)
+void drm_sysfs_device_remove(struct class_device *class_dev)
 {
-       struct simple_dev *s_dev = NULL;
-       int found = 0;
-
-       spin_lock(&simple_dev_list_lock);
-       list_for_each_entry(s_dev, &simple_dev_list, node) {
-               if (s_dev->dev == dev) {
-                       found = 1;
-                       break;
-               }
-       }
-       if (found) {
-               list_del(&s_dev->node);
-               spin_unlock(&simple_dev_list_lock);
-               class_device_unregister(&s_dev->class_dev);
-       } else {
-               spin_unlock(&simple_dev_list_lock);
-       }
+       struct simple_dev *s_dev = to_simple_dev(class_dev);
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
+               class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]);
+       class_device_unregister(&s_dev->class_dev);
 }
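Besides dropping the simple_dev bookkeeping list, this drm_sysfs.c rewrite attaches the drm_head to the class device via class_set_devdata() and creates one attribute file per entry in class_device_attrs, currently just dri_library_name, whose show routine falls back to the PCI driver name when the driver supplies no dri_library_name hook. A small userspace sketch of reading it (the sysfs path is an assumption about how the class and minor are named):

#include <stdio.h>

int main(void)
{
        char name[64] = "";
        /* Hypothetical path: class "drm", primary head exposed as card0. */
        FILE *f = fopen("/sys/class/drm/card0/dri_library_name", "r");

        if (f && fgets(name, sizeof(name), f))
                printf("DRI client library: %s", name);
        if (f)
                fclose(f);
        return 0;
}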
index dba502373da10e31d608c2db51b7663348589331..cc1b89086876b3f289301685ebb0aefbcd2146a5 100644 (file)
@@ -114,7 +114,6 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 
 static struct file_operations i810_buffer_fops = {
        .open = drm_open,
-       .flush = drm_flush,
        .release = drm_release,
        .ioctl = drm_ioctl,
        .mmap = i810_mmap_buffers,
@@ -1319,12 +1318,24 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
        return 0;
 }
 
-void i810_driver_pretakedown(drm_device_t * dev)
+int i810_driver_load(drm_device_t *dev, unsigned long flags)
+{
+       /* i810 has 4 more counters */
+       dev->counters += 4;
+       dev->types[6] = _DRM_STAT_IRQ;
+       dev->types[7] = _DRM_STAT_PRIMARY;
+       dev->types[8] = _DRM_STAT_SECONDARY;
+       dev->types[9] = _DRM_STAT_DMA;
+
+       return 0;
+}
+
+void i810_driver_lastclose(drm_device_t * dev)
 {
        i810_dma_cleanup(dev);
 }
 
-void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
 {
        if (dev->dev_private) {
                drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1334,7 +1345,7 @@ void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp)
        }
 }
 
-void i810_driver_release(drm_device_t * dev, struct file *filp)
+void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
 {
        i810_reclaim_buffers(dev, filp);
 }
@@ -1346,21 +1357,21 @@ int i810_driver_dma_quiescent(drm_device_t * dev)
 }
 
 drm_ioctl_desc_t i810_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, 1, 1},
-       [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, 1, 0},
-       [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, 1, 0}
+       [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
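The i810 ioctl table (like the i830 and i915 tables later in this patch) swaps the old positional auth/root integer pair for symbolic permission flags: DRM_AUTH requires an authenticated client, DRM_MASTER the DRM master, and DRM_ROOT_ONLY an administrator. A rough sketch of the check the core dispatcher performs with these flags (the per-file field names here are assumptions, not taken from this diff):

/* Sketch only: illustrates the intent of the flag bits. */
if ((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN))
        return -EACCES;
if ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated)
        return -EACCES;
if ((ioctl->flags & DRM_MASTER) && !file_priv->master)
        return -EACCES;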
index 070cef6c2b46c2849e9da709973e2b3c6fba89db..dfe6ad2b6a6e22ef23bf20dfe4c41064ae4a42e6 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       /* i810 has 4 more counters */
-       dev->counters += 4;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-       dev->types[9] = _DRM_STAT_DMA;
-
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        i810_PCI_IDS
 };
@@ -79,16 +47,14 @@ static struct drm_driver driver = {
            DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
            DRIVER_HAVE_DMA | DRIVER_DMA_QUEUE,
        .dev_priv_size = sizeof(drm_i810_buf_priv_t),
-       .pretakedown = i810_driver_pretakedown,
-       .prerelease = i810_driver_prerelease,
+       .load = i810_driver_load,
+       .lastclose = i810_driver_lastclose,
+       .preclose = i810_driver_preclose,
        .device_is_agp = i810_driver_device_is_agp,
-       .release = i810_driver_release,
+       .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
-       .reclaim_buffers = i810_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = i810_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@ -98,13 +64,19 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                }
-       ,
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
-       ,
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init i810_init(void)
index c78f36aaa2f06c3da16fee4d91a994f5a51d4ed6..a18b80d91920b073fc46f787160ed1fa889bd7c9 100644 (file)
@@ -116,9 +116,13 @@ typedef struct drm_i810_private {
 extern void i810_reclaim_buffers(drm_device_t * dev, struct file *filp);
 
 extern int i810_driver_dma_quiescent(drm_device_t * dev);
-extern void i810_driver_release(drm_device_t * dev, struct file *filp);
-extern void i810_driver_pretakedown(drm_device_t * dev);
-extern void i810_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+                                              struct file *filp);
+extern int i810_driver_load(struct drm_device *, unsigned long flags);
+extern void i810_driver_lastclose(drm_device_t * dev);
+extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
+                                              struct file *filp);
 extern int i810_driver_device_is_agp(drm_device_t * dev);
 
 extern drm_ioctl_desc_t i810_ioctls[];
index dc94f1914425b7eabadc538e31c66998e2c08c30..4fea32aed6d240f493946137a7c06c5b29cada72 100644 (file)
@@ -116,7 +116,6 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 
 static struct file_operations i830_buffer_fops = {
        .open = drm_open,
-       .flush = drm_flush,
        .release = drm_release,
        .ioctl = drm_ioctl,
        .mmap = i830_mmap_buffers,
@@ -1517,12 +1516,24 @@ static int i830_setparam(struct inode *inode, struct file *filp,
        return 0;
 }
 
-void i830_driver_pretakedown(drm_device_t * dev)
+int i830_driver_load(drm_device_t *dev, unsigned long flags)
+{
+       /* i830 has 4 more counters */
+       dev->counters += 4;
+       dev->types[6] = _DRM_STAT_IRQ;
+       dev->types[7] = _DRM_STAT_PRIMARY;
+       dev->types[8] = _DRM_STAT_SECONDARY;
+       dev->types[9] = _DRM_STAT_DMA;
+
+       return 0;
+}
+
+void i830_driver_lastclose(drm_device_t * dev)
 {
        i830_dma_cleanup(dev);
 }
 
-void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i830_driver_preclose(drm_device_t * dev, DRMFILE filp)
 {
        if (dev->dev_private) {
                drm_i830_private_t *dev_priv = dev->dev_private;
@@ -1532,7 +1543,7 @@ void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp)
        }
 }
 
-void i830_driver_release(drm_device_t * dev, struct file *filp)
+void i830_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
 {
        i830_reclaim_buffers(dev, filp);
 }
@@ -1544,20 +1555,20 @@ int i830_driver_dma_quiescent(drm_device_t * dev)
 }
 
 drm_ioctl_desc_t i830_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, 1, 0},
-       [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, 1, 0}
+       [DRM_IOCTL_NR(DRM_I830_INIT)] = {i830_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I830_VERTEX)] = {i830_dma_vertex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_CLEAR)] = {i830_clear_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_FLUSH)] = {i830_flush_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_GETAGE)] = {i830_getage, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_GETBUF)] = {i830_getbuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_SWAP)] = {i830_swap_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_COPY)] = {i830_copybuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_DOCOPY)] = {i830_docopy, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_FLIP)] = {i830_flip_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_IRQ_EMIT)] = {i830_irq_emit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_IRQ_WAIT)] = {i830_irq_wait, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_GETPARAM)] = {i830_getparam, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I830_SETPARAM)] = {i830_setparam, DRM_AUTH}
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
index acd821e8fe4d3672eb60b955084496b9509fa797..722658188f5f45619ec8515c6214979f458dde93 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       dev->counters += 4;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-       dev->types[9] = _DRM_STAT_DMA;
-
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        i830_PCI_IDS
 };
@@ -83,12 +52,12 @@ static struct drm_driver driver = {
        .driver_features |= DRIVER_HAVE_IRQ | DRIVER_SHARED_IRQ,
 #endif
        .dev_priv_size = sizeof(drm_i830_buf_priv_t),
-       .pretakedown = i830_driver_pretakedown,
-       .prerelease = i830_driver_prerelease,
+       .load = i830_driver_load,
+       .lastclose = i830_driver_lastclose,
+       .preclose = i830_driver_preclose,
        .device_is_agp = i830_driver_device_is_agp,
-       .release = i830_driver_release,
+       .reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
        .dma_quiescent = i830_driver_dma_quiescent,
-       .reclaim_buffers = i830_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
 #if USE_IRQS
@@ -97,8 +66,6 @@ static struct drm_driver driver = {
        .irq_uninstall = i830_driver_irq_uninstall,
        .irq_handler = i830_driver_irq_handler,
 #endif
-       .postinit = postinit,
-       .version = version,
        .ioctls = i830_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@ -108,13 +75,19 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                }
-       ,
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
 
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init i830_init(void)
index bc4bd49fb0ccbae7d257780e2cef26f2c2f6a27d..bf9075b576bddbcaa36cf29e71464e3c3a90a4f3 100644 (file)
@@ -136,10 +136,12 @@ extern irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i830_driver_irq_preinstall(drm_device_t * dev);
 extern void i830_driver_irq_postinstall(drm_device_t * dev);
 extern void i830_driver_irq_uninstall(drm_device_t * dev);
-extern void i830_driver_pretakedown(drm_device_t * dev);
-extern void i830_driver_release(drm_device_t * dev, struct file *filp);
+extern int i830_driver_load(struct drm_device *, unsigned long flags);
+extern void i830_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void i830_driver_lastclose(drm_device_t * dev);
+extern void i830_driver_reclaim_buffers_locked(drm_device_t * dev,
+                                              struct file *filp);
 extern int i830_driver_dma_quiescent(drm_device_t * dev);
-extern void i830_driver_prerelease(drm_device_t * dev, DRMFILE filp);
 extern int i830_driver_device_is_agp(drm_device_t * dev);
 
 #define I830_READ(reg)          DRM_READ32(dev_priv->mmio_map, reg)
index f3aa0c3701270ba9d9ef4b777afc74fbd746fa46..9140703da1ba0aac8288ab7258fe05d599a6e491 100644 (file)
@@ -1,7 +1,6 @@
 /* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
  */
-/**************************************************************************
- *
+/*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
@@ -25,7 +24,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #include "drmP.h"
 #include "drm.h"
@@ -196,7 +195,7 @@ static int i915_initialize(drm_device_t * dev,
        return 0;
 }
 
-static int i915_resume(drm_device_t * dev)
+static int i915_dma_resume(drm_device_t * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -253,7 +252,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
-               retcode = i915_resume(dev);
+               retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
@@ -654,6 +653,9 @@ static int i915_getparam(DRM_IOCTL_ARGS)
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
+       case I915_PARAM_LAST_DISPATCH:
+               value = READ_BREADCRUMB(dev_priv);
+               break;
        default:
                DRM_ERROR("Unkown parameter %d\n", param.param);
                return DRM_ERR(EINVAL);
@@ -699,7 +701,19 @@ static int i915_setparam(DRM_IOCTL_ARGS)
        return 0;
 }
 
-void i915_driver_pretakedown(drm_device_t * dev)
+int i915_driver_load(drm_device_t *dev, unsigned long flags)
+{
+       /* i915 has 4 more counters */
+       dev->counters += 4;
+       dev->types[6] = _DRM_STAT_IRQ;
+       dev->types[7] = _DRM_STAT_PRIMARY;
+       dev->types[8] = _DRM_STAT_SECONDARY;
+       dev->types[9] = _DRM_STAT_DMA;
+
+       return 0;
+}
+
+void i915_driver_lastclose(drm_device_t * dev)
 {
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
@@ -708,7 +722,7 @@ void i915_driver_pretakedown(drm_device_t * dev)
        i915_dma_cleanup(dev);
 }
 
-void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
 {
        if (dev->dev_private) {
                drm_i915_private_t *dev_priv = dev->dev_private;
@@ -717,18 +731,18 @@ void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp)
 }
 
 drm_ioctl_desc_t i915_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, 1, 1},
-       [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, 1, 0},
-       [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, 1, 1},
-       [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, 1, 0}
+       [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH}
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
index 23e027d2908069af6f71a8d592669e5adbca3c6b..77412ddac007625637e24aabd38a69f94a690cd3 100644 (file)
@@ -1,5 +1,4 @@
-/**************************************************************************
- *
+/*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
@@ -23,7 +22,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #ifndef _I915_DRM_H_
 #define _I915_DRM_H_
@@ -152,6 +151,7 @@ typedef struct drm_i915_irq_wait {
  */
 #define I915_PARAM_IRQ_ACTIVE            1
 #define I915_PARAM_ALLOW_BATCHBUFFER     2
+#define I915_PARAM_LAST_DISPATCH         3
 
 typedef struct drm_i915_getparam {
        int param;
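I915_PARAM_LAST_DISPATCH is the new query added alongside the existing parameters: as the i915_dma.c hunk above shows, it returns READ_BREADCRUMB(dev_priv), the last breadcrumb the ring has retired. A userspace sketch of the query (the 'value' member and the DRM_IOCTL_I915_GETPARAM request number are assumptions beyond what this hunk shows):

/* 'fd' is an open DRM device file descriptor (e.g. /dev/dri/card0). */
struct drm_i915_getparam gp;
int last_dispatch = 0;

gp.param = I915_PARAM_LAST_DISPATCH;
gp.value = &last_dispatch;                    /* 'value' member assumed */
ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);      /* request name assumed */
/* last_dispatch now holds the kernel-side READ_BREADCRUMB(dev_priv). */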
index 0508240f4e3bea16393941faca74ae115da8b68e..8e2e6095c4b3fa927334fe77efaa05a326ce137c 100644 (file)
@@ -1,6 +1,6 @@
 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
  */
-/**************************************************************************
+/*
  *
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
@@ -25,7 +25,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #include "drmP.h"
 #include "drm.h"
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       dev->counters += 4;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-       dev->types[9] = _DRM_STAT_DMA;
-
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        i915_PCI_IDS
 };
 
 static struct drm_driver driver = {
+       /* don't use mtrr's here, the Xserver or user space app should
+        * deal with them for intel hardware.
+        */
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
-           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
-       .pretakedown = i915_driver_pretakedown,
-       .prerelease = i915_driver_prerelease,
+           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+           DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+       .load = i915_driver_load,
+       .lastclose = i915_driver_lastclose,
+       .preclose = i915_driver_preclose,
        .device_is_agp = i915_driver_device_is_agp,
+       .vblank_wait = i915_driver_vblank_wait,
        .irq_preinstall = i915_driver_irq_preinstall,
        .irq_postinstall = i915_driver_irq_postinstall,
        .irq_uninstall = i915_driver_irq_uninstall,
@@ -83,8 +57,6 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = i915_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@ -97,11 +69,19 @@ static struct drm_driver driver = {
 #ifdef CONFIG_COMPAT
                 .compat_ioctl = i915_compat_ioctl,
 #endif
-                },
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+       
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init i915_init(void)
index 17e457c73dc779a7a8e6d42a50ec80c2715485cb..c6c71b45f1013b92949c0a92c800f79a840a358a 100644 (file)
@@ -1,6 +1,6 @@
 /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
  */
-/**************************************************************************
+/*
  *
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
@@ -25,7 +25,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #ifndef _I915_DRV_H_
 #define _I915_DRV_H_
 
 #define DRIVER_NAME            "i915"
 #define DRIVER_DESC            "Intel Graphics"
-#define DRIVER_DATE            "20040405"
+#define DRIVER_DATE            "20051209"
 
 /* Interface history:
  *
  * 1.1: Original.
+ * 1.2: Add Power Management
+ * 1.3: Add vblank support
  */
 #define DRIVER_MAJOR           1
-#define DRIVER_MINOR           1
+#define DRIVER_MINOR           3
 #define DRIVER_PATCHLEVEL      0
 
-/* We use our own dma mechanisms, not the drm template code.  However,
- * the shared IRQ code is useful to us:
- */
-#define __HAVE_PM              1
-
 typedef struct _drm_i915_ring_buffer {
        int tail_mask;
        unsigned long Start;
@@ -97,6 +94,7 @@ typedef struct drm_i915_private {
        int tex_lru_log_granularity;
        int allow_batchbuffer;
        struct mem_block *agp_heap;
+       unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 } drm_i915_private_t;
 
 extern drm_ioctl_desc_t i915_ioctls[];
@@ -104,14 +102,18 @@ extern int i915_max_ioctl;
 
                                /* i915_dma.c */
 extern void i915_kernel_lost_context(drm_device_t * dev);
-extern void i915_driver_pretakedown(drm_device_t * dev);
-extern void i915_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern int i915_driver_load(struct drm_device *, unsigned long flags);
+extern void i915_driver_lastclose(drm_device_t * dev);
+extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
 extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
+                             unsigned long arg);
 
 /* i915_irq.c */
 extern int i915_irq_emit(DRM_IOCTL_ARGS);
 extern int i915_irq_wait(DRM_IOCTL_ARGS);
 
+extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
 extern void i915_driver_irq_preinstall(drm_device_t * dev);
 extern void i915_driver_irq_postinstall(drm_device_t * dev);
@@ -125,13 +127,10 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(drm_device_t * dev,
                             DRMFILE filp, struct mem_block *heap);
 
-extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
-                             unsigned long arg);
-
-#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, reg)
-#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, reg, val)
-#define I915_READ16(reg)       DRM_READ16(dev_priv->mmio_map, reg)
-#define I915_WRITE16(reg,val)  DRM_WRITE16(dev_priv->mmio_map, reg, val)
+#define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))
+#define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
+#define I915_READ16(reg)       DRM_READ16(dev_priv->mmio_map, (reg))
+#define I915_WRITE16(reg,val)  DRM_WRITE16(dev_priv->mmio_map, (reg), (val))
 
 #define I915_VERBOSE 0
 
@@ -195,6 +194,13 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
 #define PPCR                   0x61204
 #define PPCR_ON                        (1<<0)
 
+#define DVOB                   0x61140
+#define DVOB_ON                        (1<<31)
+#define DVOC                   0x61160
+#define DVOC_ON                        (1<<31)
+#define LVDS                   0x61180
+#define LVDS_ON                        (1<<31)
+
 #define ADPA                   0x61100
 #define ADPA_DPMS_MASK         (~(3<<10))
 #define ADPA_DPMS_ON           (0<<10)
@@ -258,4 +264,6 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
 
 #define CMD_OP_DESTBUFFER_INFO  ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
 
+#define READ_BREADCRUMB(dev_priv) (((u32 *)(dev_priv->hw_status_page))[5])
+
 #endif
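
A quick aside on the I915_READ()/I915_WRITE() change above: wrapping the macro arguments in parentheses keeps expression arguments from being re-associated during expansion. A minimal stand-alone sketch; the 0x61000 base and the pipe << 5 offset are made-up illustration values, not real i915 registers:

#include <stdio.h>

#define BAD_OFFSET(reg)   (0x61000 + reg)    /* argument not parenthesized */
#define GOOD_OFFSET(reg)  (0x61000 + (reg))  /* as in the patched I915_READ() */

int main(void)
{
        int pipe = 1;

        /* BAD expands to (0x61000 + pipe << 5): the addition binds first. */
        printf("bad  = 0x%x\n", BAD_OFFSET(pipe << 5));   /* 0xc20020 */
        /* GOOD expands to (0x61000 + (pipe << 5)) == 0x61020. */
        printf("good = 0x%x\n", GOOD_OFFSET(pipe << 5));  /* 0x61020 */
        return 0;
}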
index 4fa448ee846b58dbd4ee0a821c964c341944a1ad..a1381c61aa631e3b9e27f9256157897c9dfaa1db 100644 (file)
@@ -1,7 +1,6 @@
-/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
+/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
  */
-/**************************************************************************
- *
+/*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#define USER_INT_FLAG 0x2
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+
 #define MAX_NOPID ((u32)~0)
-#define READ_BREADCRUMB(dev_priv)  (((u32*)(dev_priv->hw_status_page))[5])
 
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
@@ -43,7 +44,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
        u16 temp;
 
        temp = I915_READ16(I915REG_INT_IDENTITY_R);
-       temp &= USER_INT_FLAG;
+       temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG);
 
        DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
 
@@ -51,7 +52,15 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
                return IRQ_NONE;
 
        I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
-       DRM_WAKEUP(&dev_priv->irq_queue);
+
+       if (temp & USER_INT_FLAG)
+               DRM_WAKEUP(&dev_priv->irq_queue);
+
+       if (temp & VSYNC_PIPEA_FLAG) {
+               atomic_inc(&dev->vbl_received);
+               DRM_WAKEUP(&dev->vbl_queue);
+               drm_vbl_send_signals(dev);
+       }
 
        return IRQ_HANDLED;
 }
@@ -102,6 +111,27 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
        return ret;
 }
 
+int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       unsigned int cur_vblank;
+       int ret = 0;
+
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return DRM_ERR(EINVAL);
+       }
+
+       DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
+                   (((cur_vblank = atomic_read(&dev->vbl_received))
+                       - *sequence) <= (1<<23)));
+
+       *sequence = cur_vblank;
+
+       return ret;
+}
+
+
 /* Needs the lock as it touches the ring.
  */
 int i915_irq_emit(DRM_IOCTL_ARGS)
@@ -165,7 +195,7 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-       I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG);
+       I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | VSYNC_PIPEA_FLAG);
        DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
 }
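
The i915_driver_vblank_wait() added above sleeps until the vblank counter passes the requested sequence; the (cur_vblank - *sequence) <= (1<<23) test relies on unsigned wrap-around so the wait still completes once the 32-bit counter overflows. A small stand-alone sketch of that comparison:

#include <stdio.h>

/* Wrap-safe "has the counter reached the requested sequence?" test,
 * mirroring the DRM_WAIT_ON condition in i915_driver_vblank_wait(). */
static int vblank_reached(unsigned int cur, unsigned int wanted)
{
        return (cur - wanted) <= (1u << 23);
}

int main(void)
{
        /* Counter wrapped from 0xfffffffe past zero to 2: wait satisfied. */
        printf("%d\n", vblank_reached(2u, 0xfffffffeu));  /* prints 1 */
        /* Requested sequence still far in the future: keep waiting. */
        printf("%d\n", vblank_reached(2u, 0x00900000u));  /* prints 0 */
        return 0;
}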
 
index 13176d136a99f3e7da85d076f393f72971f34b53..ba87ff17ff64cfaf123afe895a4b0d6a5a0bc262 100644 (file)
@@ -1,7 +1,6 @@
 /* i915_mem.c -- Simple agp/fb memory manager for i915 -*- linux-c -*-
  */
-/**************************************************************************
- *
+/*
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
@@ -25,7 +24,7 @@
  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
- **************************************************************************/
+ */
 
 #include "drmP.h"
 #include "drm.h"
index 70dc7f64b7b9146c138a42bf97d3648854bb3c5c..c2a4bac14521f2b50d9257544ae45066e44620b4 100644 (file)
@@ -44,7 +44,9 @@
 #define MGA_DEFAULT_USEC_TIMEOUT       10000
 #define MGA_FREELIST_DEBUG             0
 
-static int mga_do_cleanup_dma(drm_device_t * dev);
+#define MINIMAL_CLEANUP 0
+#define FULL_CLEANUP 1
+static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup);
 
 /* ================================================================
  * Engine control
@@ -391,7 +393,7 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
  * DMA initialization, cleanup
  */
 
-int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
+int mga_driver_load(drm_device_t * dev, unsigned long flags)
 {
        drm_mga_private_t *dev_priv;
 
@@ -405,6 +407,14 @@ int mga_driver_preinit(drm_device_t * dev, unsigned long flags)
        dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
        dev_priv->chipset = flags;
 
+       dev_priv->mmio_base = drm_get_resource_start(dev, 1);
+       dev_priv->mmio_size = drm_get_resource_len(dev, 1);
+
+       dev->counters += 3;
+       dev->types[6] = _DRM_STAT_IRQ;
+       dev->types[7] = _DRM_STAT_PRIMARY;
+       dev->types[8] = _DRM_STAT_SECONDARY;
+
        return 0;
 }
 
@@ -438,17 +448,19 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
        drm_buf_desc_t req;
        drm_agp_mode_t mode;
        drm_agp_info_t info;
+       drm_agp_buffer_t agp_req;
+       drm_agp_binding_t bind_req;
 
        /* Acquire AGP. */
        err = drm_agp_acquire(dev);
        if (err) {
-               DRM_ERROR("Unable to acquire AGP\n");
+               DRM_ERROR("Unable to acquire AGP: %d\n", err);
                return err;
        }
 
        err = drm_agp_info(dev, &info);
        if (err) {
-               DRM_ERROR("Unable to get AGP info\n");
+               DRM_ERROR("Unable to get AGP info: %d\n", err);
                return err;
        }
 
@@ -472,18 +484,24 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
        }
 
        /* Allocate and bind AGP memory. */
-       dev_priv->agp_pages = agp_size / PAGE_SIZE;
-       dev_priv->agp_mem = drm_alloc_agp(dev, dev_priv->agp_pages, 0);
-       if (dev_priv->agp_mem == NULL) {
-               dev_priv->agp_pages = 0;
+       agp_req.size = agp_size;
+       agp_req.type = 0;
+       err = drm_agp_alloc(dev, &agp_req);
+       if (err) {
+               dev_priv->agp_size = 0;
                DRM_ERROR("Unable to allocate %uMB AGP memory\n",
                          dma_bs->agp_size);
-               return DRM_ERR(ENOMEM);
+               return err;
        }
+
+       dev_priv->agp_size = agp_size;
+       dev_priv->agp_handle = agp_req.handle;
 
-       err = drm_bind_agp(dev_priv->agp_mem, 0);
+       bind_req.handle = agp_req.handle;
+       bind_req.offset = 0;
+       err = drm_agp_bind(dev, &bind_req);
        if (err) {
-               DRM_ERROR("Unable to bind AGP memory\n");
+               DRM_ERROR("Unable to bind AGP memory: %d\n", err);
                return err;
        }
 
@@ -497,7 +515,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
        err = drm_addmap(dev, offset, warp_size,
                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);
        if (err) {
-               DRM_ERROR("Unable to map WARP microcode\n");
+               DRM_ERROR("Unable to map WARP microcode: %d\n", err);
                return err;
        }
 
@@ -505,7 +523,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
        err = drm_addmap(dev, offset, dma_bs->primary_size,
                         _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);
        if (err) {
-               DRM_ERROR("Unable to map primary DMA region\n");
+               DRM_ERROR("Unable to map primary DMA region: %d\n", err);
                return err;
        }
 
@@ -513,7 +531,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
        err = drm_addmap(dev, offset, secondary_size,
                         _DRM_AGP, 0, &dev->agp_buffer_map);
        if (err) {
-               DRM_ERROR("Unable to map secondary DMA region\n");
+               DRM_ERROR("Unable to map secondary DMA region: %d\n", err);
                return err;
        }
 
@@ -525,15 +543,29 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
 
        err = drm_addbufs_agp(dev, &req);
        if (err) {
-               DRM_ERROR("Unable to add secondary DMA buffers\n");
+               DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }
 
+       {
+               drm_map_list_t *_entry;
+               unsigned long agp_token = 0;
+
+               list_for_each_entry(_entry, &dev->maplist->head, head) {
+                       if (_entry->map == dev->agp_buffer_map)
+                               agp_token = _entry->user_token;
+               }
+               if (!agp_token)
+                       return -EFAULT;
+
+               dev->agp_buffer_token = agp_token;
+       }
+
        offset += secondary_size;
        err = drm_addmap(dev, offset, agp_size - offset,
                         _DRM_AGP, 0, &dev_priv->agp_textures);
        if (err) {
-               DRM_ERROR("Unable to map AGP texture region\n");
+               DRM_ERROR("Unable to map AGP texture region: %d\n", err);
                return err;
        }
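
The block added above walks dev->maplist to recover the user-visible token for the freshly added secondary-buffer map, since drm_addbufs_agp() does not hand that token back directly. A stand-alone sketch of the same lookup over a plain array; the struct is a stand-in, not the kernel's drm_map_list_t:

#include <stddef.h>
#include <stdio.h>

/* Stand-in for drm_map_list_t: a map pointer plus its user token. */
struct map_entry {
        const void *map;
        unsigned long user_token;
};

static unsigned long find_token(const struct map_entry *list, size_t n,
                                const void *wanted)
{
        unsigned long token = 0;
        size_t i;

        for (i = 0; i < n; i++)
                if (list[i].map == wanted)
                        token = list[i].user_token;
        return token;   /* 0 means "not found", as in the patch */
}

int main(void)
{
        int a, b;
        struct map_entry maps[] = { { &a, 0x1000 }, { &b, 0x2000 } };

        printf("0x%lx\n", find_token(maps, 2, &b));   /* 0x2000 */
        return 0;
}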
 
@@ -603,7 +635,8 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
        err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
                         _DRM_READ_ONLY, &dev_priv->warp);
        if (err != 0) {
-               DRM_ERROR("Unable to create mapping for WARP microcode\n");
+               DRM_ERROR("Unable to create mapping for WARP microcode: %d\n",
+                         err);
                return err;
        }
 
@@ -622,7 +655,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
        }
 
        if (err != 0) {
-               DRM_ERROR("Unable to allocate primary DMA region\n");
+               DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
                return DRM_ERR(ENOMEM);
        }
 
@@ -646,7 +679,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
        }
 
        if (bin_count == 0) {
-               DRM_ERROR("Unable to add secondary DMA buffers\n");
+               DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);
                return err;
        }
 
@@ -682,7 +715,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
        err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size,
                         _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
        if (err) {
-               DRM_ERROR("Unable to map MMIO region\n");
+               DRM_ERROR("Unable to map MMIO region: %d\n", err);
                return err;
        }
 
@@ -690,7 +723,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
                         _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
                         &dev_priv->status);
        if (err) {
-               DRM_ERROR("Unable to map status region\n");
+               DRM_ERROR("Unable to map status region: %d\n", err);
                return err;
        }
 
@@ -708,7 +741,7 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
         */
 
        if (err) {
-               mga_do_cleanup_dma(dev);
+               mga_do_cleanup_dma(dev, MINIMAL_CLEANUP);
        }
 
        /* Not only do we want to try and initialize PCI cards for PCI DMA,
@@ -731,35 +764,32 @@ int mga_dma_bootstrap(DRM_IOCTL_ARGS)
        DRM_DEVICE;
        drm_mga_dma_bootstrap_t bootstrap;
        int err;
+       static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
+       const drm_mga_private_t *const dev_priv =
+               (drm_mga_private_t *) dev->dev_private;
 
        DRM_COPY_FROM_USER_IOCTL(bootstrap,
                                 (drm_mga_dma_bootstrap_t __user *) data,
                                 sizeof(bootstrap));
 
        err = mga_do_dma_bootstrap(dev, &bootstrap);
-       if (!err) {
-               static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
-               const drm_mga_private_t *const dev_priv =
-                   (drm_mga_private_t *) dev->dev_private;
-
-               if (dev_priv->agp_textures != NULL) {
-                       bootstrap.texture_handle =
-                           dev_priv->agp_textures->offset;
-                       bootstrap.texture_size = dev_priv->agp_textures->size;
-               } else {
-                       bootstrap.texture_handle = 0;
-                       bootstrap.texture_size = 0;
-               }
+       if (err) {
+               mga_do_cleanup_dma(dev, FULL_CLEANUP);
+               return err;
+       }
 
-               bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
-               if (DRM_COPY_TO_USER((void __user *)data, &bootstrap,
-                                    sizeof(bootstrap))) {
-                       err = DRM_ERR(EFAULT);
-               }
+       if (dev_priv->agp_textures != NULL) {
+               bootstrap.texture_handle = dev_priv->agp_textures->offset;
+               bootstrap.texture_size = dev_priv->agp_textures->size;
        } else {
-               mga_do_cleanup_dma(dev);
+               bootstrap.texture_handle = 0;
+               bootstrap.texture_size = 0;
        }
 
+       bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
+       DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
+                              bootstrap, sizeof(bootstrap));
+
        return err;
 }
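
The modes[] table used by mga_dma_bootstrap() above collapses the low three AGP rate bits into the single fastest rate that is set, so the value copied back to userspace is 0, 1, 2 or 4. A stand-alone sketch of that lookup; reading bits 0-2 as the 1x/2x/4x rate bits is my interpretation, not something the patch states:

#include <stdio.h>

/* Same lookup as mga_dma_bootstrap(): index with the low three AGP
 * rate bits, get back the fastest advertised rate (0 means none). */
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };

int main(void)
{
        unsigned int agp_mode;

        for (agp_mode = 0; agp_mode < 8; agp_mode++)
                printf("rate bits %u -> %dx\n", agp_mode, modes[agp_mode & 0x07]);
        return 0;
}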
 
@@ -853,13 +883,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
 
        ret = mga_warp_install_microcode(dev_priv);
        if (ret < 0) {
-               DRM_ERROR("failed to install WARP ucode!\n");
+               DRM_ERROR("failed to install WARP ucode: %d\n", ret);
                return ret;
        }
 
        ret = mga_warp_init(dev_priv);
        if (ret < 0) {
-               DRM_ERROR("failed to init WARP engine!\n");
+               DRM_ERROR("failed to init WARP engine: %d\n", ret);
                return ret;
        }
 
@@ -904,7 +934,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
        return 0;
 }
 
-static int mga_do_cleanup_dma(drm_device_t * dev)
+static int mga_do_cleanup_dma(drm_device_t *dev, int full_cleanup)
 {
        int err = 0;
        DRM_DEBUG("\n");
@@ -932,31 +962,39 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
 
                if (dev_priv->used_new_dma_init) {
 #if __OS_HAS_AGP
-                       if (dev_priv->agp_mem != NULL) {
-                               dev_priv->agp_textures = NULL;
-                               drm_unbind_agp(dev_priv->agp_mem);
+                       if (dev_priv->agp_handle != 0) {
+                               drm_agp_binding_t unbind_req;
+                               drm_agp_buffer_t free_req;
+
+                               unbind_req.handle = dev_priv->agp_handle;
+                               drm_agp_unbind(dev, &unbind_req);
 
-                               drm_free_agp(dev_priv->agp_mem,
-                                            dev_priv->agp_pages);
-                               dev_priv->agp_pages = 0;
-                               dev_priv->agp_mem = NULL;
+                               free_req.handle = dev_priv->agp_handle;
+                               drm_agp_free(dev, &free_req);
+
+                               dev_priv->agp_textures = NULL;
+                               dev_priv->agp_size = 0;
+                               dev_priv->agp_handle = 0;
                        }
 
                        if ((dev->agp != NULL) && dev->agp->acquired) {
                                err = drm_agp_release(dev);
                        }
 #endif
-                       dev_priv->used_new_dma_init = 0;
                }
 
                dev_priv->warp = NULL;
                dev_priv->primary = NULL;
-               dev_priv->mmio = NULL;
-               dev_priv->status = NULL;
                dev_priv->sarea = NULL;
                dev_priv->sarea_priv = NULL;
                dev->agp_buffer_map = NULL;
 
+               if (full_cleanup) {
+                       dev_priv->mmio = NULL;
+                       dev_priv->status = NULL;
+                       dev_priv->used_new_dma_init = 0;
+               }
+
                memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
                dev_priv->warp_pipe = 0;
                memset(dev_priv->warp_pipe_phys, 0,
@@ -967,7 +1005,7 @@ static int mga_do_cleanup_dma(drm_device_t * dev)
                }
        }
 
-       return err;
+       return 0;
 }
 
 int mga_dma_init(DRM_IOCTL_ARGS)
@@ -985,11 +1023,11 @@ int mga_dma_init(DRM_IOCTL_ARGS)
        case MGA_INIT_DMA:
                err = mga_do_init_dma(dev, &init);
                if (err) {
-                       (void)mga_do_cleanup_dma(dev);
+                       (void)mga_do_cleanup_dma(dev, FULL_CLEANUP);
                }
                return err;
        case MGA_CLEANUP_DMA:
-               return mga_do_cleanup_dma(dev);
+               return mga_do_cleanup_dma(dev, FULL_CLEANUP);
        }
 
        return DRM_ERR(EINVAL);
@@ -1118,7 +1156,7 @@ int mga_dma_buffers(DRM_IOCTL_ARGS)
 /**
  * Called just before the module is unloaded.
  */
-int mga_driver_postcleanup(drm_device_t * dev)
+int mga_driver_unload(drm_device_t * dev)
 {
        drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
        dev->dev_private = NULL;
@@ -1129,9 +1167,9 @@ int mga_driver_postcleanup(drm_device_t * dev)
 /**
  * Called when the last opener of the device is closed.
  */
-void mga_driver_pretakedown(drm_device_t * dev)
+void mga_driver_lastclose(drm_device_t * dev)
 {
-       mga_do_cleanup_dma(dev);
+       mga_do_cleanup_dma(dev, FULL_CLEANUP);
 }
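
The full_cleanup flag threaded through mga_do_cleanup_dma() above lets a failed AGP attempt inside the bootstrap tear down only the DMA state (MINIMAL_CLEANUP), keeping the MMIO and status mappings around for the PCI fallback hinted at in the surrounding comment; FULL_CLEANUP (lastclose, MGA_CLEANUP_DMA, a failed init) releases everything. A toy sketch of that split, with placeholder flags standing in for the driver's maps:

#include <stdio.h>

#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP    1

/* Placeholder state standing in for the driver's DMA state and the
 * MMIO/status mappings that survive a minimal cleanup. */
static int have_dma_state = 1, have_static_maps = 1;

static void do_cleanup(int full_cleanup)
{
        have_dma_state = 0;              /* torn down on every cleanup */
        if (full_cleanup)
                have_static_maps = 0;    /* kept across a failed AGP attempt */
}

int main(void)
{
        do_cleanup(MINIMAL_CLEANUP);     /* AGP path failed, fall back to PCI */
        printf("dma=%d maps=%d\n", have_dma_state, have_static_maps); /* 0 1 */
        do_cleanup(FULL_CLEANUP);        /* lastclose / explicit cleanup */
        printf("dma=%d maps=%d\n", have_dma_state, have_static_maps); /* 0 0 */
        return 0;
}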
 
 int mga_driver_dma_quiescent(drm_device_t * dev)
index 1713451a5cc658ba0ca1c8ef291076010c78c7d6..9f7ed0e0351b0c39cab6751c97df2b5694b5a872 100644 (file)
 #include "drm_pciids.h"
 
 static int mga_driver_device_is_agp(drm_device_t * dev);
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       drm_mga_private_t *const dev_priv =
-           (drm_mga_private_t *) dev->dev_private;
-
-       dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
-       dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
-
-       dev->counters += 3;
-       dev->types[6] = _DRM_STAT_IRQ;
-       dev->types[7] = _DRM_STAT_PRIMARY;
-       dev->types[8] = _DRM_STAT_SECONDARY;
-
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
 
 static struct pci_device_id pciidlist[] = {
        mga_PCI_IDS
@@ -80,12 +45,12 @@ static struct pci_device_id pciidlist[] = {
 
 static struct drm_driver driver = {
        .driver_features =
-           DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
+           DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
            DRIVER_IRQ_VBL,
-       .preinit = mga_driver_preinit,
-       .postcleanup = mga_driver_postcleanup,
-       .pretakedown = mga_driver_pretakedown,
+       .load = mga_driver_load,
+       .unload = mga_driver_unload,
+       .lastclose = mga_driver_lastclose,
        .dma_quiescent = mga_driver_dma_quiescent,
        .device_is_agp = mga_driver_device_is_agp,
        .vblank_wait = mga_driver_vblank_wait,
@@ -96,8 +61,6 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = mga_ioctls,
        .dma_ioctl = mga_dma_buffers,
        .fops = {
@@ -113,9 +76,16 @@ static struct drm_driver driver = {
 #endif
                 },
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init mga_init(void)
index 461728e6a58a456ca96dceb8245048025cf64ce7..6b0c5319350615e9008bda3cd46629aa29f6ebe2 100644 (file)
 
 #define DRIVER_NAME            "mga"
 #define DRIVER_DESC            "Matrox G200/G400"
-#define DRIVER_DATE            "20050607"
+#define DRIVER_DATE            "20051102"
 
 #define DRIVER_MAJOR           3
 #define DRIVER_MINOR           2
-#define DRIVER_PATCHLEVEL      0
+#define DRIVER_PATCHLEVEL      1
 
 typedef struct drm_mga_primary_buffer {
        u8 *start;
@@ -144,22 +144,22 @@ typedef struct drm_mga_private {
        drm_local_map_t *primary;
        drm_local_map_t *agp_textures;
 
-       DRM_AGP_MEM *agp_mem;
-       unsigned int agp_pages;
+       unsigned long agp_handle;
+       unsigned int agp_size;
 } drm_mga_private_t;
 
 extern drm_ioctl_desc_t mga_ioctls[];
 extern int mga_max_ioctl;
 
                                /* mga_dma.c */
-extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
 extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
 extern int mga_dma_init(DRM_IOCTL_ARGS);
 extern int mga_dma_flush(DRM_IOCTL_ARGS);
 extern int mga_dma_reset(DRM_IOCTL_ARGS);
 extern int mga_dma_buffers(DRM_IOCTL_ARGS);
-extern int mga_driver_postcleanup(drm_device_t * dev);
-extern void mga_driver_pretakedown(drm_device_t * dev);
+extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
+extern int mga_driver_unload(drm_device_t * dev);
+extern void mga_driver_lastclose(drm_device_t * dev);
 extern int mga_driver_dma_quiescent(drm_device_t * dev);
 
 extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
index 47f54b5ae956f0e1e1d442f9365bae4d778b5174..2837e669183a29674747b71dd1101f09b1638a9a 100644 (file)
@@ -1127,19 +1127,19 @@ static int mga_wait_fence(DRM_IOCTL_ARGS)
 }
 
 drm_ioctl_desc_t mga_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
-       [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
+       [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
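
The ioctl table above now spells out its permission checks with DRM_AUTH / DRM_MASTER / DRM_ROOT_ONLY bits instead of the old pair of numeric columns. A rough stand-alone sketch of how such a bitmask could gate a call; the flag values and the check itself are illustrative, not the DRM core's actual code path:

#include <stdio.h>

/* Illustrative values only; the real DRM_AUTH/DRM_MASTER/DRM_ROOT_ONLY
 * definitions live in the DRM core headers. */
#define DRM_AUTH       0x1
#define DRM_MASTER     0x2
#define DRM_ROOT_ONLY  0x4

static int may_call(unsigned int flags, int authenticated, int is_master,
                    int is_root)
{
        if ((flags & DRM_AUTH) && !authenticated)
                return 0;
        if ((flags & DRM_MASTER) && !is_master)
                return 0;
        if ((flags & DRM_ROOT_ONLY) && !is_root)
                return 0;
        return 1;
}

int main(void)
{
        /* DRM_MGA_INIT-style entry: needs auth, master and root. */
        printf("%d\n", may_call(DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY, 1, 1, 0)); /* 0 */
        /* DRM_MGA_FLUSH-style entry: authentication alone is enough. */
        printf("%d\n", may_call(DRM_AUTH, 1, 0, 0));                              /* 1 */
        return 0;
}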
index 7452753d4d01d2c0ba8c9c4d6cecd51c95044cfd..db5a60450e689c58f8df6c0ee6800bb8f4f78c0b 100644 (file)
@@ -1,6 +1,7 @@
-/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
+/* r128_cce.c -- ATI Rage 128 driver -*- linux-c -*-
  * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
- *
+ */
+/*
  * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
@@ -559,7 +560,8 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
        if (dev_priv->is_pci) {
 #endif
                dev_priv->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
-               dev_priv->gart_info.addr = dev_priv->gart_info.bus_addr = 0;
+               dev_priv->gart_info.addr = NULL;
+               dev_priv->gart_info.bus_addr = 0;
                dev_priv->gart_info.is_pcie = 0;
                if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
                        DRM_ERROR("failed to init PCI GART!\n");
@@ -601,15 +603,16 @@ int r128_do_cleanup_cce(drm_device_t * dev)
                                drm_core_ioremapfree(dev_priv->cce_ring, dev);
                        if (dev_priv->ring_rptr != NULL)
                                drm_core_ioremapfree(dev_priv->ring_rptr, dev);
-                       if (dev->agp_buffer_map != NULL)
+                       if (dev->agp_buffer_map != NULL) {
                                drm_core_ioremapfree(dev->agp_buffer_map, dev);
+                               dev->agp_buffer_map = NULL;
+                       }
                } else
 #endif
                {
                        if (dev_priv->gart_info.bus_addr)
                                if (!drm_ati_pcigart_cleanup(dev,
-                                                            &dev_priv->
-                                                            gart_info))
+                                                       &dev_priv->gart_info))
                                        DRM_ERROR
                                            ("failed to cleanup PCI GART!\n");
                }
index 5ddc03202411aea7525739a00e1fc3bb1b52b0a2..5d835b006f55e41941e7ccdae4f7a11e45d61655 100644 (file)
@@ -1,7 +1,7 @@
 /* r128_drm.h -- Public header for the r128 driver -*- linux-c -*-
  * Created: Wed Apr  5 19:24:19 2000 by kevin@precisioninsight.com
- *
- * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
+ */
+/* Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All rights reserved.
  *
index 1661e735140257e071ce6dbd19f633143f49be89..e20450ae220e3d8d109b12cbc4ac481f54842be9 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        r128_PCI_IDS
 };
@@ -72,8 +47,8 @@ static struct drm_driver driver = {
            DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
            DRIVER_IRQ_VBL,
        .dev_priv_size = sizeof(drm_r128_buf_priv_t),
-       .prerelease = r128_driver_prerelease,
-       .pretakedown = r128_driver_pretakedown,
+       .preclose = r128_driver_preclose,
+       .lastclose = r128_driver_lastclose,
        .vblank_wait = r128_driver_vblank_wait,
        .irq_preinstall = r128_driver_irq_preinstall,
        .irq_postinstall = r128_driver_irq_postinstall,
@@ -82,8 +57,6 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = r128_ioctls,
        .dma_ioctl = r128_cce_buffers,
        .fops = {
@@ -97,12 +70,19 @@ static struct drm_driver driver = {
 #ifdef CONFIG_COMPAT
                 .compat_ioctl = r128_compat_ioctl,
 #endif
-                }
-       ,
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init r128_init(void)
index 5c79e40eb88f95f74e7bcf6c71e03cf87ebab103..94abffb2cca56859ded7d19631b77f39af2396cf 100644 (file)
@@ -1,7 +1,7 @@
 /* r128_drv.h -- Private header for r128 driver -*- linux-c -*-
  * Created: Mon Dec 13 09:51:11 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ */
+/* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All rights reserved.
  *
@@ -154,8 +154,8 @@ extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
 extern void r128_driver_irq_preinstall(drm_device_t * dev);
 extern void r128_driver_irq_postinstall(drm_device_t * dev);
 extern void r128_driver_irq_uninstall(drm_device_t * dev);
-extern void r128_driver_pretakedown(drm_device_t * dev);
-extern void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp);
+extern void r128_driver_lastclose(drm_device_t * dev);
+extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
 
 extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
index 27eb0e31bd3b3007f7b84a7c717ee7fa2ec7d177..87f8ca2b0685aa4497fb2c02cb906b2f87cdbd40 100644 (file)
@@ -1,5 +1,5 @@
-/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*-
- *
+/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */
+/*
  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
  *
  * The Weather Channel (TM) funded Tungsten Graphics to develop the
index 14479cc08a570279626226fa30cdeb839d630c87..caeecc2c36dab290ddd6da3cf85c38bab7c2d157 100644 (file)
@@ -1,7 +1,7 @@
 /* r128_state.c -- State support for r128 -*- linux-c -*-
  * Created: Thu Jan 27 02:53:43 2000 by gareth@valinux.com
- *
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ */
+/* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -1674,7 +1674,7 @@ static int r128_getparam(DRM_IOCTL_ARGS)
        return 0;
 }
 
-void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
 {
        if (dev->dev_private) {
                drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1684,29 +1684,29 @@ void r128_driver_prerelease(drm_device_t * dev, DRMFILE filp)
        }
 }
 
-void r128_driver_pretakedown(drm_device_t * dev)
+void r128_driver_lastclose(drm_device_t * dev)
 {
        r128_do_cleanup_cce(dev);
 }
 
 drm_ioctl_desc_t r128_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, 1, 1},
-       [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, 1, 1},
-       [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, 1, 1},
-       [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, 1, 0},
-       [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, 1, 1},
-       [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, 1, 0},
+       [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
 };
 
 int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
index 3a1ac5f78b433ac62dbe32900cff440a2ede29b0..291dbf4c8186c4a9926df2fd97c61dfeff1082b8 100644 (file)
@@ -52,8 +52,8 @@ static const int r300_cliprect_cntl[4] = {
  * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
  * buffer, starting with index n.
  */
-static int r300_emit_cliprects(drm_radeon_private_t * dev_priv,
-                              drm_radeon_kcmd_buffer_t * cmdbuf, int n)
+static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
+                              drm_radeon_kcmd_buffer_t *cmdbuf, int n)
 {
        drm_clip_rect_t box;
        int nr;
@@ -216,6 +216,7 @@ void r300_init_reg_flags(void)
        ADD_RANGE(R300_TX_UNK1_0, 16);
        ADD_RANGE(R300_TX_SIZE_0, 16);
        ADD_RANGE(R300_TX_FORMAT_0, 16);
+       ADD_RANGE(R300_TX_PITCH_0, 16);
        /* Texture offset is dangerous and needs more checking */
        ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
        ADD_RANGE(R300_TX_UNK4_0, 16);
@@ -242,7 +243,7 @@ static __inline__ int r300_check_range(unsigned reg, int count)
 
   /* we expect offsets passed to the framebuffer to be either within video memory or
      within AGP space */
-static __inline__ int r300_check_offset(drm_radeon_private_t * dev_priv,
+static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv,
                                        u32 offset)
 {
   /* we really want to check against end of video aperture
@@ -317,8 +318,8 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
  *
  * Note that checks are performed on contents and addresses of the registers
  */
-static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
-                                       drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
+                                       drm_radeon_kcmd_buffer_t *cmdbuf,
                                        drm_r300_cmd_header_t header)
 {
        int reg;
@@ -363,8 +364,8 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t * dev_priv,
  * the graphics card.
  * Called by r300_do_cp_cmdbuf.
  */
-static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
-                                   drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
+                                   drm_radeon_kcmd_buffer_t *cmdbuf,
                                    drm_r300_cmd_header_t header)
 {
        int sz;
@@ -400,8 +401,8 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t * dev_priv,
  * Emit a clear packet from userspace.
  * Called by r300_emit_packet3.
  */
-static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
-                                     drm_radeon_kcmd_buffer_t * cmdbuf)
+static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
+                                     drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        RING_LOCALS;
 
@@ -421,8 +422,8 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t * dev_priv,
        return 0;
 }
 
-static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
-                                              drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
+                                              drm_radeon_kcmd_buffer_t *cmdbuf,
                                               u32 header)
 {
        int count, i, k;
@@ -489,8 +490,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t * dev_priv,
        return 0;
 }
 
-static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
-                                           drm_radeon_kcmd_buffer_t * cmdbuf)
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
+                                           drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        u32 header;
        int count;
@@ -554,8 +555,8 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t * dev_priv,
  * Emit a rendering packet3 from userspace.
  * Called by r300_do_cp_cmdbuf.
  */
-static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
-                                       drm_radeon_kcmd_buffer_t * cmdbuf,
+static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
+                                       drm_radeon_kcmd_buffer_t *cmdbuf,
                                        drm_r300_cmd_header_t header)
 {
        int n;
@@ -623,7 +624,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t * dev_priv,
 /**
  * Emit the sequence to pacify R300.
  */
-static __inline__ void r300_pacify(drm_radeon_private_t * dev_priv)
+static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
 {
        RING_LOCALS;
 
@@ -657,9 +658,10 @@ static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
  * commands on the DMA ring buffer.
  * Called by the ioctl handler function radeon_cp_cmdbuf.
  */
-int r300_do_cp_cmdbuf(drm_device_t * dev,
+int r300_do_cp_cmdbuf(drm_device_t *dev,
                      DRMFILE filp,
-                     drm_file_t * filp_priv, drm_radeon_kcmd_buffer_t * cmdbuf)
+                     drm_file_t *filp_priv,
+                     drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        drm_device_dma_t *dma = dev->dma;
index e5b73c002394635b00b893feb015596b0d3373e1..a0ed20e25221d678bf2785b342dad9021c0b0e3c 100644 (file)
@@ -797,6 +797,7 @@ I am fairly certain that they are correct unless stated otherwise in comments.
 
 #      define R300_TX_FORMAT_YUV_MODE          0x00800000
 
+#define R300_TX_PITCH_0                            0x4500
 #define R300_TX_OFFSET_0                    0x4540
 /* BEGIN: Guess from R200 */
 #       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
index 342302d4674352f85f63b5fa9847b25e98129bd2..915665c7fe7c672bebe54a7f8f73f6d80dc65d29 100644 (file)
@@ -1,5 +1,5 @@
-/* radeon_cp.c -- CP support for Radeon -*- linux-c -*-
- *
+/* radeon_cp.c -- CP support for Radeon -*- linux-c -*- */
+/*
  * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
  * All Rights Reserved.
@@ -824,7 +824,7 @@ static int RADEON_READ_PLL(drm_device_t * dev, int addr)
        return RADEON_READ(RADEON_CLOCK_CNTL_DATA);
 }
 
-static int RADEON_READ_PCIE(drm_radeon_private_t * dev_priv, int addr)
+static int RADEON_READ_PCIE(drm_radeon_private_t *dev_priv, int addr)
 {
        RADEON_WRITE8(RADEON_PCIE_INDEX, addr & 0xff);
        return RADEON_READ(RADEON_PCIE_DATA);
@@ -1125,7 +1125,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
                     | (dev_priv->fb_location >> 16));
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
+       if (dev_priv->flags & CHIP_IS_AGP) {
                RADEON_WRITE(RADEON_MC_AGP_LOCATION,
                             (((dev_priv->gart_vm_start - 1 +
                                dev_priv->gart_size) & 0xffff0000) |
@@ -1152,7 +1152,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
        dev_priv->ring.tail = cur_read_ptr;
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
+       if (dev_priv->flags & CHIP_IS_AGP) {
                /* set RADEON_AGP_BASE here instead of relying on X from user space */
                RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
                RADEON_WRITE(RADEON_CP_RB_RPTR_ADDR,
@@ -1278,13 +1278,15 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)
 /* Enable or disable PCI GART on the chip */
 static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 {
-       u32 tmp = RADEON_READ(RADEON_AIC_CNTL);
+       u32 tmp;
 
        if (dev_priv->flags & CHIP_IS_PCIE) {
                radeon_set_pciegart(dev_priv, on);
                return;
        }
 
+       tmp = RADEON_READ(RADEON_AIC_CNTL);
+
        if (on) {
                RADEON_WRITE(RADEON_AIC_CNTL,
                             tmp | RADEON_PCIGART_TRANSLATE_EN);
@@ -1312,13 +1314,17 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
+
        DRM_DEBUG("\n");
 
-       dev_priv->is_pci = init->is_pci;
+       if (init->is_pci && (dev_priv->flags & CHIP_IS_AGP))
+       {
+               DRM_DEBUG("Forcing AGP card to PCI mode\n");
+               dev_priv->flags &= ~CHIP_IS_AGP;
+       }
 
-       if (dev_priv->is_pci && !dev->sg) {
+       if ((!(dev_priv->flags & CHIP_IS_AGP)) && !dev->sg) {
                DRM_ERROR("PCI GART memory not allocated!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
@@ -1327,12 +1333,11 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
        if (dev_priv->usec_timeout < 1 ||
            dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
                DRM_DEBUG("TIMEOUT problem!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
 
-       switch (init->func) {
+       switch(init->func) {
        case RADEON_INIT_R200_CP:
                dev_priv->microcode_version = UCODE_R200;
                break;
@@ -1353,7 +1358,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
        if ((init->cp_mode != RADEON_CSQ_PRIBM_INDDIS) &&
            (init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
                DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
@@ -1416,8 +1420,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 
        DRM_GETSAREA();
 
-       dev_priv->fb_offset = init->fb_offset;
-       dev_priv->mmio_offset = init->mmio_offset;
        dev_priv->ring_offset = init->ring_offset;
        dev_priv->ring_rptr_offset = init->ring_rptr_offset;
        dev_priv->buffers_offset = init->buffers_offset;
@@ -1425,29 +1427,19 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 
        if (!dev_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
 
-       dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
-       if (!dev_priv->mmio) {
-               DRM_ERROR("could not find mmio region!\n");
-               dev->dev_private = (void *)dev_priv;
-               radeon_do_cleanup_cp(dev);
-               return DRM_ERR(EINVAL);
-       }
        dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
        if (!dev_priv->cp_ring) {
                DRM_ERROR("could not find cp ring region!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
        dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
        if (!dev_priv->ring_rptr) {
                DRM_ERROR("could not find ring read pointer!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
@@ -1455,7 +1447,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
        dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
        if (!dev->agp_buffer_map) {
                DRM_ERROR("could not find dma buffer region!\n");
-               dev->dev_private = (void *)dev_priv;
                radeon_do_cleanup_cp(dev);
                return DRM_ERR(EINVAL);
        }
@@ -1465,7 +1456,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
                    drm_core_findmap(dev, init->gart_textures_offset);
                if (!dev_priv->gart_textures) {
                        DRM_ERROR("could not find GART texture region!\n");
-                       dev->dev_private = (void *)dev_priv;
                        radeon_do_cleanup_cp(dev);
                        return DRM_ERR(EINVAL);
                }
@@ -1476,7 +1466,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
                                    init->sarea_priv_offset);
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
+       if (dev_priv->flags & CHIP_IS_AGP) {
                drm_core_ioremap(dev_priv->cp_ring, dev);
                drm_core_ioremap(dev_priv->ring_rptr, dev);
                drm_core_ioremap(dev->agp_buffer_map, dev);
@@ -1484,7 +1474,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
                    !dev_priv->ring_rptr->handle ||
                    !dev->agp_buffer_map->handle) {
                        DRM_ERROR("could not find ioremap agp regions!\n");
-                       dev->dev_private = (void *)dev_priv;
                        radeon_do_cleanup_cp(dev);
                        return DRM_ERR(EINVAL);
                }
@@ -1525,7 +1514,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
            + RADEON_READ(RADEON_CONFIG_APER_SIZE);
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci)
+       if (dev_priv->flags & CHIP_IS_AGP)
                dev_priv->gart_buffers_offset = (dev->agp_buffer_map->offset
                                                 - dev->agp->base
                                                 + dev_priv->gart_vm_start);
@@ -1551,7 +1540,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
        dev_priv->ring.high_mark = RADEON_RING_HIGH_MARK;
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
+       if (dev_priv->flags & CHIP_IS_AGP) {
                /* Turn off PCI GART */
                radeon_set_pcigart(dev_priv, 0);
        } else
@@ -1561,25 +1550,28 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
                if (dev_priv->pcigart_offset) {
                        dev_priv->gart_info.bus_addr =
                            dev_priv->pcigart_offset + dev_priv->fb_location;
+                       dev_priv->gart_info.mapping.offset =
+                           dev_priv->gart_info.bus_addr;
+                       dev_priv->gart_info.mapping.size =
+                           RADEON_PCIGART_TABLE_SIZE;
+
+                       drm_core_ioremap(&dev_priv->gart_info.mapping, dev);
                        dev_priv->gart_info.addr =
-                           (unsigned long)drm_ioremap(dev_priv->gart_info.
-                                                      bus_addr,
-                                                      RADEON_PCIGART_TABLE_SIZE,
-                                                      dev);
+                           dev_priv->gart_info.mapping.handle;
 
                        dev_priv->gart_info.is_pcie =
                            !!(dev_priv->flags & CHIP_IS_PCIE);
                        dev_priv->gart_info.gart_table_location =
                            DRM_ATI_GART_FB;
 
-                       DRM_DEBUG("Setting phys_pci_gart to %08lX %08lX\n",
+                       DRM_DEBUG("Setting phys_pci_gart to %p %08lX\n",
                                  dev_priv->gart_info.addr,
                                  dev_priv->pcigart_offset);
                } else {
                        dev_priv->gart_info.gart_table_location =
                            DRM_ATI_GART_MAIN;
-                       dev_priv->gart_info.addr =
-                           dev_priv->gart_info.bus_addr = 0;
+                       dev_priv->gart_info.addr = NULL;
+                       dev_priv->gart_info.bus_addr = 0;
                        if (dev_priv->flags & CHIP_IS_PCIE) {
                                DRM_ERROR
                                    ("Cannot use PCI Express without GART in FB memory\n");
@@ -1590,7 +1582,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 
                if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
                        DRM_ERROR("failed to init PCI GART!\n");
-                       dev->dev_private = (void *)dev_priv;
                        radeon_do_cleanup_cp(dev);
                        return DRM_ERR(ENOMEM);
                }
@@ -1604,8 +1595,6 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
 
        dev_priv->last_buf = 0;
 
-       dev->dev_private = (void *)dev_priv;
-
        radeon_do_engine_reset(dev);
 
        return 0;
@@ -1624,11 +1613,15 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
                drm_irq_uninstall(dev);
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
-               if (dev_priv->cp_ring != NULL)
+       if (dev_priv->flags & CHIP_IS_AGP) {
+               if (dev_priv->cp_ring != NULL) {
                        drm_core_ioremapfree(dev_priv->cp_ring, dev);
-               if (dev_priv->ring_rptr != NULL)
+                       dev_priv->cp_ring = NULL;
+               }
+               if (dev_priv->ring_rptr != NULL) {
                        drm_core_ioremapfree(dev_priv->ring_rptr, dev);
+                       dev_priv->ring_rptr = NULL;
+               }
                if (dev->agp_buffer_map != NULL) {
                        drm_core_ioremapfree(dev->agp_buffer_map, dev);
                        dev->agp_buffer_map = NULL;
@@ -1636,17 +1629,20 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
        } else
 #endif
        {
-               if (dev_priv->gart_info.bus_addr)
+
+               if (dev_priv->gart_info.bus_addr) {
+                       /* Turn off PCI GART */
+                       radeon_set_pcigart(dev_priv, 0);
                        if (!drm_ati_pcigart_cleanup(dev, &dev_priv->gart_info))
                                DRM_ERROR("failed to cleanup PCI GART!\n");
+               }
 
-               if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB) {
-                       drm_ioremapfree((void *)dev_priv->gart_info.addr,
-                                       RADEON_PCIGART_TABLE_SIZE, dev);
+               if (dev_priv->gart_info.gart_table_location == DRM_ATI_GART_FB)
+               {
+                       drm_core_ioremapfree(&dev_priv->gart_info.mapping, dev);
                        dev_priv->gart_info.addr = 0;
                }
        }
-
        /* only clear to the start of flags */
        memset(dev_priv, 0, offsetof(drm_radeon_private_t, flags));
 
@@ -1672,7 +1668,7 @@ static int radeon_do_resume_cp(drm_device_t * dev)
        DRM_DEBUG("Starting radeon_do_resume_cp()\n");
 
 #if __OS_HAS_AGP
-       if (!dev_priv->is_pci) {
+       if (dev_priv->flags & CHIP_IS_AGP) {
                /* Turn off PCI GART */
                radeon_set_pcigart(dev_priv, 0);
        } else
@@ -2103,7 +2099,7 @@ int radeon_cp_buffers(DRM_IOCTL_ARGS)
        return ret;
 }
 
-int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
+int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 {
        drm_radeon_private_t *dev_priv;
        int ret = 0;
@@ -2136,11 +2132,14 @@ int radeon_driver_preinit(struct drm_device *dev, unsigned long flags)
                dev_priv->flags |= CHIP_IS_PCIE;
 
        DRM_DEBUG("%s card detected\n",
-                 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
+                 ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : (((dev_priv->flags & CHIP_IS_PCIE) ? "PCIE" : "PCI"))));
        return ret;
 }
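
The radeon changes above retire dev_priv->is_pci in favour of CHIP_IS_AGP / CHIP_IS_PCIE bits in dev_priv->flags, which is also what the reworked "card detected" debug message keys off. A stand-alone sketch of that classification; the bit values are invented for the example:

#include <stdio.h>

/* Bit values invented for the sketch; only the pattern (one flags word
 * carrying CHIP_IS_AGP / CHIP_IS_PCIE) comes from the patch. */
#define CHIP_IS_AGP   (1 << 0)
#define CHIP_IS_PCIE  (1 << 1)

static const char *bus_name(unsigned int flags)
{
        if (flags & CHIP_IS_AGP)
                return "AGP";
        return (flags & CHIP_IS_PCIE) ? "PCIE" : "PCI";
}

int main(void)
{
        printf("%s %s %s\n", bus_name(CHIP_IS_AGP),
               bus_name(CHIP_IS_PCIE), bus_name(0));   /* AGP PCIE PCI */
        return 0;
}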
 
-int radeon_presetup(struct drm_device *dev)
+/* Create mappings for registers and framebuffer so userland doesn't necessarily
+ * have to find them.
+ */
+int radeon_driver_firstopen(struct drm_device *dev)
 {
        int ret;
        drm_local_map_t *map;
@@ -2161,12 +2160,11 @@ int radeon_presetup(struct drm_device *dev)
        return 0;
 }
 
-int radeon_driver_postcleanup(struct drm_device *dev)
+int radeon_driver_unload(struct drm_device *dev)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
 
        DRM_DEBUG("\n");
-
        drm_free(dev_priv, sizeof(*dev_priv), DRM_MEM_DRIVER);
 
        dev->dev_private = NULL;
index 1cd81a671a36cbcefb87a476e6a717a4e97cde08..9c177a6b2a4c46ed953a28a79d34eadd1cfbffd3 100644 (file)
@@ -624,6 +624,11 @@ typedef struct drm_radeon_indirect {
        int discard;
 } drm_radeon_indirect_t;
 
+/* enum for card type parameters */
+#define RADEON_CARD_PCI 0
+#define RADEON_CARD_AGP 1
+#define RADEON_CARD_PCIE 2
+
 /* 1.3: An ioctl to get parameters that aren't available to the 3d
  * client any other way.
  */
@@ -640,6 +645,7 @@ typedef struct drm_radeon_indirect {
 #define RADEON_PARAM_SAREA_HANDLE          9
 #define RADEON_PARAM_GART_TEX_HANDLE       10
 #define RADEON_PARAM_SCRATCH_OFFSET        11
+#define RADEON_PARAM_CARD_TYPE             12
 
 typedef struct drm_radeon_getparam {
        int param;
index ee49670d8162391970125e6e0ebe194122a2148f..b04ed1b562b9509c7e83635888c8f44baf6ae07b 100644 (file)
@@ -42,29 +42,15 @@ int radeon_no_wb;
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers\n");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
 
-static int postinit(struct drm_device *dev, unsigned long flags)
+static int dri_library_name(struct drm_device *dev, char *buf)
 {
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
+       drm_radeon_private_t *dev_priv = dev->dev_private;
+       int family = dev_priv->flags & CHIP_FAMILY_MASK;
 
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
+       return snprintf(buf, PAGE_SIZE, "%s\n",
+                       (family < CHIP_R200) ? "radeon" :
+                       ((family < CHIP_R300) ? "r200" :
+                       "r300"));
 }
 
 static struct pci_device_id pciidlist[] = {
@@ -77,23 +63,22 @@ static struct drm_driver driver = {
            DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
            DRIVER_IRQ_VBL,
        .dev_priv_size = sizeof(drm_radeon_buf_priv_t),
-       .preinit = radeon_driver_preinit,
-       .presetup = radeon_presetup,
-       .postcleanup = radeon_driver_postcleanup,
-       .prerelease = radeon_driver_prerelease,
-       .pretakedown = radeon_driver_pretakedown,
-       .open_helper = radeon_driver_open_helper,
+       .load = radeon_driver_load,
+       .firstopen = radeon_driver_firstopen,
+       .open = radeon_driver_open,
+       .preclose = radeon_driver_preclose,
+       .postclose = radeon_driver_postclose,
+       .lastclose = radeon_driver_lastclose,
+       .unload = radeon_driver_unload,
        .vblank_wait = radeon_driver_vblank_wait,
+       .dri_library_name = dri_library_name,
        .irq_preinstall = radeon_driver_irq_preinstall,
        .irq_postinstall = radeon_driver_irq_postinstall,
        .irq_uninstall = radeon_driver_irq_uninstall,
        .irq_handler = radeon_driver_irq_handler,
-       .free_filp_priv = radeon_driver_free_filp_priv,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = radeon_ioctls,
        .dma_ioctl = radeon_cp_buffers,
        .fops = {
@@ -107,12 +92,19 @@ static struct drm_driver driver = {
 #ifdef CONFIG_COMPAT
                 .compat_ioctl = radeon_compat_ioctl,
 #endif
-                }
-       ,
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
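
The driver-local postinit() and version() helpers deleted above are no longer needed: with .name/.desc/.date/.major/.minor/.patchlevel carried in the drm_driver struct, the DRM core can print the initialization banner and answer the version ioctl itself. Roughly (a sketch of the core-side use, not a quote of the core code):

    DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
             driver->name, driver->major, driver->minor,
             driver->patchlevel, driver->date, minor);
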
 
 static int __init radeon_init(void)
index d92ccee3e54c64b29c9bbc7fcc4b1973386d3644..498b19b1d641aeae0ac6ca52228c38f2925f10fa 100644 (file)
@@ -38,7 +38,7 @@
 
 #define DRIVER_NAME            "radeon"
 #define DRIVER_DESC            "ATI Radeon"
-#define DRIVER_DATE            "20050911"
+#define DRIVER_DATE            "20051229"
 
 /* Interface history:
  *
@@ -73,7 +73,7 @@
  * 1.11- Add packet R200_EMIT_RB3D_BLENDCOLOR to support GL_EXT_blend_color
  *       and GL_EXT_blend_[func|equation]_separate on r200
  * 1.12- Add R300 CP microcode support - this just loads the CP on r300
- *       (No 3D support yet - just microcode loading)
+ *       (No 3D support yet - just microcode loading).
  * 1.13- Add packet R200_EMIT_TCL_POINT_SPRITE_CNTL for ARB_point_parameters
  *     - Add hyperz support, add hyperz flags to clear ioctl.
  * 1.14- Add support for color tiling
  *       R200_EMIT_PP_TXFILTER_0-5, 2 more regs) and R200_EMIT_ATF_TFACTOR
  *       (replaces R200_EMIT_TFACTOR_0 (8 consts instead of 6)
  * 1.19- Add support for gart table in FB memory and PCIE r300
+ * 1.20- Add support for r300 texrect
+ * 1.21- Add support for card type getparam
  */
 #define DRIVER_MAJOR           1
-#define DRIVER_MINOR           19
+#define DRIVER_MINOR           21
 #define DRIVER_PATCHLEVEL      0
 
-#define GET_RING_HEAD(dev_priv)                DRM_READ32(  (dev_priv)->ring_rptr, 0 )
-#define SET_RING_HEAD(dev_priv,val)    DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
-
 /*
  * Radeon chip families
  */
@@ -103,8 +102,8 @@ enum radeon_family {
        CHIP_R100,
        CHIP_RS100,
        CHIP_RV100,
-       CHIP_R200,
        CHIP_RV200,
+       CHIP_R200,
        CHIP_RS200,
        CHIP_R250,
        CHIP_RS250,
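
The RV200/R200 swap above is not cosmetic: family checks are range comparisons against this enum order, so RV200 has to sort below CHIP_R200 to keep being treated as a first-generation part. For instance, the dri_library_name() hook added earlier picks the name with:

    (family < CHIP_R200) ? "radeon" :
    (family < CHIP_R300) ? "r200"  : "r300"
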
@@ -138,6 +137,9 @@ enum radeon_chip_flags {
        CHIP_IS_PCIE = 0x00200000UL,
 };
 
+#define GET_RING_HEAD(dev_priv)                DRM_READ32(  (dev_priv)->ring_rptr, 0 )
+#define SET_RING_HEAD(dev_priv,val)    DRM_WRITE32( (dev_priv)->ring_rptr, 0, (val) )
+
 typedef struct drm_radeon_freelist {
        unsigned int age;
        drm_buf_t *buf;
@@ -245,8 +247,6 @@ typedef struct drm_radeon_private {
 
        drm_radeon_depth_clear_t depth_clear;
 
-       unsigned long fb_offset;
-       unsigned long mmio_offset;
        unsigned long ring_offset;
        unsigned long ring_rptr_offset;
        unsigned long buffers_offset;
@@ -273,7 +273,6 @@ typedef struct drm_radeon_private {
 
        /* starting from here on, data is preserved accross an open */
        uint32_t flags;         /* see radeon_chip_flags */
-       int is_pci;
 } drm_radeon_private_t;
 
 typedef struct drm_radeon_buf_priv {
@@ -330,17 +329,14 @@ extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
 extern void radeon_driver_irq_preinstall(drm_device_t * dev);
 extern void radeon_driver_irq_postinstall(drm_device_t * dev);
 extern void radeon_driver_irq_uninstall(drm_device_t * dev);
-extern void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp);
-extern void radeon_driver_pretakedown(drm_device_t * dev);
-extern int radeon_driver_open_helper(drm_device_t * dev,
-                                    drm_file_t * filp_priv);
-extern void radeon_driver_free_filp_priv(drm_device_t * dev,
-                                        drm_file_t * filp_priv);
-
-extern int radeon_preinit(struct drm_device *dev, unsigned long flags);
-extern int radeon_postinit(struct drm_device *dev, unsigned long flags);
-extern int radeon_postcleanup(struct drm_device *dev);
 
+extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
+extern int radeon_driver_unload(struct drm_device *dev);
+extern int radeon_driver_firstopen(struct drm_device *dev);
+extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
+extern void radeon_driver_lastclose(drm_device_t * dev);
+extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
                                unsigned long arg);
 
@@ -364,6 +360,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
  */
 
 #define RADEON_AGP_COMMAND             0x0f60
+#define RADEON_AGP_COMMAND_PCI_CONFIG   0x0060 /* offset in PCI config */
+#      define RADEON_AGP_ENABLE        (1<<8)
 #define RADEON_AUX_SCISSOR_CNTL                0x26f0
 #      define RADEON_EXCLUSIVE_SCISSOR_0       (1 << 24)
 #      define RADEON_EXCLUSIVE_SCISSOR_1       (1 << 25)
@@ -651,6 +649,8 @@ extern int r300_do_cp_cmdbuf(drm_device_t * dev, DRMFILE filp,
 
 #define RADEON_WAIT_UNTIL              0x1720
 #      define RADEON_WAIT_CRTC_PFLIP           (1 << 0)
+#      define RADEON_WAIT_2D_IDLE              (1 << 14)
+#      define RADEON_WAIT_3D_IDLE              (1 << 15)
 #      define RADEON_WAIT_2D_IDLECLEAN         (1 << 16)
 #      define RADEON_WAIT_3D_IDLECLEAN         (1 << 17)
 #      define RADEON_WAIT_HOST_IDLECLEAN       (1 << 18)
@@ -1105,7 +1105,6 @@ do {                                                                      \
                write = 0;                                      \
                _tab += _i;                                     \
        }                                                       \
-                                                               \
        while (_size > 0) {                                     \
                *(ring + write) = *_tab++;                      \
                write++;                                        \
index 231ac1438c699c4a6323b108af1e8c9f160bb51c..7bc27516d42550d7cde72a37a24227391f59ae26 100644 (file)
@@ -1,5 +1,5 @@
-/* radeon_state.c -- State support for Radeon -*- linux-c -*-
- *
+/* radeon_state.c -- State support for Radeon -*- linux-c -*- */
+/*
  * Copyright 2000 VA Linux Systems, Inc., Fremont, California.
  * All Rights Reserved.
  *
@@ -72,10 +72,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
        case RADEON_EMIT_PP_MISC:
                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-                                                 &data[(RADEON_RB3D_DEPTHOFFSET
-                                                        -
-                                                        RADEON_PP_MISC) /
-                                                       4])) {
+                   &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
                        DRM_ERROR("Invalid depth buffer offset\n");
                        return DRM_ERR(EINVAL);
                }
@@ -83,10 +80,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
        case RADEON_EMIT_PP_CNTL:
                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-                                                 &data[(RADEON_RB3D_COLOROFFSET
-                                                        -
-                                                        RADEON_PP_CNTL) /
-                                                       4])) {
+                   &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
                        DRM_ERROR("Invalid colour buffer offset\n");
                        return DRM_ERR(EINVAL);
                }
@@ -109,10 +103,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
        case RADEON_EMIT_PP_TXFILTER_1:
        case RADEON_EMIT_PP_TXFILTER_2:
                if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
-                                                 &data[(RADEON_PP_TXOFFSET_0
-                                                        -
-                                                        RADEON_PP_TXFILTER_0) /
-                                                       4])) {
+                   &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
                        DRM_ERROR("Invalid R100 texture offset\n");
                        return DRM_ERR(EINVAL);
                }
@@ -126,8 +117,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
        case R200_EMIT_PP_CUBIC_OFFSETS_5:{
                        int i;
                        for (i = 0; i < 5; i++) {
-                               if (radeon_check_and_fixup_offset
-                                   (dev_priv, filp_priv, &data[i])) {
+                               if (radeon_check_and_fixup_offset(dev_priv,
+                                                                 filp_priv,
+                                                                 &data[i])) {
                                        DRM_ERROR
                                            ("Invalid R200 cubic texture offset\n");
                                        return DRM_ERR(EINVAL);
@@ -239,8 +231,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
 
 static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
                                                     dev_priv,
-                                                    drm_file_t * filp_priv,
-                                                    drm_radeon_kcmd_buffer_t *cmdbuf,
+                                                    drm_file_t *filp_priv,
+                                                    drm_radeon_kcmd_buffer_t *
+                                                    cmdbuf,
                                                     unsigned int *cmdsz)
 {
        u32 *cmd = (u32 *) cmdbuf->buf;
@@ -555,7 +548,8 @@ static struct {
        {R200_PP_TXOFFSET_4, 1, "R200_PP_TXOFFSET_4"},
        {R200_PP_TXOFFSET_5, 1, "R200_PP_TXOFFSET_5"},
        {R200_SE_VTE_CNTL, 1, "R200_SE_VTE_CNTL"},
-       {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1, "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
+       {R200_SE_TCL_OUTPUT_VTX_COMP_SEL, 1,
+        "R200_SE_TCL_OUTPUT_VTX_COMP_SEL"},
        {R200_PP_TAM_DEBUG3, 1, "R200_PP_TAM_DEBUG3"},
        {R200_PP_CNTL_X, 1, "R200_PP_CNTL_X"},
        {R200_RB3D_DEPTHXY_OFFSET, 1, "R200_RB3D_DEPTHXY_OFFSET"},
@@ -569,7 +563,7 @@ static struct {
        {R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0, 4,
                    "R200_SE_TCL_INPUT_VTX_VECTOR_ADDR_0"},
        {R200_PP_CUBIC_FACES_0, 1, "R200_PP_CUBIC_FACES_0"},    /* 61 */
-       {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"},    /* 62 */
+       {R200_PP_CUBIC_OFFSET_F1_0, 5, "R200_PP_CUBIC_OFFSET_F1_0"}, /* 62 */
        {R200_PP_CUBIC_FACES_1, 1, "R200_PP_CUBIC_FACES_1"},
        {R200_PP_CUBIC_OFFSET_F1_1, 5, "R200_PP_CUBIC_OFFSET_F1_1"},
        {R200_PP_CUBIC_FACES_2, 1, "R200_PP_CUBIC_FACES_2"},
@@ -592,7 +586,7 @@ static struct {
        {RADEON_PP_CUBIC_FACES_2, 1, "RADEON_PP_CUBIC_FACES_2"},
        {RADEON_PP_CUBIC_OFFSET_T2_0, 5, "RADEON_PP_CUBIC_OFFSET_T2_0"},
        {R200_PP_TRI_PERF, 2, "R200_PP_TRI_PERF"},
-       {R200_PP_AFS_0, 32, "R200_PP_AFS_0"},   /* 85 */
+       {R200_PP_AFS_0, 32, "R200_PP_AFS_0"},     /* 85 */
        {R200_PP_AFS_1, 32, "R200_PP_AFS_1"},
        {R200_PP_TFACTOR_0, 8, "R200_ATF_TFACTOR"},
        {R200_PP_TXFILTER_0, 8, "R200_PP_TXCTLALL_0"},
@@ -985,8 +979,8 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
         * rendering a quad into just those buffers.  Thus, we have to
         * make sure the 3D engine is configured correctly.
         */
-       if ((dev_priv->microcode_version == UCODE_R200) &&
-           (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
+       else if ((dev_priv->microcode_version == UCODE_R200) &&
+               (flags & (RADEON_DEPTH | RADEON_STENCIL))) {
 
                int tempPP_CNTL;
                int tempRE_CNTL;
@@ -1637,6 +1631,14 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
                    (u32 *) ((char *)dev->agp_buffer_map->handle + buf->offset);
                dwords = size / 4;
 
+#define RADEON_COPY_MT(_buf, _data, _width) \
+       do { \
+               if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
+                       DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
+                       return DRM_ERR(EFAULT); \
+               } \
+       } while(0)
+
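
The new RADEON_COPY_MT macro folds the repeated copy-from-user plus EFAULT-report pattern used throughout the upload paths below into one place. Its return statement exits the enclosing radeon_cp_dispatch_texture(), which is presumably why it is defined inside the function and #undef'd once the upload loop is done; one invocation expands to roughly:

    do {
            if (DRM_COPY_FROM_USER(buffer, data,
                                   (int)(tex_width * sizeof(u32)))) {
                    DRM_ERROR("EFAULT on pad, %d bytes\n",
                              (int)(tex_width * sizeof(u32)));
                    return DRM_ERR(EFAULT);
            }
    } while (0);
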
                if (microtile) {
                        /* texture micro tiling in use, minimum texture width is thus 16 bytes.
                           however, we cannot use blitter directly for texture width < 64 bytes,
@@ -1648,46 +1650,19 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
                           from user space. */
                        if (tex->height == 1) {
                                if (tex_width >= 64 || tex_width <= 16) {
-                                       if (DRM_COPY_FROM_USER(buffer, data,
-                                                              tex_width *
-                                                              sizeof(u32))) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer, data,
+                                               (int)(tex_width * sizeof(u32)));
                                } else if (tex_width == 32) {
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer, data, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer + 8, data + 16, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer, data, 16);
+                                       RADEON_COPY_MT(buffer + 8,
+                                                      data + 16, 16);
                                }
                        } else if (tex_width >= 64 || tex_width == 16) {
-                               if (DRM_COPY_FROM_USER(buffer, data,
-                                                      dwords * sizeof(u32))) {
-                                       DRM_ERROR("EFAULT on data, %d dwords\n",
-                                                 dwords);
-                                       return DRM_ERR(EFAULT);
-                               }
+                               RADEON_COPY_MT(buffer, data,
+                                              (int)(dwords * sizeof(u32)));
                        } else if (tex_width < 16) {
                                for (i = 0; i < tex->height; i++) {
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer, data, tex_width)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer, data, tex_width);
                                        buffer += 4;
                                        data += tex_width;
                                }
@@ -1695,37 +1670,13 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
                                /* TODO: make sure this works when not fitting in one buffer
                                   (i.e. 32bytes x 2048...) */
                                for (i = 0; i < tex->height; i += 2) {
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer, data, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer, data, 16);
                                        data += 16;
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer + 8, data, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer + 8, data, 16);
                                        data += 16;
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer + 4, data, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer + 4, data, 16);
                                        data += 16;
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer + 12, data, 16)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer + 12, data, 16);
                                        data += 16;
                                        buffer += 16;
                                }
@@ -1735,31 +1686,22 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
                                /* Texture image width is larger than the minimum, so we
                                 * can upload it directly.
                                 */
-                               if (DRM_COPY_FROM_USER(buffer, data,
-                                                      dwords * sizeof(u32))) {
-                                       DRM_ERROR("EFAULT on data, %d dwords\n",
-                                                 dwords);
-                                       return DRM_ERR(EFAULT);
-                               }
+                               RADEON_COPY_MT(buffer, data,
+                                              (int)(dwords * sizeof(u32)));
                        } else {
                                /* Texture image width is less than the minimum, so we
                                 * need to pad out each image scanline to the minimum
                                 * width.
                                 */
                                for (i = 0; i < tex->height; i++) {
-                                       if (DRM_COPY_FROM_USER
-                                           (buffer, data, tex_width)) {
-                                               DRM_ERROR
-                                                   ("EFAULT on pad, %d bytes\n",
-                                                    tex_width);
-                                               return DRM_ERR(EFAULT);
-                                       }
+                                       RADEON_COPY_MT(buffer, data, tex_width);
                                        buffer += 8;
                                        data += tex_width;
                                }
                        }
                }
 
+#undef RADEON_COPY_MT
                buf->filp = filp;
                buf->used = size;
                offset = dev_priv->gart_buffers_offset + buf->offset;
@@ -1821,7 +1763,7 @@ static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
 }
 
 static void radeon_apply_surface_regs(int surf_index,
-                                     drm_radeon_private_t * dev_priv)
+                                     drm_radeon_private_t *dev_priv)
 {
        if (!dev_priv->mmio)
                return;
@@ -1847,8 +1789,8 @@ static void radeon_apply_surface_regs(int surf_index,
  * freed, we suddenly need two surfaces to store A and C, which might
  * not always be available.
  */
-static int alloc_surface(drm_radeon_surface_alloc_t * new,
-                        drm_radeon_private_t * dev_priv, DRMFILE filp)
+static int alloc_surface(drm_radeon_surface_alloc_t *new,
+                        drm_radeon_private_t *dev_priv, DRMFILE filp)
 {
        struct radeon_virt_surface *s;
        int i;
@@ -2158,6 +2100,11 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
 
        LOCK_TEST_WITH_RETURN(dev, filp);
 
+       if (!dev_priv) {
+               DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+               return DRM_ERR(EINVAL);
+       }
+
        DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
 
        DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
@@ -2596,9 +2543,9 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
        return 0;
 }
 
-static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
                                          drm_radeon_cmd_header_t header,
-                                         drm_radeon_kcmd_buffer_t * cmdbuf)
+                                         drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        int sz = header.scalars.count;
        int start = header.scalars.offset;
@@ -2618,9 +2565,9 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t * dev_priv,
 
 /* God this is ugly
  */
-static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
                                           drm_radeon_cmd_header_t header,
-                                          drm_radeon_kcmd_buffer_t * cmdbuf)
+                                          drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        int sz = header.scalars.count;
        int start = ((unsigned int)header.scalars.offset) + 0x100;
@@ -2638,9 +2585,9 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t * dev_priv,
        return 0;
 }
 
-static __inline__ int radeon_emit_vectors(drm_radeon_private_t * dev_priv,
+static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
                                          drm_radeon_cmd_header_t header,
-                                         drm_radeon_kcmd_buffer_t * cmdbuf)
+                                         drm_radeon_kcmd_buffer_t *cmdbuf)
 {
        int sz = header.vectors.count;
        int start = header.vectors.offset;
@@ -2685,8 +2632,8 @@ static int radeon_emit_packet3(drm_device_t * dev,
        return 0;
 }
 
-static int radeon_emit_packet3_cliprect(drm_device_t * dev,
-                                       drm_file_t * filp_priv,
+static int radeon_emit_packet3_cliprect(drm_device_t *dev,
+                                       drm_file_t *filp_priv,
                                        drm_radeon_kcmd_buffer_t *cmdbuf,
                                        int orig_nbox)
 {
@@ -2818,7 +2765,8 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
                kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
                if (kbuf == NULL)
                        return DRM_ERR(ENOMEM);
-               if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf, cmdbuf.bufsz)) {
+               if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
+                                      cmdbuf.bufsz)) {
                        drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
                        return DRM_ERR(EFAULT);
                }
@@ -2981,7 +2929,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
                value = dev_priv->gart_vm_start;
                break;
        case RADEON_PARAM_REGISTER_HANDLE:
-               value = dev_priv->mmio_offset;
+               value = dev_priv->mmio->offset;
                break;
        case RADEON_PARAM_STATUS_HANDLE:
                value = dev_priv->ring_rptr_offset;
@@ -3004,6 +2952,15 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
        case RADEON_PARAM_GART_TEX_HANDLE:
                value = dev_priv->gart_textures_offset;
                break;
+       
+       case RADEON_PARAM_CARD_TYPE:
+               if (dev_priv->flags & CHIP_IS_PCIE)
+                       value = RADEON_CARD_PCIE;
+               else if (dev_priv->flags & CHIP_IS_AGP)
+                       value = RADEON_CARD_AGP;
+               else
+                       value = RADEON_CARD_PCI;
+               break;
        default:
                return DRM_ERR(EINVAL);
        }
@@ -3066,10 +3023,11 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
 /* When a client dies:
  *    - Check for and clean up flipped page state
  *    - Free any alloced GART memory.
+ *    - Free any alloced radeon surfaces.
  *
  * DRM infrastructure takes care of reclaiming dma buffers.
  */
-void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
+void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
 {
        if (dev->dev_private) {
                drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3082,16 +3040,17 @@ void radeon_driver_prerelease(drm_device_t * dev, DRMFILE filp)
        }
 }
 
-void radeon_driver_pretakedown(drm_device_t * dev)
+void radeon_driver_lastclose(drm_device_t * dev)
 {
        radeon_do_release(dev);
 }
 
-int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
+int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
 {
        drm_radeon_private_t *dev_priv = dev->dev_private;
        struct drm_radeon_driver_file_fields *radeon_priv;
 
+       DRM_DEBUG("\n");
        radeon_priv =
            (struct drm_radeon_driver_file_fields *)
            drm_alloc(sizeof(*radeon_priv), DRM_MEM_FILES);
@@ -3100,6 +3059,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
                return -ENOMEM;
 
        filp_priv->driver_priv = radeon_priv;
+
        if (dev_priv)
                radeon_priv->radeon_fb_delta = dev_priv->fb_location;
        else
@@ -3107,7 +3067,7 @@ int radeon_driver_open_helper(drm_device_t * dev, drm_file_t * filp_priv)
        return 0;
 }
 
-void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
+void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
 {
        struct drm_radeon_driver_file_fields *radeon_priv =
            filp_priv->driver_priv;
@@ -3116,33 +3076,33 @@ void radeon_driver_free_filp_priv(drm_device_t * dev, drm_file_t * filp_priv)
 }
 
 drm_ioctl_desc_t radeon_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, 1, 1},
-       [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, 1, 0}
+       [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
 };
 
 int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
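
The ioctl table rewrite above tracks a change in drm_ioctl_desc_t itself: the two per-entry ints (authentication required, root only) are replaced by a single flags word, so the {fn, 1, 1} entries in this table become DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY and the {fn, 1, 0} entries become DRM_AUTH. Approximately, with field names as in the old and new drmP.h (a sketch, not a quote):

    /* before */
    typedef struct drm_ioctl_desc {
            drm_ioctl_t *func;
            int auth_needed;
            int root_only;
    } drm_ioctl_desc_t;

    /* after */
    typedef struct drm_ioctl_desc {
            drm_ioctl_t *func;
            int flags;      /* DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY */
    } drm_ioctl_desc_t;
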
index 6d10515795cc91f1dc7564d0060a02a7afa78b9c..0d426deeefec1992681f43259ea0c55aae4b0cdd 100644 (file)
@@ -533,16 +533,32 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
        dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
 }
 
+int savage_driver_load(drm_device_t *dev, unsigned long chipset)
+{
+       drm_savage_private_t *dev_priv;
+
+       dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+       if (dev_priv == NULL)
+               return DRM_ERR(ENOMEM);
+
+       memset(dev_priv, 0, sizeof(drm_savage_private_t));
+       dev->dev_private = (void *)dev_priv;
+
+       dev_priv->chipset = (enum savage_family)chipset;
+
+       return 0;
+}
+
+
 /*
  * Initialize mappings. On Savage4 and SavageIX the alignment
  * and size of the aperture is not suitable for automatic MTRR setup
- * in drm_addmap. Therefore we do it manually before the maps are
- * initialized. We also need to take care of deleting the MTRRs in
- * postcleanup.
+ * in drm_addmap. Therefore we add them manually before the maps are
+ * initialized, and tear them down on last close.
  */
-int savage_preinit(drm_device_t * dev, unsigned long chipset)
+int savage_driver_firstopen(drm_device_t *dev)
 {
-       drm_savage_private_t *dev_priv;
+       drm_savage_private_t *dev_priv = dev->dev_private;
        unsigned long mmio_base, fb_base, fb_size, aperture_base;
        /* fb_rsrc and aper_rsrc aren't really used currently, but still exist
         * in case we decide we need information on the BAR for BSD in the
@@ -551,14 +567,6 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
        unsigned int fb_rsrc, aper_rsrc;
        int ret = 0;
 
-       dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
-       if (dev_priv == NULL)
-               return DRM_ERR(ENOMEM);
-
-       memset(dev_priv, 0, sizeof(drm_savage_private_t));
-       dev->dev_private = (void *)dev_priv;
-       dev_priv->chipset = (enum savage_family)chipset;
-
        dev_priv->mtrr[0].handle = -1;
        dev_priv->mtrr[1].handle = -1;
        dev_priv->mtrr[2].handle = -1;
@@ -576,26 +584,24 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x01000000;
                        dev_priv->mtrr[0].handle =
-                           mtrr_add(dev_priv->mtrr[0].base,
-                                    dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
-                                    1);
+                           drm_mtrr_add(dev_priv->mtrr[0].base,
+                                        dev_priv->mtrr[0].size, DRM_MTRR_WC);
                        dev_priv->mtrr[1].base = fb_base + 0x02000000;
                        dev_priv->mtrr[1].size = 0x02000000;
                        dev_priv->mtrr[1].handle =
-                           mtrr_add(dev_priv->mtrr[1].base,
-                                    dev_priv->mtrr[1].size, MTRR_TYPE_WRCOMB,
-                                    1);
+                           drm_mtrr_add(dev_priv->mtrr[1].base,
+                                        dev_priv->mtrr[1].size, DRM_MTRR_WC);
                        dev_priv->mtrr[2].base = fb_base + 0x04000000;
                        dev_priv->mtrr[2].size = 0x04000000;
                        dev_priv->mtrr[2].handle =
-                           mtrr_add(dev_priv->mtrr[2].base,
-                                    dev_priv->mtrr[2].size, MTRR_TYPE_WRCOMB,
-                                    1);
+                           drm_mtrr_add(dev_priv->mtrr[2].base,
+                                        dev_priv->mtrr[2].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 0));
                }
-       } else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
+       } else if (dev_priv->chipset != S3_SUPERSAVAGE &&
+                  dev_priv->chipset != S3_SAVAGE2000) {
                mmio_base = drm_get_resource_start(dev, 0);
                fb_rsrc = 1;
                fb_base = drm_get_resource_start(dev, 1);
@@ -609,9 +615,8 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
                        dev_priv->mtrr[0].base = fb_base;
                        dev_priv->mtrr[0].size = 0x08000000;
                        dev_priv->mtrr[0].handle =
-                           mtrr_add(dev_priv->mtrr[0].base,
-                                    dev_priv->mtrr[0].size, MTRR_TYPE_WRCOMB,
-                                    1);
+                           drm_mtrr_add(dev_priv->mtrr[0].base,
+                                        dev_priv->mtrr[0].size, DRM_MTRR_WC);
                } else {
                        DRM_ERROR("strange pci_resource_len %08lx\n",
                                  drm_get_resource_len(dev, 1));
@@ -648,16 +653,21 @@ int savage_preinit(drm_device_t * dev, unsigned long chipset)
 /*
  * Delete MTRRs and free device-private data.
  */
-int savage_postcleanup(drm_device_t * dev)
+void savage_driver_lastclose(drm_device_t *dev)
 {
        drm_savage_private_t *dev_priv = dev->dev_private;
        int i;
 
        for (i = 0; i < 3; ++i)
                if (dev_priv->mtrr[i].handle >= 0)
-                       mtrr_del(dev_priv->mtrr[i].handle,
+                       drm_mtrr_del(dev_priv->mtrr[i].handle,
                                 dev_priv->mtrr[i].base,
-                                dev_priv->mtrr[i].size);
+                                dev_priv->mtrr[i].size, DRM_MTRR_WC);
+}
+
+int savage_driver_unload(drm_device_t *dev)
+{
+       drm_savage_private_t *dev_priv = dev->dev_private;
 
        drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
 
@@ -994,8 +1004,7 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
  * DMA buffer management
  */
 
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t * dev,
-                                 drm_dma_t * d)
+static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
 {
        drm_buf_t *buf;
        int i;
@@ -1057,7 +1066,7 @@ int savage_bci_buffers(DRM_IOCTL_ARGS)
        return ret;
 }
 
-void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
+void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
 {
        drm_device_dma_t *dma = dev->dma;
        drm_savage_private_t *dev_priv = dev->dev_private;
@@ -1090,10 +1099,10 @@ void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp)
 }
 
 drm_ioctl_desc_t savage_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
-       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
-       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
+       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
 };
 
 int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
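
The mtrr_add()/mtrr_del() calls earlier in this file are converted to the new drm_mtrr_add()/drm_mtrr_del() wrappers with DRM_MTRR_WC. A hedged sketch of what those wrappers amount to on Linux, inferred from the direct calls they replace (the real definitions live in the DRM core headers, not in this excerpt):

    #include <asm/mtrr.h>

    static inline int drm_mtrr_add(unsigned long offset, unsigned long size,
                                   unsigned int flags)
    {
            /* DRM_MTRR_WC requests a write-combining range */
            return mtrr_add(offset, size, MTRR_TYPE_WRCOMB, 1);
    }

    static inline int drm_mtrr_del(int handle, unsigned long offset,
                                   unsigned long size, unsigned int flags)
    {
            return mtrr_del(handle, offset, size);
    }
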
index 22d799cde41cb3fd3e1af3a3e6aaea41be6298a5..aa6c0d1a82f87caec7ae419ccee64cf51261634b 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        savage_PCI_IDS
 };
@@ -63,13 +38,13 @@ static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
        .dev_priv_size = sizeof(drm_savage_buf_priv_t),
-       .preinit = savage_preinit,
-       .postinit = postinit,
-       .postcleanup = savage_postcleanup,
+       .load = savage_driver_load,
+       .firstopen = savage_driver_firstopen,
+       .lastclose = savage_driver_lastclose,
+       .unload = savage_driver_unload,
        .reclaim_buffers = savage_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .version = version,
        .ioctls = savage_ioctls,
        .dma_ioctl = savage_bci_buffers,
        .fops = {
@@ -80,12 +55,19 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                }
-       ,
+       },
+
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init savage_init(void)
index a4b0fa998a95614c4a88087172eea68b8d1f8b5c..dd46cb85439c846d03a46ae019f721948d87857a 100644 (file)
@@ -1,5 +1,5 @@
-/* savage_drv.h -- Private header for the savage driver
- *
+/* savage_drv.h -- Private header for the savage driver */
+/*
  * Copyright 2004  Felix Kuehling
  * All Rights Reserved.
  *
@@ -192,7 +192,7 @@ typedef struct drm_savage_private {
        /* Err, there is a macro wait_event in include/linux/wait.h.
         * Avoid unwanted macro expansion. */
        void (*emit_clip_rect) (struct drm_savage_private * dev_priv,
-                               drm_clip_rect_t * pbox);
+                               const drm_clip_rect_t * pbox);
        void (*dma_flush) (struct drm_savage_private * dev_priv);
 } drm_savage_private_t;
 
@@ -208,16 +208,18 @@ extern void savage_dma_reset(drm_savage_private_t * dev_priv);
 extern void savage_dma_wait(drm_savage_private_t * dev_priv, unsigned int page);
 extern uint32_t *savage_dma_alloc(drm_savage_private_t * dev_priv,
                                  unsigned int n);
-extern int savage_preinit(drm_device_t * dev, unsigned long chipset);
-extern int savage_postcleanup(drm_device_t * dev);
+extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
+extern int savage_driver_firstopen(drm_device_t *dev);
+extern void savage_driver_lastclose(drm_device_t *dev);
+extern int savage_driver_unload(drm_device_t *dev);
 extern int savage_do_cleanup_bci(drm_device_t * dev);
 extern void savage_reclaim_buffers(drm_device_t * dev, DRMFILE filp);
 
 /* state functions */
 extern void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
-                                     drm_clip_rect_t * pbox);
+                                     const drm_clip_rect_t * pbox);
 extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
-                                    drm_clip_rect_t * pbox);
+                                    const drm_clip_rect_t * pbox);
 
 #define SAVAGE_FB_SIZE_S3      0x01000000      /*  16MB */
 #define SAVAGE_FB_SIZE_S4      0x02000000      /*  32MB */
@@ -500,15 +502,6 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
 
 #define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
 
-#define BCI_COPY_FROM_USER(src,n) do {                         \
-    unsigned int i;                                            \
-    for (i = 0; i < n; ++i) {                                  \
-       uint32_t val;                                           \
-       DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]);    \
-       BCI_WRITE(val);                                         \
-    }                                                          \
-} while(0)
-
 /*
  * command DMA support
  */
@@ -534,8 +527,8 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
 
 #define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
 
-#define DMA_COPY_FROM_USER(src,n) do {                         \
-       DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4);    \
+#define DMA_COPY(src, n) do {                                  \
+       memcpy(dma_ptr, (src), (n)*4);                          \
        dma_ptr += n;                                           \
 } while(0)
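
DMA_COPY replaces DMA_COPY_FROM_USER (and BCI_COPY_FROM_USER above is dropped entirely) because the command, vertex and index streams are no longer read from user space while they are being emitted; the dispatch helpers in savage_state.c below now take plain kernel pointers, and the __user annotations, DRM_VERIFYAREA_READ and DRM_COPY_FROM_USER_UNCHECKED calls go away with them. A hedged sketch of the pattern this relies on, with placeholder names since the actual copy-in site in the cmdbuf ioctl handler is outside this excerpt:

    /* copy the whole user buffer once, then dispatch from kernel memory */
    void *kbuf = drm_alloc(usr_size, DRM_MEM_DRIVER);
    if (kbuf == NULL)
            return DRM_ERR(ENOMEM);
    if (DRM_COPY_FROM_USER(kbuf, usr_ptr, usr_size)) {
            drm_free(kbuf, usr_size, DRM_MEM_DRIVER);
            return DRM_ERR(EFAULT);
    }
    /* ... savage_dispatch_*() work on kbuf ... */
    drm_free(kbuf, usr_size, DRM_MEM_DRIVER);
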
 
index e87a5d59b99c4559ce3b6d7a62a4bb7976fa5de4..ef2581d16146de03a6b9e717b9ba701490918038 100644 (file)
@@ -27,7 +27,7 @@
 #include "savage_drv.h"
 
 void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
-                              drm_clip_rect_t * pbox)
+                              const drm_clip_rect_t * pbox)
 {
        uint32_t scstart = dev_priv->state.s3d.new_scstart;
        uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
 }
 
 void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
-                             drm_clip_rect_t * pbox)
+                             const drm_clip_rect_t * pbox)
 {
        uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
        uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -115,18 +115,19 @@ static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
 
 #define SAVE_STATE(reg,where)                  \
        if(start <= reg && start+count > reg)   \
-               DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
+               dev_priv->state.where = regs[reg - start]
 #define SAVE_STATE_MASK(reg,where,mask) do {                   \
        if(start <= reg && start+count > reg) {                 \
                uint32_t tmp;                                   \
-               DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]);  \
+               tmp = regs[reg - start];                        \
                dev_priv->state.where = (tmp & (mask)) |        \
                        (dev_priv->state.where & ~(mask));      \
        }                                                       \
 } while (0)
+
 static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
                                   unsigned int start, unsigned int count,
-                                  const uint32_t __user * regs)
+                                  const uint32_t *regs)
 {
        if (start < SAVAGE_TEXPALADDR_S3D ||
            start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
@@ -148,8 +149,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
                SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
                if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
                        return savage_verify_texaddr(dev_priv, 0,
-                                                    dev_priv->state.s3d.
-                                                    texaddr);
+                                               dev_priv->state.s3d.texaddr);
        }
 
        return 0;
@@ -157,7 +157,7 @@ static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
 
 static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
                                  unsigned int start, unsigned int count,
-                                 const uint32_t __user * regs)
+                                 const uint32_t *regs)
 {
        int ret = 0;
 
@@ -174,19 +174,18 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
                        ~SAVAGE_SCISSOR_MASK_S4);
 
        /* if any texture regs were changed ... */
-       if (start <= SAVAGE_TEXDESCR_S4 && start + count > SAVAGE_TEXPALADDR_S4) {
+       if (start <= SAVAGE_TEXDESCR_S4 &&
+           start + count > SAVAGE_TEXPALADDR_S4) {
                /* ... check texture state */
                SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
                SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
                SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
                if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
-                       ret |=
-                           savage_verify_texaddr(dev_priv, 0,
-                                                 dev_priv->state.s4.texaddr0);
+                       ret |= savage_verify_texaddr(dev_priv, 0,
+                                               dev_priv->state.s4.texaddr0);
                if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
-                       ret |=
-                           savage_verify_texaddr(dev_priv, 1,
-                                                 dev_priv->state.s4.texaddr1);
+                       ret |= savage_verify_texaddr(dev_priv, 1,
+                                               dev_priv->state.s4.texaddr1);
        }
 
        return ret;
@@ -197,7 +196,7 @@ static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
 
 static int savage_dispatch_state(drm_savage_private_t * dev_priv,
                                 const drm_savage_cmd_header_t * cmd_header,
-                                const uint32_t __user * regs)
+                                const uint32_t *regs)
 {
        unsigned int count = cmd_header->state.count;
        unsigned int start = cmd_header->state.start;
@@ -209,9 +208,6 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
        if (!count)
                return 0;
 
-       if (DRM_VERIFYAREA_READ(regs, count * 4))
-               return DRM_ERR(EFAULT);
-
        if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
                ret = savage_verify_state_s3d(dev_priv, start, count, regs);
                if (ret != 0)
@@ -236,8 +232,8 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
                /* scissor regs are emitted in savage_dispatch_draw */
                if (start < SAVAGE_DRAWCTRL0_S4) {
                        if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
-                               count2 =
-                                   count - (SAVAGE_DRAWCTRL1_S4 + 1 - start);
+                               count2 = count -
+                                        (SAVAGE_DRAWCTRL1_S4 + 1 - start);
                        if (start + count > SAVAGE_DRAWCTRL0_S4)
                                count = SAVAGE_DRAWCTRL0_S4 - start;
                } else if (start <= SAVAGE_DRAWCTRL1_S4) {
@@ -263,7 +259,7 @@ static int savage_dispatch_state(drm_savage_private_t * dev_priv,
                while (count > 0) {
                        unsigned int n = count < 255 ? count : 255;
                        DMA_SET_REGISTERS(start, n);
-                       DMA_COPY_FROM_USER(regs, n);
+                       DMA_COPY(regs, n);
                        count -= n;
                        start += n;
                        regs += n;
@@ -421,8 +417,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
 
 static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
                                   const drm_savage_cmd_header_t * cmd_header,
-                                  const uint32_t __user * vtxbuf,
-                                  unsigned int vb_size, unsigned int vb_stride)
+                                  const uint32_t *vtxbuf, unsigned int vb_size,
+                                  unsigned int vb_stride)
 {
        unsigned char reorder = 0;
        unsigned int prim = cmd_header->prim.prim;
@@ -507,8 +503,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
 
                        for (i = start; i < start + count; ++i) {
                                unsigned int j = i + reorder[i % 3];
-                               DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
-                                                  vtx_size);
+                               DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
                        }
 
                        DMA_COMMIT();
@@ -517,13 +512,12 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
                        DMA_DRAW_PRIMITIVE(count, prim, skip);
 
                        if (vb_stride == vtx_size) {
-                               DMA_COPY_FROM_USER(&vtxbuf[vb_stride * start],
-                                                  vtx_size * count);
+                               DMA_COPY(&vtxbuf[vb_stride * start], 
+                                        vtx_size * count);
                        } else {
                                for (i = start; i < start + count; ++i) {
-                                       DMA_COPY_FROM_USER(&vtxbuf
-                                                          [vb_stride * i],
-                                                          vtx_size);
+                                       DMA_COPY(&vtxbuf [vb_stride * i],
+                                                vtx_size);
                                }
                        }
 
@@ -541,7 +535,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
 
 static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
                                   const drm_savage_cmd_header_t * cmd_header,
-                                  const uint16_t __user * usr_idx,
+                                  const uint16_t *idx,
                                   const drm_buf_t * dmabuf)
 {
        unsigned char reorder = 0;
@@ -628,11 +622,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
        while (n != 0) {
                /* Can emit up to 255 indices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
-               /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
-               uint16_t idx[255];
 
-               /* Copy and check indices */
-               DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
+               /* Check indices */
                for (i = 0; i < count; ++i) {
                        if (idx[i] > dmabuf->total / 32) {
                                DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -652,8 +643,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
 
                        for (i = 1; i + 1 < count; i += 2)
                                BCI_WRITE(idx[i + reorder[i % 3]] |
-                                         (idx[i + 1 + reorder[(i + 1) % 3]] <<
-                                          16));
+                                         (idx[i + 1 +
+                                          reorder[(i + 1) % 3]] << 16));
                        if (i < count)
                                BCI_WRITE(idx[i + reorder[i % 3]]);
                } else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
@@ -674,7 +665,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
                                BCI_WRITE(idx[i]);
                }
 
-               usr_idx += count;
+               idx += count;
                n -= count;
 
                prim |= BCI_CMD_DRAW_CONT;
@@ -685,8 +676,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
 
 static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
                                  const drm_savage_cmd_header_t * cmd_header,
-                                 const uint16_t __user * usr_idx,
-                                 const uint32_t __user * vtxbuf,
+                                 const uint16_t *idx,
+                                 const uint32_t *vtxbuf,
                                  unsigned int vb_size, unsigned int vb_stride)
 {
        unsigned char reorder = 0;
@@ -751,11 +742,8 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
        while (n != 0) {
                /* Can emit up to 255 vertices (85 triangles) at once. */
                unsigned int count = n > 255 ? 255 : n;
-               /* Is it ok to allocate 510 bytes on the stack in an ioctl? */
-               uint16_t idx[255];
-
-               /* Copy and check indices */
-               DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count * 2);
+               
+               /* Check indices */
                for (i = 0; i < count; ++i) {
                        if (idx[i] > vb_size / (vb_stride * 4)) {
                                DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
@@ -775,8 +763,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
 
                        for (i = 0; i < count; ++i) {
                                unsigned int j = idx[i + reorder[i % 3]];
-                               DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
-                                                  vtx_size);
+                               DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
                        }
 
                        DMA_COMMIT();
@@ -786,14 +773,13 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
 
                        for (i = 0; i < count; ++i) {
                                unsigned int j = idx[i];
-                               DMA_COPY_FROM_USER(&vtxbuf[vb_stride * j],
-                                                  vtx_size);
+                               DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
                        }
 
                        DMA_COMMIT();
                }
 
-               usr_idx += count;
+               idx += count;
                n -= count;
 
                prim |= BCI_CMD_DRAW_CONT;
@@ -804,11 +790,11 @@ static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
 
 static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
                                 const drm_savage_cmd_header_t * cmd_header,
-                                const drm_savage_cmd_header_t __user * data,
+                                const drm_savage_cmd_header_t *data,
                                 unsigned int nbox,
-                                const drm_clip_rect_t __user * usr_boxes)
+                                const drm_clip_rect_t *boxes)
 {
-       unsigned int flags = cmd_header->clear0.flags, mask, value;
+       unsigned int flags = cmd_header->clear0.flags;
        unsigned int clear_cmd;
        unsigned int i, nbufs;
        DMA_LOCALS;
@@ -816,9 +802,6 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
        if (nbox == 0)
                return 0;
 
-       DRM_GET_USER_UNCHECKED(mask, &data->clear1.mask);
-       DRM_GET_USER_UNCHECKED(value, &data->clear1.value);
-
        clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
            BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
        BCI_CMD_SET_ROP(clear_cmd, 0xCC);
@@ -828,21 +811,19 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
        if (nbufs == 0)
                return 0;
 
-       if (mask != 0xffffffff) {
+       if (data->clear1.mask != 0xffffffff) {
                /* set mask */
                BEGIN_DMA(2);
                DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
-               DMA_WRITE(mask);
+               DMA_WRITE(data->clear1.mask);
                DMA_COMMIT();
        }
        for (i = 0; i < nbox; ++i) {
-               drm_clip_rect_t box;
                unsigned int x, y, w, h;
                unsigned int buf;
-               DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
-               x = box.x1, y = box.y1;
-               w = box.x2 - box.x1;
-               h = box.y2 - box.y1;
+               x = boxes[i].x1, y = boxes[i].y1;
+               w = boxes[i].x2 - boxes[i].x1;
+               h = boxes[i].y2 - boxes[i].y1;
                BEGIN_DMA(nbufs * 6);
                for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
                        if (!(flags & buf))
@@ -862,13 +843,13 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
                                DMA_WRITE(dev_priv->depth_bd);
                                break;
                        }
-                       DMA_WRITE(value);
+                       DMA_WRITE(data->clear1.value);
                        DMA_WRITE(BCI_X_Y(x, y));
                        DMA_WRITE(BCI_W_H(w, h));
                }
                DMA_COMMIT();
        }
-       if (mask != 0xffffffff) {
+       if (data->clear1.mask != 0xffffffff) {
                /* reset mask */
                BEGIN_DMA(2);
                DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
@@ -880,8 +861,7 @@ static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
 }
 
 static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
-                               unsigned int nbox,
-                               const drm_clip_rect_t __user * usr_boxes)
+                               unsigned int nbox, const drm_clip_rect_t *boxes)
 {
        unsigned int swap_cmd;
        unsigned int i;
@@ -895,16 +875,14 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
        BCI_CMD_SET_ROP(swap_cmd, 0xCC);
 
        for (i = 0; i < nbox; ++i) {
-               drm_clip_rect_t box;
-               DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
-
                BEGIN_DMA(6);
                DMA_WRITE(swap_cmd);
                DMA_WRITE(dev_priv->back_offset);
                DMA_WRITE(dev_priv->back_bd);
-               DMA_WRITE(BCI_X_Y(box.x1, box.y1));
-               DMA_WRITE(BCI_X_Y(box.x1, box.y1));
-               DMA_WRITE(BCI_W_H(box.x2 - box.x1, box.y2 - box.y1));
+               DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+               DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
+               DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+                                 boxes[i].y2 - boxes[i].y1));
                DMA_COMMIT();
        }
 
@@ -912,68 +890,52 @@ static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
 }
 
 static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
-                               const drm_savage_cmd_header_t __user * start,
-                               const drm_savage_cmd_header_t __user * end,
+                               const drm_savage_cmd_header_t *start,
+                               const drm_savage_cmd_header_t *end,
                                const drm_buf_t * dmabuf,
-                               const unsigned int __user * usr_vtxbuf,
+                               const unsigned int *vtxbuf,
                                unsigned int vb_size, unsigned int vb_stride,
                                unsigned int nbox,
-                               const drm_clip_rect_t __user * usr_boxes)
+                               const drm_clip_rect_t *boxes)
 {
        unsigned int i, j;
        int ret;
 
        for (i = 0; i < nbox; ++i) {
-               drm_clip_rect_t box;
-               const drm_savage_cmd_header_t __user *usr_cmdbuf;
-               DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
-               dev_priv->emit_clip_rect(dev_priv, &box);
+               const drm_savage_cmd_header_t *cmdbuf;
+               dev_priv->emit_clip_rect(dev_priv, &boxes[i]);
 
-               usr_cmdbuf = start;
-               while (usr_cmdbuf < end) {
+               cmdbuf = start;
+               while (cmdbuf < end) {
                        drm_savage_cmd_header_t cmd_header;
-                       DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
-                                                    sizeof(cmd_header));
-                       usr_cmdbuf++;
+                       cmd_header = *cmdbuf;
+                       cmdbuf++;
                        switch (cmd_header.cmd.cmd) {
                        case SAVAGE_CMD_DMA_PRIM:
-                               ret =
-                                   savage_dispatch_dma_prim(dev_priv,
-                                                            &cmd_header,
-                                                            dmabuf);
+                               ret = savage_dispatch_dma_prim(
+                                       dev_priv, &cmd_header, dmabuf);
                                break;
                        case SAVAGE_CMD_VB_PRIM:
-                               ret =
-                                   savage_dispatch_vb_prim(dev_priv,
-                                                           &cmd_header,
-                                                           (const uint32_t
-                                                            __user *)
-                                                           usr_vtxbuf, vb_size,
-                                                           vb_stride);
+                               ret = savage_dispatch_vb_prim(
+                                       dev_priv, &cmd_header,
+                                       vtxbuf, vb_size, vb_stride);
                                break;
                        case SAVAGE_CMD_DMA_IDX:
                                j = (cmd_header.idx.count + 3) / 4;
                                /* j was checked in savage_bci_cmdbuf */
-                               ret =
-                                   savage_dispatch_dma_idx(dev_priv,
-                                                           &cmd_header,
-                                                           (const uint16_t
-                                                            __user *)
-                                                           usr_cmdbuf, dmabuf);
-                               usr_cmdbuf += j;
+                               ret = savage_dispatch_dma_idx(dev_priv,
+                                       &cmd_header, (const uint16_t *)cmdbuf,
+                                       dmabuf);
+                               cmdbuf += j;
                                break;
                        case SAVAGE_CMD_VB_IDX:
                                j = (cmd_header.idx.count + 3) / 4;
                                /* j was checked in savage_bci_cmdbuf */
-                               ret =
-                                   savage_dispatch_vb_idx(dev_priv,
-                                                          &cmd_header,
-                                                          (const uint16_t
-                                                           __user *)usr_cmdbuf,
-                                                          (const uint32_t
-                                                           __user *)usr_vtxbuf,
-                                                          vb_size, vb_stride);
-                               usr_cmdbuf += j;
+                               ret = savage_dispatch_vb_idx(dev_priv,
+                                       &cmd_header, (const uint16_t *)cmdbuf,
+                                       (const uint32_t *)vtxbuf, vb_size, 
+                                       vb_stride);
+                               cmdbuf += j;
                                break;
                        default:
                                /* What's the best return code? EFAULT? */
@@ -998,10 +960,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
        drm_device_dma_t *dma = dev->dma;
        drm_buf_t *dmabuf;
        drm_savage_cmdbuf_t cmdbuf;
-       drm_savage_cmd_header_t __user *usr_cmdbuf;
-       drm_savage_cmd_header_t __user *first_draw_cmd;
-       unsigned int __user *usr_vtxbuf;
-       drm_clip_rect_t __user *usr_boxes;
+       drm_savage_cmd_header_t *kcmd_addr = NULL;
+       drm_savage_cmd_header_t *first_draw_cmd;
+       unsigned int *kvb_addr = NULL;
+       drm_clip_rect_t *kbox_addr = NULL;
        unsigned int i, j;
        int ret = 0;
 
@@ -1024,15 +986,53 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
                dmabuf = NULL;
        }
 
-       usr_cmdbuf = (drm_savage_cmd_header_t __user *) cmdbuf.cmd_addr;
-       usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
-       usr_boxes = (drm_clip_rect_t __user *) cmdbuf.box_addr;
-       if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size * 8)) ||
-           (cmdbuf.vb_size && DRM_VERIFYAREA_READ(usr_vtxbuf, cmdbuf.vb_size))
-           || (cmdbuf.nbox
-               && DRM_VERIFYAREA_READ(usr_boxes,
-                                      cmdbuf.nbox * sizeof(drm_clip_rect_t))))
-               return DRM_ERR(EFAULT);
+       /* Copy the user buffers into kernel temporary areas.  This hasn't been
+        * a performance loss compared to VERIFYAREA_READ/
+        * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
+        * for locking on FreeBSD.
+        */
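+       /* Note: cmdbuf.size appears to be counted in 64-bit command headers
+        * (hence the "* 8" byte conversions below), while vb_size is already
+        * a byte count and nbox a count of clip rectangles.
+        */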
+       if (cmdbuf.size) {
+               kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
+               if (kcmd_addr == NULL)
+                       return DRM_ERR(ENOMEM);
+
+               if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
+                                      cmdbuf.size * 8)) {
+                       drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
+                       return DRM_ERR(EFAULT);
+               }
+               cmdbuf.cmd_addr = kcmd_addr;
+       }
+       if (cmdbuf.vb_size) {
+               kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
+               if (kvb_addr == NULL) {
+                       ret = DRM_ERR(ENOMEM);
+                       goto done;
+               }
+
+               if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
+                                      cmdbuf.vb_size)) {
+                       ret = DRM_ERR(EFAULT);
+                       goto done;
+               }
+               cmdbuf.vb_addr = kvb_addr;
+       }
+       if (cmdbuf.nbox) {
+               kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
+                                      DRM_MEM_DRIVER);
+               if (kbox_addr == NULL) {
+                       ret = DRM_ERR(ENOMEM);
+                       goto done;
+               }
+
+               if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
+                                      cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
+                       ret = DRM_ERR(EFAULT);
+                       goto done;
+               }
+               cmdbuf.box_addr = kbox_addr;
+       }
 
        /* Make sure writes to DMA buffers are finished before sending
         * DMA commands to the graphics hardware. */
@@ -1046,9 +1046,8 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
        first_draw_cmd = NULL;
        while (i < cmdbuf.size) {
                drm_savage_cmd_header_t cmd_header;
-               DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
-                                            sizeof(cmd_header));
-               usr_cmdbuf++;
+               cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
+               cmdbuf.cmd_addr++;
                i++;
 
                /* Group drawing commands with same state to minimize
@@ -1068,21 +1067,18 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
                case SAVAGE_CMD_DMA_PRIM:
                case SAVAGE_CMD_VB_PRIM:
                        if (!first_draw_cmd)
-                               first_draw_cmd = usr_cmdbuf - 1;
-                       usr_cmdbuf += j;
+                               first_draw_cmd = cmdbuf.cmd_addr - 1;
+                       cmdbuf.cmd_addr += j;
                        i += j;
                        break;
                default:
                        if (first_draw_cmd) {
-                               ret =
-                                   savage_dispatch_draw(dev_priv,
-                                                        first_draw_cmd,
-                                                        usr_cmdbuf - 1, dmabuf,
-                                                        usr_vtxbuf,
-                                                        cmdbuf.vb_size,
-                                                        cmdbuf.vb_stride,
-                                                        cmdbuf.nbox,
-                                                        usr_boxes);
+                               ret = savage_dispatch_draw(
+                                     dev_priv, first_draw_cmd,
+                                     cmdbuf.cmd_addr - 1,
+                                     dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
+                                     cmdbuf.vb_stride,
+                                     cmdbuf.nbox, cmdbuf.box_addr);
                                if (ret != 0)
                                        return ret;
                                first_draw_cmd = NULL;
@@ -1098,12 +1094,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
                                DRM_ERROR("command SAVAGE_CMD_STATE extends "
                                          "beyond end of command buffer\n");
                                DMA_FLUSH();
-                               return DRM_ERR(EINVAL);
+                               ret = DRM_ERR(EINVAL);
+                               goto done;
                        }
                        ret = savage_dispatch_state(dev_priv, &cmd_header,
-                                                   (uint32_t __user *)
-                                                   usr_cmdbuf);
-                       usr_cmdbuf += j;
+                               (const uint32_t *)cmdbuf.cmd_addr);
+                       cmdbuf.cmd_addr += j;
                        i += j;
                        break;
                case SAVAGE_CMD_CLEAR:
@@ -1111,39 +1107,40 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
                                DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
                                          "beyond end of command buffer\n");
                                DMA_FLUSH();
-                               return DRM_ERR(EINVAL);
+                               ret = DRM_ERR(EINVAL);
+                               goto done;
                        }
                        ret = savage_dispatch_clear(dev_priv, &cmd_header,
-                                                   usr_cmdbuf,
-                                                   cmdbuf.nbox, usr_boxes);
-                       usr_cmdbuf++;
+                                                   cmdbuf.cmd_addr,
+                                                   cmdbuf.nbox, cmdbuf.box_addr);
+                       cmdbuf.cmd_addr++;
                        i++;
                        break;
                case SAVAGE_CMD_SWAP:
-                       ret = savage_dispatch_swap(dev_priv,
-                                                  cmdbuf.nbox, usr_boxes);
+                       ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
+                                                  cmdbuf.box_addr);
                        break;
                default:
                        DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
                        DMA_FLUSH();
-                       return DRM_ERR(EINVAL);
+                       ret = DRM_ERR(EINVAL);
+                       goto done;
                }
 
                if (ret != 0) {
                        DMA_FLUSH();
-                       return ret;
+                       goto done;
                }
        }
 
        if (first_draw_cmd) {
-               ret =
-                   savage_dispatch_draw(dev_priv, first_draw_cmd, usr_cmdbuf,
-                                        dmabuf, usr_vtxbuf, cmdbuf.vb_size,
-                                        cmdbuf.vb_stride, cmdbuf.nbox,
-                                        usr_boxes);
+               ret = savage_dispatch_draw(
+                       dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
+                       cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
+                       cmdbuf.nbox, cmdbuf.box_addr);
                if (ret != 0) {
                        DMA_FLUSH();
-                       return ret;
+                       goto done;
                }
        }
 
@@ -1157,5 +1154,12 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
                savage_freelist_put(dev, dmabuf);
        }
 
-       return 0;
+done:
+       /* If we didn't need to allocate them, these will be NULL */
+       drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
+       drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
+       drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
+                DRM_MEM_DRIVER);
+
+       return ret;
 }
index 8f273da76ddbd73c2d702024885b373159cbaf4c..30f7b38274668c0fa766cad45bee6256425968f3 100644 (file)
@@ -1,3 +1,28 @@
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
+ * Copyright 2005 Eric Anholt
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
 
 #ifndef __SIS_DRM_H__
 #define __SIS_DRM_H__
index 3cef10643a8f4c148f0ecd10328f3300c6cf08f8..6f6d7d613ede7dbcade3db4d04988a1c96dfc950 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        sisdrv_PCI_IDS
 };
@@ -68,8 +43,6 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .ioctls = sis_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
@@ -79,11 +52,18 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                },
+       },
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init sis_init(void)
index b1fddad83a936f4dcaf59533cc0b3a715d46f28c..e218e5269503b80ff65d6127b249ba88baf79bab 100644 (file)
@@ -1,5 +1,5 @@
-/* sis_drv.h -- Private header for sis driver -*- linux-c -*-
- *
+/* sis_drv.h -- Private header for sis driver -*- linux-c -*- */
+/*
  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All rights reserved.
index da850b4f5440a684512a3fd258417114f71e3bfc..94f2b4728b638bfdfbed941584e0521e1f3925c6 100644 (file)
@@ -1,6 +1,7 @@
-/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*-
+/* sis_ds.h -- Private header for Direct Rendering Manager -*- linux-c -*- 
  * Created: Mon Jan  4 10:05:05 1999 by sclin@sis.com.tw
- *
+ */
+/*
  * Copyright 2000 Silicon Integrated Systems Corp, Inc., HsinChu, Taiwan.
  * All rights reserved.
  *
@@ -35,7 +36,7 @@
 
 #define SET_SIZE 5000
 
-typedef unsigned int ITEM_TYPE;
+typedef unsigned long ITEM_TYPE;
 
 typedef struct {
        ITEM_TYPE val;
index a8529728fa63592f1511b15e4a1e28c5dc781c71..6774d2fe3452c22931e6bc1e288c07329099e6f6 100644 (file)
@@ -86,7 +86,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
 {
        drm_sis_mem_t fb;
        struct sis_memreq req;
-       drm_sis_mem_t __user *argp = (void __user *)data;
+       drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
        int retval = 0;
 
        DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
@@ -110,7 +110,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
 
        DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
 
-       DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, req.offset);
+       DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
 
        return retval;
 }
@@ -127,9 +127,9 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
 
        if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
                retval = DRM_ERR(EINVAL);
-       sis_free((u32) fb.free);
+       sis_free(fb.free);
 
-       DRM_DEBUG("free fb, offset = %lu\n", fb.free);
+       DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
 
        return retval;
 }
@@ -176,7 +176,7 @@ static int sis_fb_alloc(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
        drm_sis_private_t *dev_priv = dev->dev_private;
-       drm_sis_mem_t __user *argp = (void __user *)data;
+       drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
        drm_sis_mem_t fb;
        PMemBlock block;
        int retval = 0;
@@ -267,7 +267,7 @@ static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
 {
        DRM_DEVICE;
        drm_sis_private_t *dev_priv = dev->dev_private;
-       drm_sis_mem_t __user *argp = (void __user *)data;
+       drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
        drm_sis_mem_t agp;
        PMemBlock block;
        int retval = 0;
@@ -367,7 +367,7 @@ int sis_final_context(struct drm_device *dev, int context)
 
        if (i < MAX_CONTEXT) {
                set_t *set;
-               unsigned int item;
+               ITEM_TYPE item;
                int retval;
 
                DRM_DEBUG("find socket %d, context = %d\n", i, context);
@@ -376,7 +376,7 @@ int sis_final_context(struct drm_device *dev, int context)
                set = global_ppriv[i].sets[0];
                retval = setFirst(set, &item);
                while (retval) {
-                       DRM_DEBUG("free video memory 0x%x\n", item);
+                       DRM_DEBUG("free video memory 0x%lx\n", item);
 #if defined(__linux__) && defined(CONFIG_FB_SIS)
                        sis_free(item);
 #else
@@ -390,7 +390,7 @@ int sis_final_context(struct drm_device *dev, int context)
                set = global_ppriv[i].sets[1];
                retval = setFirst(set, &item);
                while (retval) {
-                       DRM_DEBUG("free agp memory 0x%x\n", item);
+                       DRM_DEBUG("free agp memory 0x%lx\n", item);
                        mmFreeMem((PMemBlock) item);
                        retval = setNext(set, &item);
                }
@@ -403,12 +403,12 @@ int sis_final_context(struct drm_device *dev, int context)
 }
 
 drm_ioctl_desc_t sis_ioctls[] = {
-       [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, 1, 0},
-       [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, 1, 1},
-       [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, 1, 0},
-       [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, 1, 1}
+       [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+       [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
 };
 
 int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
index c275cbb6e9ce22e9f6b8085663a0b9f6a0670e1f..baa4416032a82f053214e34dbf82203ebdc43361 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
-{
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
-}
-
 static struct pci_device_id pciidlist[] = {
        tdfx_PCI_IDS
 };
@@ -70,8 +45,6 @@ static struct drm_driver driver = {
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
@@ -80,11 +53,18 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                },
+       },
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init tdfx_init(void)
index a582a3db4c75201442366746ff320050123a186b..84204ec1b046b9723225aeab9568c26ca52a9295 100644 (file)
@@ -1,6 +1,7 @@
 /* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
  * Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
- *
+ */
+/*
  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
  * All Rights Reserved.
  *
 #ifndef __TDFX_H__
 #define __TDFX_H__
 
-/* This remains constant for all DRM template files.
- */
-#define DRM(x) tdfx_##x
-
 /* General customization:
  */
 
index d4b1766608b0e269319efb7c20b8966f4b020ee1..593c0b8f650a911c51a3558b0ff136780df63bf4 100644 (file)
@@ -213,7 +213,9 @@ static int via_initialize(drm_device_t * dev,
        dev_priv->dma_wrap = init->size;
        dev_priv->dma_offset = init->offset;
        dev_priv->last_pause_ptr = NULL;
-       dev_priv->hw_addr_ptr = dev_priv->mmio->handle + init->reg_pause_addr;
+       dev_priv->hw_addr_ptr =
+               (volatile uint32_t *)((char *)dev_priv->mmio->handle +
+               init->reg_pause_addr);
 
        via_cmdbuf_start(dev_priv);
 
@@ -232,13 +234,13 @@ int via_dma_init(DRM_IOCTL_ARGS)
 
        switch (init.func) {
        case VIA_INIT_DMA:
-               if (!capable(CAP_SYS_ADMIN))
+               if (!DRM_SUSER(DRM_CURPROC))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_initialize(dev, dev_priv, &init);
                break;
        case VIA_CLEANUP_DMA:
-               if (!capable(CAP_SYS_ADMIN))
+               if (!DRM_SUSER(DRM_CURPROC))
                        retcode = DRM_ERR(EPERM);
                else
                        retcode = via_dma_cleanup(dev);
@@ -349,9 +351,6 @@ int via_cmdbuffer(DRM_IOCTL_ARGS)
        return 0;
 }
 
-extern int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
-                        unsigned int size);
 static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
                                      drm_via_cmdbuffer_t * cmd)
 {
@@ -450,9 +449,9 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
        if ((count <= 8) && (count >= 0)) {
                uint32_t rgtr, ptr;
                rgtr = *(dev_priv->hw_addr_ptr);
-               ptr = ((char *)dev_priv->last_pause_ptr - dev_priv->dma_ptr) +
-                   dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4 -
-                   CMDBUF_ALIGNMENT_SIZE;
+               ptr = ((volatile char *)dev_priv->last_pause_ptr -
+                     dev_priv->dma_ptr) + dev_priv->dma_offset +
+                     (uint32_t) dev_priv->agpAddr + 4 - CMDBUF_ALIGNMENT_SIZE;
                if (rgtr <= ptr) {
                        DRM_ERROR
                            ("Command regulator\npaused at count %d, address %x, "
@@ -472,7 +471,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
                       && count--) ;
 
                rgtr = *(dev_priv->hw_addr_ptr);
-               ptr = ((char *)paused_at - dev_priv->dma_ptr) +
+               ptr = ((volatile char *)paused_at - dev_priv->dma_ptr) +
                    dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr + 4;
 
                ptr_low = (ptr > 3 * CMDBUF_ALIGNMENT_SIZE) ?
@@ -724,3 +723,22 @@ int via_cmdbuf_size(DRM_IOCTL_ARGS)
                               sizeof(d_siz));
        return ret;
 }
+
+drm_ioctl_desc_t via_ioctls[] = {
+       [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
+       [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
+       [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
+       [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
+       [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
+};
+
+int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
diff --git a/drivers/char/drm/via_dmablit.c b/drivers/char/drm/via_dmablit.c
new file mode 100644 (file)
index 0000000..9d5e027
--- /dev/null
@@ -0,0 +1,805 @@
+/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ * 
+ * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: 
+ *    Thomas Hellstrom.
+ *    Partially based on code obtained from Digeo Inc.
+ */
+
+
+/*
+ * Unmaps the DMA mappings. 
+ * FIXME: Is this a NoOp on x86? Also 
+ * FIXME: What happens if this one is called and a pending blit has previously done 
+ * the same DMA mappings? 
+ */
+
+#include "drmP.h"
+#include "via_drm.h"
+#include "via_drv.h"
+#include "via_dmablit.h"
+
+#include <linux/pagemap.h>
+
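+/* VIA_PGDN() rounds an address down to its page boundary, VIA_PGOFF()
+ * extracts the offset within that page, and VIA_PFN() converts an address
+ * to a page frame number.
+ */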
+#define VIA_PGDN(x)             (((unsigned long)(x)) & PAGE_MASK)
+#define VIA_PGOFF(x)            (((unsigned long)(x)) & ~PAGE_MASK)
+#define VIA_PFN(x)              ((unsigned long)(x) >> PAGE_SHIFT)
+
+typedef struct _drm_via_descriptor {
+       uint32_t mem_addr;
+       uint32_t dev_addr;
+       uint32_t size;
+       uint32_t next;
+} drm_via_descriptor_t;
+
+
+/*
+ * Unmap a DMA mapping.
+ */
+
+
+
+static void
+via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+{
+       int num_desc = vsg->num_desc;
+       unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
+       unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
+       drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] + 
+               descriptor_this_page;
+       dma_addr_t next = vsg->chain_start;
+
+       while(num_desc--) {
+               if (descriptor_this_page-- == 0) {
+                       cur_descriptor_page--;
+                       descriptor_this_page = vsg->descriptors_per_page - 1;
+                       desc_ptr = vsg->desc_pages[cur_descriptor_page] + 
+                               descriptor_this_page;
+               }
+               dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
+               dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
+               next = (dma_addr_t) desc_ptr->next;
+               desc_ptr--;
+       }
+}
+
+/*
+ * If mode = 0, count how many descriptors are needed.
+ * If mode = 1, map the DMA pages for the device, build the descriptor chain and map it as well.
+ * Descriptors are run in reverse order by the hardware because we are not allowed to update the
+ * 'next' field without syncing calls when the descriptor is already mapped.
+ */
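+/*
+ * Rough usage sketch of the two-pass pattern described above: the caller
+ * first invokes this with mode 0 to learn vsg->num_desc, allocates the
+ * descriptor pages, and then calls it again with mode 1 to build and map
+ * the descriptor chain.
+ */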
+
+static void
+via_map_blit_for_device(struct pci_dev *pdev,
+                  const drm_via_dmablit_t *xfer,
+                  drm_via_sg_info_t *vsg, 
+                  int mode)
+{
+       unsigned cur_descriptor_page = 0;
+       unsigned num_descriptors_this_page = 0;
+       unsigned char *mem_addr = xfer->mem_addr;
+       unsigned char *cur_mem;
+       unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
+       uint32_t fb_addr = xfer->fb_addr;
+       uint32_t cur_fb;
+       unsigned long line_len;
+       unsigned remaining_len;
+       int num_desc = 0;
+       int cur_line;
+       dma_addr_t next = 0 | VIA_DMA_DPR_EC;
+       drm_via_descriptor_t *desc_ptr = NULL;
+
+       if (mode == 1) 
+               desc_ptr = vsg->desc_pages[cur_descriptor_page];
+
+       for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
+
+               line_len = xfer->line_length;
+               cur_fb = fb_addr;
+               cur_mem = mem_addr;
+               
+               while (line_len > 0) {
+
+                        remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
+                       line_len -= remaining_len;
+
+                       if (mode == 1) {
+                                desc_ptr->mem_addr = 
+                                       dma_map_page(&pdev->dev, 
+                                                    vsg->pages[VIA_PFN(cur_mem) - 
+                                                               VIA_PFN(first_addr)],
+                                                    VIA_PGOFF(cur_mem), remaining_len, 
+                                                    vsg->direction);
+                                desc_ptr->dev_addr = cur_fb;
+                               
+                                desc_ptr->size = remaining_len;
+                               desc_ptr->next = (uint32_t) next;
+                               next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr), 
+                                                     DMA_TO_DEVICE);
+                               desc_ptr++;
+                               if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
+                                       num_descriptors_this_page = 0;
+                                       desc_ptr = vsg->desc_pages[++cur_descriptor_page];
+                               }
+                       }
+                       
+                       num_desc++;
+                       cur_mem += remaining_len;
+                       cur_fb += remaining_len;
+               }
+               
+               mem_addr += xfer->mem_stride;
+               fb_addr += xfer->fb_stride;
+       }
+
+       if (mode == 1) {
+               vsg->chain_start = next;
+               vsg->state = dr_via_device_mapped;
+       }
+       vsg->num_desc = num_desc;
+}
+
+/*
+ * Function that frees up all resources for a blit. It is usable even if the
+ * blit info has only been partially built, as long as the status enum is consistent
+ * with the actual status of the used resources.
+ */
+
+
+void
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) 
+{
+       struct page *page;
+       int i;
+
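+       /* Intentional fall-through: teardown runs from the most advanced
+        * state reached down to dr_via_sg_init, undoing each stage in turn.
+        */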
+       switch(vsg->state) {
+       case dr_via_device_mapped:
+               via_unmap_blit_from_device(pdev, vsg);
+       case dr_via_desc_pages_alloc:
+               for (i=0; i<vsg->num_desc_pages; ++i) {
+                       if (vsg->desc_pages[i] != NULL)
+                         free_page((unsigned long)vsg->desc_pages[i]);
+               }
+               kfree(vsg->desc_pages);
+       case dr_via_pages_locked:
+               for (i=0; i<vsg->num_pages; ++i) {
+                       if ( NULL != (page = vsg->pages[i])) {
+                               if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction)) 
+                                       SetPageDirty(page);
+                               page_cache_release(page);
+                       }
+               }
+       case dr_via_pages_alloc:
+               vfree(vsg->pages);
+       default:
+               vsg->state = dr_via_sg_init;
+       }
+       if (vsg->bounce_buffer) {
+               vfree(vsg->bounce_buffer);
+               vsg->bounce_buffer = NULL;
+       }
+       vsg->free_on_sequence = 0;
+}              
+
+/*
+ * Fire a blit engine.
+ */
+
+static void
+via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
+{
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+       VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
+       VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
+       VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | 
+                 VIA_DMA_CSR_DE);
+       VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
+       VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
+       VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
+       VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
+}
+
+/*
+ * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
+ * occur here if the calling user does not have access to the submitted address.
+ */
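+/*
+ * Note: get_user_pages() pins the pages, so the mappings set up for DMA
+ * stay valid until via_free_sg_info() releases them again.
+ */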
+
+static int
+via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
+{
+       int ret;
+       unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
+       vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - 
+               first_pfn + 1;
+       
+       if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+               return DRM_ERR(ENOMEM);
+       memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
+       down_read(&current->mm->mmap_sem);
+       ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
+                            vsg->num_pages, vsg->direction, 0, vsg->pages, NULL);
+
+       up_read(&current->mm->mmap_sem);
+       if (ret != vsg->num_pages) {
+               if (ret < 0) 
+                       return ret;
+               vsg->state = dr_via_pages_locked;
+               return DRM_ERR(EINVAL);
+       }
+       vsg->state = dr_via_pages_locked;
+       DRM_DEBUG("DMA pages locked\n");
+       return 0;
+}
+
+/*
+ * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
+ * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
+ * quite large for some blits, and pages don't need to be contiguous.
+ */
+
+static int 
+via_alloc_desc_pages(drm_via_sg_info_t *vsg)
+{
+       int i;
+       
+       vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
+       vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / 
+               vsg->descriptors_per_page;
+
+       if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) 
+               return DRM_ERR(ENOMEM);
+       
+       memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
+       vsg->state = dr_via_desc_pages_alloc;
+       for (i=0; i<vsg->num_desc_pages; ++i) {
+               if (NULL == (vsg->desc_pages[i] = 
+                            (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
+                       return DRM_ERR(ENOMEM);
+       }
+       DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
+                 vsg->num_desc);
+       return 0;
+}
+                       
+static void
+via_abort_dmablit(drm_device_t *dev, int engine)
+{
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+       VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
+}
+
+static void
+via_dmablit_engine_off(drm_device_t *dev, int engine)
+{
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+
+       VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); 
+}
+
+
+
+/*
+ * The dmablit part of the IRQ handler. Trying to do only reasonably fast things here.
+ * The rest, like unmapping and freeing memory for done blits is done in a separate workqueue
+ * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
+ * the workqueue task takes care of processing associated with the old blit.
+ */
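+/*
+ * The from_irq argument only selects the locking variant below: the IRQ
+ * path can take the plain spin_lock() because it already runs with
+ * interrupts disabled, while the timer/ioctl path uses spin_lock_irqsave().
+ */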
+               
+void
+via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
+{
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+       drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+       int cur;
+       int done_transfer;
+       unsigned long irqsave=0;
+       uint32_t status = 0;
+
+       DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
+                 engine, from_irq, (unsigned long) blitq);
+
+       if (from_irq) {
+               spin_lock(&blitq->blit_lock);
+       } else {
+               spin_lock_irqsave(&blitq->blit_lock, irqsave);
+       }
+
+       done_transfer = blitq->is_active && 
+         (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
+       done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE)); 
+
+       cur = blitq->cur;
+       if (done_transfer) {
+
+               blitq->blits[cur]->aborted = blitq->aborting;
+               blitq->done_blit_handle++;
+               DRM_WAKEUP(blitq->blit_queue + cur);            
+
+               cur++;
+               if (cur >= VIA_NUM_BLIT_SLOTS) 
+                       cur = 0;
+               blitq->cur = cur;
+
+               /*
+                * Clear transfer done flag.
+                */
+
+               VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);
+
+               blitq->is_active = 0;
+               blitq->aborting = 0;
+               schedule_work(&blitq->wq);      
+
+       } else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
+
+               /*
+                * Abort transfer after one second.
+                */
+
+               via_abort_dmablit(dev, engine);
+               blitq->aborting = 1;
+               blitq->end = jiffies + DRM_HZ;
+       }
+                       
+       if (!blitq->is_active) {
+               if (blitq->num_outstanding) {
+                       via_fire_dmablit(dev, blitq->blits[cur], engine);
+                       blitq->is_active = 1;
+                       blitq->cur = cur;
+                       blitq->num_outstanding--;
+                       blitq->end = jiffies + DRM_HZ;
+                       if (!timer_pending(&blitq->poll_timer)) {
+                               blitq->poll_timer.expires = jiffies+1;
+                               add_timer(&blitq->poll_timer);
+                       }
+               } else {
+                       if (timer_pending(&blitq->poll_timer)) {
+                               del_timer(&blitq->poll_timer);
+                       }
+                       via_dmablit_engine_off(dev, engine);
+               }
+       }               
+
+       if (from_irq) {
+               spin_unlock(&blitq->blit_lock);
+       } else {
+               spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+       }
+} 
+
+
+
+/*
+ * Check whether this blit is still active, performing necessary locking.
+ */
+
+static int
+via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
+{
+       unsigned long irqsave;
+       uint32_t slot;
+       int active;
+
+       spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+       /*
+        * Allow for handle wraparounds.
+        */
+
+       active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
+               ((blitq->cur_blit_handle - handle) <= (1 << 23));
+
+       if (queue && active) {
+               slot = handle - blitq->done_blit_handle + blitq->cur -1;
+               if (slot >= VIA_NUM_BLIT_SLOTS) {
+                       slot -= VIA_NUM_BLIT_SLOTS;
+               }
+               *queue = blitq->blit_queue + slot;
+       }
+
+       spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+       return active;
+}
+       
+/*
+ * Sync. Wait for at least three seconds for the blit to be performed.
+ */
+
+static int
+via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) 
+{
+
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+       drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
+       wait_queue_head_t *queue;
+       int ret = 0;
+
+       if (via_dmablit_active(blitq, engine, handle, &queue)) {
+               DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, 
+                           !via_dmablit_active(blitq, engine, handle, NULL));
+       }
+       DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
+                 handle, engine, ret);
+       
+       return ret;
+}
+
+
+/*
+ * A timer that regularly polls the blit engine in cases where we don't have interrupts:
+ * a) Broken hardware (typically those that don't have any video capture facility).
+ * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
+ * The timer and hardware IRQs can and do work in parallel. If the hardware has
+ * IRQs, they will shorten the latency somewhat.
+ */
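+/*
+ * Note: while blits are outstanding, the handler and this timer keep
+ * re-arming the timer one jiffy ahead; polling stops once the queue drains
+ * and the engine is switched off.
+ */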
+
+
+
+static void
+via_dmablit_timer(unsigned long data)
+{
+       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_device_t *dev = blitq->dev;
+       int engine = (int)
+               (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
+               
+       DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, 
+                 (unsigned long) jiffies);
+
+       via_dmablit_handler(dev, engine, 0);
+       
+       if (!timer_pending(&blitq->poll_timer)) {
+               blitq->poll_timer.expires = jiffies+1;
+               add_timer(&blitq->poll_timer);
+       }
+       via_dmablit_handler(dev, engine, 0);
+
+}
+
+
+
+
+/*
+ * Workqueue task that frees data and mappings associated with a blit.
+ * Also wakes up waiting processes. Each of these tasks handles one
+ * blit engine only and may not be called on each interrupt.
+ */
+
+
+static void 
+via_dmablit_workqueue(void *data)
+{
+       drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+       drm_device_t *dev = blitq->dev;
+       unsigned long irqsave;
+       drm_via_sg_info_t *cur_sg;
+       int cur_released;
+       
+       
+       DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) 
+                 (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
+
+       spin_lock_irqsave(&blitq->blit_lock, irqsave);
+       
+       while(blitq->serviced != blitq->cur) {
+
+               cur_released = blitq->serviced++;
+
+               DRM_DEBUG("Releasing blit slot %d\n", cur_released);
+
+               if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) 
+                       blitq->serviced = 0;
+               
+               cur_sg = blitq->blits[cur_released];
+               blitq->num_free++;
+                               
+               spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+               
+               DRM_WAKEUP(&blitq->busy_queue);
+               
+               via_free_sg_info(dev->pdev, cur_sg);
+               kfree(cur_sg);
+               
+               spin_lock_irqsave(&blitq->blit_lock, irqsave);
+       }
+
+       spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+}
+       
+
+/*
+ * Init all blit engines. Currently we use two, but some hardware has four.
+ */
+
+
+void
+via_init_dmablit(drm_device_t *dev)
+{
+       int i,j;
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+       drm_via_blitq_t *blitq;
+
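+       /*
+        * Enable PCI bus mastering so that the blit engines can DMA to and
+        * from system memory.
+        */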
+       pci_set_master(dev->pdev);      
+       
+       for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
+               blitq = dev_priv->blit_queues + i;
+               blitq->dev = dev;
+               blitq->cur_blit_handle = 0;
+               blitq->done_blit_handle = 0;
+               blitq->head = 0;
+               blitq->cur = 0;
+               blitq->serviced = 0;
+               blitq->num_free = VIA_NUM_BLIT_SLOTS;
+               blitq->num_outstanding = 0;
+               blitq->is_active = 0;
+               blitq->aborting = 0;
+               blitq->blit_lock = SPIN_LOCK_UNLOCKED;
+               for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) {
+                       DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
+               }
+               DRM_INIT_WAITQUEUE(&blitq->busy_queue);
+               INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
+               init_timer(&blitq->poll_timer);
+               blitq->poll_timer.function = &via_dmablit_timer;
+               blitq->poll_timer.data = (unsigned long) blitq;
+       }       
+}
+
+/*
+ * Build all info and do all mappings required for a blit.
+ */
+               
+
+static int
+via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+{
+       int draw = xfer->to_fb;
+       int ret = 0;
+       
+       vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       vsg->bounce_buffer = 0;
+
+       vsg->state = dr_via_sg_init;
+
+       if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
+               DRM_ERROR("Zero size bitblt.\n");
+               return DRM_ERR(EINVAL);
+       }
+
+       /*
+        * The check below is a driver limitation, not a hardware one. We
+        * don't want to lock unused pages, and don't want to incorporate the
+        * extra logic of avoiding them. Make sure there are none.
+        * (Not a big limitation anyway.)
+        */
+
+       if (((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) ||
+           (xfer->mem_stride > 2048*4)) {
+               DRM_ERROR("Too large system memory stride. Stride: %d, "
+                         "Length: %d\n", xfer->mem_stride, xfer->line_length);
+               return DRM_ERR(EINVAL);
+       }
+
+       if (xfer->num_lines > 2048) {
+               DRM_ERROR("Too many PCI DMA bitblt lines.\n");
+               return DRM_ERR(EINVAL);
+       }               
+
+       /* 
+        * We allow a negative fb stride so that images can be flipped during
+        * the transfer.
+        */
+
+       if (xfer->mem_stride < xfer->line_length ||
+               abs(xfer->fb_stride) < xfer->line_length) {
+               DRM_ERROR("Invalid frame-buffer / memory stride.\n");
+               return DRM_ERR(EINVAL);
+       }
+
+       /*
+        * A hardware bug seems to be worked around if system memory addresses start on
+        * 16 byte boundaries. This seems a bit restrictive, however; VIA has been
+        * contacted about this. Meanwhile, impose the following restrictions:
+        */
+
+#ifdef VIA_BUGFREE
+       if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
+           ((xfer->mem_stride & 3) != (xfer->fb_stride & 3))) {
+               DRM_ERROR("Invalid DRM bitblt alignment.\n");
+               return DRM_ERR(EINVAL);
+       }
+#else
+       if ((((unsigned long)xfer->mem_addr & 15) ||
+           ((unsigned long)xfer->fb_addr & 3)) || (xfer->mem_stride & 15) ||
+           (xfer->fb_stride & 3)) {
+               DRM_ERROR("Invalid DRM bitblt alignment.\n");
+               return DRM_ERR(EINVAL);
+       }       
+#endif
+
+       if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
+               DRM_ERROR("Could not lock DMA pages.\n");
+               via_free_sg_info(dev->pdev, vsg);
+               return ret;
+       }
+
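+       /*
+        * via_map_blit_for_device() appears to be run in two passes: the
+        * first (last argument 0) only sizes the descriptor chain so that
+        * via_alloc_desc_pages() knows how much to allocate, and the second
+        * (last argument 1) fills the descriptors in.
+        */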
+       via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
+       if (0 != (ret = via_alloc_desc_pages(vsg))) {
+               DRM_ERROR("Could not allocate DMA descriptor pages.\n");
+               via_free_sg_info(dev->pdev, vsg);
+               return ret;
+       }
+       via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
+       
+       return 0;
+}
+       
+
+/*
+ * Reserve one free slot in the blit queue. Will wait up to one second for one
+ * to become available. Otherwise -EBUSY is returned.
+ */
+
+static int 
+via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
+{
+       int ret=0;
+       unsigned long irqsave;
+
+       DRM_DEBUG("Num free is %d\n", blitq->num_free);
+       spin_lock_irqsave(&blitq->blit_lock, irqsave);
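+       /*
+        * DRM_WAIT_ON may sleep, so the spinlock is dropped around the wait
+        * and num_free is re-checked once the lock has been re-taken.
+        */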
+       while(blitq->num_free == 0) {
+               spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+               DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
+               if (ret) {
+                       return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
+               }
+               
+               spin_lock_irqsave(&blitq->blit_lock, irqsave);
+       }
+       
+       blitq->num_free--;
+       spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+
+       return 0;
+}
+
+/*
+ * Hand back a free slot if we changed our mind.
+ */
+
+static void 
+via_dmablit_release_slot(drm_via_blitq_t *blitq)
+{
+       unsigned long irqsave;
+
+       spin_lock_irqsave(&blitq->blit_lock, irqsave);
+       blitq->num_free++;
+       spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+       DRM_WAKEUP( &blitq->busy_queue );
+}
+
+/*
+ * Grab a free slot. Build blit info and queue a blit.
+ */
+
+
+static int 
+via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)         
+{
+       drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+       drm_via_sg_info_t *vsg;
+       drm_via_blitq_t *blitq;
+        int ret;
+       int engine;
+       unsigned long irqsave;
+
+       if (dev_priv == NULL) {
+               DRM_ERROR("Called without initialization.\n");
+               return DRM_ERR(EINVAL);
+       }
+
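+       /* Engine 0 handles blits to the framebuffer, engine 1 blits from it. */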
+       engine = (xfer->to_fb) ? 0 : 1;
+       blitq = dev_priv->blit_queues + engine;
+       if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
+               return ret;
+       }
+       if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
+               via_dmablit_release_slot(blitq);
+               return DRM_ERR(ENOMEM);
+       }
+       if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
+               via_dmablit_release_slot(blitq);
+               kfree(vsg);
+               return ret;
+       }
+       spin_lock_irqsave(&blitq->blit_lock, irqsave);
+
+       blitq->blits[blitq->head++] = vsg;
+       if (blitq->head >= VIA_NUM_BLIT_SLOTS) 
+               blitq->head = 0;
+       blitq->num_outstanding++;
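+       /*
+        * The sync handle handed back to userspace is simply the next value
+        * of a per-engine counter; via_dmablit_active() later uses it together
+        * with done_blit_handle to decide whether the blit is still pending.
+        */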
+       xfer->sync.sync_handle = ++blitq->cur_blit_handle; 
+
+       spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
+       xfer->sync.engine = engine;
+
+       via_dmablit_handler(dev, engine, 0);
+
+       return 0;
+}
+
+/*
+ * Sync on a previously submitted blit. Note that the X server uses signals extensively, and
+ * that there is a very high probability that this IOCTL will be interrupted by a signal. In that
+ * case it returns with -EAGAIN for the signal to be delivered. 
+ * The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
+ */
+
+int
+via_dma_blit_sync( DRM_IOCTL_ARGS )
+{
+       drm_via_blitsync_t sync;
+       int err;
+       DRM_DEVICE;
+
+       DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
+       
+       if (sync.engine >= VIA_NUM_BLIT_ENGINES) 
+               return DRM_ERR(EINVAL);
+
+       err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
+
+       if (DRM_ERR(EINTR) == err)
+               err = DRM_ERR(EAGAIN);
+
+       return err;
+}
+       
+
+/*
+ * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should 
+ * be reissued. See the above IOCTL code.
+ */
+
+int 
+via_dma_blit( DRM_IOCTL_ARGS )
+{
+       drm_via_dmablit_t xfer;
+       int err;
+       DRM_DEVICE;
+
+       DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
+
+       err = via_dmablit(dev, &xfer);
+
+       DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
+
+       return err;
+}
diff --git a/drivers/char/drm/via_dmablit.h b/drivers/char/drm/via_dmablit.h
new file mode 100644 (file)
index 0000000..f4036cd
--- /dev/null
@@ -0,0 +1,140 @@
+/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
+ * 
+ * Copyright 2005 Thomas Hellstrom.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: 
+ *    Thomas Hellstrom.
+ *    Register info from Digeo Inc.
+ */
+
+#ifndef _VIA_DMABLIT_H
+#define _VIA_DMABLIT_H
+
+#include <linux/dma-mapping.h>
+
+#define VIA_NUM_BLIT_ENGINES 2
+#define VIA_NUM_BLIT_SLOTS 8
+
+struct _drm_via_descriptor;
+
+typedef struct _drm_via_sg_info {
+       struct page **pages;
+       unsigned long num_pages;
+       struct _drm_via_descriptor **desc_pages;
+       int num_desc_pages;
+       int num_desc;
+       enum dma_data_direction direction;
+       unsigned char *bounce_buffer;
+        dma_addr_t chain_start;
+       uint32_t free_on_sequence;
+        unsigned int descriptors_per_page;
+       int aborted;
+       enum {
+               dr_via_device_mapped,
+               dr_via_desc_pages_alloc,
+               dr_via_pages_locked,
+               dr_via_pages_alloc,
+               dr_via_sg_init
+       } state;
+} drm_via_sg_info_t;
+
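+/*
+ * Per-engine blit queue. New blits are inserted at 'head', 'cur' tracks the
+ * slot the engine is currently working on and 'serviced' is how far the
+ * cleanup workqueue has got; all three wrap at VIA_NUM_BLIT_SLOTS.
+ */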
+typedef struct _drm_via_blitq {
+       drm_device_t *dev;
+       uint32_t cur_blit_handle;
+       uint32_t done_blit_handle;
+       unsigned serviced;
+       unsigned head;
+       unsigned cur;
+       unsigned num_free;
+       unsigned num_outstanding;
+       unsigned long end;  
+        int aborting;
+       int is_active;
+       drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
+       spinlock_t blit_lock;
+       wait_queue_head_t blit_queue[VIA_NUM_BLIT_SLOTS];
+       wait_queue_head_t busy_queue;
+       struct work_struct wq;
+       struct timer_list poll_timer;
+} drm_via_blitq_t;
+       
+
+/* 
+ *  PCI DMA Registers
+ *  Channels 2 & 3 don't seem to be implemented in hardware.
+ */
+#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */ 
+#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */ 
+#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */ 
+#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */ 
+
+#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */ 
+#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */ 
+#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */ 
+#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */ 
+
+#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */ 
+#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */ 
+#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */ 
+#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */ 
+
+#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */ 
+#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */ 
+#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */ 
+#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */ 
+
+#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */ 
+#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */ 
+#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */ 
+#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */ 
+
+#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */ 
+#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */ 
+#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */ 
+#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */ 
+
+#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */ 
+
+/* Defines for the DMA engine */
+/* DPR */
+#define VIA_DMA_DPR_EC         (1<<1)  /* end of chain */
+#define VIA_DMA_DPR_DDIE       (1<<2)  /* descriptor done interrupt enable */
+#define VIA_DMA_DPR_DT         (1<<3)  /* direction of transfer (RO) */
+
+/* MR */
+#define VIA_DMA_MR_CM          (1<<0)  /* chaining mode */
+#define VIA_DMA_MR_TDIE                (1<<1)  /* transfer done interrupt enable */
+#define VIA_DMA_MR_HENDMACMD           (1<<7) /* ? */
+
+/* CSR */
+#define VIA_DMA_CSR_DE         (1<<0)  /* DMA enable */
+#define VIA_DMA_CSR_TS         (1<<1)  /* transfer start */
+#define VIA_DMA_CSR_TA         (1<<2)  /* transfer abort */
+#define VIA_DMA_CSR_TD         (1<<3)  /* transfer done */
+#define VIA_DMA_CSR_DD         (1<<4)  /* descriptor done */
+
+
+
+#endif
index ebde9206115eb417957e22ef577e073bfe49bcd4..47f0b5b26379522d1881ea95158c74283572c96b 100644 (file)
@@ -75,6 +75,8 @@
 #define DRM_VIA_CMDBUF_SIZE    0x0b
 #define NOT_USED
 #define DRM_VIA_WAIT_IRQ        0x0d
+#define DRM_VIA_DMA_BLIT        0x0e
+#define DRM_VIA_BLIT_SYNC       0x0f
 
 #define DRM_IOCTL_VIA_ALLOCMEM   DRM_IOWR(DRM_COMMAND_BASE + DRM_VIA_ALLOCMEM, drm_via_mem_t)
 #define DRM_IOCTL_VIA_FREEMEM    DRM_IOW( DRM_COMMAND_BASE + DRM_VIA_FREEMEM, drm_via_mem_t)
@@ -89,6 +91,8 @@
 #define DRM_IOCTL_VIA_CMDBUF_SIZE DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_CMDBUF_SIZE, \
                                            drm_via_cmdbuf_size_t)
 #define DRM_IOCTL_VIA_WAIT_IRQ    DRM_IOWR( DRM_COMMAND_BASE + DRM_VIA_WAIT_IRQ, drm_via_irqwait_t)
+#define DRM_IOCTL_VIA_DMA_BLIT    DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_DMA_BLIT, drm_via_dmablit_t)
+#define DRM_IOCTL_VIA_BLIT_SYNC   DRM_IOW(DRM_COMMAND_BASE + DRM_VIA_BLIT_SYNC, drm_via_blitsync_t)
 
 /* Indices into buf.Setup where various bits of state are mirrored per
  * context and per buffer.  These can be fired at the card as a unit,
 #define VIA_BACK    0x2
 #define VIA_DEPTH   0x4
 #define VIA_STENCIL 0x8
-#define VIDEO 0
-#define AGP 1
+#define VIA_MEM_VIDEO   0      /* matches drm constant */
+#define VIA_MEM_AGP     1      /* matches drm constant */
+#define VIA_MEM_SYSTEM  2
+#define VIA_MEM_MIXED   3
+#define VIA_MEM_UNKNOWN 4
+
 typedef struct {
        uint32_t offset;
        uint32_t size;
@@ -192,6 +200,9 @@ typedef struct _drm_via_sarea {
        unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
        unsigned int XvMCCtxNoGrabbed;  /* Last context to hold decoder */
 
+       /* Used by the 3d driver only at this point, for pageflipping:
+        */
+       unsigned int pfCurrentOffset;
 } drm_via_sarea_t;
 
 typedef struct _drm_via_cmdbuf_size {
@@ -212,6 +223,16 @@ typedef enum {
 
 #define VIA_IRQ_FLAGS_MASK 0xF0000000
 
+enum drm_via_irqs {
+       drm_via_irq_hqv0 = 0,
+       drm_via_irq_hqv1,
+       drm_via_irq_dma0_dd,
+       drm_via_irq_dma0_td,
+       drm_via_irq_dma1_dd,
+       drm_via_irq_dma1_td,
+       drm_via_irq_num
+};
+
 struct drm_via_wait_irq_request {
        unsigned irq;
        via_irq_seq_type_t type;
@@ -224,20 +245,25 @@ typedef union drm_via_irqwait {
        struct drm_wait_vblank_reply reply;
 } drm_via_irqwait_t;
 
-#ifdef __KERNEL__
-
-int via_fb_init(DRM_IOCTL_ARGS);
-int via_mem_alloc(DRM_IOCTL_ARGS);
-int via_mem_free(DRM_IOCTL_ARGS);
-int via_agp_init(DRM_IOCTL_ARGS);
-int via_map_init(DRM_IOCTL_ARGS);
-int via_decoder_futex(DRM_IOCTL_ARGS);
-int via_dma_init(DRM_IOCTL_ARGS);
-int via_cmdbuffer(DRM_IOCTL_ARGS);
-int via_flush_ioctl(DRM_IOCTL_ARGS);
-int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
-int via_cmdbuf_size(DRM_IOCTL_ARGS);
-int via_wait_irq(DRM_IOCTL_ARGS);
+typedef struct drm_via_blitsync {
+       uint32_t sync_handle;
+       unsigned engine;
+} drm_via_blitsync_t;
+
+typedef struct drm_via_dmablit {
+       uint32_t num_lines;
+       uint32_t line_length;
+       
+       uint32_t fb_addr;
+       uint32_t fb_stride;
+
+       unsigned char *mem_addr;
+       uint32_t mem_stride;
+
+       int bounce_buffer;
+       int to_fb;
+
+       drm_via_blitsync_t sync;
+} drm_via_dmablit_t;
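+/*
+ * Rough usage sketch (not part of the original interface comment): userspace
+ * fills in the transfer description above and issues DRM_IOCTL_VIA_DMA_BLIT;
+ * on success the 'sync' member is filled in and can be passed to
+ * DRM_IOCTL_VIA_BLIT_SYNC to wait for completion. -EAGAIN from either ioctl
+ * means the wait was interrupted by a signal and the call should be retried.
+ */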
 
-#endif
 #endif                         /* _VIA_DRM_H_ */
index 016665e0c69f646993f5652b19b5aa89bc645e64..3f012255d31532e9181d7880bc2e2f27539c6040 100644 (file)
 
 #include "drm_pciids.h"
 
-static int postinit(struct drm_device *dev, unsigned long flags)
+static int dri_library_name(struct drm_device *dev, char *buf)
 {
-       DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n",
-                DRIVER_NAME,
-                DRIVER_MAJOR,
-                DRIVER_MINOR,
-                DRIVER_PATCHLEVEL,
-                DRIVER_DATE, dev->primary.minor, pci_pretty_name(dev->pdev)
-           );
-       return 0;
-}
-
-static int version(drm_version_t * version)
-{
-       int len;
-
-       version->version_major = DRIVER_MAJOR;
-       version->version_minor = DRIVER_MINOR;
-       version->version_patchlevel = DRIVER_PATCHLEVEL;
-       DRM_COPY(version->name, DRIVER_NAME);
-       DRM_COPY(version->date, DRIVER_DATE);
-       DRM_COPY(version->desc, DRIVER_DESC);
-       return 0;
+       return snprintf(buf, PAGE_SIZE, "unichrome");
 }
 
 static struct pci_device_id pciidlist[] = {
        viadrv_PCI_IDS
 };
 
-static drm_ioctl_desc_t ioctls[] = {
-       [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, 1, 0},
-       [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, 1, 0}
-};
-
 static struct drm_driver driver = {
        .driver_features =
            DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
            DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+       .load = via_driver_load,
+       .unload = via_driver_unload,
        .context_ctor = via_init_context,
        .context_dtor = via_final_context,
        .vblank_wait = via_driver_vblank_wait,
@@ -85,13 +52,11 @@ static struct drm_driver driver = {
        .irq_uninstall = via_driver_irq_uninstall,
        .irq_handler = via_driver_irq_handler,
        .dma_quiescent = via_driver_dma_quiescent,
+       .dri_library_name = dri_library_name,
        .reclaim_buffers = drm_core_reclaim_buffers,
        .get_map_ofs = drm_core_get_map_ofs,
        .get_reg_ofs = drm_core_get_reg_ofs,
-       .postinit = postinit,
-       .version = version,
-       .ioctls = ioctls,
-       .num_ioctls = DRM_ARRAY_SIZE(ioctls),
+       .ioctls = via_ioctls,
        .fops = {
                 .owner = THIS_MODULE,
                 .open = drm_open,
@@ -100,15 +65,23 @@ static struct drm_driver driver = {
                 .mmap = drm_mmap,
                 .poll = drm_poll,
                 .fasync = drm_fasync,
-                },
+       },
        .pci_driver = {
-                      .name = DRIVER_NAME,
-                      .id_table = pciidlist,
-                      }
+                .name = DRIVER_NAME,
+                .id_table = pciidlist,
+       },
+       
+       .name = DRIVER_NAME,
+       .desc = DRIVER_DESC,
+       .date = DRIVER_DATE,
+       .major = DRIVER_MAJOR,
+       .minor = DRIVER_MINOR,
+       .patchlevel = DRIVER_PATCHLEVEL,
 };
 
 static int __init via_init(void)
 {
+       driver.num_ioctls = via_max_ioctl;
        via_init_command_verifier();
        return drm_init(&driver);
 }
index 7d5daf43797ee5e0ae567857e3407519f6e3c221..aad4f99f540578964013702f310daa67e46e361e 100644 (file)
 #ifndef _VIA_DRV_H_
 #define _VIA_DRV_H_
 
-#define DRIVER_AUTHOR  "VIA"
+#define DRIVER_AUTHOR  "Various"
 
 #define DRIVER_NAME            "via"
 #define DRIVER_DESC            "VIA Unichrome / Pro"
-#define DRIVER_DATE            "20050523"
+#define DRIVER_DATE            "20051116"
 
 #define DRIVER_MAJOR           2
-#define DRIVER_MINOR           6
-#define DRIVER_PATCHLEVEL      3
+#define DRIVER_MINOR           7
+#define DRIVER_PATCHLEVEL      4
 
 #include "via_verifier.h"
 
+#include "via_dmablit.h"
+
 #define VIA_PCI_BUF_SIZE 60000
 #define VIA_FIRE_BUF_SIZE  1024
-#define VIA_NUM_IRQS 2
+#define VIA_NUM_IRQS 4
 
 typedef struct drm_via_ring_buffer {
-       drm_map_t map;
+       drm_local_map_t map;
        char *virtual_start;
 } drm_via_ring_buffer_t;
 
@@ -56,9 +58,9 @@ typedef struct drm_via_irq {
 
 typedef struct drm_via_private {
        drm_via_sarea_t *sarea_priv;
-       drm_map_t *sarea;
-       drm_map_t *fb;
-       drm_map_t *mmio;
+       drm_local_map_t *sarea;
+       drm_local_map_t *fb;
+       drm_local_map_t *mmio;
        unsigned long agpAddr;
        wait_queue_head_t decoder_queue[VIA_NR_XVMC_LOCKS];
        char *dma_ptr;
@@ -82,8 +84,15 @@ typedef struct drm_via_private {
        maskarray_t *irq_masks;
        uint32_t irq_enable_mask;
        uint32_t irq_pending_mask;
+       int *irq_map;
+       drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
 } drm_via_private_t;
 
+enum via_family {
+       VIA_OTHER = 0,
+       VIA_PRO_GROUP_A,
+};
+
 /* VIA MMIO register access */
 #define VIA_BASE ((dev_priv->mmio))
 
@@ -92,12 +101,31 @@ typedef struct drm_via_private {
 #define VIA_READ8(reg)         DRM_READ8(VIA_BASE, reg)
 #define VIA_WRITE8(reg,val)    DRM_WRITE8(VIA_BASE, reg, val)
 
+extern drm_ioctl_desc_t via_ioctls[];
+extern int via_max_ioctl;
+
+extern int via_fb_init(DRM_IOCTL_ARGS);
+extern int via_mem_alloc(DRM_IOCTL_ARGS);
+extern int via_mem_free(DRM_IOCTL_ARGS);
+extern int via_agp_init(DRM_IOCTL_ARGS);
+extern int via_map_init(DRM_IOCTL_ARGS);
+extern int via_decoder_futex(DRM_IOCTL_ARGS);
+extern int via_dma_init(DRM_IOCTL_ARGS);
+extern int via_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_flush_ioctl(DRM_IOCTL_ARGS);
+extern int via_pci_cmdbuffer(DRM_IOCTL_ARGS);
+extern int via_cmdbuf_size(DRM_IOCTL_ARGS);
+extern int via_wait_irq(DRM_IOCTL_ARGS);
+extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
+extern int via_dma_blit( DRM_IOCTL_ARGS );
+
+extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
+extern int via_driver_unload(drm_device_t *dev);
+
 extern int via_init_context(drm_device_t * dev, int context);
 extern int via_final_context(drm_device_t * dev, int context);
 
 extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_map_init(struct inode *inode, struct file *filp,
-                       unsigned int cmd, unsigned long arg);
 extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
@@ -111,8 +139,10 @@ extern int via_driver_dma_quiescent(drm_device_t * dev);
 extern void via_init_futex(drm_via_private_t * dev_priv);
 extern void via_cleanup_futex(drm_via_private_t * dev_priv);
 extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+extern int via_driver_irq_wait(drm_device_t * dev, unsigned int irq,
+                              int force_sequence, unsigned int *sequence);
 
-extern int via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
-                                   unsigned int size);
+extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
+extern void via_init_dmablit(drm_device_t *dev);
 
 #endif
index 5c71e089246c75a713146248a8c04e3f9d617bd3..9429736b3b96fbaf94e6cc0ebdc572a3ec32a7b9 100644 (file)
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
  */
-#include <linux/module.h>
-#include <linux/delay.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/poll.h>
-#include <linux/pci.h>
-#include <asm/io.h>
+#include "drmP.h"
 
 #include "via_ds.h"
 extern unsigned int VIA_DEBUG;
index d023add1929b181bec707b5c516d4d37b6258a6f..56d7e3daea126d8a2f926cd0e21e5444b4d6c479 100644 (file)
 #define VIA_IRQ_HQV1_ENABLE     (1 << 25)
 #define VIA_IRQ_HQV0_PENDING    (1 << 9)
 #define VIA_IRQ_HQV1_PENDING    (1 << 10)
+#define VIA_IRQ_DMA0_DD_ENABLE  (1 << 20)
+#define VIA_IRQ_DMA0_TD_ENABLE  (1 << 21)
+#define VIA_IRQ_DMA1_DD_ENABLE  (1 << 22)
+#define VIA_IRQ_DMA1_TD_ENABLE  (1 << 23)
+#define VIA_IRQ_DMA0_DD_PENDING (1 << 4)
+#define VIA_IRQ_DMA0_TD_PENDING (1 << 5)
+#define VIA_IRQ_DMA1_DD_PENDING (1 << 6)
+#define VIA_IRQ_DMA1_TD_PENDING (1 << 7)
+
 
 /*
  * Device-specific IRQs go here. This type might need to be extended with
@@ -61,13 +70,24 @@ static maskarray_t via_pro_group_a_irqs[] = {
        {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
         0x00000000},
        {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
-        0x00000000}
+        0x00000000},
+       {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+        VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+       {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+        VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 };
 static int via_num_pro_group_a =
     sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
+static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
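+/*
+ * Maps drm_via_irqs enum values onto indices in the irq mask arrays; -1
+ * marks an interrupt source that the chip family does not provide.
+ */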
 
-static maskarray_t via_unichrome_irqs[] = { };
+static maskarray_t via_unichrome_irqs[] = {
+       {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
+        VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
+       {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
+        VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
+};
 static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
 
 static unsigned time_diff(struct timeval *now, struct timeval *then)
 {
@@ -113,6 +133,11 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
                        atomic_inc(&cur_irq->irq_received);
                        DRM_WAKEUP(&cur_irq->irq_queue);
                        handled = 1;
+                       if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
+                               via_dmablit_handler(dev, 0, 1);
+                       } else if (dev_priv->irq_map[drm_via_irq_dma1_td] == i) {
+                               via_dmablit_handler(dev, 1, 1);
+                       }
                }
                cur_irq++;
        }
@@ -165,7 +190,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
        return ret;
 }
 
-static int
+int
 via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
                    unsigned int *sequence)
 {
@@ -174,6 +199,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
        drm_via_irq_t *cur_irq = dev_priv->via_irqs;
        int ret = 0;
        maskarray_t *masks = dev_priv->irq_masks;
+       int real_irq;
 
        DRM_DEBUG("%s\n", __FUNCTION__);
 
@@ -182,15 +208,23 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
                return DRM_ERR(EINVAL);
        }
 
-       if (irq >= dev_priv->num_irqs) {
+       if (irq >= drm_via_irq_num) {
                DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
                          irq);
                return DRM_ERR(EINVAL);
        }
 
-       cur_irq += irq;
+       real_irq = dev_priv->irq_map[irq];
+
+       if (real_irq < 0) {
+               DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
+                         __FUNCTION__, irq);
+               return DRM_ERR(EINVAL);
+       }
+       
+       cur_irq += real_irq;
 
-       if (masks[irq][2] && !force_sequence) {
+       if (masks[real_irq][2] && !force_sequence) {
                DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
                            ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
                             masks[irq][4]));
@@ -226,6 +260,8 @@ void via_driver_irq_preinstall(drm_device_t * dev)
                    via_pro_group_a_irqs : via_unichrome_irqs;
                dev_priv->num_irqs = (dev_priv->pro_group_a) ?
                    via_num_pro_group_a : via_num_unichrome;
+               dev_priv->irq_map = (dev_priv->pro_group_a) ?
+                       via_irqmap_pro_group_a : via_irqmap_unichrome;
 
                for (i = 0; i < dev_priv->num_irqs; ++i) {
                        atomic_set(&cur_irq->irq_received, 0);
@@ -241,7 +277,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
 
                dev_priv->last_vblank_valid = 0;
 
-               // Clear VSync interrupt regs
+               /* Clear VSync interrupt regs */
                status = VIA_READ(VIA_REG_INTERRUPT);
                VIA_WRITE(VIA_REG_INTERRUPT, status &
                          ~(dev_priv->irq_enable_mask));
@@ -291,8 +327,7 @@ void via_driver_irq_uninstall(drm_device_t * dev)
 
 int via_wait_irq(DRM_IOCTL_ARGS)
 {
-       drm_file_t *priv = filp->private_data;
-       drm_device_t *dev = priv->head->dev;
+       DRM_DEVICE;
        drm_via_irqwait_t __user *argp = (void __user *)data;
        drm_via_irqwait_t irqwait;
        struct timeval now;
index 6bd6ac52ad1b2d5c8e71c89cd76029bb767c89de..c6a08e96285bf55c62a96480c7989ea0632a7419 100644 (file)
 
 static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
 {
-       drm_via_private_t *dev_priv;
+       drm_via_private_t *dev_priv = dev->dev_private;
 
        DRM_DEBUG("%s\n", __FUNCTION__);
 
-       dev_priv = drm_alloc(sizeof(drm_via_private_t), DRM_MEM_DRIVER);
-       if (dev_priv == NULL)
-               return -ENOMEM;
-
-       memset(dev_priv, 0, sizeof(drm_via_private_t));
-
        DRM_GETSAREA();
        if (!dev_priv->sarea) {
                DRM_ERROR("could not find sarea!\n");
@@ -67,7 +61,8 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
        dev_priv->agpAddr = init->agpAddr;
 
        via_init_futex(dev_priv);
-       dev_priv->pro_group_a = (dev->pdev->device == 0x3118);
+
+       via_init_dmablit(dev);
 
        dev->dev_private = (void *)dev_priv;
        return 0;
@@ -75,15 +70,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
 
 int via_do_cleanup_map(drm_device_t * dev)
 {
-       if (dev->dev_private) {
-
-               drm_via_private_t *dev_priv = dev->dev_private;
-
-               via_dma_cleanup(dev);
-
-               drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
-               dev->dev_private = NULL;
-       }
+       via_dma_cleanup(dev);
 
        return 0;
 }
@@ -107,3 +94,29 @@ int via_map_init(DRM_IOCTL_ARGS)
 
        return -EINVAL;
 }
+
+int via_driver_load(drm_device_t *dev, unsigned long chipset)
+{
+       drm_via_private_t *dev_priv;
+
+       dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+       if (dev_priv == NULL)
+               return DRM_ERR(ENOMEM);
+
+       dev->dev_private = (void *)dev_priv;
+
+       if (chipset == VIA_PRO_GROUP_A)
+               dev_priv->pro_group_a = 1;
+
+       return 0;
+}
+
+int via_driver_unload(drm_device_t *dev)
+{
+       drm_via_private_t *dev_priv = dev->dev_private;
+
+       drm_free(dev_priv, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
+
+       return 0;
+}
+
index 3baddacdff26ab9100aa3682bb382e1cd6996ae7..33e0cb12e4c30dd448365847421b9266f8ac71c5 100644 (file)
@@ -42,7 +42,7 @@ static int via_agp_free(drm_via_mem_t * mem);
 static int via_fb_alloc(drm_via_mem_t * mem);
 static int via_fb_free(drm_via_mem_t * mem);
 
-static int add_alloc_set(int context, int type, unsigned int val)
+static int add_alloc_set(int context, int type, unsigned long val)
 {
        int i, retval = 0;
 
@@ -56,7 +56,7 @@ static int add_alloc_set(int context, int type, unsigned int val)
        return retval;
 }
 
-static int del_alloc_set(int context, int type, unsigned int val)
+static int del_alloc_set(int context, int type, unsigned long val)
 {
        int i, retval = 0;
 
@@ -199,13 +199,13 @@ int via_mem_alloc(DRM_IOCTL_ARGS)
                                 sizeof(mem));
 
        switch (mem.type) {
-       case VIDEO:
+       case VIA_MEM_VIDEO:
                if (via_fb_alloc(&mem) < 0)
                        return -EFAULT;
                DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
                                       sizeof(mem));
                return 0;
-       case AGP:
+       case VIA_MEM_AGP:
                if (via_agp_alloc(&mem) < 0)
                        return -EFAULT;
                DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
@@ -232,7 +232,7 @@ static int via_fb_alloc(drm_via_mem_t * mem)
        if (block) {
                fb.offset = block->ofs;
                fb.free = (unsigned long)block;
-               if (!add_alloc_set(fb.context, VIDEO, fb.free)) {
+               if (!add_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
                        DRM_DEBUG("adding to allocation set fails\n");
                        via_mmFreeMem((PMemBlock) fb.free);
                        retval = -1;
@@ -269,7 +269,7 @@ static int via_agp_alloc(drm_via_mem_t * mem)
        if (block) {
                agp.offset = block->ofs;
                agp.free = (unsigned long)block;
-               if (!add_alloc_set(agp.context, AGP, agp.free)) {
+               if (!add_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
                        DRM_DEBUG("adding to allocation set fails\n");
                        via_mmFreeMem((PMemBlock) agp.free);
                        retval = -1;
@@ -297,11 +297,11 @@ int via_mem_free(DRM_IOCTL_ARGS)
 
        switch (mem.type) {
 
-       case VIDEO:
+       case VIA_MEM_VIDEO:
                if (via_fb_free(&mem) == 0)
                        return 0;
                break;
-       case AGP:
+       case VIA_MEM_AGP:
                if (via_agp_free(&mem) == 0)
                        return 0;
                break;
@@ -329,7 +329,7 @@ static int via_fb_free(drm_via_mem_t * mem)
 
        via_mmFreeMem((PMemBlock) fb.free);
 
-       if (!del_alloc_set(fb.context, VIDEO, fb.free)) {
+       if (!del_alloc_set(fb.context, VIA_MEM_VIDEO, fb.free)) {
                retval = -1;
        }
 
@@ -352,7 +352,7 @@ static int via_agp_free(drm_via_mem_t * mem)
 
        via_mmFreeMem((PMemBlock) agp.free);
 
-       if (!del_alloc_set(agp.context, AGP, agp.free)) {
+       if (!del_alloc_set(agp.context, VIA_MEM_AGP, agp.free)) {
                retval = -1;
        }
 
index 4ac495f297f7d6bc78fd6303dd21f760bdc48ed0..70c897c8876643313e3093b1360e6f1c0cf1e92e 100644 (file)
@@ -237,7 +237,7 @@ static hazard_t table3[256];
 static __inline__ int
 eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
 {
-       if ((*buf - buf_end) >= num_words) {
+       if ((buf_end - *buf) >= num_words) {
                *buf += num_words;
                return 0;
        }
@@ -249,14 +249,14 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
  * Partially stolen from drm_memory.h
  */
 
-static __inline__ drm_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
                                                    unsigned long offset,
                                                    unsigned long size,
                                                    drm_device_t * dev)
 {
        struct list_head *list;
        drm_map_list_t *r_list;
-       drm_map_t *map = seq->map_cache;
+       drm_local_map_t *map = seq->map_cache;
 
        if (map && map->offset <= offset
            && (offset + size) <= (map->offset + map->size)) {
index eb4eda344345129144913003b7c242a4e2db3166..256590fcc22acab17d4b258d2b8ac45de1d6e4e5 100644 (file)
@@ -47,7 +47,7 @@ typedef struct {
        int agp_texture;
        int multitex;
        drm_device_t *dev;
-       drm_map_t *map_cache;
+       drm_local_map_t *map_cache;
        uint32_t vertex_count;
        int agp;
        const uint32_t *buf_start;
@@ -55,5 +55,7 @@ typedef struct {
 
 extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
                                     drm_device_t * dev, int agp);
+extern int via_parse_command_stream(drm_device_t *dev, const uint32_t *buf,
+                                   unsigned int size);
 
 #endif
index 7fab9fbdf424f64aa263a592820a9ee60c75c708..300ac61b09edbacd68b92f7446b97a133349d203 100644 (file)
@@ -50,8 +50,11 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
        unsigned int i;
        volatile int *lock;
 
+       if (!dev_priv->sarea_priv)
+               return;
+
        for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
-               lock = (int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
+               lock = (volatile int *)XVMCLOCKPTR(dev_priv->sarea_priv, i);
                if ((_DRM_LOCKING_CONTEXT(*lock) == context)) {
                        if (_DRM_LOCK_IS_HELD(*lock)
                            && (*lock & _DRM_LOCK_CONT)) {
@@ -79,7 +82,7 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
        if (fx.lock > VIA_NR_XVMC_LOCKS)
                return -EFAULT;
 
-       lock = (int *)XVMCLOCKPTR(sAPriv, fx.lock);
+       lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
 
        switch (fx.func) {
        case VIA_FUTEX_WAIT:
index 8693835cb2d51f9f8868f1d2b6971e7d8942b69f..e233cf280bc024498c3fdd637d219e8159bb6f2a 100644 (file)
@@ -165,7 +165,7 @@ static int dsp56k_reset(void)
        return 0;
 }
 
-static int dsp56k_upload(u_char *bin, int len)
+static int dsp56k_upload(u_char __user *bin, int len)
 {
        int i;
        u_char *p;
@@ -199,7 +199,7 @@ static int dsp56k_upload(u_char *bin, int len)
        return 0;
 }
 
-static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
+static ssize_t dsp56k_read(struct file *file, char __user *buf, size_t count,
                           loff_t *ppos)
 {
        struct inode *inode = file->f_dentry->d_inode;
@@ -225,10 +225,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
                }
                case 2:  /* 16 bit */
                {
-                       short *data;
+                       short __user *data;
 
                        count /= 2;
-                       data = (short*) buf;
+                       data = (short __user *) buf;
                        handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
                                  put_user(dsp56k_host_interface.data.w[1], data+n++));
                        return 2*n;
@@ -244,10 +244,10 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
                }
                case 4:  /* 32 bit */
                {
-                       long *data;
+                       long __user *data;
 
                        count /= 4;
-                       data = (long*) buf;
+                       data = (long __user *) buf;
                        handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_RECEIVE,
                                  put_user(dsp56k_host_interface.data.l, data+n++));
                        return 4*n;
@@ -262,7 +262,7 @@ static ssize_t dsp56k_read(struct file *file, char *buf, size_t count,
        }
 }
 
-static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
+static ssize_t dsp56k_write(struct file *file, const char __user *buf, size_t count,
                            loff_t *ppos)
 {
        struct inode *inode = file->f_dentry->d_inode;
@@ -287,10 +287,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
                }
                case 2:  /* 16 bit */
                {
-                       const short *data;
+                       const short __user *data;
 
                        count /= 2;
-                       data = (const short *)buf;
+                       data = (const short __user *)buf;
                        handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
                                  get_user(dsp56k_host_interface.data.w[1], data+n++));
                        return 2*n;
@@ -306,10 +306,10 @@ static ssize_t dsp56k_write(struct file *file, const char *buf, size_t count,
                }
                case 4:  /* 32 bit */
                {
-                       const long *data;
+                       const long __user *data;
 
                        count /= 4;
-                       data = (const long *)buf;
+                       data = (const long __user *)buf;
                        handshake(count, dsp56k.maxio, dsp56k.timeout, DSP56K_TRANSMIT,
                                  get_user(dsp56k_host_interface.data.l, data+n++));
                        return 4*n;
@@ -328,6 +328,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
                        unsigned int cmd, unsigned long arg)
 {
        int dev = iminor(inode) & 0x0f;
+       void __user *argp = (void __user *)arg;
 
        switch(dev)
        {
@@ -336,9 +337,9 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
                switch(cmd) {
                case DSP56K_UPLOAD:
                {
-                       char *bin;
+                       char __user *bin;
                        int r, len;
-                       struct dsp56k_upload *binary = (struct dsp56k_upload *) arg;
+                       struct dsp56k_upload __user *binary = argp;
     
                        if(get_user(len, &binary->len) < 0)
                                return -EFAULT;
@@ -372,7 +373,7 @@ static int dsp56k_ioctl(struct inode *inode, struct file *file,
                case DSP56K_HOST_FLAGS:
                {
                        int dir, out, status;
-                       struct dsp56k_host_flags *hf = (struct dsp56k_host_flags*) arg;
+                       struct dsp56k_host_flags __user *hf = argp;
     
                        if(get_user(dir, &hf->dir) < 0)
                                return -EFAULT;
index e469f641c7289b75439b06e8f31e402eba7cf44c..dd5dc8fa490de04820743c9c15435b1cc4da1224 100644 (file)
@@ -160,7 +160,6 @@ static void rs_wait_until_sent(struct tty_struct *, int);
  * memory if large numbers of serial ports are open.
  */
 static unsigned char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 static inline int serial_paranoia_check(struct esp_struct *info,
                                        char *name, const char *routine)
index 204a7302a4a9597e13fcfda676d53f00cb7a79af..e38a5f0e07bbd7d90cc843e9b5d171d622335f20 100644 (file)
@@ -34,7 +34,6 @@
 #define DEBUG 
 
 static char *                  tmp_buf; 
-static DECLARE_MUTEX(tmp_buf_sem);
 
 static int gs_debug;
 
index 704c3c07f0ab8e817c1e9e429521dc30c4037d83..29c41f4418c065d9aa8a447ae34f984e2cf3f321 100644 (file)
@@ -534,7 +534,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
        return virtr + wrote;
 }
 
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
 static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
 {
@@ -795,7 +795,7 @@ static struct file_operations null_fops = {
        .write          = write_null,
 };
 
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
 static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
@@ -865,7 +865,7 @@ static int memory_open(struct inode * inode, struct file * filp)
                case 3:
                        filp->f_op = &null_fops;
                        break;
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
@@ -912,7 +912,7 @@ static const struct {
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
-#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
+#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
 #endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
index 050e70ee59202f03ff3bee326583d5de4530f534..119e629656b7b4d5e835ec1fe11a423a4f35c258 100644 (file)
@@ -82,7 +82,6 @@
 static struct riscom_board * IRQ_to_board[16];
 static struct tty_driver *riscom_driver;
 static unsigned char * tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 static unsigned long baud_table[] =  {
        0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
index 51810f72f1a92def836878e99c6e64967a8f0c88..93998f5baff576d9a68dcbd4077aa1ad37be1c95 100644 (file)
@@ -399,7 +399,7 @@ struct scc_port {
                __asm__ __volatile__ ( "tstb %0" : : "g" (*_scc_del) : "cc" );\
     } while (0)
 
-extern unsigned char scc_shadow[2][16];
+static unsigned char scc_shadow[2][16];
 
 /* The following functions should relax the somehow complicated
  * register access of the SCC. _SCCwrite() stores all written values
index f36342ae8e7e91b7121162580d13f39faa176deb..037c940ac71b8bb10b4dd2788aff141980ddba63 100644 (file)
@@ -129,7 +129,6 @@ struct cyclades_port cy_port[] = {
  * memory if large numbers of serial ports are open.
  */
 static unsigned char *tmp_buf = 0;
-DECLARE_MUTEX(tmp_buf_sem);
 
 /*
  * This is used to look up the divisor speeds and the timeouts
index 0a574bdbce3695df8ab4e0553753e9e5f2f7a1f7..5343e9fc6ab7e721dc1a578e6a6e81e044549490 100644 (file)
@@ -184,7 +184,6 @@ static int sx_poll = HZ;
 
 static struct tty_driver *specialix_driver;
 static unsigned char * tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 static unsigned long baud_table[] =  {
        0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
@@ -2556,8 +2555,6 @@ static int __init specialix_init_module(void)
 
        func_enter();
 
-       init_MUTEX(&tmp_buf_sem); /* Init de the semaphore - pvdl */
-
        if (iobase[0] || iobase[1] || iobase[2] || iobase[3]) {
                for(i = 0; i < SX_NBOARD; i++) {
                        sx_board[i].base = iobase[i];
index 9f1b466c4f84fcd555c79f92021ce17343c936de..ede688a4e141ad71834fb90930d83b7466751caa 100644 (file)
@@ -951,7 +951,6 @@ static void* mgsl_get_text_ptr(void)
  * memory if large numbers of serial ports are open.
  */
 static unsigned char *tmp_buf;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 static inline int mgsl_paranoia_check(struct mgsl_struct *info,
                                        char *name, const char *routine)
index faee5e7acaf7ae7707847bcb9f2ff8f044f41279..4e5360388748e7facd991ceefe64a06439f3b35c 100644 (file)
@@ -476,19 +476,19 @@ static struct port_info *get_port_data(struct tty_struct *tty)
  */
 static void initDataEvent(struct viocharlpevent *viochar, HvLpIndex lp)
 {
+       struct HvLpEvent *hev = &viochar->event;
+
        memset(viochar, 0, sizeof(struct viocharlpevent));
 
-       viochar->event.xFlags.xValid = 1;
-       viochar->event.xFlags.xFunction = HvLpEvent_Function_Int;
-       viochar->event.xFlags.xAckInd = HvLpEvent_AckInd_NoAck;
-       viochar->event.xFlags.xAckType = HvLpEvent_AckType_DeferredAck;
-       viochar->event.xType = HvLpEvent_Type_VirtualIo;
-       viochar->event.xSubtype = viomajorsubtype_chario | viochardata;
-       viochar->event.xSourceLp = HvLpConfig_getLpIndex();
-       viochar->event.xTargetLp = lp;
-       viochar->event.xSizeMinus1 = sizeof(struct viocharlpevent);
-       viochar->event.xSourceInstanceId = viopath_sourceinst(lp);
-       viochar->event.xTargetInstanceId = viopath_targetinst(lp);
+       hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DEFERRED_ACK |
+               HV_LP_EVENT_INT;
+       hev->xType = HvLpEvent_Type_VirtualIo;
+       hev->xSubtype = viomajorsubtype_chario | viochardata;
+       hev->xSourceLp = HvLpConfig_getLpIndex();
+       hev->xTargetLp = lp;
+       hev->xSizeMinus1 = sizeof(struct viocharlpevent);
+       hev->xSourceInstanceId = viopath_sourceinst(lp);
+       hev->xTargetInstanceId = viopath_targetinst(lp);
 }
 
 /*
@@ -752,7 +752,7 @@ static void vioHandleOpenEvent(struct HvLpEvent *event)
        struct port_info *pi;
        int reject = 0;
 
-       if (event->xFlags.xFunction == HvLpEvent_Function_Ack) {
+       if (hvlpevent_is_ack(event)) {
                if (port >= VTTY_PORTS)
                        return;
 
@@ -788,7 +788,7 @@ static void vioHandleOpenEvent(struct HvLpEvent *event)
        }
 
        /* This had better require an ack, otherwise complain */
-       if (event->xFlags.xAckInd != HvLpEvent_AckInd_DoAck) {
+       if (!hvlpevent_need_ack(event)) {
                printk(VIOCONS_KERN_WARN "viocharopen without ack bit!\n");
                return;
        }
@@ -856,7 +856,7 @@ static void vioHandleCloseEvent(struct HvLpEvent *event)
        struct viocharlpevent *cevent = (struct viocharlpevent *)event;
        u8 port = cevent->virtual_device;
 
-       if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+       if (hvlpevent_is_int(event)) {
                if (port >= VTTY_PORTS) {
                        printk(VIOCONS_KERN_WARN
                                        "close message from invalid virtual device.\n");
@@ -1056,8 +1056,7 @@ static void vioHandleCharEvent(struct HvLpEvent *event)
                vioHandleConfig(event);
                break;
        default:
-               if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
-                   (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
+               if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
                        event->xRc = HvLpEvent_Rc_InvalidSubtype;
                        HvCallEvent_ackLpEvent(event);
                }
index ffe6f44ac76fcb080e976584c7499f1ace73e13d..ca8e69d2f64d632118c32e9c05c63a5ae660a71e 100644 (file)
@@ -83,7 +83,6 @@ int dio_register_driver(struct dio_driver *drv)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &dio_bus_type;
-       drv->driver.probe = dio_device_probe;
 
        /* register with core */
        count = driver_register(&drv->driver);
@@ -145,7 +144,8 @@ static int dio_bus_match(struct device *dev, struct device_driver *drv)
 
 struct bus_type dio_bus_type = {
        .name   = "dio",
-       .match  = dio_bus_match
+       .match  = dio_bus_match,
+       .probe  = dio_device_probe,
 };
 
 
index 70f7ab829d36f5dee4db91bfd5a8ed80a41d944e..86e2234faf80e6ab21fe27165ecd3eeabdb42462 100644 (file)
@@ -899,6 +899,12 @@ static int i2c_pxa_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num
        struct pxa_i2c *i2c = adap->algo_data;
        int ret, i;
 
+       /* If the I2C controller is disabled we need to reset it (probably due
+          to a suspend/resume destroying state). We do this here as we can then
+          avoid worrying about resuming the controller before its users. */
+       if (!(ICR & ICR_IUE))
+               i2c_pxa_reset(i2c);
+
        for (i = adap->retries; i >= 0; i--) {
                ret = i2c_pxa_do_xfer(i2c, msgs, num);
                if (ret != I2C_RETRY)
@@ -939,7 +945,9 @@ static struct pxa_i2c i2c_pxa = {
 static int i2c_pxa_probe(struct platform_device *dev)
 {
        struct pxa_i2c *i2c = &i2c_pxa;
+#ifdef CONFIG_I2C_PXA_SLAVE
        struct i2c_pxa_platform_data *plat = dev->dev.platform_data;
+#endif
        int ret;
 
 #ifdef CONFIG_PXA27x
@@ -1024,5 +1032,7 @@ static void i2c_adap_pxa_exit(void)
        return platform_driver_unregister(&i2c_pxa_driver);
 }
 
+MODULE_LICENSE("GPL");
+
 module_init(i2c_adap_pxa_init);
 module_exit(i2c_adap_pxa_exit);
index 52b77477df573232730bbdae37ccd61d53beba22..0ce58b506046b458edb85cf7bda4472a56920da1 100644 (file)
@@ -63,13 +63,6 @@ static int i2c_bus_resume(struct device * dev)
        return rc;
 }
 
-struct bus_type i2c_bus_type = {
-       .name =         "i2c",
-       .match =        i2c_device_match,
-       .suspend =      i2c_bus_suspend,
-       .resume =       i2c_bus_resume,
-};
-
 static int i2c_device_probe(struct device *dev)
 {
        return -ENODEV;
@@ -80,6 +73,15 @@ static int i2c_device_remove(struct device *dev)
        return 0;
 }
 
+struct bus_type i2c_bus_type = {
+       .name =         "i2c",
+       .match =        i2c_device_match,
+       .probe =        i2c_device_probe,
+       .remove =       i2c_device_remove,
+       .suspend =      i2c_bus_suspend,
+       .resume =       i2c_bus_resume,
+};
+
 void i2c_adapter_dev_release(struct device *dev)
 {
        struct i2c_adapter *adap = dev_to_i2c_adapter(dev);
@@ -90,8 +92,6 @@ struct device_driver i2c_adapter_driver = {
        .owner = THIS_MODULE,
        .name = "i2c_adapter",
        .bus = &i2c_bus_type,
-       .probe = i2c_device_probe,
-       .remove = i2c_device_remove,
 };
 
 static void i2c_adapter_class_dev_release(struct class_device *dev)
@@ -294,8 +294,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
        /* add the driver to the list of i2c drivers in the driver core */
        driver->driver.owner = owner;
        driver->driver.bus = &i2c_bus_type;
-       driver->driver.probe = i2c_device_probe;
-       driver->driver.remove = i2c_device_remove;
 
        res = driver_register(&driver->driver);
        if (res)
index 9b2ebd219ad0d0fc8c8aabf83c60fc1b99548d1f..ef09a7ef2396bd6fb99c32bb75df8402f3b351cc 100644 (file)
@@ -3256,9 +3256,8 @@ sector_t ide_cdrom_capacity (ide_drive_t *drive)
 }
 #endif
 
-static int ide_cd_remove(struct device *dev)
+static void ide_cd_remove(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        struct cdrom_info *info = drive->driver_data;
 
        ide_unregister_subdriver(drive, info->driver);
@@ -3266,8 +3265,6 @@ static int ide_cd_remove(struct device *dev)
        del_gendisk(info->disk);
 
        ide_cd_put(info);
-
-       return 0;
 }
 
 static void ide_cd_release(struct kref *kref)
@@ -3291,7 +3288,7 @@ static void ide_cd_release(struct kref *kref)
        kfree(info);
 }
 
-static int ide_cd_probe(struct device *);
+static int ide_cd_probe(ide_drive_t *);
 
 #ifdef CONFIG_PROC_FS
 static int proc_idecd_read_capacity
@@ -3317,9 +3314,9 @@ static ide_driver_t ide_cdrom_driver = {
                .owner          = THIS_MODULE,
                .name           = "ide-cdrom",
                .bus            = &ide_bus_type,
-               .probe          = ide_cd_probe,
-               .remove         = ide_cd_remove,
        },
+       .probe                  = ide_cd_probe,
+       .remove                 = ide_cd_remove,
        .version                = IDECD_VERSION,
        .media                  = ide_cdrom,
        .supports_dsc_overlap   = 1,
@@ -3413,9 +3410,8 @@ static char *ignore = NULL;
 module_param(ignore, charp, 0400);
 MODULE_DESCRIPTION("ATAPI CD-ROM Driver");
 
-static int ide_cd_probe(struct device *dev)
+static int ide_cd_probe(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        struct cdrom_info *info;
        struct gendisk *g;
        struct request_sense sense;
index cab362ea03360a1bc4586ebec338cf2cadf74418..245b508208dfa09fa17bbb7dd5224cdf2b74b931 100644 (file)
@@ -997,9 +997,8 @@ static void ide_cacheflush_p(ide_drive_t *drive)
                printk(KERN_INFO "%s: wcache flush failed!\n", drive->name);
 }
 
-static int ide_disk_remove(struct device *dev)
+static void ide_disk_remove(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        struct ide_disk_obj *idkp = drive->driver_data;
        struct gendisk *g = idkp->disk;
 
@@ -1010,8 +1009,6 @@ static int ide_disk_remove(struct device *dev)
        ide_cacheflush_p(drive);
 
        ide_disk_put(idkp);
-
-       return 0;
 }
 
 static void ide_disk_release(struct kref *kref)
@@ -1027,12 +1024,10 @@ static void ide_disk_release(struct kref *kref)
        kfree(idkp);
 }
 
-static int ide_disk_probe(struct device *dev);
+static int ide_disk_probe(ide_drive_t *drive);
 
-static void ide_device_shutdown(struct device *dev)
+static void ide_device_shutdown(ide_drive_t *drive)
 {
-       ide_drive_t *drive = container_of(dev, ide_drive_t, gendev);
-
 #ifdef CONFIG_ALPHA
        /* On Alpha, halt(8) doesn't actually turn the machine off,
           it puts you into the sort of firmware monitor. Typically,
@@ -1054,7 +1049,7 @@ static void ide_device_shutdown(struct device *dev)
        }
 
        printk("Shutdown: %s\n", drive->name);
-       dev->bus->suspend(dev, PMSG_SUSPEND);
+       drive->gendev.bus->suspend(&drive->gendev, PMSG_SUSPEND);
 }
 
 static ide_driver_t idedisk_driver = {
@@ -1062,10 +1057,10 @@ static ide_driver_t idedisk_driver = {
                .owner          = THIS_MODULE,
                .name           = "ide-disk",
                .bus            = &ide_bus_type,
-               .probe          = ide_disk_probe,
-               .remove         = ide_disk_remove,
-               .shutdown       = ide_device_shutdown,
        },
+       .probe                  = ide_disk_probe,
+       .remove                 = ide_disk_remove,
+       .shutdown               = ide_device_shutdown,
        .version                = IDEDISK_VERSION,
        .media                  = ide_disk,
        .supports_dsc_overlap   = 0,
@@ -1182,9 +1177,8 @@ static struct block_device_operations idedisk_ops = {
 
 MODULE_DESCRIPTION("ATA DISK Driver");
 
-static int ide_disk_probe(struct device *dev)
+static int ide_disk_probe(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        struct ide_disk_obj *idkp;
        struct gendisk *g;
 
index 5945f551aaaad493d22c05e358f2df30b06f110c..1f8db9ac05d12def5a5fe8f49a7492e628e4a0bb 100644 (file)
@@ -1871,9 +1871,8 @@ static void idefloppy_setup (ide_drive_t *drive, idefloppy_floppy_t *floppy)
        idefloppy_add_settings(drive);
 }
 
-static int ide_floppy_remove(struct device *dev)
+static void ide_floppy_remove(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        idefloppy_floppy_t *floppy = drive->driver_data;
        struct gendisk *g = floppy->disk;
 
@@ -1882,8 +1881,6 @@ static int ide_floppy_remove(struct device *dev)
        del_gendisk(g);
 
        ide_floppy_put(floppy);
-
-       return 0;
 }
 
 static void ide_floppy_release(struct kref *kref)
@@ -1922,16 +1919,16 @@ static ide_proc_entry_t idefloppy_proc[] = {
 
 #endif /* CONFIG_PROC_FS */
 
-static int ide_floppy_probe(struct device *);
+static int ide_floppy_probe(ide_drive_t *);
 
 static ide_driver_t idefloppy_driver = {
        .gen_driver = {
                .owner          = THIS_MODULE,
                .name           = "ide-floppy",
                .bus            = &ide_bus_type,
-               .probe          = ide_floppy_probe,
-               .remove         = ide_floppy_remove,
        },
+       .probe                  = ide_floppy_probe,
+       .remove                 = ide_floppy_remove,
        .version                = IDEFLOPPY_VERSION,
        .media                  = ide_floppy,
        .supports_dsc_overlap   = 0,
@@ -2136,9 +2133,8 @@ static struct block_device_operations idefloppy_ops = {
        .revalidate_disk= idefloppy_revalidate_disk
 };
 
-static int ide_floppy_probe(struct device *dev)
+static int ide_floppy_probe(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        idefloppy_floppy_t *floppy;
        struct gendisk *g;
 
index bcbaeb50bb933cbb57c8301291e90297448cbed2..8d50df4526a4ec85cf05627e396902af84cf1f57 100644 (file)
 #include <asm/io.h>
 #include <asm/bitops.h>
 
-void ide_softirq_done(struct request *rq)
-{
-       request_queue_t *q = rq->q;
-
-       add_disk_randomness(rq->rq_disk);
-       end_that_request_chunk(rq, 1, rq->data_len);
-
-       spin_lock_irq(q->queue_lock);
-       end_that_request_last(rq, 1);
-       spin_unlock_irq(q->queue_lock);
-}
-
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
                      int nr_sectors)
 {
-       unsigned int nbytes;
        int ret = 1;
 
        BUG_ON(!(rq->flags & REQ_STARTED));
@@ -94,27 +81,12 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
                HWGROUP(drive)->hwif->ide_dma_on(drive);
        }
 
-       /*
-        * For partial completions (or non fs/pc requests), use the regular
-        * direct completion path. Same thing for requests that failed, to
-        * preserve the ->errors value we use the normal completion path
-        * for those
-        */
-       nbytes = nr_sectors << 9;
-       if (!rq->errors && rq_all_done(rq, nbytes)) {
-               rq->data_len = nbytes;
+       if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+               add_disk_randomness(rq->rq_disk);
                blkdev_dequeue_request(rq);
                HWGROUP(drive)->rq = NULL;
-               blk_complete_request(rq);
+               end_that_request_last(rq, uptodate);
                ret = 0;
-       } else {
-               if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-                       add_disk_randomness(rq->rq_disk);
-                       blkdev_dequeue_request(rq);
-                       HWGROUP(drive)->rq = NULL;
-                       end_that_request_last(rq, uptodate);
-                       ret = 0;
-               }
        }
 
        return ret;
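
Together with the ide-probe.c hunk below (which drops the blk_queue_softirq_done() registration), this backs the IDE layer out of softirq-based request completion: rather than handing the final accounting to a block-layer softirq with blk_complete_request(), __ide_end_request() once again finishes requests inline via end_that_request_first()/end_that_request_last(). For context, a hedged sketch of the deferred-completion scheme being removed, simplified from the deleted ide_softirq_done():

/* Hedged sketch of the removed deferred-completion path: the handler
 * runs in softirq context and does the final per-request accounting.
 * Assumes <linux/blkdev.h>. */
#include <linux/blkdev.h>

static void example_softirq_done(struct request *rq)
{
        request_queue_t *q = rq->q;

        add_disk_randomness(rq->rq_disk);
        end_that_request_chunk(rq, 1, rq->data_len);    /* complete the data */
        spin_lock_irq(q->queue_lock);
        end_that_request_last(rq, 1);                   /* retire the request */
        spin_unlock_irq(q->queue_lock);
}

static void example_init_queue(request_queue_t *q)
{
        /* registration point; cf. the ide-probe.c hunk below */
        blk_queue_softirq_done(q, example_softirq_done);
}
/* the interrupt path then calls blk_complete_request(rq) to defer. */
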
index 7cb2d86601dbde9424a7cd7cae46f324640c4fd7..e7425546b4b1786feb0333f71b0aa43b570d1e43 100644 (file)
@@ -1011,8 +1011,6 @@ static int ide_init_queue(ide_drive_t *drive)
        blk_queue_max_hw_segments(q, max_sg_entries);
        blk_queue_max_phys_segments(q, max_sg_entries);
 
-       blk_queue_softirq_done(q, ide_softirq_done);
-
        /* assign drive queue */
        drive->queue = q;
 
index fab9b2b025044728e5d80a53d929e7d9715ba36a..0101d0def7c519814f45c09af92221a27c9362fb 100644 (file)
@@ -4682,9 +4682,8 @@ static void idetape_setup (ide_drive_t *drive, idetape_tape_t *tape, int minor)
        idetape_add_settings(drive);
 }
 
-static int ide_tape_remove(struct device *dev)
+static void ide_tape_remove(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        idetape_tape_t *tape = drive->driver_data;
 
        ide_unregister_subdriver(drive, tape->driver);
@@ -4692,8 +4691,6 @@ static int ide_tape_remove(struct device *dev)
        ide_unregister_region(tape->disk);
 
        ide_tape_put(tape);
-
-       return 0;
 }
 
 static void ide_tape_release(struct kref *kref)
@@ -4745,16 +4742,16 @@ static ide_proc_entry_t idetape_proc[] = {
 
 #endif
 
-static int ide_tape_probe(struct device *);
+static int ide_tape_probe(ide_drive_t *);
 
 static ide_driver_t idetape_driver = {
        .gen_driver = {
                .owner          = THIS_MODULE,
                .name           = "ide-tape",
                .bus            = &ide_bus_type,
-               .probe          = ide_tape_probe,
-               .remove         = ide_tape_remove,
        },
+       .probe                  = ide_tape_probe,
+       .remove                 = ide_tape_remove,
        .version                = IDETAPE_VERSION,
        .media                  = ide_tape,
        .supports_dsc_overlap   = 1,
@@ -4825,9 +4822,8 @@ static struct block_device_operations idetape_block_ops = {
        .ioctl          = idetape_ioctl,
 };
 
-static int ide_tape_probe(struct device *dev)
+static int ide_tape_probe(ide_drive_t *drive)
 {
-       ide_drive_t *drive = to_ide_device(dev);
        idetape_tape_t *tape;
        struct gendisk *g;
        int minor;
@@ -4883,9 +4879,9 @@ static int ide_tape_probe(struct device *dev)
        idetape_setup(drive, tape, minor);
 
        class_device_create(idetape_sysfs_class, NULL,
-                       MKDEV(IDETAPE_MAJOR, minor), dev, "%s", tape->name);
+                       MKDEV(IDETAPE_MAJOR, minor), &drive->gendev, "%s", tape->name);
        class_device_create(idetape_sysfs_class, NULL,
-                       MKDEV(IDETAPE_MAJOR, minor + 128), dev, "n%s", tape->name);
+                       MKDEV(IDETAPE_MAJOR, minor + 128), &drive->gendev, "n%s", tape->name);
 
        devfs_mk_cdev(MKDEV(HWIF(drive)->major, minor),
                        S_IFCHR | S_IRUGO | S_IWUGO,
index ec5a4cb173b073ff41f5a41608e73c6fcd343817..afeb02bbb72210b4ee45bbcf6c6ed5e4a0cd416c 100644 (file)
@@ -1949,10 +1949,41 @@ static int ide_uevent(struct device *dev, char **envp, int num_envp,
        return 0;
 }
 
+static int generic_ide_probe(struct device *dev)
+{
+       ide_drive_t *drive = to_ide_device(dev);
+       ide_driver_t *drv = to_ide_driver(dev->driver);
+
+       return drv->probe ? drv->probe(drive) : -ENODEV;
+}
+
+static int generic_ide_remove(struct device *dev)
+{
+       ide_drive_t *drive = to_ide_device(dev);
+       ide_driver_t *drv = to_ide_driver(dev->driver);
+
+       if (drv->remove)
+               drv->remove(drive);
+
+       return 0;
+}
+
+static void generic_ide_shutdown(struct device *dev)
+{
+       ide_drive_t *drive = to_ide_device(dev);
+       ide_driver_t *drv = to_ide_driver(dev->driver);
+
+       if (dev->driver && drv->shutdown)
+               drv->shutdown(drive);
+}
+
 struct bus_type ide_bus_type = {
        .name           = "ide",
        .match          = ide_bus_match,
        .uevent         = ide_uevent,
+       .probe          = generic_ide_probe,
+       .remove         = generic_ide_remove,
+       .shutdown       = generic_ide_shutdown,
        .dev_attrs      = ide_dev_attrs,
        .suspend        = generic_ide_suspend,
        .resume         = generic_ide_resume,
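
These wrappers give ide_bus_type its own probe/remove/shutdown methods, translating from struct device back to ide_drive_t and dispatching to new typed hooks in ide_driver_t — which is why every subdriver hunk above (ide-cd, ide-disk, ide-floppy, ide-tape) drops its to_ide_device() boilerplate and moves .probe/.remove (and .shutdown for ide-disk) out of gen_driver. An abridged sketch of what a subdriver declaration looks like afterwards, using a hypothetical "ide-example" driver:

/* Abridged sketch: typed hooks live in ide_driver_t, only owner/name/bus
 * stay in the embedded struct device_driver. Assumes <linux/ide.h>. */
static int  example_probe(ide_drive_t *drive);
static void example_remove(ide_drive_t *drive);
static void example_shutdown(ide_drive_t *drive);

static ide_driver_t example_driver = {
        .gen_driver = {
                .owner  = THIS_MODULE,
                .name   = "ide-example",
                .bus    = &ide_bus_type,
        },
        .probe          = example_probe,
        .remove         = example_remove,
        .shutdown       = example_shutdown,
        /* .version, .media, ... elided */
};
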
index 3a611fe5497e06c0b9e2de2cc9eaf75d2abf0caa..c06b18102b6a19452b4f9ecff5861e6c251e29df 100644 (file)
@@ -3163,22 +3163,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
 }
 EXPORT_SYMBOL(ib_cm_init_qp_attr);
 
-static __be64 cm_get_ca_guid(struct ib_device *device)
-{
-       struct ib_device_attr *device_attr;
-       __be64 guid;
-       int ret;
-
-       device_attr = kmalloc(sizeof *device_attr, GFP_KERNEL);
-       if (!device_attr)
-               return 0;
-
-       ret = ib_query_device(device, device_attr);
-       guid = ret ? 0 : device_attr->node_guid;
-       kfree(device_attr);
-       return guid;
-}
-
 static void cm_add_one(struct ib_device *device)
 {
        struct cm_device *cm_dev;
@@ -3200,9 +3184,7 @@ static void cm_add_one(struct ib_device *device)
                return;
 
        cm_dev->device = device;
-       cm_dev->ca_guid = cm_get_ca_guid(device);
-       if (!cm_dev->ca_guid)
-               goto error1;
+       cm_dev->ca_guid = device->node_guid;
 
        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= device->phys_port_cnt; i++) {
@@ -3217,11 +3199,11 @@ static void cm_add_one(struct ib_device *device)
                                                        cm_recv_handler,
                                                        port);
                if (IS_ERR(port->mad_agent))
-                       goto error2;
+                       goto error1;
 
                ret = ib_modify_port(device, i, 0, &port_modify);
                if (ret)
-                       goto error3;
+                       goto error2;
        }
        ib_set_client_data(device, &cm_client, cm_dev);
 
@@ -3230,9 +3212,9 @@ static void cm_add_one(struct ib_device *device)
        write_unlock_irqrestore(&cm.device_lock, flags);
        return;
 
-error3:
-       ib_unregister_mad_agent(port->mad_agent);
 error2:
+       ib_unregister_mad_agent(port->mad_agent);
+error1:
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
@@ -3240,7 +3222,6 @@ error2:
                ib_modify_port(device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
        }
-error1:
        kfree(cm_dev);
 }
 
index e169e798354b6283174a66adfe0f0027b9412ca5..b2f3cb91d9bcfdb90159d1e72cc5493d6798343d 100644 (file)
@@ -38,8 +38,7 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 #include "core_priv.h"
 
@@ -57,13 +56,13 @@ static LIST_HEAD(device_list);
 static LIST_HEAD(client_list);
 
 /*
- * device_sem protects access to both device_list and client_list.
+ * device_mutex protects access to both device_list and client_list.
  * There's no real point to using multiple locks or something fancier
  * like an rwsem: we always access both lists, and we're always
  * modifying one list or the other list.  In any case this is not a
  * hot path so there's no point in trying to optimize.
  */
-static DECLARE_MUTEX(device_sem);
+static DEFINE_MUTEX(device_mutex);
 
 static int ib_device_check_mandatory(struct ib_device *device)
 {
@@ -221,7 +220,7 @@ int ib_register_device(struct ib_device *device)
 {
        int ret;
 
-       down(&device_sem);
+       mutex_lock(&device_mutex);
 
        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
@@ -259,7 +258,7 @@ int ib_register_device(struct ib_device *device)
        }
 
  out:
-       up(&device_sem);
+       mutex_unlock(&device_mutex);
        return ret;
 }
 EXPORT_SYMBOL(ib_register_device);
@@ -276,7 +275,7 @@ void ib_unregister_device(struct ib_device *device)
        struct ib_client_data *context, *tmp;
        unsigned long flags;
 
-       down(&device_sem);
+       mutex_lock(&device_mutex);
 
        list_for_each_entry_reverse(client, &client_list, list)
                if (client->remove)
@@ -284,7 +283,7 @@ void ib_unregister_device(struct ib_device *device)
 
        list_del(&device->core_list);
 
-       up(&device_sem);
+       mutex_unlock(&device_mutex);
 
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
@@ -312,14 +311,14 @@ int ib_register_client(struct ib_client *client)
 {
        struct ib_device *device;
 
-       down(&device_sem);
+       mutex_lock(&device_mutex);
 
        list_add_tail(&client->list, &client_list);
        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);
 
-       up(&device_sem);
+       mutex_unlock(&device_mutex);
 
        return 0;
 }
@@ -339,7 +338,7 @@ void ib_unregister_client(struct ib_client *client)
        struct ib_device *device;
        unsigned long flags;
 
-       down(&device_sem);
+       mutex_lock(&device_mutex);
 
        list_for_each_entry(device, &device_list, core_list) {
                if (client->remove)
@@ -355,7 +354,7 @@ void ib_unregister_client(struct ib_client *client)
        }
        list_del(&client->list);
 
-       up(&device_sem);
+       mutex_unlock(&device_mutex);
 }
 EXPORT_SYMBOL(ib_unregister_client);
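
From here through the ucm, uverbs and ipoib hunks, semaphores that were only ever used as mutexes are converted to the mutex API: DECLARE_MUTEX/down/up become DEFINE_MUTEX/mutex_lock/mutex_unlock, init_MUTEX() becomes mutex_init(), and struct semaphore members become struct mutex. A minimal sketch of the converted pattern, with a hypothetical lock and list:

/* Minimal sketch of the post-conversion pattern; the names here are
 * hypothetical, not taken from the hunks. */
#include <linux/mutex.h>
#include <linux/list.h>

static DEFINE_MUTEX(example_mutex);             /* was DECLARE_MUTEX(example_sem) */
static LIST_HEAD(example_list);

static void example_add(struct list_head *item)
{
        mutex_lock(&example_mutex);             /* was down(&example_sem) */
        list_add_tail(item, &example_list);
        mutex_unlock(&example_mutex);           /* was up(&example_sem)   */
}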
 
index 1f1743c5c9a3b0ead51a349ac445e682f8d9b866..5982d687a0009e2db11befcce2d0c4d9fde519f8 100644 (file)
@@ -445,13 +445,7 @@ static int ib_device_uevent(struct class_device *cdev, char **envp,
                return -ENOMEM;
 
        /*
-        * It might be nice to pass the node GUID with the event, but
-        * right now the only way to get it is to query the device
-        * provider, and this can crash during device removal because
-        * we are will be running after driver removal has started.
-        * We could add a node_guid field to struct ib_device, or we
-        * could just let userspace read the node GUID from sysfs when
-        * devices are added.
+        * It would be nice to pass the node GUID with the event...
         */
 
        envp[i] = NULL;
@@ -623,21 +617,15 @@ static ssize_t show_sys_image_guid(struct class_device *cdev, char *buf)
 static ssize_t show_node_guid(struct class_device *cdev, char *buf)
 {
        struct ib_device *dev = container_of(cdev, struct ib_device, class_dev);
-       struct ib_device_attr attr;
-       ssize_t ret;
 
        if (!ibdev_is_alive(dev))
                return -ENODEV;
 
-       ret = ib_query_device(dev, &attr);
-       if (ret)
-               return ret;
-
        return sprintf(buf, "%04x:%04x:%04x:%04x\n",
-                      be16_to_cpu(((__be16 *) &attr.node_guid)[0]),
-                      be16_to_cpu(((__be16 *) &attr.node_guid)[1]),
-                      be16_to_cpu(((__be16 *) &attr.node_guid)[2]),
-                      be16_to_cpu(((__be16 *) &attr.node_guid)[3]));
+                      be16_to_cpu(((__be16 *) &dev->node_guid)[0]),
+                      be16_to_cpu(((__be16 *) &dev->node_guid)[1]),
+                      be16_to_cpu(((__be16 *) &dev->node_guid)[2]),
+                      be16_to_cpu(((__be16 *) &dev->node_guid)[3]));
 }
 
 static CLASS_DEVICE_ATTR(node_type, S_IRUGO, show_node_type, NULL);
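
Since the node GUID is now cached in struct ib_device (filled in by the low-level driver at registration, see mthca_init_node_data() further down), both cm.c above and this sysfs attribute read it directly instead of doing an ib_query_device() round trip — which, per the deleted comment in the uevent hunk, could crash when the query raced with device removal. The attribute prints the big-endian 64-bit GUID as four 16-bit groups; a self-contained illustration of that formatting:

/* Standalone illustration of the "%04x:%04x:%04x:%04x" formatting in
 * show_node_guid(); ntohs() stands in for be16_to_cpu() and the GUID
 * value is just an example. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        /* a GUID stored big-endian (network byte order), as on the wire */
        uint8_t guid[8] = { 0x00, 0x02, 0xc9, 0x02, 0x00, 0x21, 0x23, 0x45 };
        uint16_t grp[4];

        memcpy(grp, guid, sizeof grp);
        printf("%04x:%04x:%04x:%04x\n",
               ntohs(grp[0]), ntohs(grp[1]), ntohs(grp[2]), ntohs(grp[3]));
        return 0;       /* prints 0002:c902:0021:2345 */
}
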
index 6e15787d1de1a1d32e6f0df8a2e95c02e79a12f4..e95c4293a4967d108ea79c7c1d5166bbaad00999 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/mount.h>
 #include <linux/cdev.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -113,7 +114,7 @@ static struct ib_client ucm_client = {
        .remove = ib_ucm_remove_one
 };
 
-static DECLARE_MUTEX(ctx_id_mutex);
+static DEFINE_MUTEX(ctx_id_mutex);
 static DEFINE_IDR(ctx_id_table);
 static DECLARE_BITMAP(dev_map, IB_UCM_MAX_DEVICES);
 
@@ -121,7 +122,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
 {
        struct ib_ucm_context *ctx;
 
-       down(&ctx_id_mutex);
+       mutex_lock(&ctx_id_mutex);
        ctx = idr_find(&ctx_id_table, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
@@ -129,7 +130,7 @@ static struct ib_ucm_context *ib_ucm_ctx_get(struct ib_ucm_file *file, int id)
                ctx = ERR_PTR(-EINVAL);
        else
                atomic_inc(&ctx->ref);
-       up(&ctx_id_mutex);
+       mutex_unlock(&ctx_id_mutex);
 
        return ctx;
 }
@@ -186,9 +187,9 @@ static struct ib_ucm_context *ib_ucm_ctx_alloc(struct ib_ucm_file *file)
                if (!result)
                        goto error;
 
-               down(&ctx_id_mutex);
+               mutex_lock(&ctx_id_mutex);
                result = idr_get_new(&ctx_id_table, ctx, &ctx->id);
-               up(&ctx_id_mutex);
+               mutex_unlock(&ctx_id_mutex);
        } while (result == -EAGAIN);
 
        if (result)
@@ -550,9 +551,9 @@ static ssize_t ib_ucm_create_id(struct ib_ucm_file *file,
 err2:
        ib_destroy_cm_id(ctx->cm_id);
 err1:
-       down(&ctx_id_mutex);
+       mutex_lock(&ctx_id_mutex);
        idr_remove(&ctx_id_table, ctx->id);
-       up(&ctx_id_mutex);
+       mutex_unlock(&ctx_id_mutex);
        kfree(ctx);
        return result;
 }
@@ -572,7 +573,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;
 
-       down(&ctx_id_mutex);
+       mutex_lock(&ctx_id_mutex);
        ctx = idr_find(&ctx_id_table, cmd.id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
@@ -580,7 +581,7 @@ static ssize_t ib_ucm_destroy_id(struct ib_ucm_file *file,
                ctx = ERR_PTR(-EINVAL);
        else
                idr_remove(&ctx_id_table, ctx->id);
-       up(&ctx_id_mutex);
+       mutex_unlock(&ctx_id_mutex);
 
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);
@@ -1280,9 +1281,9 @@ static int ib_ucm_close(struct inode *inode, struct file *filp)
                                 struct ib_ucm_context, file_list);
                up(&file->mutex);
 
-               down(&ctx_id_mutex);
+               mutex_lock(&ctx_id_mutex);
                idr_remove(&ctx_id_table, ctx->id);
-               up(&ctx_id_mutex);
+               mutex_unlock(&ctx_id_mutex);
 
                ib_destroy_cm_id(ctx->cm_id);
                ib_ucm_cleanup_events(ctx);
index 7114e3fbab00d2d1bfccd76c7964ad6d4a81c130..f7eecbc6af6c2b0b371ff361508d9ae074cab484 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <linux/kref.h>
 #include <linux/idr.h>
+#include <linux/mutex.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_user_verbs.h>
@@ -88,7 +89,7 @@ struct ib_uverbs_event_file {
 
 struct ib_uverbs_file {
        struct kref                             ref;
-       struct semaphore                        mutex;
+       struct mutex                            mutex;
        struct ib_uverbs_device                *device;
        struct ib_ucontext                     *ucontext;
        struct ib_event_handler                 event_handler;
@@ -131,7 +132,7 @@ struct ib_ucq_object {
        u32                     async_events_reported;
 };
 
-extern struct semaphore ib_uverbs_idr_mutex;
+extern struct mutex ib_uverbs_idr_mutex;
 extern struct idr ib_uverbs_pd_idr;
 extern struct idr ib_uverbs_mr_idr;
 extern struct idr ib_uverbs_mw_idr;
index a02c5a05c984f7931310de1fca427265efc3e05b..407b6284d7d5c502c88595f1f2820bf856aac1c8 100644 (file)
@@ -67,7 +67,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
 
        if (file->ucontext) {
                ret = -EINVAL;
@@ -119,7 +119,7 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
 
        fd_install(resp.async_fd, filp);
 
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        return in_len;
 
@@ -131,7 +131,7 @@ err_free:
        ibdev->dealloc_ucontext(ucontext);
 
 err:
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
        return ret;
 }
 
@@ -157,7 +157,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
        memset(&resp, 0, sizeof resp);
 
        resp.fw_ver                    = attr.fw_ver;
-       resp.node_guid                 = attr.node_guid;
+       resp.node_guid                 = file->device->ib_dev->node_guid;
        resp.sys_image_guid            = attr.sys_image_guid;
        resp.max_mr_size               = attr.max_mr_size;
        resp.page_size_cap             = attr.page_size_cap;
@@ -290,7 +290,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
        pd->uobject = uobj;
        atomic_set(&pd->usecnt, 0);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
 retry:
        if (!idr_pre_get(&ib_uverbs_pd_idr, GFP_KERNEL)) {
@@ -314,11 +314,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->pd_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -326,7 +326,7 @@ err_idr:
        idr_remove(&ib_uverbs_pd_idr, uobj->id);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
        ib_dealloc_pd(pd);
 
 err:
@@ -346,7 +346,7 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext)
@@ -360,14 +360,14 @@ ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&uobj->list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        kfree(uobj);
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -426,7 +426,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 
        obj->umem.virt_base = cmd.hca_va;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
@@ -476,11 +476,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -492,7 +492,7 @@ err_unreg:
        atomic_dec(&pd->usecnt);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        ib_umem_release(file->device->ib_dev, &obj->umem);
 
@@ -513,7 +513,7 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
        if (!mr || mr->uobject->context != file->ucontext)
@@ -527,15 +527,15 @@ ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&memobj->uobject.list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        ib_umem_release(file->device->ib_dev, &memobj->umem);
        kfree(memobj);
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -628,7 +628,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
        cq->cq_context    = ev_file;
        atomic_set(&cq->usecnt, 0);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
 retry:
        if (!idr_pre_get(&ib_uverbs_cq_idr, GFP_KERNEL)) {
@@ -653,11 +653,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -665,7 +665,7 @@ err_idr:
        idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
        ib_destroy_cq(cq);
 
 err:
@@ -701,7 +701,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                goto out_wc;
        }
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext) {
                ret = -EINVAL;
@@ -731,7 +731,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
        kfree(resp);
 
 out_wc:
@@ -750,14 +750,14 @@ ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (cq && cq->uobject->context == file->ucontext) {
                ib_req_notify_cq(cq, cmd.solicited_only ?
                                        IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
                ret = in_len;
        }
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret;
 }
@@ -779,7 +779,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 
        memset(&resp, 0, sizeof resp);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
        if (!cq || cq->uobject->context != file->ucontext)
@@ -795,9 +795,9 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&uobj->uobject.list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        ib_uverbs_release_ucq(file, ev_file, uobj);
 
@@ -811,7 +811,7 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -845,7 +845,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
        if (!uobj)
                return -ENOMEM;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
@@ -930,11 +930,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -950,7 +950,7 @@ err_destroy:
                atomic_dec(&attr.srq->usecnt);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        kfree(uobj);
        return ret;
@@ -972,7 +972,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        if (!attr)
                return -ENOMEM;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext) {
@@ -1033,7 +1033,7 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        ret = in_len;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
        kfree(attr);
 
        return ret;
@@ -1054,7 +1054,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 
        memset(&resp, 0, sizeof resp);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
@@ -1073,9 +1073,9 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&uobj->uevent.uobject.list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        ib_uverbs_release_uevent(file, &uobj->uevent);
 
@@ -1088,7 +1088,7 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -1119,7 +1119,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
        if (!user_wr)
                return -ENOMEM;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
@@ -1224,7 +1224,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        while (wr) {
                next = wr->next;
@@ -1341,7 +1341,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
        if (IS_ERR(wr))
                return PTR_ERR(wr);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
@@ -1362,7 +1362,7 @@ ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        while (wr) {
                next = wr->next;
@@ -1392,7 +1392,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
        if (IS_ERR(wr))
                return PTR_ERR(wr);
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext)
@@ -1413,7 +1413,7 @@ ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        while (wr) {
                next = wr->next;
@@ -1446,7 +1446,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
        if (!uobj)
                return -ENOMEM;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
        if (!pd || pd->uobject->context != file->ucontext) {
@@ -1498,11 +1498,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&uobj->list, &file->ucontext->ah_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -1513,7 +1513,7 @@ err_destroy:
        ib_destroy_ah(ah);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        kfree(uobj);
        return ret;
@@ -1530,7 +1530,7 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
        if (!ah || ah->uobject->context != file->ucontext)
@@ -1544,14 +1544,14 @@ ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&uobj->list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        kfree(uobj);
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -1569,7 +1569,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
@@ -1602,7 +1602,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
                kfree(mcast);
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -1620,7 +1620,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
        if (!qp || qp->uobject->context != file->ucontext)
@@ -1641,7 +1641,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
                }
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -1673,7 +1673,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
        if (!uobj)
                return -ENOMEM;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        pd  = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
 
@@ -1730,11 +1730,11 @@ retry:
                goto err_idr;
        }
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return in_len;
 
@@ -1746,7 +1746,7 @@ err_destroy:
        atomic_dec(&pd->usecnt);
 
 err_up:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        kfree(uobj);
        return ret;
@@ -1764,7 +1764,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
        if (!srq || srq->uobject->context != file->ucontext) {
@@ -1778,7 +1778,7 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
        ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
@@ -1796,7 +1796,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
        if (copy_from_user(&cmd, buf, sizeof cmd))
                return -EFAULT;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        memset(&resp, 0, sizeof resp);
 
@@ -1812,9 +1812,9 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
 
-       down(&file->mutex);
+       mutex_lock(&file->mutex);
        list_del(&uobj->uobject.list);
-       up(&file->mutex);
+       mutex_unlock(&file->mutex);
 
        ib_uverbs_release_uevent(file, uobj);
 
@@ -1827,7 +1827,7 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
                ret = -EFAULT;
 
 out:
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return ret ? ret : in_len;
 }
index 81737bd6faea1405ef9f3fe41f4f2f136e6b043c..96ea79b63df7221747e1ccd65cf1622d93dc6885 100644 (file)
@@ -66,7 +66,7 @@ enum {
 
 static struct class *uverbs_class;
 
-DECLARE_MUTEX(ib_uverbs_idr_mutex);
+DEFINE_MUTEX(ib_uverbs_idr_mutex);
 DEFINE_IDR(ib_uverbs_pd_idr);
 DEFINE_IDR(ib_uverbs_mr_idr);
 DEFINE_IDR(ib_uverbs_mw_idr);
@@ -180,7 +180,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
        if (!context)
                return 0;
 
-       down(&ib_uverbs_idr_mutex);
+       mutex_lock(&ib_uverbs_idr_mutex);
 
        list_for_each_entry_safe(uobj, tmp, &context->ah_list, list) {
                struct ib_ah *ah = idr_find(&ib_uverbs_ah_idr, uobj->id);
@@ -250,7 +250,7 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file,
                kfree(uobj);
        }
 
-       up(&ib_uverbs_idr_mutex);
+       mutex_unlock(&ib_uverbs_idr_mutex);
 
        return context->device->dealloc_ucontext(context);
 }
@@ -653,7 +653,7 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
        file->ucontext   = NULL;
        file->async_file = NULL;
        kref_init(&file->ref);
-       init_MUTEX(&file->mutex);
+       mutex_init(&file->mutex);
 
        filp->private_data = file;
 
index 22fdc446f25cdcd0d3100cd20dd800d7b7d42830..a14eed08a0fcb5a4503ee59695671ae8e32ac82b 100644 (file)
@@ -163,6 +163,11 @@ int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah)
        return 0;
 }
 
+int mthca_ah_grh_present(struct mthca_ah *ah)
+{
+       return !!(ah->av->g_slid & 0x80);
+}
+
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
                  struct ib_ud_header *header)
 {
@@ -172,8 +177,7 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
        header->lrh.service_level   = be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 28;
        header->lrh.destination_lid = ah->av->dlid;
        header->lrh.source_lid      = cpu_to_be16(ah->av->g_slid & 0x7f);
-       if (ah->av->g_slid & 0x80) {
-               header->grh_present = 1;
+       if (mthca_ah_grh_present(ah)) {
                header->grh.traffic_class =
                        (be32_to_cpu(ah->av->sl_tclass_flowlabel) >> 20) & 0xff;
                header->grh.flow_label    =
@@ -184,8 +188,6 @@ int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
                                  &header->grh.source_gid);
                memcpy(header->grh.destination_gid.raw,
                       ah->av->dgid, 16);
-       } else {
-               header->grh_present = 0;
        }
 
        return 0;
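
The new mthca_ah_grh_present() helper (exported via the mthca_dev.h hunk below) normalizes the GRH bit of g_slid to 0 or 1, so the UD-header construction in mthca_qp.c further down can pass the GRH flag into ib_ud_header_init() up front instead of depending on ud_header.grh_present being filled in afterwards as a side effect of mthca_read_ah(). The double negation is the usual idiom for collapsing a bit test to a boolean:

/* Illustration of the !! idiom used above. */
static inline int grh_present(unsigned char g_slid)
{
        return !!(g_slid & 0x80);       /* 0 or 1, never 0x80 */
}
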
index 22ac72bc20c388860df359ade45c8842d6fbc0e5..be1791be627bcdd08b03ac7290741f8ae133e26b 100644 (file)
@@ -606,7 +606,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
                        err = -EINVAL;
                        goto out;
                }
-               for (i = 0; i < mthca_icm_size(&iter) / (1 << lg); ++i) {
+               for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
                        if (virt != -1) {
                                pages[nent * 2] = cpu_to_be64(virt);
                                virt += 1 << lg;
@@ -727,8 +727,8 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
                 * system pages needed.
                 */
                dev->fw.arbel.fw_pages =
-                       (dev->fw.arbel.fw_pages + (1 << (PAGE_SHIFT - 12)) - 1) >>
-                       (PAGE_SHIFT - 12);
+                       ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >>
+                               (PAGE_SHIFT - 12);
 
                mthca_dbg(dev, "Clear int @ %llx, EQ arm @ %llx, EQ set CI @ %llx\n",
                          (unsigned long long) dev->fw.arbel.clr_int_base,
@@ -1445,6 +1445,7 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
         * pages needed.
         */
        *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12);
+       *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12);
 
        return 0;
 }
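
Two arithmetic cleanups here. In mthca_map_cmd() the division by (1 << lg) becomes the equivalent right shift by lg. In mthca_QUERY_FW() the count of 4 KB firmware pages is converted to a count of host PAGE_SIZE pages, rounded up: ALIGN(n, PAGE_SIZE >> 12) rounds the 4 KB-page count up to a whole number of host pages and the shift by (PAGE_SHIFT - 12) then divides by the number of 4 KB pages per host page, giving the same value as the old open-coded (n + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12) form, just spelled with the standard ALIGN() helper. A self-contained check of that equivalence using a hypothetical 16 KB host page:

/* Standalone check of the 4 KB-page -> host-page conversion above;
 * PAGE_SHIFT is hard-coded to a hypothetical 16 KB host page. */
#include <stdio.h>

#define PAGE_SHIFT      14
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define ALIGN(x, a)     (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void)
{
        unsigned long fw_pages_4k = 9;  /* firmware size in 4 KB pages */
        unsigned long old_form = (fw_pages_4k + (1 << (PAGE_SHIFT - 12)) - 1)
                                        >> (PAGE_SHIFT - 12);
        unsigned long new_form = ALIGN(fw_pages_4k, PAGE_SIZE >> 12)
                                        >> (PAGE_SHIFT - 12);

        /* both round 9 x 4 KB up to 3 x 16 KB host pages */
        printf("old=%lu new=%lu\n", old_form, new_form);
        return 0;
}
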
index 795b379260bfeb6eb5cc77c023b0bf103c5210fd..a104ab041ea35477f0682a74c13f14d9eb61e2f5 100644 (file)
@@ -520,6 +520,7 @@ int mthca_create_ah(struct mthca_dev *dev,
 int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah);
 int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah,
                  struct ib_ud_header *header);
+int mthca_ah_grh_present(struct mthca_ah *ah);
 
 int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
 int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid);
index e8a948f087c06835cacf604c34188ce785ec02b7..2eabb27804cd367b138d2b0e167617a6df20d3b4 100644 (file)
@@ -45,6 +45,7 @@
 enum {
        MTHCA_NUM_ASYNC_EQE = 0x80,
        MTHCA_NUM_CMD_EQE   = 0x80,
+       MTHCA_NUM_SPARE_EQE = 0x80,
        MTHCA_EQ_ENTRY_SIZE = 0x20
 };
 
@@ -277,11 +278,10 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 {
        struct mthca_eqe *eqe;
        int disarm_cqn;
-       int  eqes_found = 0;
+       int eqes_found = 0;
+       int set_ci = 0;
 
        while ((eqe = next_eqe_sw(eq))) {
-               int set_ci = 0;
-
                /*
                 * Make sure we read EQ entry contents after we've
                 * checked the ownership bit.
@@ -345,12 +345,6 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                                        be16_to_cpu(eqe->event.cmd.token),
                                        eqe->event.cmd.status,
                                        be64_to_cpu(eqe->event.cmd.out_param));
-                       /*
-                        * cmd_event() may add more commands.
-                        * The card will think the queue has overflowed if
-                        * we don't tell it we've been processing events.
-                        */
-                       set_ci = 1;
                        break;
 
                case MTHCA_EVENT_TYPE_PORT_CHANGE:
@@ -385,8 +379,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
                set_eqe_hw(eqe);
                ++eq->cons_index;
                eqes_found = 1;
+               ++set_ci;
 
-               if (unlikely(set_ci)) {
+               /*
+                * The HCA will think the queue has overflowed if we
+                * don't tell it we've been processing events.  We
+                * create our EQs with MTHCA_NUM_SPARE_EQE extra
+                * entries, so we must update our consumer index at
+                * least that often.
+                */
+               if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
                        /*
                         * Conditional on hca_type is OK here because
                         * this is a rare case, not the fast path.
@@ -862,19 +864,19 @@ int __devinit mthca_init_eq_table(struct mthca_dev *dev)
        intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ?
                128 : dev->eq_table.inta_pin;
 
-       err = mthca_create_eq(dev, dev->limits.num_cqs,
+       err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_COMP]);
        if (err)
                goto err_out_unmap;
 
-       err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE,
+       err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_ASYNC]);
        if (err)
                goto err_out_comp;
 
-       err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE,
+       err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE,
                              (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr,
                              &dev->eq_table.eq[MTHCA_EQ_CMD]);
        if (err)
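
The event-queue handling is reworked so the consumer index is no longer written back only after command-completion events: each EQ is now created with MTHCA_NUM_SPARE_EQE extra entries, and the interrupt loop updates the consumer index at least once every MTHCA_NUM_SPARE_EQE processed events, so the HCA never sees the queue as full during a long burst. A standalone model of that cadence (set_eq_ci() here is only a stand-in for the consumer-index doorbell write):

/* Standalone model of the batched consumer-index update added above. */
#include <stdio.h>

#define NUM_SPARE_EQE   0x80    /* mirrors MTHCA_NUM_SPARE_EQE */

static unsigned int hw_ci;      /* what the "HCA" believes was consumed */

static void set_eq_ci(unsigned int ci)
{
        hw_ci = ci;             /* stand-in for the doorbell write */
}

int main(void)
{
        unsigned int cons_index = 0, before_final;
        int set_ci = 0;
        int ev;

        for (ev = 0; ev < 300; ++ev) {          /* a burst of 300 events */
                ++cons_index;
                if (++set_ci >= NUM_SPARE_EQE) {
                        set_eq_ci(cons_index);  /* at least every 0x80 events */
                        set_ci = 0;
                }
        }
        before_final = hw_ci;
        set_eq_ci(cons_index);                  /* final update after the loop */
        printf("consumed %u, hw had seen %u before the final update\n",
               cons_index, before_final);
        return 0;
}
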
index 4cc7e2846df16c08e4f114a7832ca86b066b8cdb..484a7e6b7f8c895de887e8d3cfdbdfb4345390ee 100644 (file)
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: mthca_provider.c 1397 2004-12-28 05:09:00Z roland $
+ * $Id: mthca_provider.c 4859 2006-01-09 21:55:10Z roland $
  */
 
 #include <rdma/ib_smi.h>
 #include "mthca_user.h"
 #include "mthca_memfree.h"
 
+static void init_query_mad(struct ib_smp *mad)
+{
+       mad->base_version  = 1;
+       mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
+       mad->class_version = 1;
+       mad->method        = IB_MGMT_METHOD_GET;
+}
+
 static int mthca_query_device(struct ib_device *ibdev,
                              struct ib_device_attr *props)
 {
@@ -55,7 +63,7 @@ static int mthca_query_device(struct ib_device *ibdev,
 
        u8 status;
 
-       in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;
@@ -64,12 +72,8 @@ static int mthca_query_device(struct ib_device *ibdev,
 
        props->fw_ver              = mdev->fw_ver;
 
-       memset(in_mad, 0, sizeof *in_mad);
-       in_mad->base_version       = 1;
-       in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       in_mad->class_version      = 1;
-       in_mad->method             = IB_MGMT_METHOD_GET;
-       in_mad->attr_id            = IB_SMP_ATTR_NODE_INFO;
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 
        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad,
@@ -87,7 +91,6 @@ static int mthca_query_device(struct ib_device *ibdev,
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
-       memcpy(&props->node_guid,      out_mad->data + 12, 8);
 
        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
@@ -128,20 +131,16 @@ static int mthca_query_port(struct ib_device *ibdev,
        int err = -ENOMEM;
        u8 status;
 
-       in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;
 
        memset(props, 0, sizeof *props);
 
-       memset(in_mad, 0, sizeof *in_mad);
-       in_mad->base_version       = 1;
-       in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       in_mad->class_version      = 1;
-       in_mad->method             = IB_MGMT_METHOD_GET;
-       in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
-       in_mad->attr_mod           = cpu_to_be32(port);
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cpu_to_be32(port);
 
        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
@@ -220,18 +219,14 @@ static int mthca_query_pkey(struct ib_device *ibdev,
        int err = -ENOMEM;
        u8 status;
 
-       in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;
 
-       memset(in_mad, 0, sizeof *in_mad);
-       in_mad->base_version       = 1;
-       in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       in_mad->class_version      = 1;
-       in_mad->method             = IB_MGMT_METHOD_GET;
-       in_mad->attr_id            = IB_SMP_ATTR_PKEY_TABLE;
-       in_mad->attr_mod           = cpu_to_be32(index / 32);
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
+       in_mad->attr_mod = cpu_to_be32(index / 32);
 
        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
@@ -259,18 +254,14 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
        int err = -ENOMEM;
        u8 status;
 
-       in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;
 
-       memset(in_mad, 0, sizeof *in_mad);
-       in_mad->base_version       = 1;
-       in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       in_mad->class_version      = 1;
-       in_mad->method             = IB_MGMT_METHOD_GET;
-       in_mad->attr_id            = IB_SMP_ATTR_PORT_INFO;
-       in_mad->attr_mod           = cpu_to_be32(port);
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+       in_mad->attr_mod = cpu_to_be32(port);
 
        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
@@ -284,13 +275,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
 
        memcpy(gid->raw, out_mad->data + 8, 8);
 
-       memset(in_mad, 0, sizeof *in_mad);
-       in_mad->base_version       = 1;
-       in_mad->mgmt_class         = IB_MGMT_CLASS_SUBN_LID_ROUTED;
-       in_mad->class_version      = 1;
-       in_mad->method             = IB_MGMT_METHOD_GET;
-       in_mad->attr_id            = IB_SMP_ATTR_GUID_INFO;
-       in_mad->attr_mod           = cpu_to_be32(index / 8);
+       init_query_mad(in_mad);
+       in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
+       in_mad->attr_mod = cpu_to_be32(index / 8);
 
        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad,
@@ -458,8 +445,10 @@ static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
        if (pd->uobject) {
                context = to_mucontext(pd->uobject->context);
 
-               if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
-                       return ERR_PTR(-EFAULT);
+               if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+                       err = -EFAULT;
+                       goto err_free;
+               }
 
                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
@@ -535,8 +524,10 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                if (pd->uobject) {
                        context = to_mucontext(pd->uobject->context);
 
-                       if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+                       if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
+                               kfree(qp);
                                return ERR_PTR(-EFAULT);
+                       }
 
                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
@@ -783,24 +774,20 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd       *pd,
        if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK))
                return ERR_PTR(-EINVAL);
 
-       if (num_phys_buf > 1 &&
-           ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK))
-               return ERR_PTR(-EINVAL);
-
        mask = 0;
        total_size = 0;
        for (i = 0; i < num_phys_buf; ++i) {
-               if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
-                       return ERR_PTR(-EINVAL);
-               if (i != 0 && i != num_phys_buf - 1 &&
-                   (buffer_list[i].size & ~PAGE_MASK))
-                       return ERR_PTR(-EINVAL);
+               if (i != 0)
+                       mask |= buffer_list[i].addr;
+               if (i != num_phys_buf - 1)
+                       mask |= buffer_list[i].addr + buffer_list[i].size;
 
                total_size += buffer_list[i].size;
-               if (i > 0)
-                       mask |= buffer_list[i].addr;
        }
 
+       if (mask & ~PAGE_MASK)
+               return ERR_PTR(-EINVAL);
+
        /* Find largest page shift we can use to cover buffers */
        for (shift = PAGE_SHIFT; shift < 31; ++shift)
                if (num_phys_buf > 1) {
@@ -1070,11 +1057,48 @@ static struct class_device_attribute *mthca_class_attributes[] = {
        &class_device_attr_board_id
 };
 
+static int mthca_init_node_data(struct mthca_dev *dev)
+{
+       struct ib_smp *in_mad  = NULL;
+       struct ib_smp *out_mad = NULL;
+       int err = -ENOMEM;
+       u8 status;
+
+       in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+       out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+       if (!in_mad || !out_mad)
+               goto out;
+
+       init_query_mad(in_mad);
+       in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
+
+       err = mthca_MAD_IFC(dev, 1, 1,
+                           1, NULL, NULL, in_mad, out_mad,
+                           &status);
+       if (err)
+               goto out;
+       if (status) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
+
+out:
+       kfree(in_mad);
+       kfree(out_mad);
+       return err;
+}
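/*
 * A minimal illustration (not part of the patch) of what caching the node
 * GUID buys: once mthca_init_node_data() has filled in
 * dev->ib_dev.node_guid, consumers can read it directly instead of making
 * an ib_query_device() round trip, as the SRP initiator does later in this
 * merge:
 *
 *	memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
 */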
+
 int mthca_register_device(struct mthca_dev *dev)
 {
        int ret;
        int i;
 
+       ret = mthca_init_node_data(dev);
+       if (ret)
+               return ret;
+
        strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
        dev->ib_dev.owner                = THIS_MODULE;
 
index 564b6d51c394ae8c38e407cac17a8b51983d5889..fba608ed7df2fca3c00cff0b4ef762ca9f0c52a5 100644 (file)
@@ -1434,7 +1434,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
        u16 pkey;
 
        ib_ud_header_init(256, /* assume a MAD */
-                         sqp->ud_header.grh_present,
+                         mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
                          &sqp->ud_header);
 
        err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
index 9923a15a9996975ace266e077132aecc8944f4d5..e0a5412b7e68042f820a0c6c32261eba3f39d23c 100644 (file)
 #include <linux/config.h>
 #include <linux/kref.h>
 #include <linux/if_infiniband.h>
+#include <linux/mutex.h>
 
 #include <net/neighbour.h>
 
 #include <asm/atomic.h>
-#include <asm/semaphore.h>
 
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_pack.h>
@@ -123,8 +123,8 @@ struct ipoib_dev_priv {
 
        unsigned long flags;
 
-       struct semaphore mcast_mutex;
-       struct semaphore vlan_mutex;
+       struct mutex mcast_mutex;
+       struct mutex vlan_mutex;
 
        struct rb_root  path_tree;
        struct list_head path_list;
index 23885801b6d2b3efd1cfefc925b8b451eafceff9..86bcdd72a10706260bf2585fe5d3b5e8103e65e5 100644 (file)
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(data_debug_level,
 
 #define        IPOIB_OP_RECV   (1ul << 31)
 
-static DECLARE_MUTEX(pkey_sem);
+static DEFINE_MUTEX(pkey_mutex);
 
 struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
                                 struct ib_pd *pd, struct ib_ah_attr *attr)
@@ -445,25 +445,16 @@ int ipoib_ib_dev_down(struct net_device *dev)
 
        /* Shutdown the P_Key thread if still active */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-               down(&pkey_sem);
+               mutex_lock(&pkey_mutex);
                set_bit(IPOIB_PKEY_STOP, &priv->flags);
                cancel_delayed_work(&priv->pkey_task);
-               up(&pkey_sem);
+               mutex_unlock(&pkey_mutex);
                flush_workqueue(ipoib_workqueue);
        }
 
        ipoib_mcast_stop_thread(dev, 1);
-
-       /*
-        * Flush the multicast groups first so we stop any multicast joins. The
-        * completion thread may have already died and we may deadlock waiting
-        * for the completion thread to finish some multicast joins.
-        */
        ipoib_mcast_dev_flush(dev);
 
-       /* Delete broadcast and local addresses since they will be recreated */
-       ipoib_mcast_dev_down(dev);
-
        ipoib_flush_paths(dev);
 
        return 0;
@@ -608,13 +599,13 @@ void ipoib_ib_dev_flush(void *_dev)
        if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                ipoib_ib_dev_up(dev);
 
-       down(&priv->vlan_mutex);
+       mutex_lock(&priv->vlan_mutex);
 
        /* Flush any child interfaces too */
        list_for_each_entry(cpriv, &priv->child_intfs, list)
                ipoib_ib_dev_flush(&cpriv->dev);
 
-       up(&priv->vlan_mutex);
+       mutex_unlock(&priv->vlan_mutex);
 }
 
 void ipoib_ib_dev_cleanup(struct net_device *dev)
@@ -624,9 +615,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
        ipoib_dbg(priv, "cleaning up ib_dev\n");
 
        ipoib_mcast_stop_thread(dev, 1);
-
-       /* Delete the broadcast address and the local address */
-       ipoib_mcast_dev_down(dev);
+       ipoib_mcast_dev_flush(dev);
 
        ipoib_transport_dev_cleanup(dev);
 }
@@ -662,12 +651,12 @@ void ipoib_pkey_poll(void *dev_ptr)
        if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
                ipoib_open(dev);
        else {
-               down(&pkey_sem);
+               mutex_lock(&pkey_mutex);
                if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->pkey_task,
                                           HZ);
-               up(&pkey_sem);
+               mutex_unlock(&pkey_mutex);
        }
 }
 
@@ -681,12 +670,12 @@ int ipoib_pkey_dev_delay_open(struct net_device *dev)
 
        /* P_Key value not assigned yet - start polling */
        if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
-               down(&pkey_sem);
+               mutex_lock(&pkey_mutex);
                clear_bit(IPOIB_PKEY_STOP, &priv->flags);
                queue_delayed_work(ipoib_workqueue,
                                   &priv->pkey_task,
                                   HZ);
-               up(&pkey_sem);
+               mutex_unlock(&pkey_mutex);
                return 1;
        }
 
index 780009c7eaa651f908e78def9a8106eea752cd81..fd3f5c862a5d92aef6b61202768bf9c29b24ccbe 100644 (file)
@@ -105,7 +105,7 @@ int ipoib_open(struct net_device *dev)
                struct ipoib_dev_priv *cpriv;
 
                /* Bring up any child interfaces too */
-               down(&priv->vlan_mutex);
+               mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;
 
@@ -115,7 +115,7 @@ int ipoib_open(struct net_device *dev)
 
                        dev_change_flags(cpriv->dev, flags | IFF_UP);
                }
-               up(&priv->vlan_mutex);
+               mutex_unlock(&priv->vlan_mutex);
        }
 
        netif_start_queue(dev);
@@ -140,7 +140,7 @@ static int ipoib_stop(struct net_device *dev)
                struct ipoib_dev_priv *cpriv;
 
                /* Bring down any child interfaces too */
-               down(&priv->vlan_mutex);
+               mutex_lock(&priv->vlan_mutex);
                list_for_each_entry(cpriv, &priv->child_intfs, list) {
                        int flags;
 
@@ -150,7 +150,7 @@ static int ipoib_stop(struct net_device *dev)
 
                        dev_change_flags(cpriv->dev, flags & ~IFF_UP);
                }
-               up(&priv->vlan_mutex);
+               mutex_unlock(&priv->vlan_mutex);
        }
 
        return 0;
@@ -892,8 +892,8 @@ static void ipoib_setup(struct net_device *dev)
        spin_lock_init(&priv->lock);
        spin_lock_init(&priv->tx_lock);
 
-       init_MUTEX(&priv->mcast_mutex);
-       init_MUTEX(&priv->vlan_mutex);
+       mutex_init(&priv->mcast_mutex);
+       mutex_init(&priv->vlan_mutex);
 
        INIT_LIST_HEAD(&priv->path_list);
        INIT_LIST_HEAD(&priv->child_intfs);
index ed0c2ead8bc16f0c215a054f0c765d4d2a440eee..98039da0caf0e4ff67e958c3679540f4030b12a9 100644 (file)
@@ -55,7 +55,7 @@ MODULE_PARM_DESC(mcast_debug_level,
                 "Enable multicast debug tracing if > 0");
 #endif
 
-static DECLARE_MUTEX(mcast_mutex);
+static DEFINE_MUTEX(mcast_mutex);
 
 /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
 struct ipoib_mcast {
@@ -97,8 +97,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        struct ipoib_neigh *neigh, *tmp;
        unsigned long flags;
-       LIST_HEAD(ah_list);
-       struct ipoib_ah *ah, *tah;
 
        ipoib_dbg_mcast(netdev_priv(dev),
                        "deleting multicast group " IPOIB_GID_FMT "\n",
@@ -107,8 +105,14 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
        spin_lock_irqsave(&priv->lock, flags);
 
        list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
+               /*
+                * It's safe to call ipoib_put_ah() inside priv->lock
+                * here, because we know that mcast->ah will always
+                * hold one more reference, so ipoib_put_ah() will
+                * never do more than decrement the ref count.
+                */
                if (neigh->ah)
-                       list_add_tail(&neigh->ah->list, &ah_list);
+                       ipoib_put_ah(neigh->ah);
                *to_ipoib_neigh(neigh->neighbour) = NULL;
                neigh->neighbour->ops->destructor = NULL;
                kfree(neigh);
@@ -116,9 +120,6 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
 
        spin_unlock_irqrestore(&priv->lock, flags);
 
-       list_for_each_entry_safe(ah, tah, &ah_list, list)
-               ipoib_put_ah(ah);
-
        if (mcast->ah)
                ipoib_put_ah(mcast->ah);
 
@@ -384,10 +385,10 @@ static void ipoib_mcast_join_complete(int status,
 
        if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
                mcast->backoff = 1;
-               down(&mcast_mutex);
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_work(ipoib_workqueue, &priv->mcast_task);
-               up(&mcast_mutex);
+               mutex_unlock(&mcast_mutex);
                complete(&mcast->done);
                return;
        }
@@ -417,7 +418,7 @@ static void ipoib_mcast_join_complete(int status,
 
        mcast->query = NULL;
 
-       down(&mcast_mutex);
+       mutex_lock(&mcast_mutex);
        if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
                if (status == -ETIMEDOUT)
                        queue_work(ipoib_workqueue, &priv->mcast_task);
@@ -426,7 +427,7 @@ static void ipoib_mcast_join_complete(int status,
                                           mcast->backoff * HZ);
        } else
                complete(&mcast->done);
-       up(&mcast_mutex);
+       mutex_unlock(&mcast_mutex);
 
        return;
 }
@@ -481,12 +482,12 @@ static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
                if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
                        mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
 
-               down(&mcast_mutex);
+               mutex_lock(&mcast_mutex);
                if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                        queue_delayed_work(ipoib_workqueue,
                                           &priv->mcast_task,
                                           mcast->backoff * HZ);
-               up(&mcast_mutex);
+               mutex_unlock(&mcast_mutex);
        } else
                mcast->query_id = ret;
 }
@@ -519,11 +520,11 @@ void ipoib_mcast_join_task(void *dev_ptr)
                priv->broadcast = ipoib_mcast_alloc(dev, 1);
                if (!priv->broadcast) {
                        ipoib_warn(priv, "failed to allocate broadcast group\n");
-                       down(&mcast_mutex);
+                       mutex_lock(&mcast_mutex);
                        if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
                                queue_delayed_work(ipoib_workqueue,
                                                   &priv->mcast_task, HZ);
-                       up(&mcast_mutex);
+                       mutex_unlock(&mcast_mutex);
                        return;
                }
 
@@ -579,10 +580,10 @@ int ipoib_mcast_start_thread(struct net_device *dev)
 
        ipoib_dbg_mcast(priv, "starting multicast thread\n");
 
-       down(&mcast_mutex);
+       mutex_lock(&mcast_mutex);
        if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
                queue_work(ipoib_workqueue, &priv->mcast_task);
-       up(&mcast_mutex);
+       mutex_unlock(&mcast_mutex);
 
        return 0;
 }
@@ -594,10 +595,10 @@ int ipoib_mcast_stop_thread(struct net_device *dev, int flush)
 
        ipoib_dbg_mcast(priv, "stopping multicast thread\n");
 
-       down(&mcast_mutex);
+       mutex_lock(&mcast_mutex);
        clear_bit(IPOIB_MCAST_RUN, &priv->flags);
        cancel_delayed_work(&priv->mcast_task);
-       up(&mcast_mutex);
+       mutex_unlock(&mcast_mutex);
 
        if (flush)
                flush_workqueue(ipoib_workqueue);
@@ -741,48 +742,23 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
 {
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        LIST_HEAD(remove_list);
-       struct ipoib_mcast *mcast, *tmcast, *nmcast;
+       struct ipoib_mcast *mcast, *tmcast;
        unsigned long flags;
 
        ipoib_dbg_mcast(priv, "flushing multicast list\n");
 
        spin_lock_irqsave(&priv->lock, flags);
-       list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
-               nmcast = ipoib_mcast_alloc(dev, 0);
-               if (nmcast) {
-                       nmcast->flags =
-                               mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);
 
-                       nmcast->mcmember.mgid = mcast->mcmember.mgid;
-
-                       /* Add the new group in before the to-be-destroyed group */
-                       list_add_tail(&nmcast->list, &mcast->list);
-                       list_del_init(&mcast->list);
-
-                       rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
-                                       &priv->multicast_tree);
-
-                       list_add_tail(&mcast->list, &remove_list);
-               } else {
-                       ipoib_warn(priv, "could not reallocate multicast group "
-                                  IPOIB_GID_FMT "\n",
-                                  IPOIB_GID_ARG(mcast->mcmember.mgid));
-               }
+       list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
+               list_del(&mcast->list);
+               rb_erase(&mcast->rb_node, &priv->multicast_tree);
+               list_add_tail(&mcast->list, &remove_list);
        }
 
        if (priv->broadcast) {
-               nmcast = ipoib_mcast_alloc(dev, 0);
-               if (nmcast) {
-                       nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;
-
-                       rb_replace_node(&priv->broadcast->rb_node,
-                                       &nmcast->rb_node,
-                                       &priv->multicast_tree);
-
-                       list_add_tail(&priv->broadcast->list, &remove_list);
-               }
-
-               priv->broadcast = nmcast;
+               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
+               list_add_tail(&priv->broadcast->list, &remove_list);
+               priv->broadcast = NULL;
        }
 
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -793,24 +769,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 }
 
-void ipoib_mcast_dev_down(struct net_device *dev)
-{
-       struct ipoib_dev_priv *priv = netdev_priv(dev);
-       unsigned long flags;
-
-       /* Delete broadcast since it will be recreated */
-       if (priv->broadcast) {
-               ipoib_dbg_mcast(priv, "deleting broadcast group\n");
-
-               spin_lock_irqsave(&priv->lock, flags);
-               rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
-               spin_unlock_irqrestore(&priv->lock, flags);
-               ipoib_mcast_leave(dev, priv->broadcast);
-               ipoib_mcast_free(priv->broadcast);
-               priv->broadcast = NULL;
-       }
-}
-
 void ipoib_mcast_restart_task(void *dev_ptr)
 {
        struct net_device *dev = dev_ptr;
@@ -824,7 +782,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
        ipoib_mcast_stop_thread(dev, 0);
 
-       spin_lock_irqsave(&priv->lock, flags);
+       spin_lock_irqsave(&dev->xmit_lock, flags);
+       spin_lock(&priv->lock);
 
        /*
         * Unfortunately, the networking core only gives us a list of all of
@@ -896,7 +855,9 @@ void ipoib_mcast_restart_task(void *dev_ptr)
                        list_add_tail(&mcast->list, &remove_list);
                }
        }
-       spin_unlock_irqrestore(&priv->lock, flags);
+
+       spin_unlock(&priv->lock);
+       spin_unlock_irqrestore(&dev->xmit_lock, flags);
 
        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
index e829e10400e35436a38b24860974e984a3aff40f..faaf10e5fc7b0b7eaee6ccb1bd1ef217595d902d 100644 (file)
@@ -65,9 +65,9 @@ int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
        }
 
        /* attach QP to multicast group */
-       down(&priv->mcast_mutex);
+       mutex_lock(&priv->mcast_mutex);
        ret = ib_attach_mcast(priv->qp, mgid, mlid);
-       up(&priv->mcast_mutex);
+       mutex_unlock(&priv->mcast_mutex);
        if (ret)
                ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
 
@@ -81,9 +81,9 @@ int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
        struct ipoib_dev_priv *priv = netdev_priv(dev);
        int ret;
 
-       down(&priv->mcast_mutex);
+       mutex_lock(&priv->mcast_mutex);
        ret = ib_detach_mcast(priv->qp, mgid, mlid);
-       up(&priv->mcast_mutex);
+       mutex_unlock(&priv->mcast_mutex);
        if (ret)
                ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
 
index d280b341a37fa25bff8323778ddc2fc87ecad67c..4ca175553f9f71201ff6a6ad604093e24e88950b 100644 (file)
@@ -63,7 +63,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
-       down(&ppriv->vlan_mutex);
+       mutex_lock(&ppriv->vlan_mutex);
 
        /*
         * First ensure this isn't a duplicate. We check the parent device and
@@ -124,7 +124,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
        list_add_tail(&priv->list, &ppriv->child_intfs);
 
-       up(&ppriv->vlan_mutex);
+       mutex_unlock(&ppriv->vlan_mutex);
 
        return 0;
 
@@ -139,7 +139,7 @@ device_init_failed:
        free_netdev(priv->dev);
 
 err:
-       up(&ppriv->vlan_mutex);
+       mutex_unlock(&ppriv->vlan_mutex);
        return result;
 }
 
@@ -153,7 +153,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
        ppriv = netdev_priv(pdev);
 
-       down(&ppriv->vlan_mutex);
+       mutex_lock(&ppriv->vlan_mutex);
        list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
                if (priv->pkey == pkey) {
                        unregister_netdev(priv->dev);
@@ -167,7 +167,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
                        break;
                }
        }
-       up(&ppriv->vlan_mutex);
+       mutex_unlock(&ppriv->vlan_mutex);
 
        return ret;
 }
index dd488d3cffa93c9b11316765750fe28005a6e148..31207e664148e440d44790bc5f4cb85a31d66fb9 100644 (file)
@@ -1516,8 +1516,7 @@ static ssize_t show_port(struct class_device *class_dev, char *buf)
 
 static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);
 
-static struct srp_host *srp_add_port(struct ib_device *device,
-                                    __be64 node_guid, u8 port)
+static struct srp_host *srp_add_port(struct ib_device *device, u8 port)
 {
        struct srp_host *host;
 
@@ -1532,7 +1531,7 @@ static struct srp_host *srp_add_port(struct ib_device *device,
        host->port = port;
 
        host->initiator_port_id[7] = port;
-       memcpy(host->initiator_port_id + 8, &node_guid, 8);
+       memcpy(host->initiator_port_id + 8, &device->node_guid, 8);
 
        host->pd   = ib_alloc_pd(device);
        if (IS_ERR(host->pd))
@@ -1580,22 +1579,11 @@ static void srp_add_one(struct ib_device *device)
 {
        struct list_head *dev_list;
        struct srp_host *host;
-       struct ib_device_attr *dev_attr;
        int s, e, p;
 
-       dev_attr = kmalloc(sizeof *dev_attr, GFP_KERNEL);
-       if (!dev_attr)
-               return;
-
-       if (ib_query_device(device, dev_attr)) {
-               printk(KERN_WARNING PFX "Couldn't query node GUID for %s.\n",
-                      device->name);
-               goto out;
-       }
-
        dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
        if (!dev_list)
-               goto out;
+               return;
 
        INIT_LIST_HEAD(dev_list);
 
@@ -1608,15 +1596,12 @@ static void srp_add_one(struct ib_device *device)
        }
 
        for (p = s; p <= e; ++p) {
-               host = srp_add_port(device, dev_attr->node_guid, p);
+               host = srp_add_port(device, p);
                if (host)
                        list_add_tail(&host->list, dev_list);
        }
 
        ib_set_client_data(device, &srp_client, dev_list);
-
-out:
-       kfree(dev_attr);
 }
 
 static void srp_remove_one(struct ib_device *device)
index 362b33556b1affa5b9b66b565627f05098b7f9a9..745979f33dc2e2cf3e3a9bbc1909b50897cb6c04 100644 (file)
@@ -159,7 +159,7 @@ struct input_event_compat {
 #ifdef CONFIG_X86_64
 #  define COMPAT_TEST is_compat_task()
 #elif defined(CONFIG_IA64)
-#  define COMPAT_TEST IS_IA32_PROCESS(ia64_task_regs(current))
+#  define COMPAT_TEST IS_IA32_PROCESS(task_pt_regs(current))
 #elif defined(CONFIG_S390)
 #  define COMPAT_TEST test_thread_flag(TIF_31BIT)
 #elif defined(CONFIG_MIPS)
index caac6d63d46f4a7da7ffc70293aad635d7c6bb37..b765a155c0088db9c29cad78ca8fd3f9810ac838 100644 (file)
@@ -50,9 +50,7 @@ static DECLARE_MUTEX(gameport_sem);
 
 static LIST_HEAD(gameport_list);
 
-static struct bus_type gameport_bus = {
-       .name = "gameport",
-};
+static struct bus_type gameport_bus;
 
 static void gameport_add_port(struct gameport *gameport);
 static void gameport_destroy_port(struct gameport *gameport);
@@ -703,11 +701,15 @@ static int gameport_driver_remove(struct device *dev)
        return 0;
 }
 
+static struct bus_type gameport_bus = {
+       .name = "gameport",
+       .probe = gameport_driver_probe,
+       .remove = gameport_driver_remove,
+};
+
 void __gameport_register_driver(struct gameport_driver *drv, struct module *owner)
 {
        drv->driver.bus = &gameport_bus;
-       drv->driver.probe = gameport_driver_probe;
-       drv->driver.remove = gameport_driver_remove;
        gameport_queue_event(drv, owner, GAMEPORT_REGISTER_DRIVER);
 }
 
index fe33ff334e27cf48d27d366df69ab4249a206842..4fe3da3c667a0ecfa06eefad1759d831aa7f014f 100644 (file)
@@ -528,40 +528,56 @@ INPUT_DEV_STRING_ATTR_SHOW(name);
 INPUT_DEV_STRING_ATTR_SHOW(phys);
 INPUT_DEV_STRING_ATTR_SHOW(uniq);
 
-static int print_modalias_bits(char *buf, char prefix, unsigned long *arr,
+static int print_modalias_bits(char *buf, int size, char prefix, unsigned long *arr,
                               unsigned int min, unsigned int max)
 {
        int len, i;
 
-       len = sprintf(buf, "%c", prefix);
+       len = snprintf(buf, size, "%c", prefix);
        for (i = min; i < max; i++)
                if (arr[LONG(i)] & BIT(i))
-                       len += sprintf(buf+len, "%X,", i);
+                       len += snprintf(buf + len, size - len, "%X,", i);
        return len;
 }
 
-static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
+static int print_modalias(char *buf, int size, struct input_dev *id)
 {
-       struct input_dev *id = to_input_dev(dev);
-       ssize_t len = 0;
+       int len;
 
-       len += sprintf(buf+len, "input:b%04Xv%04Xp%04Xe%04X-",
+       len = snprintf(buf, size, "input:b%04Xv%04Xp%04Xe%04X-",
                       id->id.bustype,
                       id->id.vendor,
                       id->id.product,
                       id->id.version);
 
-       len += print_modalias_bits(buf+len, 'e', id->evbit, 0, EV_MAX);
-       len += print_modalias_bits(buf+len, 'k', id->keybit,
+       len += print_modalias_bits(buf + len, size - len, 'e', id->evbit,
+                                  0, EV_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'k', id->keybit,
                                   KEY_MIN_INTERESTING, KEY_MAX);
-       len += print_modalias_bits(buf+len, 'r', id->relbit, 0, REL_MAX);
-       len += print_modalias_bits(buf+len, 'a', id->absbit, 0, ABS_MAX);
-       len += print_modalias_bits(buf+len, 'm', id->mscbit, 0, MSC_MAX);
-       len += print_modalias_bits(buf+len, 'l', id->ledbit, 0, LED_MAX);
-       len += print_modalias_bits(buf+len, 's', id->sndbit, 0, SND_MAX);
-       len += print_modalias_bits(buf+len, 'f', id->ffbit, 0, FF_MAX);
-       len += print_modalias_bits(buf+len, 'w', id->swbit, 0, SW_MAX);
-       len += sprintf(buf+len, "\n");
+       len += print_modalias_bits(buf + len, size - len, 'r', id->relbit,
+                                  0, REL_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'a', id->absbit,
+                                  0, ABS_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'm', id->mscbit,
+                                  0, MSC_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'l', id->ledbit,
+                                  0, LED_MAX);
+       len += print_modalias_bits(buf + len, size - len, 's', id->sndbit,
+                                  0, SND_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'f', id->ffbit,
+                                  0, FF_MAX);
+       len += print_modalias_bits(buf + len, size - len, 'w', id->swbit,
+                                  0, SW_MAX);
+       return len;
+}
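/*
 * Illustrative output (hypothetical device, not taken from the patch): a
 * mouse with bustype 0x0003, vendor 0x045e, product 0x0040, version 0x0110
 * reporting EV_SYN/EV_KEY/EV_REL, BTN_LEFT..BTN_MIDDLE and REL_X/REL_Y
 * would yield roughly
 *
 *	input:b0003v045Ep0040e0110-e0,1,2,k110,111,112,r0,1,amlsfw
 *
 * i.e. the fixed id header followed by one letter-prefixed, comma-separated
 * list of set bits per event bitmap.
 */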
+
+static ssize_t input_dev_show_modalias(struct class_device *dev, char *buf)
+{
+       struct input_dev *id = to_input_dev(dev);
+       ssize_t len;
+
+       len = print_modalias(buf, PAGE_SIZE, id);
+       len += snprintf(buf + len, PAGE_SIZE-len, "\n");
        return len;
 }
 static CLASS_DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
@@ -728,8 +744,11 @@ static int input_dev_uevent(struct class_device *cdev, char **envp,
        if (test_bit(EV_SW, dev->evbit))
                INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
 
-       envp[i] = NULL;
+       envp[i++] = buffer + len;
+       len += snprintf(buffer + len, buffer_size - len, "MODALIAS=");
+       len += print_modalias(buffer + len, buffer_size - len, dev) + 1;
 
+       envp[i] = NULL;
        return 0;
 }
 
index 8558a99f6635a653287563d7e31557cdf04080b5..ec55a29fc861047dcd5ff11100a8fb797faaec8e 100644 (file)
@@ -64,8 +64,8 @@ static irqreturn_t amijoy_interrupt(int irq, void *dummy, struct pt_regs *fp)
                if (amijoy[i]) {
 
                        switch (i) {
-                               case 0: data = ~custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break;
-                               case 1: data = ~custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break;
+                               case 0: data = ~amiga_custom.joy0dat; button = (~ciaa.pra >> 6) & 1; break;
+                               case 1: data = ~amiga_custom.joy1dat; button = (~ciaa.pra >> 7) & 1; break;
                        }
 
                        input_regs(amijoy_dev[i], fp);
index 24474335dfd1f38fb171817b844e3c8ec6a24771..2141501e9f2e498246df66233623490ade12494a 100644 (file)
@@ -348,6 +348,40 @@ static int alps_tap_mode(struct psmouse *psmouse, int enable)
        return 0;
 }
 
+/*
+ * alps_poll() - poll the touchpad for current motion packet.
+ * Used in resync.
+ */
+static int alps_poll(struct psmouse *psmouse)
+{
+       struct alps_data *priv = psmouse->private;
+       unsigned char buf[6];
+       int poll_failed;
+
+       if (priv->i->flags & ALPS_PASS)
+               alps_passthrough_mode(psmouse, 1);
+
+       poll_failed = ps2_command(&psmouse->ps2dev, buf,
+                                 PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
+
+       if (priv->i->flags & ALPS_PASS)
+               alps_passthrough_mode(psmouse, 0);
+
+       if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
+               return -1;
+
+       if ((psmouse->badbyte & 0xc8) == 0x08) {
+/*
+ * Poll the track stick ...
+ */
+               if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8)))
+                       return -1;
+       }
+
+       memcpy(psmouse->packet, buf, sizeof(buf));
+       return 0;
+}
+
 static int alps_reconnect(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
@@ -451,10 +485,14 @@ int alps_init(struct psmouse *psmouse)
        input_register_device(priv->dev2);
 
        psmouse->protocol_handler = alps_process_byte;
+       psmouse->poll = alps_poll;
        psmouse->disconnect = alps_disconnect;
        psmouse->reconnect = alps_reconnect;
        psmouse->pktsize = 6;
 
+       /* We are having trouble resyncing ALPS touchpads so disable it for now */
+       psmouse->resync_time = 0;
+
        return 0;
 
 init_fail:
index d13d4c8fe3c5b20111a0118bc5b32d34f16ce25d..c8b2cc9f184c7355b1004d913cc5c0f7ace57c29 100644 (file)
@@ -41,7 +41,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
        unsigned short joy0dat, potgor;
        int nx, ny, dx, dy;
 
-       joy0dat = custom.joy0dat;
+       joy0dat = amiga_custom.joy0dat;
 
        nx = joy0dat & 0xff;
        ny = joy0dat >> 8;
@@ -57,7 +57,7 @@ static irqreturn_t amimouse_interrupt(int irq, void *dummy, struct pt_regs *fp)
        amimouse_lastx = nx;
        amimouse_lasty = ny;
 
-       potgor = custom.potgor;
+       potgor = amiga_custom.potgor;
 
        input_regs(amimouse_dev, fp);
 
@@ -77,7 +77,7 @@ static int amimouse_open(struct input_dev *dev)
 {
        unsigned short joy0dat;
 
-       joy0dat = custom.joy0dat;
+       joy0dat = amiga_custom.joy0dat;
 
        amimouse_lastx = joy0dat & 0xff;
        amimouse_lasty = joy0dat >> 8;
index 025a71de540415ce58ca3be10c94b0ae80ac66e3..c88520d3d13cadc2c6b03819ab2ff9ade3861dd5 100644 (file)
@@ -117,7 +117,7 @@ static int ps2pp_cmd(struct psmouse *psmouse, unsigned char *param, unsigned cha
        if (psmouse_sliced_command(psmouse, command))
                return -1;
 
-       if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL))
+       if (ps2_command(&psmouse->ps2dev, param, PSMOUSE_CMD_POLL | 0x0300))
                return -1;
 
        return 0;
index 4d5ecc04c5b65382d5b8de27c345e2ba8813e05e..7665fd9ce559c49a153289bd64e19d6255134c11 100644 (file)
@@ -54,10 +54,14 @@ static unsigned int psmouse_smartscroll = 1;
 module_param_named(smartscroll, psmouse_smartscroll, bool, 0644);
 MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled.");
 
-static unsigned int psmouse_resetafter;
+static unsigned int psmouse_resetafter = 5;
 module_param_named(resetafter, psmouse_resetafter, uint, 0644);
 MODULE_PARM_DESC(resetafter, "Reset device after so many bad packets (0 = never).");
 
+static unsigned int psmouse_resync_time = 5;
+module_param_named(resync_time, psmouse_resync_time, uint, 0644);
+MODULE_PARM_DESC(resync_time, "How long can mouse stay idle before forcing resync (in seconds, 0 = never).");
+
 PSMOUSE_DEFINE_ATTR(protocol, S_IWUSR | S_IRUGO,
                        NULL,
                        psmouse_attr_show_protocol, psmouse_attr_set_protocol);
@@ -70,12 +74,16 @@ PSMOUSE_DEFINE_ATTR(resolution, S_IWUSR | S_IRUGO,
 PSMOUSE_DEFINE_ATTR(resetafter, S_IWUSR | S_IRUGO,
                        (void *) offsetof(struct psmouse, resetafter),
                        psmouse_show_int_attr, psmouse_set_int_attr);
+PSMOUSE_DEFINE_ATTR(resync_time, S_IWUSR | S_IRUGO,
+                       (void *) offsetof(struct psmouse, resync_time),
+                       psmouse_show_int_attr, psmouse_set_int_attr);
 
 static struct attribute *psmouse_attributes[] = {
        &psmouse_attr_protocol.dattr.attr,
        &psmouse_attr_rate.dattr.attr,
        &psmouse_attr_resolution.dattr.attr,
        &psmouse_attr_resetafter.dattr.attr,
+       &psmouse_attr_resync_time.dattr.attr,
        NULL
 };
 
@@ -98,6 +106,8 @@ __obsolete_setup("psmouse_rate=");
  */
 static DECLARE_MUTEX(psmouse_sem);
 
+static struct workqueue_struct *kpsmoused_wq;
+
 struct psmouse_protocol {
        enum psmouse_type type;
        char *name;
@@ -178,15 +188,79 @@ static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse, struct pt_reg
 }
 
 /*
- * psmouse_interrupt() handles incoming characters, either gathering them into
- * packets or passing them to the command routine as command output.
+ * __psmouse_set_state() sets new psmouse state and resets all flags.
+ */
+
+static inline void __psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+       psmouse->state = new_state;
+       psmouse->pktcnt = psmouse->out_of_sync = 0;
+       psmouse->ps2dev.flags = 0;
+       psmouse->last = jiffies;
+}
+
+
+/*
+ * psmouse_set_state() sets new psmouse state and resets all flags and
+ * counters while holding serio lock so fighting with interrupt handler
+ * is not a concern.
+ */
+
+static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
+{
+       serio_pause_rx(psmouse->ps2dev.serio);
+       __psmouse_set_state(psmouse, new_state);
+       serio_continue_rx(psmouse->ps2dev.serio);
+}
+
+/*
+ * psmouse_handle_byte() processes one byte of the input data stream
+ * by calling corresponding protocol handler.
+ */
+
+static int psmouse_handle_byte(struct psmouse *psmouse, struct pt_regs *regs)
+{
+       psmouse_ret_t rc = psmouse->protocol_handler(psmouse, regs);
+
+       switch (rc) {
+               case PSMOUSE_BAD_DATA:
+                       if (psmouse->state == PSMOUSE_ACTIVATED) {
+                               printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
+                                       psmouse->name, psmouse->phys, psmouse->pktcnt);
+                               if (++psmouse->out_of_sync == psmouse->resetafter) {
+                                       __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+                                       printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
+                                       serio_reconnect(psmouse->ps2dev.serio);
+                                       return -1;
+                               }
+                       }
+                       psmouse->pktcnt = 0;
+                       break;
+
+               case PSMOUSE_FULL_PACKET:
+                       psmouse->pktcnt = 0;
+                       if (psmouse->out_of_sync) {
+                               psmouse->out_of_sync = 0;
+                               printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
+                                       psmouse->name, psmouse->phys);
+                       }
+                       break;
+
+               case PSMOUSE_GOOD_DATA:
+                       break;
+       }
+       return 0;
+}
+
+/*
+ * psmouse_interrupt() handles incoming characters, either passing them
+ * for normal processing or gathering them as command response.
  */
 
 static irqreturn_t psmouse_interrupt(struct serio *serio,
                unsigned char data, unsigned int flags, struct pt_regs *regs)
 {
        struct psmouse *psmouse = serio_get_drvdata(serio);
-       psmouse_ret_t rc;
 
        if (psmouse->state == PSMOUSE_IGNORE)
                goto out;
@@ -208,67 +282,58 @@ static irqreturn_t psmouse_interrupt(struct serio *serio,
                if  (ps2_handle_response(&psmouse->ps2dev, data))
                        goto out;
 
-       if (psmouse->state == PSMOUSE_INITIALIZING)
+       if (psmouse->state <= PSMOUSE_RESYNCING)
                goto out;
 
        if (psmouse->state == PSMOUSE_ACTIVATED &&
            psmouse->pktcnt && time_after(jiffies, psmouse->last + HZ/2)) {
-               printk(KERN_WARNING "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
+               printk(KERN_INFO "psmouse.c: %s at %s lost synchronization, throwing %d bytes away.\n",
                       psmouse->name, psmouse->phys, psmouse->pktcnt);
-               psmouse->pktcnt = 0;
+               psmouse->badbyte = psmouse->packet[0];
+               __psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+               queue_work(kpsmoused_wq, &psmouse->resync_work);
+               goto out;
        }
 
-       psmouse->last = jiffies;
        psmouse->packet[psmouse->pktcnt++] = data;
-
-       if (psmouse->packet[0] == PSMOUSE_RET_BAT) {
+/*
+ * Check if this is a new device announcement (0xAA 0x00)
+ */
+       if (unlikely(psmouse->packet[0] == PSMOUSE_RET_BAT && psmouse->pktcnt <= 2)) {
                if (psmouse->pktcnt == 1)
                        goto out;
 
-               if (psmouse->pktcnt == 2) {
-                       if (psmouse->packet[1] == PSMOUSE_RET_ID) {
-                               psmouse->state = PSMOUSE_IGNORE;
-                               serio_reconnect(serio);
-                               goto out;
-                       }
-                       if (psmouse->type == PSMOUSE_SYNAPTICS) {
-                               /* neither 0xAA nor 0x00 are valid first bytes
-                                * for a packet in absolute mode
-                                */
-                               psmouse->pktcnt = 0;
-                               goto out;
-                       }
+               if (psmouse->packet[1] == PSMOUSE_RET_ID) {
+                       __psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+                       serio_reconnect(serio);
+                       goto out;
                }
-       }
-
-       rc = psmouse->protocol_handler(psmouse, regs);
+/*
+ * Not a new device, try processing first byte normally
+ */
+               psmouse->pktcnt = 1;
+               if (psmouse_handle_byte(psmouse, regs))
+                       goto out;
 
-       switch (rc) {
-               case PSMOUSE_BAD_DATA:
-                       printk(KERN_WARNING "psmouse.c: %s at %s lost sync at byte %d\n",
-                               psmouse->name, psmouse->phys, psmouse->pktcnt);
-                       psmouse->pktcnt = 0;
+               psmouse->packet[psmouse->pktcnt++] = data;
+       }
 
-                       if (++psmouse->out_of_sync == psmouse->resetafter) {
-                               psmouse->state = PSMOUSE_IGNORE;
-                               printk(KERN_NOTICE "psmouse.c: issuing reconnect request\n");
-                               serio_reconnect(psmouse->ps2dev.serio);
-                       }
-                       break;
+/*
+ * See if we need to force a resync because the mouse was idle for too long
+ */
+       if (psmouse->state == PSMOUSE_ACTIVATED &&
+           psmouse->pktcnt == 1 && psmouse->resync_time &&
+           time_after(jiffies, psmouse->last + psmouse->resync_time * HZ)) {
+               psmouse->badbyte = psmouse->packet[0];
+               __psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+               queue_work(kpsmoused_wq, &psmouse->resync_work);
+               goto out;
+       }
 
-               case PSMOUSE_FULL_PACKET:
-                       psmouse->pktcnt = 0;
-                       if (psmouse->out_of_sync) {
-                               psmouse->out_of_sync = 0;
-                               printk(KERN_NOTICE "psmouse.c: %s at %s - driver resynched.\n",
-                                       psmouse->name, psmouse->phys);
-                       }
-                       break;
+       psmouse->last = jiffies;
+       psmouse_handle_byte(psmouse, regs);
 
-               case PSMOUSE_GOOD_DATA:
-                       break;
-       }
-out:
+ out:
        return IRQ_HANDLED;
 }
 
@@ -751,21 +816,6 @@ static void psmouse_initialize(struct psmouse *psmouse)
        }
 }
 
-/*
- * psmouse_set_state() sets new psmouse state and resets all flags and
- * counters while holding serio lock so fighting with interrupt handler
- * is not a concern.
- */
-
-static void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state)
-{
-       serio_pause_rx(psmouse->ps2dev.serio);
-       psmouse->state = new_state;
-       psmouse->pktcnt = psmouse->out_of_sync = 0;
-       psmouse->ps2dev.flags = 0;
-       serio_continue_rx(psmouse->ps2dev.serio);
-}
-
 /*
  * psmouse_activate() enables the mouse so that we get motion reports from it.
  */
@@ -794,6 +844,111 @@ static void psmouse_deactivate(struct psmouse *psmouse)
        psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 }
 
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+       return ps2_command(&psmouse->ps2dev, psmouse->packet,
+                          PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
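/*
 * Note on the command word above (editor's illustration, not from the
 * patch): in the libps2 encoding the low byte is the PS/2 command itself
 * (0xeb = poll) and the next nibble carries the number of response bytes
 * to read, so a 6-byte protocol polls with 0x06eb while the 3-byte polls
 * used elsewhere in this patch are issued as 0x03eb.
 */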
+
+
+/*
+ * psmouse_resync() attempts to re-validate current protocol.
+ */
+
+static void psmouse_resync(void *p)
+{
+       struct psmouse *psmouse = p, *parent = NULL;
+       struct serio *serio = psmouse->ps2dev.serio;
+       psmouse_ret_t rc = PSMOUSE_GOOD_DATA;
+       int failed = 0, enabled = 0;
+       int i;
+
+       down(&psmouse_sem);
+
+       if (psmouse->state != PSMOUSE_RESYNCING)
+               goto out;
+
+       if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
+               parent = serio_get_drvdata(serio->parent);
+               psmouse_deactivate(parent);
+       }
+
+/*
+ * Some mice don't ACK commands sent while they are in the middle of
+ * transmitting a motion packet. To avoid delay we use ps2_sendbyte()
+ * instead of ps2_command(), which would wait up to 200ms for an ACK
+ * that may never come.
+ * As an additional quirk, ALPS touchpads may not only fail to ACK the
+ * disable command but also stop reporting taps, so once we have seen
+ * the mouse ACK disable at least once we do a full reconnect if the
+ * ACK goes missing.
+ */
+       psmouse->num_resyncs++;
+
+       if (ps2_sendbyte(&psmouse->ps2dev, PSMOUSE_CMD_DISABLE, 20)) {
+               if (psmouse->num_resyncs < 3 || psmouse->acks_disable_command)
+                       failed = 1;
+       } else
+               psmouse->acks_disable_command = 1;
+
+/*
+ * Poll the mouse. If it was reset the packet will be shorter than
+ * psmouse->pktsize and ps2_command will fail. We do not expect, and do
+ * not handle, the scenario where the mouse "upgrades" its protocol while
+ * disconnected, since that would require an additional delay. If we ever
+ * see a mouse that does it we'll adjust the code.
+ */
+       if (!failed) {
+               if (psmouse->poll(psmouse))
+                       failed = 1;
+               else {
+                       psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
+                       for (i = 0; i < psmouse->pktsize; i++) {
+                               psmouse->pktcnt++;
+                               rc = psmouse->protocol_handler(psmouse, NULL);
+                               if (rc != PSMOUSE_GOOD_DATA)
+                                       break;
+                       }
+                       if (rc != PSMOUSE_FULL_PACKET)
+                               failed = 1;
+                       psmouse_set_state(psmouse, PSMOUSE_RESYNCING);
+               }
+       }
+/*
+ * Now try to enable the mouse. We do this even if the poll failed, and we
+ * repeat the attempt 5 times, otherwise we may be left with a disabled
+ * mouse.
+ */
+       for (i = 0; i < 5; i++) {
+               if (!ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+                       enabled = 1;
+                       break;
+               }
+               msleep(200);
+       }
+
+       if (!enabled) {
+               printk(KERN_WARNING "psmouse.c: failed to re-enable mouse on %s\n",
+                       psmouse->ps2dev.serio->phys);
+               failed = 1;
+       }
+
+       if (failed) {
+               psmouse_set_state(psmouse, PSMOUSE_IGNORE);
+               printk(KERN_INFO "psmouse.c: resync failed, issuing reconnect request\n");
+               serio_reconnect(serio);
+       } else
+               psmouse_set_state(psmouse, PSMOUSE_ACTIVATED);
+
+       if (parent)
+               psmouse_activate(parent);
+ out:
+       up(&psmouse_sem);
+}
 
 /*
  * psmouse_cleanup() resets the mouse into power-on state.
@@ -822,6 +977,11 @@ static void psmouse_disconnect(struct serio *serio)
 
        psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
 
+       /* make sure we don't have a resync in progress */
+       up(&psmouse_sem);
+       flush_workqueue(kpsmoused_wq);
+       down(&psmouse_sem);
+
        if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
                parent = serio_get_drvdata(serio->parent);
                psmouse_deactivate(parent);
@@ -859,6 +1019,7 @@ static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_proto
 
        psmouse->set_rate = psmouse_set_rate;
        psmouse->set_resolution = psmouse_set_resolution;
+       psmouse->poll = psmouse_poll;
        psmouse->protocol_handler = psmouse_process_byte;
        psmouse->pktsize = 3;
 
@@ -874,6 +1035,23 @@ static int psmouse_switch_protocol(struct psmouse *psmouse, struct psmouse_proto
        else
                psmouse->type = psmouse_extensions(psmouse, psmouse_max_proto, 1);
 
+       /*
+        * If the mouse's packet size is 3 there is no point in polling the
+        * device in the hope of detecting a protocol reset - we won't get a
+        * response shorter than 3 bytes anyway.
+        */
+       if (psmouse->pktsize == 3)
+               psmouse->resync_time = 0;
+
+       /*
+        * Some smart KVMs fake the response to the POLL command, returning
+        * just 3 bytes and messing up our resync logic, so if the initial
+        * poll fails we won't try polling the device anymore. Hopefully
+        * such a KVM will maintain the initially selected protocol.
+        */
+       if (psmouse->resync_time && psmouse->poll(psmouse))
+               psmouse->resync_time = 0;
+
        sprintf(psmouse->devname, "%s %s %s",
                psmouse_protocol_by_type(psmouse->type)->name, psmouse->vendor, psmouse->name);
 
@@ -914,6 +1092,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
                goto out;
 
        ps2_init(&psmouse->ps2dev, serio);
+       INIT_WORK(&psmouse->resync_work, psmouse_resync, psmouse);
        psmouse->dev = input_dev;
        sprintf(psmouse->phys, "%s/input0", serio->phys);
 
@@ -934,6 +1113,7 @@ static int psmouse_connect(struct serio *serio, struct serio_driver *drv)
        psmouse->rate = psmouse_rate;
        psmouse->resolution = psmouse_resolution;
        psmouse->resetafter = psmouse_resetafter;
+       psmouse->resync_time = parent ? 0 : psmouse_resync_time;
        psmouse->smartscroll = psmouse_smartscroll;
 
        psmouse_switch_protocol(psmouse, NULL);
@@ -1278,13 +1458,21 @@ static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
 
 static int __init psmouse_init(void)
 {
+       kpsmoused_wq = create_singlethread_workqueue("kpsmoused");
+       if (!kpsmoused_wq) {
+               printk(KERN_ERR "psmouse: failed to create kpsmoused workqueue\n");
+               return -ENOMEM;
+       }
+
        serio_register_driver(&psmouse_drv);
+
        return 0;
 }
 
 static void __exit psmouse_exit(void)
 {
        serio_unregister_driver(&psmouse_drv);
+       destroy_workqueue(kpsmoused_wq);
 }
 
 module_init(psmouse_init);
index 7c4192bd1279c62e16d621942b0855384e7f3f91..4d9107fba6a10e8da641a3b5e5630924b66fe110 100644 (file)
@@ -7,7 +7,7 @@
 #define PSMOUSE_CMD_GETINFO    0x03e9
 #define PSMOUSE_CMD_SETSTREAM  0x00ea
 #define PSMOUSE_CMD_SETPOLL    0x00f0
-#define PSMOUSE_CMD_POLL       0x03eb
+#define PSMOUSE_CMD_POLL       0x00eb  /* caller sets number of bytes to receive */
 #define PSMOUSE_CMD_GETID      0x02f2
 #define PSMOUSE_CMD_SETRATE    0x10f3
 #define PSMOUSE_CMD_ENABLE     0x00f4
@@ -23,6 +23,7 @@
 enum psmouse_state {
        PSMOUSE_IGNORE,
        PSMOUSE_INITIALIZING,
+       PSMOUSE_RESYNCING,
        PSMOUSE_CMD_MODE,
        PSMOUSE_ACTIVATED,
 };
@@ -38,15 +39,19 @@ struct psmouse {
        void *private;
        struct input_dev *dev;
        struct ps2dev ps2dev;
+       struct work_struct resync_work;
        char *vendor;
        char *name;
        unsigned char packet[8];
+       unsigned char badbyte;
        unsigned char pktcnt;
        unsigned char pktsize;
        unsigned char type;
+       unsigned char acks_disable_command;
        unsigned int model;
        unsigned long last;
        unsigned long out_of_sync;
+       unsigned long num_resyncs;
        enum psmouse_state state;
        char devname[64];
        char phys[32];
@@ -54,6 +59,7 @@ struct psmouse {
        unsigned int rate;
        unsigned int resolution;
        unsigned int resetafter;
+       unsigned int resync_time;
        unsigned int smartscroll;       /* Logitech only */
 
        psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse, struct pt_regs *regs);
@@ -62,6 +68,7 @@ struct psmouse {
 
        int (*reconnect)(struct psmouse *psmouse);
        void (*disconnect)(struct psmouse *psmouse);
+       int (*poll)(struct psmouse *psmouse);
 
        void (*pt_activate)(struct psmouse *psmouse);
        void (*pt_deactivate)(struct psmouse *psmouse);
index 97cdfd6acaca039e06d89e3073a72c8b712d70dd..2051bec2c394b896700612590749ac7b31da9de2 100644 (file)
@@ -652,6 +652,8 @@ int synaptics_init(struct psmouse *psmouse)
        psmouse->disconnect = synaptics_disconnect;
        psmouse->reconnect = synaptics_reconnect;
        psmouse->pktsize = 6;
+       /* Synaptics can usually stay in sync without extra help */
+       psmouse->resync_time = 0;
 
        if (SYN_CAP_PASS_THROUGH(priv->capabilities))
                synaptics_pt_create(psmouse);
index 2d2f9fb3adede436e1e965be059f8f6210e77092..a4c6f352272390af0d2d8b740d53a1bb8cd2668d 100644 (file)
@@ -173,6 +173,13 @@ static struct dmi_system_id __initdata i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
                },
        },
+       {
+               .ident = "Sony Vaio FS-115b",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
+               },
+       },
        { }
 };
 
index 8e530cc970e198c62f535d4d3361e98189cd400b..2f76813c3a645c7d3d6a9e797df4525f19c6ac3d 100644 (file)
@@ -59,9 +59,7 @@ static DECLARE_MUTEX(serio_sem);
 
 static LIST_HEAD(serio_list);
 
-static struct bus_type serio_bus = {
-       .name = "serio",
-};
+static struct bus_type serio_bus;
 
 static void serio_add_port(struct serio *serio);
 static void serio_destroy_port(struct serio *serio);
@@ -750,11 +748,15 @@ static int serio_driver_remove(struct device *dev)
        return 0;
 }
 
+static struct bus_type serio_bus = {
+       .name = "serio",
+       .probe = serio_driver_probe,
+       .remove = serio_driver_remove,
+};
+
 void __serio_register_driver(struct serio_driver *drv, struct module *owner)
 {
        drv->driver.bus = &serio_bus;
-       drv->driver.probe = serio_driver_probe;
-       drv->driver.remove = serio_driver_remove;
 
        serio_queue_event(drv, owner, SERIO_REGISTER_DRIVER);
 }
index 21d55ed4b88a69e08881a4b03f1ef9dbf20548d2..2c674023a6acd5f71d2fe742a7a9d2428ae180b1 100644 (file)
@@ -11,6 +11,19 @@ menuconfig INPUT_TOUCHSCREEN
 
 if INPUT_TOUCHSCREEN
 
+config TOUCHSCREEN_ADS7846
+       tristate "ADS 7846 based touchscreens"
+       depends on SPI_MASTER
+       help
+         Say Y here if you have a touchscreen interface using the
+         ADS7846 controller, and your board-specific initialization
+         code includes that in its table of SPI devices.
+
+         If unsure, say N (but it's safe to say "Y").
+
+         To compile this driver as a module, choose M here: the
+         module will be called ads7846.
+
 config TOUCHSCREEN_BITSY
        tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
        depends on SA1100_BITSY
index 6842869c9a263d570b0ace2c53ace0e461cac9dc..5e5557c43121a128243412074d689c8340a5f2fd 100644 (file)
@@ -4,6 +4,7 @@
 
 # Each configuration option enables a list of files.
 
+obj-$(CONFIG_TOUCHSCREEN_ADS7846)      += ads7846.o
 obj-$(CONFIG_TOUCHSCREEN_BITSY)        += h3600_ts_input.o
 obj-$(CONFIG_TOUCHSCREEN_CORGI)        += corgi_ts.o
 obj-$(CONFIG_TOUCHSCREEN_GUNZE)        += gunze.o
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
new file mode 100644 (file)
index 0000000..dd8c6a9
--- /dev/null
@@ -0,0 +1,625 @@
+/*
+ * ADS7846 based touchscreen and sensor driver
+ *
+ * Copyright (c) 2005 David Brownell
+ *
+ * Using code from:
+ *  - corgi_ts.c
+ *     Copyright (C) 2004-2005 Richard Purdie
+ *  - omap_ts.[hc], ads7846.h, ts_osk.c
+ *     Copyright (C) 2002 MontaVista Software
+ *     Copyright (C) 2004 Texas Instruments
+ *     Copyright (C) 2005 Dirk Behme
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ */
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/ads7846.h>
+
+#ifdef CONFIG_ARM
+#include <asm/mach-types.h>
+#ifdef CONFIG_ARCH_OMAP
+#include <asm/arch/gpio.h>
+#endif
+
+#else
+#define        set_irq_type(irq,type)  do{}while(0)
+#endif
+
+
+/*
+ * This code has been lightly tested on an ads7846.
+ * Support for ads7843 and ads7845 has only been stubbed in.
+ *
+ * Not yet done:  investigate the values reported.  Are x/y/pressure
+ * event values sane enough for X11?  How accurate are the temperature
+ * and voltage readings?  (System-specific calibration should support
+ * accuracy of 0.3 degrees C; otherwise it's 2.0 degrees.)
+ *
+ * app note sbaa036 talks in more detail about accurate sampling...
+ * that ought to help in situations like LCDs inducing noise (which
+ * can also be helped by using synch signals) and more generally.
+ */
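/*
 * Editor's sketch, not part of this driver: roughly how a board file would
 * describe the chip in its SPI device table (registered during board init,
 * e.g. via spi_register_board_info()) so that this driver binds to it.
 * All names and numbers below are hypothetical; the fields follow
 * linux/spi/ads7846.h and linux/spi/spi.h as the editor understands them.
 */
static struct ads7846_platform_data example_ads_info /* hypothetical */ = {
	.model			= 7846,
	.vref_delay_usecs	= 100,	/* no vref cap, per the table below */
	.x_plate_ohms		= 419,	/* board-specific plate resistance */
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias	= "ads7846",
		.platform_data	= &example_ads_info,
		.irq		= 42,		/* hypothetical PENIRQ line */
		.max_speed_hz	= 120000,
		.bus_num	= 1,
		.chip_select	= 0,
	},
};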
+
+#define        TS_POLL_PERIOD  msecs_to_jiffies(10)
+
+struct ts_event {
+       /* For portability, we can't read 12 bit values using SPI (which
+        * would make the controller deliver them as native byteorder u16
+        * with msbs zeroed).  Instead, we read them as two 8-bit values,
+        * which need byteswapping then range adjustment.
+        */
+       __be16 x;
+       __be16 y;
+       __be16 z1, z2;
+};
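/*
 * Editor's illustration, not part of the driver: a hypothetical helper
 * showing the byteswap-then-range-adjust step described above, mirroring
 * what ads7846_read12_ser() does further down for single-ended reads.
 */
static inline u16 example_adjust_sample(__be16 raw)
{
	/* byteswap the on-wire sample, then keep the 12 significant bits */
	return be16_to_cpu(raw) >> 4;
}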
+
+struct ads7846 {
+       struct input_dev        input;
+       char                    phys[32];
+
+       struct spi_device       *spi;
+       u16                     model;
+       u16                     vref_delay_usecs;
+       u16                     x_plate_ohms;
+
+       struct ts_event         tc;
+
+       struct spi_transfer     xfer[8];
+       struct spi_message      msg;
+
+       spinlock_t              lock;
+       struct timer_list       timer;          /* P: lock */
+       unsigned                pendown:1;      /* P: lock */
+       unsigned                pending:1;      /* P: lock */
+// FIXME remove "irq_disabled"
+       unsigned                irq_disabled:1; /* P: lock */
+};
+
+/* leave chip selected when we're done, for quicker re-select? */
+#if    0
+#define        CS_CHANGE(xfer) ((xfer).cs_change = 1)
+#else
+#define        CS_CHANGE(xfer) ((xfer).cs_change = 0)
+#endif
+
+/*--------------------------------------------------------------------------*/
+
+/* The ADS7846 has touchscreen and other sensors.
+ * Earlier ads784x chips are somewhat compatible.
+ */
+#define        ADS_START               (1 << 7)
+#define        ADS_A2A1A0_d_y          (1 << 4)        /* differential */
+#define        ADS_A2A1A0_d_z1         (3 << 4)        /* differential */
+#define        ADS_A2A1A0_d_z2         (4 << 4)        /* differential */
+#define        ADS_A2A1A0_d_x          (5 << 4)        /* differential */
+#define        ADS_A2A1A0_temp0        (0 << 4)        /* non-differential */
+#define        ADS_A2A1A0_vbatt        (2 << 4)        /* non-differential */
+#define        ADS_A2A1A0_vaux         (6 << 4)        /* non-differential */
+#define        ADS_A2A1A0_temp1        (7 << 4)        /* non-differential */
+#define        ADS_8_BIT               (1 << 3)
+#define        ADS_12_BIT              (0 << 3)
+#define        ADS_SER                 (1 << 2)        /* non-differential */
+#define        ADS_DFR                 (0 << 2)        /* differential */
+#define        ADS_PD10_PDOWN          (0 << 0)        /* lowpower mode + penirq */
+#define        ADS_PD10_ADC_ON         (1 << 0)        /* ADC on */
+#define        ADS_PD10_REF_ON         (2 << 0)        /* vREF on + penirq */
+#define        ADS_PD10_ALL_ON         (3 << 0)        /* ADC + vREF on */
+
+#define        MAX_12BIT       ((1<<12)-1)
+
+/* leave ADC powered up (disables penirq) between differential samples */
+#define        READ_12BIT_DFR(x) (ADS_START | ADS_A2A1A0_d_ ## x \
+       | ADS_12_BIT | ADS_DFR)
+
+static const u8        read_y  = READ_12BIT_DFR(y)  | ADS_PD10_ADC_ON;
+static const u8        read_z1 = READ_12BIT_DFR(z1) | ADS_PD10_ADC_ON;
+static const u8        read_z2 = READ_12BIT_DFR(z2) | ADS_PD10_ADC_ON;
+static const u8        read_x  = READ_12BIT_DFR(x)  | ADS_PD10_PDOWN;  /* LAST */
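+/* Illustrative only: plugging the bitfield values above into these
+ * definitions yields the fixed command bytes
+ *   read_y = 0x91, read_z1 = 0xb1, read_z2 = 0xc1, read_x = 0xd0
+ * (derived from the #defines, not measured on hardware).
+ */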
+
+/* single-ended samples need to first power up reference voltage;
+ * we leave both ADC and VREF powered
+ */
+#define        READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \
+       | ADS_12_BIT | ADS_SER)
+
+static const u8        ref_on = READ_12BIT_DFR(x) | ADS_PD10_ALL_ON;
+static const u8        ref_off = READ_12BIT_DFR(y) | ADS_PD10_PDOWN;
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * Non-touchscreen sensors only use single-ended conversions.
+ */
+
+struct ser_req {
+       u8                      command;
+       u16                     scratch;
+       __be16                  sample;
+       struct spi_message      msg;
+       struct spi_transfer     xfer[6];
+};
+
+static int ads7846_read12_ser(struct device *dev, unsigned command)
+{
+       struct spi_device       *spi = to_spi_device(dev);
+       struct ads7846          *ts = dev_get_drvdata(dev);
+       struct ser_req          *req = kzalloc(sizeof *req, SLAB_KERNEL);
+       int                     status;
+       int                     sample;
+       int                     i;
+
+       if (!req)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&req->msg.transfers);
+
+       /* activate reference, so it has time to settle; */
+       req->xfer[0].tx_buf = &ref_on;
+       req->xfer[0].len = 1;
+       req->xfer[1].rx_buf = &req->scratch;
+       req->xfer[1].len = 2;
+
+       /*
+        * for external VREF, 0 usec (and assume it's always on);
+        * for 1uF, use 800 usec;
+        * no cap, 100 usec.
+        */
+       req->xfer[1].delay_usecs = ts->vref_delay_usecs;
+
+       /* take sample */
+       req->command = (u8) command;
+       req->xfer[2].tx_buf = &req->command;
+       req->xfer[2].len = 1;
+       req->xfer[3].rx_buf = &req->sample;
+       req->xfer[3].len = 2;
+
+       /* REVISIT:  take a few more samples, and compare ... */
+
+       /* turn off reference */
+       req->xfer[4].tx_buf = &ref_off;
+       req->xfer[4].len = 1;
+       req->xfer[5].rx_buf = &req->scratch;
+       req->xfer[5].len = 2;
+
+       CS_CHANGE(req->xfer[5]);
+
+       /* group all the transfers together, so we can't interfere with
+        * reading touchscreen state; disable penirq while sampling
+        */
+       for (i = 0; i < 6; i++)
+               spi_message_add_tail(&req->xfer[i], &req->msg);
+
+       disable_irq(spi->irq);
+       status = spi_sync(spi, &req->msg);
+       enable_irq(spi->irq);
+
+       if (req->msg.status)
+               status = req->msg.status;
+       sample = be16_to_cpu(req->sample);
+       sample = sample >> 4;
+       kfree(req);
+
+       return status ? status : sample;
+}
+
+#define SHOW(name) static ssize_t \
+name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \
+{ \
+       ssize_t v = ads7846_read12_ser(dev, \
+                       READ_12BIT_SER(name) | ADS_PD10_ALL_ON); \
+       if (v < 0) \
+               return v; \
+       return sprintf(buf, "%u\n", (unsigned) v); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL);
+
+SHOW(temp0)
+SHOW(temp1)
+SHOW(vaux)
+SHOW(vbatt)
+
+/*--------------------------------------------------------------------------*/
+
+/*
+ * PENIRQ only kicks the timer.  The timer only reissues the SPI transfer,
+ * to retrieve touchscreen status.
+ *
+ * The SPI transfer completion callback does the real work.  It reports
+ * touchscreen events and reactivates the timer (or IRQ) as appropriate.
+ */
+
+static void ads7846_rx(void *ads)
+{
+       struct ads7846  *ts = ads;
+       unsigned        Rt;
+       unsigned        sync = 0;
+       u16             x, y, z1, z2;
+       unsigned long   flags;
+
+       /* adjust:  12 bit samples (left aligned), built from
+        * two 8-bit values written msb-first.
+        */
+       x = be16_to_cpu(ts->tc.x) >> 4;
+       y = be16_to_cpu(ts->tc.y) >> 4;
+       z1 = be16_to_cpu(ts->tc.z1) >> 4;
+       z2 = be16_to_cpu(ts->tc.z2) >> 4;
+
+       /* range filtering */
+       if (x == MAX_12BIT)
+               x = 0;
+
+       if (x && z1 && ts->spi->dev.power.power_state.event == PM_EVENT_ON) {
+               /* compute touch pressure resistance using equation #2 */
+               Rt = z2;
+               Rt -= z1;
+               Rt *= x;
+               Rt *= ts->x_plate_ohms;
+               Rt /= z1;
+               Rt = (Rt + 2047) >> 12;
+       } else
+               Rt = 0;
+
+       /* NOTE:  "pendown" is inferred from pressure; we don't rely on
+        * being able to check nPENIRQ status, or "friendly" trigger modes
+        * (both-edges is much better than just-falling or low-level).
+        *
+        * REVISIT:  some boards may require reading nPENIRQ; it's
+        * needed on 7843.  and 7845 reads pressure differently...
+        *
+        * REVISIT:  the touchscreen might not be connected; this code
+        * won't notice that, even if nPENIRQ never fires ...
+        */
+       if (!ts->pendown && Rt != 0) {
+               input_report_key(&ts->input, BTN_TOUCH, 1);
+               sync = 1;
+       } else if (ts->pendown && Rt == 0) {
+               input_report_key(&ts->input, BTN_TOUCH, 0);
+               sync = 1;
+       }
+
+       if (Rt) {
+               input_report_abs(&ts->input, ABS_X, x);
+               input_report_abs(&ts->input, ABS_Y, y);
+               input_report_abs(&ts->input, ABS_PRESSURE, Rt);
+               sync = 1;
+       }
+       if (sync)
+               input_sync(&ts->input);
+
+#ifdef VERBOSE
+       if (Rt || ts->pendown)
+               pr_debug("%s: %d/%d/%d%s\n", ts->spi->dev.bus_id,
+                       x, y, Rt, Rt ? "" : " UP");
+#endif
+
+       /* don't retrigger while we're suspended */
+       spin_lock_irqsave(&ts->lock, flags);
+
+       ts->pendown = (Rt != 0);
+       ts->pending = 0;
+
+       if (ts->spi->dev.power.power_state.event == PM_EVENT_ON) {
+               if (ts->pendown)
+                       mod_timer(&ts->timer, jiffies + TS_POLL_PERIOD);
+               else if (ts->irq_disabled) {
+                       ts->irq_disabled = 0;
+                       enable_irq(ts->spi->irq);
+               }
+       }
+
+       spin_unlock_irqrestore(&ts->lock, flags);
+}
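+
+/* Worked example of pressure equation #2 above (illustrative numbers,
+ * not taken from hardware): with x = 2000, z1 = 400, z2 = 600 and
+ * x_plate_ohms = 400, Rt = (z2 - z1) * x * x_plate_ohms / z1 = 400000,
+ * and (400000 + 2047) >> 12 rounds that to 98 ohms of touch resistance.
+ */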
+
+static void ads7846_timer(unsigned long handle)
+{
+       struct ads7846  *ts = (void *)handle;
+       int             status = 0;
+       unsigned long   flags;
+
+       spin_lock_irqsave(&ts->lock, flags);
+       if (!ts->pending) {
+               ts->pending = 1;
+               if (!ts->irq_disabled) {
+                       ts->irq_disabled = 1;
+                       disable_irq(ts->spi->irq);
+               }
+               status = spi_async(ts->spi, &ts->msg);
+               if (status)
+                       dev_err(&ts->spi->dev, "spi_async --> %d\n",
+                                       status);
+       }
+       spin_unlock_irqrestore(&ts->lock, flags);
+}
+
+static irqreturn_t ads7846_irq(int irq, void *handle, struct pt_regs *regs)
+{
+       ads7846_timer((unsigned long) handle);
+       return IRQ_HANDLED;
+}
+
+/*--------------------------------------------------------------------------*/
+
+static int
+ads7846_suspend(struct spi_device *spi, pm_message_t message)
+{
+       struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+       unsigned long   flags;
+
+       spin_lock_irqsave(&ts->lock, flags);
+
+       spi->dev.power.power_state = message;
+
+       /* are we waiting for IRQ, or polling? */
+       if (!ts->pendown) {
+               if (!ts->irq_disabled) {
+                       ts->irq_disabled = 1;
+                       disable_irq(ts->spi->irq);
+               }
+       } else {
+               /* polling; force a final SPI completion;
+                * that will clean things up neatly
+                */
+               if (!ts->pending)
+                       mod_timer(&ts->timer, jiffies);
+
+               while (ts->pendown || ts->pending) {
+                       spin_unlock_irqrestore(&ts->lock, flags);
+                       udelay(10);
+                       spin_lock_irqsave(&ts->lock, flags);
+               }
+       }
+
+       /* we know the chip's in lowpower mode since we always
+        * leave it that way after every request
+        */
+
+       spin_unlock_irqrestore(&ts->lock, flags);
+       return 0;
+}
+
+static int ads7846_resume(struct spi_device *spi)
+{
+       struct ads7846 *ts = dev_get_drvdata(&spi->dev);
+
+       ts->irq_disabled = 0;
+       enable_irq(ts->spi->irq);
+       spi->dev.power.power_state = PMSG_ON;
+       return 0;
+}
+
+static int __devinit ads7846_probe(struct spi_device *spi)
+{
+       struct ads7846                  *ts;
+       struct ads7846_platform_data    *pdata = spi->dev.platform_data;
+       struct spi_transfer             *x;
+       int                             i;
+
+       if (!spi->irq) {
+               dev_dbg(&spi->dev, "no IRQ?\n");
+               return -ENODEV;
+       }
+
+       if (!pdata) {
+               dev_dbg(&spi->dev, "no platform data?\n");
+               return -ENODEV;
+       }
+
+       /* don't exceed max specified sample rate */
+       if (spi->max_speed_hz > (125000 * 16)) {
+               dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
+                               (spi->max_speed_hz/16)/1000);
+               return -EINVAL;
+       }
+
+       /* We'd set the wordsize to 12 bits ... except that some controllers
+        * will then treat the 8 bit command words as 12 bits (and drop the
+        * four MSBs of the 12 bit result).  Result: inputs must be shifted
+        * to discard the four garbage LSBs.
+        */
+
+       if (!(ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL)))
+               return -ENOMEM;
+
+       dev_set_drvdata(&spi->dev, ts);
+
+       ts->spi = spi;
+       spi->dev.power.power_state = PMSG_ON;
+
+       init_timer(&ts->timer);
+       ts->timer.data = (unsigned long) ts;
+       ts->timer.function = ads7846_timer;
+
+       ts->model = pdata->model ? : 7846;
+       ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100;
+       ts->x_plate_ohms = pdata->x_plate_ohms ? : 400;
+
+       init_input_dev(&ts->input);
+
+       ts->input.dev = &spi->dev;
+       ts->input.name = "ADS784x Touchscreen";
+       snprintf(ts->phys, sizeof ts->phys, "%s/input0", spi->dev.bus_id);
+       ts->input.phys = ts->phys;
+
+       ts->input.evbit[0] = BIT(EV_KEY) | BIT(EV_ABS);
+       ts->input.keybit[LONG(BTN_TOUCH)] = BIT(BTN_TOUCH);
+       input_set_abs_params(&ts->input, ABS_X,
+                       pdata->x_min ? : 0,
+                       pdata->x_max ? : MAX_12BIT,
+                       0, 0);
+       input_set_abs_params(&ts->input, ABS_Y,
+                       pdata->y_min ? : 0,
+                       pdata->y_max ? : MAX_12BIT,
+                       0, 0);
+       input_set_abs_params(&ts->input, ABS_PRESSURE,
+                       pdata->pressure_min, pdata->pressure_max, 0, 0);
+
+       input_register_device(&ts->input);
+
+       /* set up the transfers to read touchscreen state; this assumes we
+        * use formula #2 for pressure, not #3.
+        */
+       x = ts->xfer;
+
+       /* y- still on; turn on only y+ (and ADC) */
+       x->tx_buf = &read_y;
+       x->len = 1;
+       x++;
+       x->rx_buf = &ts->tc.y;
+       x->len = 2;
+       x++;
+
+       /* turn y+ off, x- on; we'll use formula #2 */
+       if (ts->model == 7846) {
+               x->tx_buf = &read_z1;
+               x->len = 1;
+               x++;
+               x->rx_buf = &ts->tc.z1;
+               x->len = 2;
+               x++;
+
+               x->tx_buf = &read_z2;
+               x->len = 1;
+               x++;
+               x->rx_buf = &ts->tc.z2;
+               x->len = 2;
+               x++;
+       }
+
+       /* turn y- off, x+ on, then leave in lowpower */
+       x->tx_buf = &read_x;
+       x->len = 1;
+       x++;
+       x->rx_buf = &ts->tc.x;
+       x->len = 2;
+       x++;
+
+       CS_CHANGE(x[-1]);
+
+       for (i = 0; i < x - ts->xfer; i++)
+               spi_message_add_tail(&ts->xfer[i], &ts->msg);
+       ts->msg.complete = ads7846_rx;
+       ts->msg.context = ts;
+
+       if (request_irq(spi->irq, ads7846_irq, SA_SAMPLE_RANDOM,
+                               spi->dev.bus_id, ts)) {
+               dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq);
+               input_unregister_device(&ts->input);
+               kfree(ts);
+               return -EBUSY;
+       }
+       set_irq_type(spi->irq, IRQT_FALLING);
+
+       dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq);
+
+       /* take a first sample, leaving nPENIRQ active; avoid
+        * the touchscreen, in case it's not connected.
+        */
+       (void) ads7846_read12_ser(&spi->dev,
+                         READ_12BIT_SER(vaux) | ADS_PD10_ALL_ON);
+
+       /* ads7843/7845 don't have temperature sensors, and
+        * use the other sensors a bit differently too
+        */
+       if (ts->model == 7846) {
+               device_create_file(&spi->dev, &dev_attr_temp0);
+               device_create_file(&spi->dev, &dev_attr_temp1);
+       }
+       if (ts->model != 7845)
+               device_create_file(&spi->dev, &dev_attr_vbatt);
+       device_create_file(&spi->dev, &dev_attr_vaux);
+
+       return 0;
+}
+
+static int __devexit ads7846_remove(struct spi_device *spi)
+{
+       struct ads7846          *ts = dev_get_drvdata(&spi->dev);
+
+       ads7846_suspend(spi, PMSG_SUSPEND);
+       free_irq(ts->spi->irq, ts);
+       if (ts->irq_disabled)
+               enable_irq(ts->spi->irq);
+
+       if (ts->model == 7846) {
+               device_remove_file(&spi->dev, &dev_attr_temp0);
+               device_remove_file(&spi->dev, &dev_attr_temp1);
+       }
+       if (ts->model != 7845)
+               device_remove_file(&spi->dev, &dev_attr_vbatt);
+       device_remove_file(&spi->dev, &dev_attr_vaux);
+
+       input_unregister_device(&ts->input);
+       kfree(ts);
+
+       dev_dbg(&spi->dev, "unregistered touchscreen\n");
+       return 0;
+}
+
+static struct spi_driver ads7846_driver = {
+       .driver = {
+               .name   = "ads7846",
+               .bus    = &spi_bus_type,
+               .owner  = THIS_MODULE,
+       },
+       .probe          = ads7846_probe,
+       .remove         = __devexit_p(ads7846_remove),
+       .suspend        = ads7846_suspend,
+       .resume         = ads7846_resume,
+};
+
+static int __init ads7846_init(void)
+{
+       /* grr, board-specific init should stay out of drivers!! */
+
+#ifdef CONFIG_ARCH_OMAP
+       if (machine_is_omap_osk()) {
+               /* GPIO4 = PENIRQ; GPIO6 = BUSY */
+               omap_request_gpio(4);
+               omap_set_gpio_direction(4, 1);
+               omap_request_gpio(6);
+               omap_set_gpio_direction(6, 1);
+       }
+       // also TI 1510 Innovator, bitbanging through FPGA
+       // also Nokia 770
+       // also Palm Tungsten T2
+#endif
+
+       // PXA:
+       // also Dell Axim X50
+       // also HP iPaq H191x/H192x/H415x/H435x
+       // also Intel Lubbock (additional to UCB1400; as temperature sensor)
+       // also Sharp Zaurus C7xx, C8xx (corgi/shepherd/husky)
+
+       // Atmel at91sam9261-EK uses ads7843
+
+       // also various AMD Au1x00 devel boards
+
+       return spi_register_driver(&ads7846_driver);
+}
+module_init(ads7846_init);
+
+static void __exit ads7846_exit(void)
+{
+       spi_unregister_driver(&ads7846_driver);
+
+#ifdef CONFIG_ARCH_OMAP
+       if (machine_is_omap_osk()) {
+               omap_free_gpio(4);
+               omap_free_gpio(6);
+       }
+#endif
+
+}
+module_exit(ads7846_exit);
+
+MODULE_DESCRIPTION("ADS7846 TouchScreen Driver");
+MODULE_LICENSE("GPL");
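
For context, a board file hands this driver its configuration through SPI board
info.  The sketch below shows one plausible setup; every field value, the bus
and chip-select numbers, and the IRQ line are illustrative assumptions rather
than anything taken from this patch:

	static const struct ads7846_platform_data ads_info = {
		.model			= 7846,
		.vref_delay_usecs	= 100,	/* assumes an external VREF cap */
		.x_plate_ohms		= 419,	/* hypothetical board measurement */
	};

	static struct spi_board_info board_spi_devices[] __initdata = {
		{
			.modalias	= "ads7846",
			.platform_data	= &ads_info,
			.irq		= 42,			/* hypothetical PENIRQ line */
			.max_speed_hz	= 125000 * 16,		/* keeps f(sample) within spec */
			.bus_num	= 1,
			.chip_select	= 0,
		},
	};

	/* board init would then call:
	 * spi_register_board_info(board_spi_devices, ARRAY_SIZE(board_spi_devices));
	 */
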
index 71aeb912ec61e455d4db26ae6e8091a42aadfa53..d56d400b6aaa52644fe610ef5042b00cddfa538e 100644 (file)
@@ -239,7 +239,7 @@ static int adb_iop_write(struct adb_request *req)
 
        local_irq_save(flags);
 
-       req->next = 0;
+       req->next = NULL;
        req->sent = 0;
        req->complete = 0;
        req->reply_len = 0;
index cf6a6f2248ac421aee3fc1bb2629710c280ef33c..314fc0830d9004d4a32998e6bb563c5719357822 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/irq.h>
 #include <asm/system.h>
 #include <linux/init.h>
+#include <linux/ioport.h>
 
 struct preg {
        unsigned char r;
@@ -88,24 +89,26 @@ int macio_probe(void)
 int macio_init(void)
 {
        struct device_node *adbs;
+       struct resource r;
 
        adbs = find_compatible_devices("adb", "chrp,adb0");
        if (adbs == 0)
                return -ENXIO;
 
 #if 0
-       { int i;
+       { int i = 0;
 
        printk("macio_adb_init: node = %p, addrs =", adbs->node);
-       for (i = 0; i < adbs->n_addrs; ++i)
-               printk(" %x(%x)", adbs->addrs[i].address, adbs->addrs[i].size);
+       while(!of_address_to_resource(adbs, i, &r))
+               printk(" %x(%x)", r.start, r.end - r.start);
        printk(", intrs =");
        for (i = 0; i < adbs->n_intrs; ++i)
                printk(" %x", adbs->intrs[i].line);
        printk("\n"); }
 #endif
-       
-       adb = ioremap(adbs->addrs->address, sizeof(struct adb_regs));
+       if (of_address_to_resource(adbs, 0, &r))
+               return -ENXIO;
+       adb = ioremap(r.start, sizeof(struct adb_regs));
 
        out_8(&adb->ctrl.r, 0);
        out_8(&adb->intr.r, 0);
index 2a545ceb523b05cfb57fdf20b8c79e1ca1f5ca2e..ed6d3174d66050f1ca3358909469770682111243 100644 (file)
@@ -211,6 +211,9 @@ struct bus_type macio_bus_type = {
        .name   = "macio",
        .match  = macio_bus_match,
        .uevent = macio_uevent,
+       .probe  = macio_device_probe,
+       .remove = macio_device_remove,
+       .shutdown = macio_device_shutdown,
        .suspend        = macio_device_suspend,
        .resume = macio_device_resume,
        .dev_attrs = macio_dev_attrs,
@@ -528,9 +531,6 @@ int macio_register_driver(struct macio_driver *drv)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &macio_bus_type;
-       drv->driver.probe = macio_device_probe;
-       drv->driver.remove = macio_device_remove;
-       drv->driver.shutdown = macio_device_shutdown;
 
        /* register with core */
        count = driver_register(&drv->driver);
index e9a159ad3022cfebe01928c3fd7c3581565dce74..2a2ffe060169a5f96d9679466b745f8341ee5397 100644 (file)
@@ -260,7 +260,7 @@ static int macii_write(struct adb_request *req)
                return -EINVAL;
        }
        
-       req->next = 0;
+       req->next = NULL;
        req->sent = 0;
        req->complete = 0;
        req->reply_len = 0;
@@ -295,7 +295,7 @@ static void macii_poll(void)
        unsigned long flags;
 
        local_irq_save(flags);
-       if (via[IFR] & SR_INT) macii_interrupt(0, 0, 0);
+       if (via[IFR] & SR_INT) macii_interrupt(0, NULL, NULL);
        local_irq_restore(flags);
 }
 
index a1966975d58fc6af85a702a36fcc4cc4ef3b6ac6..0129fcc3b183b050cd797910b9571cb2d9dccb5c 100644 (file)
@@ -294,6 +294,24 @@ static void maciisi_sync(struct adb_request *req)
                printk(KERN_ERR "maciisi_send_request: poll timed out!\n");
 }
 
+int
+maciisi_request(struct adb_request *req, void (*done)(struct adb_request *),
+           int nbytes, ...)
+{
+       va_list list;
+       int i;
+
+       req->nbytes = nbytes;
+       req->done = done;
+       req->reply_expected = 0;
+       va_start(list, nbytes);
+       for (i = 0; i < nbytes; i++)
+               req->data[i] = va_arg(list, int);
+       va_end(list);
+
+       return maciisi_send_request(req, 1);
+}
+
 /* Enqueue a request, and run the queue if possible */
 static int
 maciisi_write(struct adb_request* req)
@@ -308,7 +326,7 @@ maciisi_write(struct adb_request* req)
                req->complete = 1;
                return -EINVAL;
        }
-       req->next = 0;
+       req->next = NULL;
        req->sent = 0;
        req->complete = 0;
        req->reply_len = 0;
@@ -403,7 +421,7 @@ maciisi_poll(void)
 
        local_irq_save(flags);
        if (via[IFR] & SR_INT) {
-               maciisi_interrupt(0, 0, 0);
+               maciisi_interrupt(0, NULL, NULL);
        }
        else /* avoid calling this function too quickly in a loop */
                udelay(ADB_DELAY);
index 6f80d76ac17c619b5aea4406140eae094cdf6a7b..f08e52f2107b97ef0bac9d10e52b92ba677f71b4 100644 (file)
@@ -493,7 +493,7 @@ pmu_queue_request(struct adb_request *req)
                return -EINVAL;
        }
 
-       req->next = 0;
+       req->next = NULL;
        req->sent = 0;
        req->complete = 0;
        local_irq_save(flags);
@@ -717,7 +717,7 @@ pmu_handle_data(unsigned char *data, int len, struct pt_regs *regs)
                                printk(KERN_ERR "PMU: extra ADB reply\n");
                                return;
                        }
-                       req_awaiting_reply = 0;
+                       req_awaiting_reply = NULL;
                        if (len <= 2)
                                req->reply_len = 0;
                        else {
index 0302723fa21ff724ce5b7ead17d1c717318e6d19..1778104e106cf4b331b44e09da51dd02aaf415fc 100644 (file)
@@ -1238,6 +1238,7 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        mdk_rdev_t *same_pdev;
        char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
        struct kobject *ko;
+       char *s;
 
        if (rdev->mddev) {
                MD_BUG();
@@ -1277,6 +1278,8 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
        bdevname(rdev->bdev,b);
        if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
                return -ENOMEM;
+       while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
+               *s = '!';
                        
        list_add(&rdev->same_set, &mddev->disks);
        rdev->mddev = mddev;
index f65f64b00ff342aa5c0140198e13be55db99ff2b..44fcbe77c8f95b24e5d5d137e490c22144685800 100644 (file)
@@ -779,9 +779,8 @@ static int __init dvb_bt8xx_load_card(struct dvb_bt8xx_card *card, u32 type)
        return 0;
 }
 
-static int dvb_bt8xx_probe(struct device *dev)
+static int dvb_bt8xx_probe(struct bttv_sub_device *sub)
 {
-       struct bttv_sub_device *sub = to_bttv_sub_dev(dev);
        struct dvb_bt8xx_card *card;
        struct pci_dev* bttv_pci_dev;
        int ret;
@@ -890,13 +889,13 @@ static int dvb_bt8xx_probe(struct device *dev)
                return ret;
        }
 
-       dev_set_drvdata(dev, card);
+       dev_set_drvdata(&sub->dev, card);
        return 0;
 }
 
-static int dvb_bt8xx_remove(struct device *dev)
+static int dvb_bt8xx_remove(struct bttv_sub_device *sub)
 {
-       struct dvb_bt8xx_card *card = dev_get_drvdata(dev);
+       struct dvb_bt8xx_card *card = dev_get_drvdata(&sub->dev);
 
        dprintk("dvb_bt8xx: unloading card%d\n", card->bttv_nr);
 
@@ -919,14 +918,14 @@ static int dvb_bt8xx_remove(struct device *dev)
 static struct bttv_sub_driver driver = {
        .drv = {
                .name           = "dvb-bt8xx",
-               .probe          = dvb_bt8xx_probe,
-               .remove         = dvb_bt8xx_remove,
-               /* FIXME:
-                * .shutdown    = dvb_bt8xx_shutdown,
-                * .suspend     = dvb_bt8xx_suspend,
-                * .resume      = dvb_bt8xx_resume,
-                */
        },
+       .probe          = dvb_bt8xx_probe,
+       .remove         = dvb_bt8xx_remove,
+       /* FIXME:
+        * .shutdown    = dvb_bt8xx_shutdown,
+        * .suspend     = dvb_bt8xx_suspend,
+        * .resume      = dvb_bt8xx_resume,
+        */
 };
 
 static int __init dvb_bt8xx_init(void)
index d64accc17b0ef7854438845f6b4cb49096988e89..c4d5e2b70c28e12c28761b09713e5d669cb3fcdc 100644 (file)
@@ -47,9 +47,29 @@ static int bttv_sub_bus_match(struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int bttv_sub_probe(struct device *dev)
+{
+       struct bttv_sub_device *sdev = to_bttv_sub_dev(dev);
+       struct bttv_sub_driver *sub = to_bttv_sub_drv(dev->driver);
+
+       return sub->probe ? sub->probe(sdev) : -ENODEV;
+}
+
+static int bttv_sub_remove(struct device *dev)
+{
+       struct bttv_sub_device *sdev = to_bttv_sub_dev(dev);
+       struct bttv_sub_driver *sub = to_bttv_sub_drv(dev->driver);
+
+       if (sub->remove)
+               sub->remove(sdev);
+       return 0;
+}
+
 struct bus_type bttv_sub_bus_type = {
-       .name  = "bttv-sub",
-       .match = &bttv_sub_bus_match,
+       .name   = "bttv-sub",
+       .match  = &bttv_sub_bus_match,
+       .probe  = bttv_sub_probe,
+       .remove = bttv_sub_remove,
 };
 EXPORT_SYMBOL(bttv_sub_bus_type);
 
index e370d74f2a1b1f50a17aaa0b4d8b75f6cdb7ab8e..9908c8e0c951636e68c1121f2e2b4a7262194020 100644 (file)
@@ -365,6 +365,8 @@ struct bttv_sub_device {
 struct bttv_sub_driver {
        struct device_driver   drv;
        char                   wanted[BUS_ID_SIZE];
+       int                    (*probe)(struct bttv_sub_device *sub);
+       void                   (*remove)(struct bttv_sub_device *sub);
        void                   (*gpio_irq)(struct bttv_sub_device *sub);
 };
 #define to_bttv_sub_drv(x) container_of((x), struct bttv_sub_driver, drv)
index 55ba23075c90ed688ad2ee88c9608a1fefcda77a..75f401d52fda117409468a431424fe9cc177c091 100644 (file)
@@ -77,6 +77,8 @@ static int mcp_bus_resume(struct device *dev)
 static struct bus_type mcp_bus_type = {
        .name           = "mcp",
        .match          = mcp_bus_match,
+       .probe          = mcp_bus_probe,
+       .remove         = mcp_bus_remove,
        .suspend        = mcp_bus_suspend,
        .resume         = mcp_bus_resume,
 };
@@ -227,8 +229,6 @@ EXPORT_SYMBOL(mcp_host_unregister);
 int mcp_driver_register(struct mcp_driver *mcpdrv)
 {
        mcpdrv->drv.bus = &mcp_bus_type;
-       mcpdrv->drv.probe = mcp_bus_probe;
-       mcpdrv->drv.remove = mcp_bus_remove;
        return driver_register(&mcpdrv->drv);
 }
 EXPORT_SYMBOL(mcp_driver_register);
index b42e0fbab59b6f880897e1d2303e48342341b8f7..aff83f966803b2f068719d0cc076092d2a2b5c2c 100644 (file)
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 
 #include <asm/dma.h>
 #include <asm/hardware.h>
 
 #include "ucb1x00.h"
 
-static DECLARE_MUTEX(ucb1x00_sem);
+static DEFINE_MUTEX(ucb1x00_mutex);
 static LIST_HEAD(ucb1x00_drivers);
 static LIST_HEAD(ucb1x00_devices);
 
@@ -521,12 +522,12 @@ static int ucb1x00_probe(struct mcp *mcp)
                goto err_irq;
 
        INIT_LIST_HEAD(&ucb->devs);
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_add(&ucb->node, &ucb1x00_devices);
        list_for_each_entry(drv, &ucb1x00_drivers, node) {
                ucb1x00_add_dev(ucb, drv);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
        goto out;
 
  err_irq:
@@ -544,13 +545,13 @@ static void ucb1x00_remove(struct mcp *mcp)
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct list_head *l, *n;
 
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_del(&ucb->node);
        list_for_each_safe(l, n, &ucb->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
                ucb1x00_remove_dev(dev);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
 
        free_irq(ucb->irq, ucb);
        class_device_unregister(&ucb->cdev);
@@ -561,12 +562,12 @@ int ucb1x00_register_driver(struct ucb1x00_driver *drv)
        struct ucb1x00 *ucb;
 
        INIT_LIST_HEAD(&drv->devs);
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_add(&drv->node, &ucb1x00_drivers);
        list_for_each_entry(ucb, &ucb1x00_devices, node) {
                ucb1x00_add_dev(ucb, drv);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
        return 0;
 }
 
@@ -574,13 +575,13 @@ void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
 {
        struct list_head *n, *l;
 
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_del(&drv->node);
        list_for_each_safe(l, n, &drv->devs) {
                struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
                ucb1x00_remove_dev(dev);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
 }
 
 static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
@@ -588,12 +589,12 @@ static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state)
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct ucb1x00_dev *dev;
 
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(dev, &ucb->devs, dev_node) {
                if (dev->drv->suspend)
                        dev->drv->suspend(dev, state);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
        return 0;
 }
 
@@ -602,12 +603,12 @@ static int ucb1x00_resume(struct mcp *mcp)
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct ucb1x00_dev *dev;
 
-       down(&ucb1x00_sem);
+       mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(dev, &ucb->devs, dev_node) {
                if (dev->drv->resume)
                        dev->drv->resume(dev);
        }
-       up(&ucb1x00_sem);
+       mutex_unlock(&ucb1x00_mutex);
        return 0;
 }
 
index f2c42b13945d9bc4b0d13b8d7159c7e65abc174b..9b7c37e0e5742c6731ceb4494bd2c48f04fc6b31 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/kdev_t.h>
 #include <linux/blkdev.h>
 #include <linux/devfs_fs_kernel.h>
+#include <linux/mutex.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/protocol.h>
@@ -57,33 +58,33 @@ struct mmc_blk_data {
        unsigned int    read_only;
 };
 
-static DECLARE_MUTEX(open_lock);
+static DEFINE_MUTEX(open_lock);
 
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
        struct mmc_blk_data *md;
 
-       down(&open_lock);
+       mutex_lock(&open_lock);
        md = disk->private_data;
        if (md && md->usage == 0)
                md = NULL;
        if (md)
                md->usage++;
-       up(&open_lock);
+       mutex_unlock(&open_lock);
 
        return md;
 }
 
 static void mmc_blk_put(struct mmc_blk_data *md)
 {
-       down(&open_lock);
+       mutex_lock(&open_lock);
        md->usage--;
        if (md->usage == 0) {
                put_disk(md->disk);
                mmc_cleanup_queue(&md->queue);
                kfree(md);
        }
-       up(&open_lock);
+       mutex_unlock(&open_lock);
 }
 
 static int mmc_blk_open(struct inode *inode, struct file *filp)
index ec701667abfc120cf3bcb15c9c8381513e29e3b4..a2a35fd946eecf8a2b9fc585b2687f39e2806e82 100644 (file)
@@ -136,17 +136,7 @@ static int mmc_bus_resume(struct device *dev)
        return ret;
 }
 
-static struct bus_type mmc_bus_type = {
-       .name           = "mmc",
-       .dev_attrs      = mmc_dev_attrs,
-       .match          = mmc_bus_match,
-       .uevent         = mmc_bus_uevent,
-       .suspend        = mmc_bus_suspend,
-       .resume         = mmc_bus_resume,
-};
-
-
-static int mmc_drv_probe(struct device *dev)
+static int mmc_bus_probe(struct device *dev)
 {
        struct mmc_driver *drv = to_mmc_driver(dev->driver);
        struct mmc_card *card = dev_to_mmc_card(dev);
@@ -154,7 +144,7 @@ static int mmc_drv_probe(struct device *dev)
        return drv->probe(card);
 }
 
-static int mmc_drv_remove(struct device *dev)
+static int mmc_bus_remove(struct device *dev)
 {
        struct mmc_driver *drv = to_mmc_driver(dev->driver);
        struct mmc_card *card = dev_to_mmc_card(dev);
@@ -164,6 +154,16 @@ static int mmc_drv_remove(struct device *dev)
        return 0;
 }
 
+static struct bus_type mmc_bus_type = {
+       .name           = "mmc",
+       .dev_attrs      = mmc_dev_attrs,
+       .match          = mmc_bus_match,
+       .uevent         = mmc_bus_uevent,
+       .probe          = mmc_bus_probe,
+       .remove         = mmc_bus_remove,
+       .suspend        = mmc_bus_suspend,
+       .resume         = mmc_bus_resume,
+};
 
 /**
  *     mmc_register_driver - register a media driver
@@ -172,8 +172,6 @@ static int mmc_drv_remove(struct device *dev)
 int mmc_register_driver(struct mmc_driver *drv)
 {
        drv->drv.bus = &mmc_bus_type;
-       drv->drv.probe = mmc_drv_probe;
-       drv->drv.remove = mmc_drv_remove;
        return driver_register(&drv->drv);
 }
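
Several hunks in this series (macio, bttv-sub, mcp, mmc) apply the same
driver-core conversion: per-driver probe/remove (and shutdown) methods move out
of struct device_driver and into the owning struct bus_type, so the bus core
dispatches them.  In outline (a sketch of the pattern only, with hypothetical
names, not code from this patch):

	static struct bus_type example_bus_type = {
		.name	= "example",
		.match	= example_bus_match,
		.probe	= example_bus_probe,	/* was set as drv->driver.probe */
		.remove	= example_bus_remove,	/* was set as drv->driver.remove */
	};
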
 
index 9a2aa4033c6a6e37ea60ade60a4f67c70613ccc5..5038e90ceb1255d269c4b4054830317c0ffa8db4 100644 (file)
@@ -47,6 +47,22 @@ config MTD_MS02NV
          accelerator.  Say Y here if you have a DECstation 5000/2x0 or a
          DECsystem 5900 equipped with such a module.
 
+config MTD_DATAFLASH
+       tristate "Support for AT45xxx DataFlash"
+       depends on MTD && SPI_MASTER && EXPERIMENTAL
+       help
+         This enables access to AT45xxx DataFlash chips, using SPI.
+         Sometimes DataFlash chips are packaged inside MMC-format
+         cards; at this writing, the MMC stack won't handle those.
+
+config MTD_M25P80
+       tristate "Support for M25 SPI Flash"
+       depends on MTD && SPI_MASTER && EXPERIMENTAL
+       help
+         This enables access to ST M25P80 and similar SPI flash chips,
+         used for program and data storage.  Set up your spi devices
+         with the right board-specific platform data.
+
 config MTD_SLRAM
        tristate "Uncached system RAM"
        depends on MTD
index e38db348057d578af0394b901946296c65dfe52a..7c5ed2178380119b074f738ea14d5e6b5fd1e345 100644 (file)
@@ -23,3 +23,5 @@ obj-$(CONFIG_MTD_MTDRAM)      += mtdram.o
 obj-$(CONFIG_MTD_LART)         += lart.o
 obj-$(CONFIG_MTD_BLKMTD)       += blkmtd.o
 obj-$(CONFIG_MTD_BLOCK2MTD)    += block2mtd.o
+obj-$(CONFIG_MTD_DATAFLASH)    += mtd_dataflash.o
+obj-$(CONFIG_MTD_M25P80)       += m25p80.o
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
new file mode 100644 (file)
index 0000000..d5f2408
--- /dev/null
@@ -0,0 +1,582 @@
+/*
+ * MTD SPI driver for ST M25Pxx flash chips
+ *
+ * Author: Mike Lavender, mike@steroidmicros.com
+ *
+ * Copyright (c) 2005, Intec Automation Inc.
+ *
+ * Some parts are based on lart.c by Abraham Van Der Merwe
+ *
+ * Cleaned up and generalized based on mtd_dataflash.c
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <asm/semaphore.h>
+
+
+/* NOTE: Atmel AT25F and SST 25LF series are very similar,
+ * but commands for sector erase and chip id differ...
+ */
+
+#define FLASH_PAGESIZE         256
+
+/* Flash opcodes. */
+#define        OPCODE_WREN             6       /* Write enable */
+#define        OPCODE_RDSR             5       /* Read status register */
+#define        OPCODE_READ             3       /* Read data bytes */
+#define        OPCODE_PP               2       /* Page program */
+#define        OPCODE_SE               0xd8    /* Sector erase */
+#define        OPCODE_RES              0xab    /* Read Electronic Signature */
+#define        OPCODE_RDID             0x9f    /* Read JEDEC ID */
+
+/* Status Register bits. */
+#define        SR_WIP                  1       /* Write in progress */
+#define        SR_WEL                  2       /* Write enable latch */
+#define        SR_BP0                  4       /* Block protect 0 */
+#define        SR_BP1                  8       /* Block protect 1 */
+#define        SR_BP2                  0x10    /* Block protect 2 */
+#define        SR_SRWD                 0x80    /* SR write protect */
+
+/* Define max times to check status register before we give up. */
+#define        MAX_READY_WAIT_COUNT    100000
+
+
+#ifdef CONFIG_MTD_PARTITIONS
+#define        mtd_has_partitions()    (1)
+#else
+#define        mtd_has_partitions()    (0)
+#endif
+
+/****************************************************************************/
+
+struct m25p {
+       struct spi_device       *spi;
+       struct semaphore        lock;
+       struct mtd_info         mtd;
+       unsigned                partitioned;
+       u8                      command[4];
+};
+
+static inline struct m25p *mtd_to_m25p(struct mtd_info *mtd)
+{
+       return container_of(mtd, struct m25p, mtd);
+}
+
+/****************************************************************************/
+
+/*
+ * Internal helper functions
+ */
+
+/*
+ * Read the status register.
+ * Returns the status register value, or negative if an error occurred.
+ */
+static int read_sr(struct m25p *flash)
+{
+       ssize_t retval;
+       u8 code = OPCODE_RDSR;
+       u8 val;
+
+       retval = spi_write_then_read(flash->spi, &code, 1, &val, 1);
+
+       if (retval < 0) {
+               dev_err(&flash->spi->dev, "error %d reading SR\n",
+                               (int) retval);
+               return retval;
+       }
+
+       return val;
+}
+
+
+/*
+ * Set write enable latch with Write Enable command.
+ * Returns negative if error occurred.
+ */
+static inline int write_enable(struct m25p *flash)
+{
+       u8      code = OPCODE_WREN;
+
+       return spi_write_then_read(flash->spi, &code, 1, NULL, 0);
+}
+
+
+/*
+ * Service routine to read status register until ready, or timeout occurs.
+ * Returns non-zero if error.
+ */
+static int wait_till_ready(struct m25p *flash)
+{
+       int count;
+       int sr;
+
+       /* one chip guarantees max 5 msec wait here after page writes,
+        * but potentially three seconds (!) after page erase.
+        */
+       for (count = 0; count < MAX_READY_WAIT_COUNT; count++) {
+               if ((sr = read_sr(flash)) < 0)
+                       break;
+               else if (!(sr & SR_WIP))
+                       return 0;
+
+               /* REVISIT sometimes sleeping would be best */
+       }
+
+       return 1;
+}
+
+
+/*
+ * Erase one sector of flash memory at offset ``offset'', which may be any
+ * address within the sector to be erased.
+ *
+ * Returns 0 if successful, non-zero otherwise.
+ */
+static int erase_sector(struct m25p *flash, u32 offset)
+{
+       DEBUG(MTD_DEBUG_LEVEL3, "%s: %s at 0x%08x\n", flash->spi->dev.bus_id,
+                       __FUNCTION__, offset);
+
+       /* Wait until finished previous write command. */
+       if (wait_till_ready(flash))
+               return 1;
+
+       /* Send write enable, then erase commands. */
+       write_enable(flash);
+
+       /* Set up command buffer. */
+       flash->command[0] = OPCODE_SE;
+       flash->command[1] = offset >> 16;
+       flash->command[2] = offset >> 8;
+       flash->command[3] = offset;
+
+       spi_write(flash->spi, flash->command, sizeof(flash->command));
+
+       return 0;
+}
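+
+/* For example (illustrative): erasing the sector containing offset 0x30000
+ * sends the four command bytes { 0xd8, 0x03, 0x00, 0x00 }: OPCODE_SE
+ * followed by the 24-bit address, most significant byte first.
+ */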
+
+/****************************************************************************/
+
+/*
+ * MTD implementation
+ */
+
+/*
+ * Erase an address range on the flash chip.  The address range may span
+ * one or more erase sectors.  Return an error if there is a problem erasing.
+ */
+static int m25p80_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+       struct m25p *flash = mtd_to_m25p(mtd);
+       u32 addr,len;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+                       flash->spi->dev.bus_id, __FUNCTION__, "at",
+                       (u32)instr->addr, instr->len);
+
+       /* sanity checks */
+       if (instr->addr + instr->len > flash->mtd.size)
+               return -EINVAL;
+       if ((instr->addr % mtd->erasesize) != 0
+                       || (instr->len % mtd->erasesize) != 0) {
+               return -EINVAL;
+       }
+
+       addr = instr->addr;
+       len = instr->len;
+
+       down(&flash->lock);
+
+       /* now erase those sectors */
+       while (len) {
+               if (erase_sector(flash, addr)) {
+                       instr->state = MTD_ERASE_FAILED;
+                       up(&flash->lock);
+                       return -EIO;
+               }
+
+               addr += mtd->erasesize;
+               len -= mtd->erasesize;
+       }
+
+       up(&flash->lock);
+
+       instr->state = MTD_ERASE_DONE;
+       mtd_erase_callback(instr);
+
+       return 0;
+}
+
+/*
+ * Read an address range from the flash chip.  The address range
+ * may be any size provided it is within the physical boundaries.
+ */
+static int m25p80_read(struct mtd_info *mtd, loff_t from, size_t len,
+       size_t *retlen, u_char *buf)
+{
+       struct m25p *flash = mtd_to_m25p(mtd);
+       struct spi_transfer t[2];
+       struct spi_message m;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+                       flash->spi->dev.bus_id, __FUNCTION__, "from",
+                       (u32)from, len);
+
+       /* sanity checks */
+       if (!len)
+               return 0;
+
+       if (from + len > flash->mtd.size)
+               return -EINVAL;
+
+       spi_message_init(&m);
+       memset(t, 0, (sizeof t));
+
+       t[0].tx_buf = flash->command;
+       t[0].len = sizeof(flash->command);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].rx_buf = buf;
+       t[1].len = len;
+       spi_message_add_tail(&t[1], &m);
+
+       /* Byte count starts at zero. */
+       if (retlen)
+               *retlen = 0;
+
+       down(&flash->lock);
+
+       /* Wait till previous write/erase is done. */
+       if (wait_till_ready(flash)) {
+               /* REVISIT status return?? */
+               up(&flash->lock);
+               return 1;
+       }
+
+       /* NOTE:  OPCODE_FAST_READ (if available) is faster... */
+
+       /* Set up the write data buffer. */
+       flash->command[0] = OPCODE_READ;
+       flash->command[1] = from >> 16;
+       flash->command[2] = from >> 8;
+       flash->command[3] = from;
+
+       spi_sync(flash->spi, &m);
+
+       *retlen = m.actual_length - sizeof(flash->command);
+
+       up(&flash->lock);
+
+       return 0;
+}
+
+/*
+ * Write an address range to the flash chip.  Data must be written in
+ * FLASH_PAGESIZE chunks.  The address range may be any size provided
+ * it is within the physical boundaries.
+ */
+static int m25p80_write(struct mtd_info *mtd, loff_t to, size_t len,
+       size_t *retlen, const u_char *buf)
+{
+       struct m25p *flash = mtd_to_m25p(mtd);
+       u32 page_offset, page_size;
+       struct spi_transfer t[2];
+       struct spi_message m;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: %s %s 0x%08x, len %zd\n",
+                       flash->spi->dev.bus_id, __FUNCTION__, "to",
+                       (u32)to, len);
+
+       if (retlen)
+               *retlen = 0;
+
+       /* sanity checks */
+       if (!len)
+               return 0;
+
+       if (to + len > flash->mtd.size)
+               return -EINVAL;
+
+       spi_message_init(&m);
+       memset(t, 0, (sizeof t));
+
+       t[0].tx_buf = flash->command;
+       t[0].len = sizeof(flash->command);
+       spi_message_add_tail(&t[0], &m);
+
+       t[1].tx_buf = buf;
+       spi_message_add_tail(&t[1], &m);
+
+       down(&flash->lock);
+
+       /* Wait until finished previous write command. */
+       if (wait_till_ready(flash)) {
+               up(&flash->lock);
+               return 1;
+       }
+
+       write_enable(flash);
+
+       /* Set up the opcode in the write buffer. */
+       flash->command[0] = OPCODE_PP;
+       flash->command[1] = to >> 16;
+       flash->command[2] = to >> 8;
+       flash->command[3] = to;
+
+       /* what page do we start with? */
+       page_offset = to % FLASH_PAGESIZE;
+
+       /* do all the bytes fit onto one page? */
+       if (page_offset + len <= FLASH_PAGESIZE) {
+               t[1].len = len;
+
+               spi_sync(flash->spi, &m);
+
+               *retlen = m.actual_length - sizeof(flash->command);
+       } else {
+               u32 i;
+
+               /* the size of data remaining on the first page */
+               page_size = FLASH_PAGESIZE - page_offset;
+
+               t[1].len = page_size;
+               spi_sync(flash->spi, &m);
+
+               *retlen = m.actual_length - sizeof(flash->command);
+
+               /* write everything in PAGESIZE chunks */
+               for (i = page_size; i < len; i += page_size) {
+                       page_size = len - i;
+                       if (page_size > FLASH_PAGESIZE)
+                               page_size = FLASH_PAGESIZE;
+
+                       /* write the next page to flash */
+                       flash->command[1] = (to + i) >> 16;
+                       flash->command[2] = (to + i) >> 8;
+                       flash->command[3] = (to + i);
+
+                       t[1].tx_buf = buf + i;
+                       t[1].len = page_size;
+
+                       wait_till_ready(flash);
+
+                       write_enable(flash);
+
+                       spi_sync(flash->spi, &m);
+
+                       if (retlen)
+                               *retlen += m.actual_length
+                                       - sizeof(flash->command);
+               }
+       }
+
+       up(&flash->lock);
+
+       return 0;
+}
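+
+/* Page-split example (illustrative): writing len = 64 bytes to to = 0x01f0
+ * with FLASH_PAGESIZE = 256 gives page_offset = 0xf0, so the first program
+ * command covers 16 bytes (up to the page boundary at 0x0200) and the loop
+ * then issues one more 48-byte page program starting at 0x0200.
+ */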
+
+
+/****************************************************************************/
+
+/*
+ * SPI device driver setup and teardown
+ */
+
+struct flash_info {
+       char            *name;
+       u8              id;
+       u16             jedec_id;
+       unsigned        sector_size;
+       unsigned        n_sectors;
+};
+
+static struct flash_info __devinitdata m25p_data [] = {
+       /* REVISIT: fill in JEDEC ids, for parts that have them */
+       { "m25p05", 0x05, 0x0000, 32 * 1024, 2 },
+       { "m25p10", 0x10, 0x0000, 32 * 1024, 4 },
+       { "m25p20", 0x11, 0x0000, 64 * 1024, 4 },
+       { "m25p40", 0x12, 0x0000, 64 * 1024, 8 },
+       { "m25p80", 0x13, 0x0000, 64 * 1024, 16 },
+       { "m25p16", 0x14, 0x0000, 64 * 1024, 32 },
+       { "m25p32", 0x15, 0x0000, 64 * 1024, 64 },
+       { "m25p64", 0x16, 0x2017, 64 * 1024, 128 },
+};
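+
+/* For example, the "m25p80" entry above describes 16 sectors of 64 KiB,
+ * i.e. a 1 MiB (8 Mbit) part; mtd.size is computed from these two fields
+ * in m25p_probe() below.
+ */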
+
+/*
+ * board specific setup should have ensured the SPI clock used here
+ * matches what the READ command supports, at least until this driver
+ * understands FAST_READ (for clocks over 25 MHz).
+ */
+static int __devinit m25p_probe(struct spi_device *spi)
+{
+       struct flash_platform_data      *data;
+       struct m25p                     *flash;
+       struct flash_info               *info;
+       unsigned                        i;
+
+       /* Platform data helps sort out which chip type we have, as
+        * well as how this board partitions it.
+        */
+       data = spi->dev.platform_data;
+       if (!data || !data->type) {
+               /* FIXME some chips can identify themselves with RES
+                * or JEDEC get-id commands.  Try them ...
+                */
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: no chip id\n",
+                               spi->dev.bus_id);
+               return -ENODEV;
+       }
+
+       for (i = 0, info = m25p_data; i < ARRAY_SIZE(m25p_data); i++, info++) {
+               if (strcmp(data->type, info->name) == 0)
+                       break;
+       }
+       if (i == ARRAY_SIZE(m25p_data)) {
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: unrecognized id %s\n",
+                               spi->dev.bus_id, data->type);
+               return -ENODEV;
+       }
+
+       flash = kzalloc(sizeof *flash, SLAB_KERNEL);
+       if (!flash)
+               return -ENOMEM;
+
+       flash->spi = spi;
+       init_MUTEX(&flash->lock);
+       dev_set_drvdata(&spi->dev, flash);
+
+       if (data->name)
+               flash->mtd.name = data->name;
+       else
+               flash->mtd.name = spi->dev.bus_id;
+
+       flash->mtd.type = MTD_NORFLASH;
+       flash->mtd.flags = MTD_CAP_NORFLASH;
+       flash->mtd.size = info->sector_size * info->n_sectors;
+       flash->mtd.erasesize = info->sector_size;
+       flash->mtd.erase = m25p80_erase;
+       flash->mtd.read = m25p80_read;
+       flash->mtd.write = m25p80_write;
+
+       dev_info(&spi->dev, "%s (%d Kbytes)\n", info->name,
+                       flash->mtd.size / 1024);
+
+       DEBUG(MTD_DEBUG_LEVEL2,
+               "mtd .name = %s, .size = 0x%.8x (%uM) "
+                       ".erasesize = 0x%.8x (%uK) .numeraseregions = %d\n",
+               flash->mtd.name,
+               flash->mtd.size, flash->mtd.size / (1024*1024),
+               flash->mtd.erasesize, flash->mtd.erasesize / 1024,
+               flash->mtd.numeraseregions);
+
+       if (flash->mtd.numeraseregions)
+               for (i = 0; i < flash->mtd.numeraseregions; i++)
+                       DEBUG(MTD_DEBUG_LEVEL2,
+                               "mtd.eraseregions[%d] = { .offset = 0x%.8x, "
+                               ".erasesize = 0x%.8x (%uK), "
+                               ".numblocks = %d }\n",
+                               i, flash->mtd.eraseregions[i].offset,
+                               flash->mtd.eraseregions[i].erasesize,
+                               flash->mtd.eraseregions[i].erasesize / 1024,
+                               flash->mtd.eraseregions[i].numblocks);
+
+
+       /* partitions should match sector boundaries; and it may be good to
+        * use readonly partitions for writeprotected sectors (BP2..BP0).
+        */
+       if (mtd_has_partitions()) {
+               struct mtd_partition    *parts = NULL;
+               int                     nr_parts = 0;
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+               static const char *part_probes[] = { "cmdlinepart", NULL, };
+
+               nr_parts = parse_mtd_partitions(&flash->mtd,
+                               part_probes, &parts, 0);
+#endif
+
+               if (nr_parts <= 0 && data && data->parts) {
+                       parts = data->parts;
+                       nr_parts = data->nr_parts;
+               }
+
+               if (nr_parts > 0) {
+                       for (i = 0; i < data->nr_parts; i++) {
+                               DEBUG(MTD_DEBUG_LEVEL2, "partitions[%d] = "
+                                       "{.name = %s, .offset = 0x%.8x, "
+                                               ".size = 0x%.8x (%uK) }\n",
+                                       i, data->parts[i].name,
+                                       data->parts[i].offset,
+                                       data->parts[i].size,
+                                       data->parts[i].size / 1024);
+                       }
+                       flash->partitioned = 1;
+                       return add_mtd_partitions(&flash->mtd, parts, nr_parts);
+               }
+       } else if (data->nr_parts)
+               dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
+                               data->nr_parts, data->name);
+
+       return add_mtd_device(&flash->mtd) == 1 ? -ENODEV : 0;
+}
+
+
+static int __devexit m25p_remove(struct spi_device *spi)
+{
+       struct m25p     *flash = dev_get_drvdata(&spi->dev);
+       int             status;
+
+       /* Clean up MTD stuff. */
+       if (mtd_has_partitions() && flash->partitioned)
+               status = del_mtd_partitions(&flash->mtd);
+       else
+               status = del_mtd_device(&flash->mtd);
+       if (status == 0)
+               kfree(flash);
+       return 0;
+}
+
+
+static struct spi_driver m25p80_driver = {
+       .driver = {
+               .name   = "m25p80",
+               .bus    = &spi_bus_type,
+               .owner  = THIS_MODULE,
+       },
+       .probe  = m25p_probe,
+       .remove = __devexit_p(m25p_remove),
+};
+
+
+static int m25p80_init(void)
+{
+       return spi_register_driver(&m25p80_driver);
+}
+
+
+static void m25p80_exit(void)
+{
+       spi_unregister_driver(&m25p80_driver);
+}
+
+
+module_init(m25p80_init);
+module_exit(m25p80_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Mike Lavender");
+MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips");
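
As with the touchscreen driver earlier in this series, the chip type and any
default partitions reach this driver through platform data.  A minimal
board-side sketch follows; the names, sizes and partition layout are
hypothetical:

	static struct mtd_partition board_flash_parts[] = {
		{
			.name	= "bootloader",
			.offset	= 0,
			.size	= 256 * 1024,
		},
		{
			.name	= "filesystem",
			.offset	= MTDPART_OFS_APPEND,
			.size	= MTDPART_SIZ_FULL,
		},
	};

	static const struct flash_platform_data board_flash_data = {
		.name		= "board-flash",
		.type		= "m25p80",	/* must match an m25p_data[] entry */
		.parts		= board_flash_parts,
		.nr_parts	= ARRAY_SIZE(board_flash_parts),
	};
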
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
new file mode 100644 (file)
index 0000000..155737e
--- /dev/null
@@ -0,0 +1,629 @@
+/*
+ * Atmel AT45xxx DataFlash MTD driver for lightweight SPI framework
+ *
+ * Largely derived from at91_dataflash.c:
+ *  Copyright (C) 2003-2005 SAN People (Pty) Ltd
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+*/
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+
+/*
+ * DataFlash is a kind of SPI flash.  Most AT45 chips have two buffers in
+ * each chip, which may be used for double buffered I/O; but this driver
+ * doesn't (yet) use these for any kind of i/o overlap or prefetching.
+ *
+ * Sometimes DataFlash is packaged in MMC-format cards, although the
+ * MMC stack can't use SPI (yet), or distinguish between MMC and DataFlash
+ * protocols during enumeration.
+ */
+
+#define CONFIG_DATAFLASH_WRITE_VERIFY
+
+/* reads can bypass the buffers */
+#define OP_READ_CONTINUOUS     0xE8
+#define OP_READ_PAGE           0xD2
+
+/* group B requests can run even while status reports "busy" */
+#define OP_READ_STATUS         0xD7    /* group B */
+
+/* move data between host and buffer */
+#define OP_READ_BUFFER1                0xD4    /* group B */
+#define OP_READ_BUFFER2                0xD6    /* group B */
+#define OP_WRITE_BUFFER1       0x84    /* group B */
+#define OP_WRITE_BUFFER2       0x87    /* group B */
+
+/* erasing flash */
+#define OP_ERASE_PAGE          0x81
+#define OP_ERASE_BLOCK         0x50
+
+/* move data between buffer and flash */
+#define OP_TRANSFER_BUF1       0x53
+#define OP_TRANSFER_BUF2       0x55
+#define OP_MREAD_BUFFER1       0xD4
+#define OP_MREAD_BUFFER2       0xD6
+#define OP_MWERASE_BUFFER1     0x83
+#define OP_MWERASE_BUFFER2     0x86
+#define OP_MWRITE_BUFFER1      0x88    /* sector must be pre-erased */
+#define OP_MWRITE_BUFFER2      0x89    /* sector must be pre-erased */
+
+/* write to buffer, then write-erase to flash */
+#define OP_PROGRAM_VIA_BUF1    0x82
+#define OP_PROGRAM_VIA_BUF2    0x85
+
+/* compare buffer to flash */
+#define OP_COMPARE_BUF1                0x60
+#define OP_COMPARE_BUF2                0x61
+
+/* read flash to buffer, then write-erase to flash */
+#define OP_REWRITE_VIA_BUF1    0x58
+#define OP_REWRITE_VIA_BUF2    0x59
+
+/* newer chips report JEDEC manufacturer and device IDs; chip
+ * serial number and OTP bits; and per-sector writeprotect.
+ */
+#define OP_READ_ID             0x9F
+#define OP_READ_SECURITY       0x77
+#define OP_WRITE_SECURITY      0x9A    /* OTP bits */
+
+
+struct dataflash {
+       u8                      command[4];
+       char                    name[24];
+
+       unsigned                partitioned:1;
+
+       unsigned short          page_offset;    /* offset in flash address */
+       unsigned int            page_size;      /* of bytes per page */
+
+       struct semaphore        lock;
+       struct spi_device       *spi;
+
+       struct mtd_info         mtd;
+};
+
+#ifdef CONFIG_MTD_PARTITIONS
+#define        mtd_has_partitions()    (1)
+#else
+#define        mtd_has_partitions()    (0)
+#endif
+
+/* ......................................................................... */
+
+/*
+ * Return the status of the DataFlash device.
+ */
+static inline int dataflash_status(struct spi_device *spi)
+{
+       /* NOTE:  at45db321c over 25 MHz wants to write
+        * a dummy byte after the opcode...
+        */
+       return spi_w8r8(spi, OP_READ_STATUS);
+}
+
+/*
+ * Poll the DataFlash device until it is READY.
+ * This usually takes 5-20 msec or so; more for sector erase.
+ */
+static int dataflash_waitready(struct spi_device *spi)
+{
+       int     status;
+
+       for (;;) {
+               status = dataflash_status(spi);
+               if (status < 0) {
+                       DEBUG(MTD_DEBUG_LEVEL1, "%s: status %d?\n",
+                                       spi->dev.bus_id, status);
+                       status = 0;
+               }
+
+               if (status & (1 << 7))  /* RDY/nBSY */
+                       return status;
+
+               msleep(3);
+       }
+}
+
+/* ......................................................................... */
+
+/*
+ * Erase pages of flash.
+ */
+static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+       struct dataflash        *priv = (struct dataflash *)mtd->priv;
+       struct spi_device       *spi = priv->spi;
+       struct spi_transfer     x = { .tx_dma = 0, };
+       struct spi_message      msg;
+       unsigned                blocksize = priv->page_size << 3;
+       u8                      *command;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: erase addr=0x%x len 0x%x\n",
+                       spi->dev.bus_id,
+                       instr->addr, instr->len);
+
+       /* Sanity checks */
+       if ((instr->addr + instr->len) > mtd->size
+                       || (instr->len % priv->page_size) != 0
+                       || (instr->addr % priv->page_size) != 0)
+               return -EINVAL;
+
+       spi_message_init(&msg);
+
+       x.tx_buf = command = priv->command;
+       x.len = 4;
+       spi_message_add_tail(&x, &msg);
+
+       down(&priv->lock);
+       while (instr->len > 0) {
+               unsigned int    pageaddr;
+               int             status;
+               int             do_block;
+
+               /* Calculate flash page address; use block erase (for speed) if
+                * we're at a block boundary and need to erase the whole block.
+                */
+               pageaddr = instr->addr / priv->page_size;
+               do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
+               pageaddr = pageaddr << priv->page_offset;
+
+               command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
+               command[1] = (u8)(pageaddr >> 16);
+               command[2] = (u8)(pageaddr >> 8);
+               command[3] = 0;
+
+               DEBUG(MTD_DEBUG_LEVEL3, "ERASE %s: (%x) %x %x %x [%i]\n",
+                       do_block ? "block" : "page",
+                       command[0], command[1], command[2], command[3],
+                       pageaddr);
+
+               status = spi_sync(spi, &msg);
+               (void) dataflash_waitready(spi);
+
+               if (status < 0) {
+                       printk(KERN_ERR "%s: erase %x, err %d\n",
+                               spi->dev.bus_id, pageaddr, status);
+                       /* REVISIT:  can retry instr->retries times; or
+                        * giveup and instr->fail_addr = instr->addr;
+                        */
+                       continue;
+               }
+
+               if (do_block) {
+                       instr->addr += blocksize;
+                       instr->len -= blocksize;
+               } else {
+                       instr->addr += priv->page_size;
+                       instr->len -= priv->page_size;
+               }
+       }
+       up(&priv->lock);
+
+       /* Inform MTD subsystem that erase is complete */
+       instr->state = MTD_ERASE_DONE;
+       mtd_erase_callback(instr);
+
+       return 0;
+}
+
+/*
+ * Read from the DataFlash device.
+ *   from   : Start offset in flash device
+ *   len    : Amount to read
+ *   retlen : Amount of data actually read
+ *   buf    : Buffer to receive the data
+ */
+static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len,
+                              size_t *retlen, u_char *buf)
+{
+       struct dataflash        *priv = (struct dataflash *)mtd->priv;
+       struct spi_transfer     x[2] = { { .tx_dma = 0, }, };
+       struct spi_message      msg;
+       unsigned int            addr;
+       u8                      *command;
+       int                     status;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: read 0x%x..0x%x\n",
+               priv->spi->dev.bus_id, (unsigned)from, (unsigned)(from + len));
+
+       *retlen = 0;
+
+       /* Sanity checks */
+       if (!len)
+               return 0;
+       if (from + len > mtd->size)
+               return -EINVAL;
+
+       /* Calculate flash page/byte address */
+       addr = (((unsigned)from / priv->page_size) << priv->page_offset)
+               + ((unsigned)from % priv->page_size);
+
+       command = priv->command;
+
+       spi_message_init(&msg);
+
+       x[0].tx_buf = command;
+       x[0].len = 8;
+       spi_message_add_tail(&x[0], &msg);
+
+       x[1].rx_buf = buf;
+       x[1].len = len;
+       spi_message_add_tail(&x[1], &msg);
+
+       down(&priv->lock);
+
+       /* Continuous read, max clock = f(car) which may be less than
+        * the peak rate available.  Some chips support commands with
+        * fewer "don't care" bytes.  Both buffers stay unchanged.
+        */
+       command[0] = OP_READ_CONTINUOUS;
+       command[1] = (u8)(addr >> 16);
+       command[2] = (u8)(addr >> 8);
+       command[3] = (u8)(addr >> 0);
+       /* plus 4 "don't care" bytes */
+
+       DEBUG(MTD_DEBUG_LEVEL3, "READ: (%x) %x %x %x\n",
+               command[0], command[1], command[2], command[3]);
+
+       status = spi_sync(priv->spi, &msg);
+       up(&priv->lock);
+
+       if (status >= 0) {
+               *retlen = msg.actual_length - 8;
+               status = 0;
+       } else
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: read %x..%x --> %d\n",
+                       priv->spi->dev.bus_id,
+                       (unsigned)from, (unsigned)(from + len),
+                       status);
+       return status;
+}
+
+/*
+ * Write to the DataFlash device.
+ *   to     : Start offset in flash device
+ *   len    : Amount to write
+ *   retlen : Amount of data actually written
+ *   buf    : Buffer containing the data
+ */
+static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len,
+                               size_t * retlen, const u_char * buf)
+{
+       struct dataflash        *priv = (struct dataflash *)mtd->priv;
+       struct spi_device       *spi = priv->spi;
+       struct spi_transfer     x[2] = { { .tx_dma = 0, }, };
+       struct spi_message      msg;
+       unsigned int            pageaddr, addr, offset, writelen;
+       size_t                  remaining = len;
+       u_char                  *writebuf = (u_char *) buf;
+       int                     status = -EINVAL;
+       u8                      *command;
+
+       DEBUG(MTD_DEBUG_LEVEL2, "%s: write 0x%x..0x%x\n",
+               spi->dev.bus_id, (unsigned)to, (unsigned)(to + len));
+
+       *retlen = 0;
+
+       /* Sanity checks */
+       if (!len)
+               return 0;
+       if ((to + len) > mtd->size)
+               return -EINVAL;
+
+       spi_message_init(&msg);
+
+       x[0].tx_buf = command = priv->command;
+       x[0].len = 4;
+       spi_message_add_tail(&x[0], &msg);
+
+       pageaddr = ((unsigned)to / priv->page_size);
+       offset = ((unsigned)to % priv->page_size);
+       if (offset + len > priv->page_size)
+               writelen = priv->page_size - offset;
+       else
+               writelen = len;
+
+       down(&priv->lock);
+       while (remaining > 0) {
+               DEBUG(MTD_DEBUG_LEVEL3, "write @ %i:%i len=%i\n",
+                       pageaddr, offset, writelen);
+
+               /* REVISIT:
+                * (a) each page in a sector must be rewritten at least
+                *     once every 10K sibling erase/program operations.
+                * (b) for pages that are already erased, we could
+                *     use WRITE+MWRITE not PROGRAM for ~30% speedup.
+                * (c) WRITE to buffer could be done while waiting for
+                *     a previous MWRITE/MWERASE to complete ...
+                * (d) error handling here seems to be mostly missing.
+                *
+                * Two persistent bits per page, plus a per-sector counter,
+                * could support (a) and (b) ... we might consider using
+                * the second half of sector zero, which is just one block,
+                * to track that state.  (On AT91, that sector should also
+                * support boot-from-DataFlash.)
+                */
+
+               addr = pageaddr << priv->page_offset;
+
+               /* (1) Maybe transfer partial page to Buffer1 */
+               if (writelen != priv->page_size) {
+                       command[0] = OP_TRANSFER_BUF1;
+                       command[1] = (addr & 0x00FF0000) >> 16;
+                       command[2] = (addr & 0x0000FF00) >> 8;
+                       command[3] = 0;
+
+                       DEBUG(MTD_DEBUG_LEVEL3, "TRANSFER: (%x) %x %x %x\n",
+                               command[0], command[1], command[2], command[3]);
+
+                       status = spi_sync(spi, &msg);
+                       if (status < 0)
+                               DEBUG(MTD_DEBUG_LEVEL1, "%s: xfer %u -> %d \n",
+                                       spi->dev.bus_id, addr, status);
+
+                       (void) dataflash_waitready(priv->spi);
+               }
+
+               /* (2) Program full page via Buffer1 */
+               addr += offset;
+               command[0] = OP_PROGRAM_VIA_BUF1;
+               command[1] = (addr & 0x00FF0000) >> 16;
+               command[2] = (addr & 0x0000FF00) >> 8;
+               command[3] = (addr & 0x000000FF);
+
+               DEBUG(MTD_DEBUG_LEVEL3, "PROGRAM: (%x) %x %x %x\n",
+                       command[0], command[1], command[2], command[3]);
+
+               x[1].tx_buf = writebuf;
+               x[1].len = writelen;
+               spi_message_add_tail(x + 1, &msg);
+               status = spi_sync(spi, &msg);
+               spi_transfer_del(x + 1);
+               if (status < 0)
+                       DEBUG(MTD_DEBUG_LEVEL1, "%s: pgm %u/%u -> %d \n",
+                               spi->dev.bus_id, addr, writelen, status);
+
+               (void) dataflash_waitready(priv->spi);
+
+
+#ifdef CONFIG_DATAFLASH_WRITE_VERIFY
+
+               /* (3) Compare to Buffer1 */
+               addr = pageaddr << priv->page_offset;
+               command[0] = OP_COMPARE_BUF1;
+               command[1] = (addr & 0x00FF0000) >> 16;
+               command[2] = (addr & 0x0000FF00) >> 8;
+               command[3] = 0;
+
+               DEBUG(MTD_DEBUG_LEVEL3, "COMPARE: (%x) %x %x %x\n",
+                       command[0], command[1], command[2], command[3]);
+
+               status = spi_sync(spi, &msg);
+               if (status < 0)
+                       DEBUG(MTD_DEBUG_LEVEL1, "%s: compare %u -> %d \n",
+                               spi->dev.bus_id, addr, status);
+
+               status = dataflash_waitready(priv->spi);
+
+               /* Check result of the compare operation */
+               if (status & (1 << 6)) {
+                       printk(KERN_ERR "%s: compare page %u, err %d\n",
+                               spi->dev.bus_id, pageaddr, status);
+                       remaining = 0;
+                       status = -EIO;
+                       break;
+               } else
+                       status = 0;
+
+#endif /* CONFIG_DATAFLASH_WRITE_VERIFY */
+
+               remaining = remaining - writelen;
+               pageaddr++;
+               offset = 0;
+               writebuf += writelen;
+               *retlen += writelen;
+
+               if (remaining > priv->page_size)
+                       writelen = priv->page_size;
+               else
+                       writelen = remaining;
+       }
+       up(&priv->lock);
+
+       return status;
+}
+
+/* ......................................................................... */
+
+/*
+ * Register DataFlash device with MTD subsystem.
+ */
+static int __devinit
+add_dataflash(struct spi_device *spi, char *name,
+               int nr_pages, int pagesize, int pageoffset)
+{
+       struct dataflash                *priv;
+       struct mtd_info                 *device;
+       struct flash_platform_data      *pdata = spi->dev.platform_data;
+
+       priv = (struct dataflash *) kzalloc(sizeof *priv, GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       init_MUTEX(&priv->lock);
+       priv->spi = spi;
+       priv->page_size = pagesize;
+       priv->page_offset = pageoffset;
+
+       /* name must be usable with cmdlinepart */
+       sprintf(priv->name, "spi%d.%d-%s",
+                       spi->master->bus_num, spi->chip_select,
+                       name);
+
+       device = &priv->mtd;
+       device->name = (pdata && pdata->name) ? pdata->name : priv->name;
+       device->size = nr_pages * pagesize;
+       device->erasesize = pagesize;
+       device->owner = THIS_MODULE;
+       device->type = MTD_DATAFLASH;
+       device->flags = MTD_CAP_NORFLASH;
+       device->erase = dataflash_erase;
+       device->read = dataflash_read;
+       device->write = dataflash_write;
+       device->priv = priv;
+
+       dev_info(&spi->dev, "%s (%d KBytes)\n", name, device->size/1024);
+       dev_set_drvdata(&spi->dev, priv);
+
+       if (mtd_has_partitions()) {
+               struct mtd_partition    *parts;
+               int                     nr_parts = 0;
+
+#ifdef CONFIG_MTD_CMDLINE_PARTS
+               static const char *part_probes[] = { "cmdlinepart", NULL, };
+
+               nr_parts = parse_mtd_partitions(device, part_probes, &parts, 0);
+#endif
+
+               if (nr_parts <= 0 && pdata && pdata->parts) {
+                       parts = pdata->parts;
+                       nr_parts = pdata->nr_parts;
+               }
+
+               if (nr_parts > 0) {
+                       priv->partitioned = 1;
+                       return add_mtd_partitions(device, parts, nr_parts);
+               }
+       } else if (pdata && pdata->nr_parts)
+               dev_warn(&spi->dev, "ignoring %d default partitions on %s\n",
+                               pdata->nr_parts, device->name);
+
+       return add_mtd_device(device) == 1 ? -ENODEV : 0;
+}
+
+/*
+ * Detect and initialize DataFlash device:
+ *
+ *   Device      Density         ID code          #Pages PageSize  Offset
+ *   AT45DB011B  1Mbit   (128K)  xx0011xx (0x0c)    512    264      9
+ *   AT45DB021B  2Mbit   (256K)  xx0101xx (0x14)   1025    264      9
+ *   AT45DB041B  4Mbit   (512K)  xx0111xx (0x1c)   2048    264      9
+ *   AT45DB081B  8Mbit   (1M)    xx1001xx (0x24)   4096    264      9
+ *   AT45DB0161B 16Mbit  (2M)    xx1011xx (0x2c)   4096    528     10
+ *   AT45DB0321B 32Mbit  (4M)    xx1101xx (0x34)   8192    528     10
+ *   AT45DB0642  64Mbit  (8M)    xx111xxx (0x3c)   8192   1056     11
+ *   AT45DB1282  128Mbit (16M)   xx0100xx (0x10)  16384   1056     11
+ */
+static int __devinit dataflash_probe(struct spi_device *spi)
+{
+       int status;
+
+       status = dataflash_status(spi);
+       if (status <= 0 || status == 0xff) {
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: status error %d\n",
+                               spi->dev.bus_id, status);
+               if (status == 0xff)
+                       status = -ENODEV;
+               return status;
+       }
+
+       /* if there's a device there, assume it's dataflash.
+        * board setup should have set spi->max_speed_hz to
+        * match f(car) for continuous reads, mode 0 or 3.
+        */
+       switch (status & 0x3c) {
+       case 0x0c:      /* 0 0 1 1 x x */
+               status = add_dataflash(spi, "AT45DB011B", 512, 264, 9);
+               break;
+       case 0x14:      /* 0 1 0 1 x x */
+               status = add_dataflash(spi, "AT45DB021B", 1025, 264, 9);
+               break;
+       case 0x1c:      /* 0 1 1 1 x x */
+               status = add_dataflash(spi, "AT45DB041x", 2048, 264, 9);
+               break;
+       case 0x24:      /* 1 0 0 1 x x */
+               status = add_dataflash(spi, "AT45DB081B", 4096, 264, 9);
+               break;
+       case 0x2c:      /* 1 0 1 1 x x */
+               status = add_dataflash(spi, "AT45DB161x", 4096, 528, 10);
+               break;
+       case 0x34:      /* 1 1 0 1 x x */
+               status = add_dataflash(spi, "AT45DB321x", 8192, 528, 10);
+               break;
+       case 0x38:      /* 1 1 1 x x x */
+       case 0x3c:
+               status = add_dataflash(spi, "AT45DB642x", 8192, 1056, 11);
+               break;
+       /* obsolete AT45DB1282 not (yet?) supported */
+       default:
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: unsupported device (%x)\n",
+                               spi->dev.bus_id, status & 0x3c);
+               status = -ENODEV;
+       }
+
+       if (status < 0)
+               DEBUG(MTD_DEBUG_LEVEL1, "%s: add_dataflash --> %d\n",
+                               spi->dev.bus_id, status);
+
+       return status;
+}
+
+static int __devexit dataflash_remove(struct spi_device *spi)
+{
+       struct dataflash        *flash = dev_get_drvdata(&spi->dev);
+       int                     status;
+
+       DEBUG(MTD_DEBUG_LEVEL1, "%s: remove\n", spi->dev.bus_id);
+
+       if (mtd_has_partitions() && flash->partitioned)
+               status = del_mtd_partitions(&flash->mtd);
+       else
+               status = del_mtd_device(&flash->mtd);
+       if (status == 0)
+               kfree(flash);
+       return status;
+}
+
+static struct spi_driver dataflash_driver = {
+       .driver = {
+               .name           = "mtd_dataflash",
+               .bus            = &spi_bus_type,
+               .owner          = THIS_MODULE,
+       },
+
+       .probe          = dataflash_probe,
+       .remove         = __devexit_p(dataflash_remove),
+
+       /* FIXME:  investigate suspend and resume... */
+};
+
+static int __init dataflash_init(void)
+{
+       return spi_register_driver(&dataflash_driver);
+}
+module_init(dataflash_init);
+
+static void __exit dataflash_exit(void)
+{
+       spi_unregister_driver(&dataflash_driver);
+}
+module_exit(dataflash_exit);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Victor, David Brownell");
+MODULE_DESCRIPTION("MTD DataFlash driver");
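
The driver above binds by name and takes an optional struct flash_platform_data, but no board
wiring is part of this patch.  Below is a minimal sketch of how a board file might declare one
of these chips for it; the bus number, chip select, clock rate, partition names and sizes are
invented for illustration only.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/mtd/partitions.h>

/* example layout only: a small boot area plus "the rest" */
static struct mtd_partition board_df_parts[] = {
	{
		.name	= "bootstrap",
		.offset	= 0,
		.size	= 64 * 1056,		/* assumes a 1056-byte/page chip */
	}, {
		.name	= "filesystem",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct flash_platform_data board_df_data = {
	.name		= "board-dataflash",
	.parts		= board_df_parts,
	.nr_parts	= ARRAY_SIZE(board_df_parts),
};

static struct spi_board_info board_spi_devices[] __initdata = {
	{
		.modalias	= "mtd_dataflash",	/* matches dataflash_driver.driver.name */
		.platform_data	= &board_df_data,
		.max_speed_hz	= 15 * 1000 * 1000,	/* f(car) for continuous reads */
		.bus_num	= 0,
		.chip_select	= 3,
	},
};

static int __init board_declare_dataflash(void)
{
	/* normally called from the board's init code, before the SPI
	 * master controller for bus 0 registers itself */
	return spi_register_board_info(board_spi_devices,
			ARRAY_SIZE(board_spi_devices));
}

With a declaration like this in place, the SPI core instantiates the device when the matching
master registers, dataflash_probe() above identifies the chip from its status bits, and the
partitions (or a cmdlinepart override) are handed to add_mtd_partitions().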
index 854ddfb90da19139bfcdbbe47810117772c4f8ef..f2a63186ae052fb0efdece2cd03d41c3f5990c11 100644 (file)
@@ -169,9 +169,9 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
                index = next_index;
        }
 
-       _unlock_tx_hashtbl(bond);
-
        tlb_init_slave(slave);
+
+       _unlock_tx_hashtbl(bond);
 }
 
 /* Must be called before starting the monitor timer */
index f20bb85c1ea57f8ccf4f07ced53236cbcd1aeb5a..3dd78d048c3ee85f38c3b5063d543a0673c59fb5 100644 (file)
@@ -22,8 +22,8 @@
 #include "bond_3ad.h"
 #include "bond_alb.h"
 
-#define DRV_VERSION    "3.0.0"
-#define DRV_RELDATE    "November 8, 2005"
+#define DRV_VERSION    "3.0.1"
+#define DRV_RELDATE    "January 9, 2006"
 #define DRV_NAME       "bonding"
 #define DRV_DESCRIPTION        "Ethernet Channel Bonding Driver"
 
index 22cd045567075d33f26356cac785866212362506..23de22631c644d8d40cb17b40295494666fe181d 100644 (file)
  *     TODO:
  *     o several entry points race with dev->close
  *     o check for tx-no-resources/stop Q races with tx clean/wake Q
+ *
+ *     FIXES:
+ * 2005/12/02 - Michael O'Donnell <Michael.ODonnell at stratus dot com>
+ *     - Stratus87247: protect MDI control register manipulations
  */
 
 #include <linux/config.h>
@@ -578,6 +582,7 @@ struct nic {
        u16 leds;
        u16 eeprom_wc;
        u16 eeprom[256];
+       spinlock_t mdio_lock;
 };
 
 static inline void e100_write_flush(struct nic *nic)
@@ -876,15 +881,35 @@ static u16 mdio_ctrl(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
 {
        u32 data_out = 0;
        unsigned int i;
+       unsigned long flags;
 
+
+       /*
+        * Stratus87247: we shouldn't be writing the MDI control
+        * register until the Ready bit shows True.  Also, since
+        * manipulation of the MDI control registers is a multi-step
+        * procedure it should be done under lock.
+        */
+       spin_lock_irqsave(&nic->mdio_lock, flags);
+       for (i = 100; i; --i) {
+               if (readl(&nic->csr->mdi_ctrl) & mdi_ready)
+                       break;
+               udelay(20);
+       }
+       if (unlikely(!i)) {
+               printk("e100.mdio_ctrl(%s) won't go Ready\n",
+                       nic->netdev->name );
+               spin_unlock_irqrestore(&nic->mdio_lock, flags);
+               return 0;               /* No way to indicate timeout error */
+       }
        writel((reg << 16) | (addr << 21) | dir | data, &nic->csr->mdi_ctrl);
 
-       for(i = 0; i < 100; i++) {
+       for (i = 0; i < 100; i++) {
                udelay(20);
-               if((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
+               if ((data_out = readl(&nic->csr->mdi_ctrl)) & mdi_ready)
                        break;
        }
-
+       spin_unlock_irqrestore(&nic->mdio_lock, flags);
        DPRINTK(HW, DEBUG,
                "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
                dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out);
@@ -2562,6 +2587,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
        /* locks must be initialized before calling hw_reset */
        spin_lock_init(&nic->cb_lock);
        spin_lock_init(&nic->cmd_lock);
+       spin_lock_init(&nic->mdio_lock);
 
        /* Reset the device before pci_set_master() in case device is in some
         * funky state and has an interrupt pending - hint: we don't have the
index 146f9513aea57f89e04cea2b5f201730590dfc1d..0c18dbd67d3b0b95074c6e2b192d65339eaa478f 100644 (file)
@@ -84,6 +84,7 @@
 #include <linux/ip.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
+#include <linux/in.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -398,12 +399,15 @@ static int init_phy(struct net_device *dev)
                priv->einfo->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
                SUPPORTED_1000baseT_Full : 0;
        struct phy_device *phydev;
+       char phy_id[BUS_ID_SIZE];
 
        priv->oldlink = 0;
        priv->oldspeed = 0;
        priv->oldduplex = -1;
 
-       phydev = phy_connect(dev, priv->einfo->bus_id, &adjust_link, 0);
+       snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, priv->einfo->bus_id, priv->einfo->phy_id);
+
+       phydev = phy_connect(dev, phy_id, &adjust_link, 0);
 
        if (IS_ERR(phydev)) {
                printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
index 04a462c2a5b7e8bd47bdb0299841d63840816467..74e52fcbf8064d38186bab71450c57823345cc1c 100644 (file)
@@ -128,6 +128,7 @@ int gfar_mdio_probe(struct device *dev)
        struct gianfar_mdio_data *pdata;
        struct gfar_mii *regs;
        struct mii_bus *new_bus;
+       struct resource *r;
        int err = 0;
 
        if (NULL == dev)
@@ -151,8 +152,10 @@ int gfar_mdio_probe(struct device *dev)
                return -ENODEV;
        }
 
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
        /* Set the PHY base address */
-       regs = (struct gfar_mii *) ioremap(pdata->paddr, 
+       regs = (struct gfar_mii *) ioremap(r->start,
                        sizeof (struct gfar_mii));
 
        if (NULL == regs) {
index 08703d6f934c7926e8ed1c53a9c45b3ec1a5dd47..d8410634bcafc078ad4da1f6979fe1577a842b81 100644 (file)
@@ -150,7 +150,7 @@ static void __init hplance_init(struct net_device *dev, struct dio_dev *d)
         lp->lance.name = (char*)d->name;                /* discards const, shut up gcc */
         lp->lance.base = va;
         lp->lance.init_block = (struct lance_init_block *)(va + HPLANCE_MEMOFF); /* CPU addr */
-        lp->lance.lance_init_block = 0;                 /* LANCE addr of same RAM */
+        lp->lance.lance_init_block = NULL;              /* LANCE addr of same RAM */
         lp->lance.busmaster_regval = LE_C3_BSWP;        /* we're bigendian */
         lp->lance.irq = d->ipl;
         lp->lance.writerap = hplance_writerap;
index 77eadf84cb2cb7895597180ce5f35e77b34d2d0c..f0f04be989d62bf5003838fbea839ee097f88883 100644 (file)
@@ -590,9 +590,9 @@ static void veth_handle_event(struct HvLpEvent *event, struct pt_regs *regs)
 {
        struct veth_lpevent *veth_event = (struct veth_lpevent *)event;
 
-       if (event->xFlags.xFunction == HvLpEvent_Function_Ack)
+       if (hvlpevent_is_ack(event))
                veth_handle_ack(veth_event);
-       else if (event->xFlags.xFunction == HvLpEvent_Function_Int)
+       else
                veth_handle_int(veth_event);
 }
 
index d8c99f038fa0a7ce07400af7ef8352126b9167be..06cb460361a81432160cdaa841bcaad316184350 100644 (file)
@@ -559,55 +559,52 @@ static void mac8390_no_reset(struct net_device *dev)
 /* directly from daynaport.c by Alan Cox */
 static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
 {
-       volatile unsigned short *ptr;
-       unsigned short *target=to;
+       volatile unsigned char *ptr;
+       unsigned char *target=to;
        from<<=1;       /* word, skip overhead */
-       ptr=(unsigned short *)(dev->mem_start+from);
+       ptr=(unsigned char *)(dev->mem_start+from);
        /* Leading byte? */
        if (from&2) {
-               *((char *)target)++ = *(((char *)ptr++)-1);
+               *target++ = ptr[-1];
+               ptr += 2;
                count--;
        }
        while(count>=2)
        {
-               *target++=*ptr++;       /* Copy and */
-               ptr++;                  /* skip cruft */
+               *(unsigned short *)target = *(unsigned short volatile *)ptr;
+               ptr += 4;                       /* skip cruft */
+               target += 2;
                count-=2;
        }
        /* Trailing byte? */
        if(count)
-       {
-               /* Big endian */
-               unsigned short v=*ptr;
-               *((char *)target)=v>>8;
-       }
+               *target = *ptr;
 }
 
 static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
 {
        volatile unsigned short *ptr;
-       const unsigned short *src=from;
+       const unsigned char *src=from;
        to<<=1; /* word, skip overhead */
        ptr=(unsigned short *)(dev->mem_start+to);
        /* Leading byte? */
        if (to&2) { /* avoid a byte write (stomps on other data) */
-               ptr[-1] = (ptr[-1]&0xFF00)|*((unsigned char *)src)++;
+               ptr[-1] = (ptr[-1]&0xFF00)|*src++;
                ptr++;
                count--;
        }
        while(count>=2)
        {
-               *ptr++=*src++;          /* Copy and */
+               *ptr++=*(unsigned short *)src;          /* Copy and */
                ptr++;                  /* skip cruft */
+               src += 2;
                count-=2;
        }
        /* Trailing byte? */
        if(count)
        {
-               /* Big endian */
-               unsigned short v=*src;
                /* card doesn't like byte writes */
-               *ptr=(*ptr&0x00FF)|(v&0xFF00);
+               *ptr=(*ptr&0x00FF)|(*src << 8);
        }
 }
 
index 02940c0fef680af5d913e7aac2af9d86337c42a2..459443b572cef542a6757c1f02443f6ee42cbe7f 100644 (file)
@@ -81,7 +81,7 @@ int mdiobus_register(struct mii_bus *bus)
 
                        phydev->dev.parent = bus->dev;
                        phydev->dev.bus = &mdio_bus_type;
-                       sprintf(phydev->dev.bus_id, "phy%d:%d", bus->id, i);
+                       snprintf(phydev->dev.bus_id, BUS_ID_SIZE, PHY_ID_FMT, bus->id, i);
 
                        phydev->bus = bus;
 
index b8686e47f8996470c780305e15905e434649dda2..1474b7c5ac0bd5c882c29a6d77fa0708aac664c5 100644 (file)
@@ -42,7 +42,7 @@
  */
 void phy_print_status(struct phy_device *phydev)
 {
-       pr_info("%s: Link is %s", phydev->dev.bus_id,
+       pr_info("PHY: %s - Link is %s", phydev->dev.bus_id,
                        phydev->link ? "Up" : "Down");
        if (phydev->link)
                printk(" - %d/%s", phydev->speed,
index 5c8fcd40ef4d29936fad6828d3fda9d9df58ffcd..01bdb23340584bf6576d9a63b90f5a92645110df 100644 (file)
@@ -389,7 +389,7 @@ static int __init lance_probe( struct net_device *dev)
        dev->stop = &lance_close;
        dev->get_stats = &lance_get_stats;
        dev->set_multicast_list = &set_multicast_list;
-       dev->set_mac_address = 0;
+       dev->set_mac_address = NULL;
 //     KLUDGE -- REMOVE ME
        set_bit(__LINK_STATE_PRESENT, &dev->state);
 
index 1a43163362569f9c08458c079d5c78257dcce08b..9839816668007223ebcb98dd21f7e50747542272 100644 (file)
@@ -1689,9 +1689,9 @@ MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
 MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
 MODULE_LICENSE("GPL");
 
-MODULE_PARM(debug, "i");
-MODULE_PARM(mode, "i");
-MODULE_PARM(cr6set, "i");
+module_param(debug, int, 0644);
+module_param(mode, int, 0);
+module_param(cr6set, int, 0);
 MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
 MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
 
index 82c6b757d30664e0ae7cf106fa36a6dd4411dfc3..c2d5907dc8e054edd6ecab18bdc954cfbfd1554b 100644 (file)
@@ -791,7 +791,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
 #endif
 
        if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
-               dev->features |= NETIF_F_HW_CSUM;
+               dev->features |= NETIF_F_IP_CSUM;
        }
 
        ret = register_netdev(dev);
index 24f7967aab67218f8f68fd1cdeae19a47183b3e1..c1a6e69f7905a41356151febb8473a12360fa964 100644 (file)
@@ -243,7 +243,7 @@ config IPW2200_DEBUG
 
 config AIRO
        tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
-       depends on NET_RADIO && ISA_DMA_API && (PCI || BROKEN)
+       depends on NET_RADIO && ISA_DMA_API && CRYPTO && (PCI || BROKEN)
        ---help---
          This is the standard Linux driver to support Cisco/Aironet ISA and
          PCI 802.11 wireless cards.
index e4729ddf29fd652b023601d1f523affd6aa88e53..f0ccfef664459fced62da435aec49d65da8dd11a 100644 (file)
@@ -1407,6 +1407,17 @@ static int atmel_close(struct net_device *dev)
 {
        struct atmel_private *priv = netdev_priv(dev);
 
+       /* Send event to userspace that we are disassociating */
+       if (priv->station_state == STATION_STATE_READY) {
+               union iwreq_data wrqu;
+
+               wrqu.data.length = 0;
+               wrqu.data.flags = 0;
+               wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+       }
+
        atmel_enter_state(priv, STATION_STATE_DOWN);
 
        if (priv->bus_type == BUS_TYPE_PCCARD)
@@ -1780,10 +1791,10 @@ static int atmel_set_encode(struct net_device *dev,
                        priv->wep_is_on = 1;
                        priv->exclude_unencrypted = 1;
                        if (priv->wep_key_len[index] > 5) {
-                               priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+                               priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
                                priv->encryption_level = 2;
                        } else {
-                               priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+                               priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
                                priv->encryption_level = 1;
                        }
                }
@@ -1853,6 +1864,181 @@ static int atmel_get_encode(struct net_device *dev,
        return 0;
 }
 
+static int atmel_set_encodeext(struct net_device *dev,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu,
+                           char *extra)
+{
+       struct atmel_private *priv = netdev_priv(dev);
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       int idx, key_len;
+
+       /* Determine and validate the key index */
+       idx = encoding->flags & IW_ENCODE_INDEX;
+       if (idx) {
+               if (idx < 1 || idx > WEP_KEYS)
+                       return -EINVAL;
+               idx--;
+       } else
+               idx = priv->default_key;
+
+       if ((encoding->flags & IW_ENCODE_DISABLED) ||
+           ext->alg == IW_ENCODE_ALG_NONE) {
+               priv->wep_is_on = 0;
+               priv->encryption_level = 0;
+               priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
+       }
+
+       if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
+               priv->default_key = idx;
+
+       /* Set the requested key */
+       switch (ext->alg) {
+       case IW_ENCODE_ALG_NONE:
+               break;
+       case IW_ENCODE_ALG_WEP:
+               if (ext->key_len > 5) {
+                       priv->wep_key_len[idx] = 13;
+                       priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
+                       priv->encryption_level = 2;
+               } else if (ext->key_len > 0) {
+                       priv->wep_key_len[idx] = 5;
+                       priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
+                       priv->encryption_level = 1;
+               } else {
+                       return -EINVAL;
+               }
+               priv->wep_is_on = 1;
+               memset(priv->wep_keys[idx], 0, 13);
+               key_len = min ((int)ext->key_len, priv->wep_key_len[idx]);
+               memcpy(priv->wep_keys[idx], ext->key, key_len);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return -EINPROGRESS;
+}
+
+static int atmel_get_encodeext(struct net_device *dev,
+                           struct iw_request_info *info,
+                           union iwreq_data *wrqu,
+                           char *extra)
+{
+       struct atmel_private *priv = netdev_priv(dev);
+       struct iw_point *encoding = &wrqu->encoding;
+       struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
+       int idx, max_key_len;
+
+       max_key_len = encoding->length - sizeof(*ext);
+       if (max_key_len < 0)
+               return -EINVAL;
+
+       idx = encoding->flags & IW_ENCODE_INDEX;
+       if (idx) {
+               if (idx < 1 || idx > WEP_KEYS)
+                       return -EINVAL;
+               idx--;
+       } else
+               idx = priv->default_key;
+
+       encoding->flags = idx + 1;
+       memset(ext, 0, sizeof(*ext));
+       
+       if (!priv->wep_is_on) {
+               ext->alg = IW_ENCODE_ALG_NONE;
+               ext->key_len = 0;
+               encoding->flags |= IW_ENCODE_DISABLED;
+       } else {
+               if (priv->encryption_level > 0)
+                       ext->alg = IW_ENCODE_ALG_WEP;
+               else
+                       return -EINVAL;
+
+               ext->key_len = priv->wep_key_len[idx];
+               memcpy(ext->key, priv->wep_keys[idx], ext->key_len);
+               encoding->flags |= IW_ENCODE_ENABLED;
+       }
+
+       return 0;
+}
+
+static int atmel_set_auth(struct net_device *dev,
+                              struct iw_request_info *info,
+                              union iwreq_data *wrqu, char *extra)
+{
+       struct atmel_private *priv = netdev_priv(dev);
+       struct iw_param *param = &wrqu->param;
+
+       switch (param->flags & IW_AUTH_INDEX) {
+       case IW_AUTH_WPA_VERSION:
+       case IW_AUTH_CIPHER_PAIRWISE:
+       case IW_AUTH_CIPHER_GROUP:
+       case IW_AUTH_KEY_MGMT:
+       case IW_AUTH_RX_UNENCRYPTED_EAPOL:
+       case IW_AUTH_PRIVACY_INVOKED:
+               /*
+                * atmel does not use these parameters
+                */
+               break;
+
+       case IW_AUTH_DROP_UNENCRYPTED:
+               priv->exclude_unencrypted = param->value ? 1 : 0;
+               break;
+
+       case IW_AUTH_80211_AUTH_ALG: {
+                       if (param->value & IW_AUTH_ALG_SHARED_KEY) {
+                               priv->exclude_unencrypted = 1;
+                       } else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
+                               priv->exclude_unencrypted = 0;
+                       } else
+                               return -EINVAL;
+                       break;
+               }
+
+       case IW_AUTH_WPA_ENABLED:
+               /* Silently accept disable of WPA */
+               if (param->value > 0)
+                       return -EOPNOTSUPP;
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+       return -EINPROGRESS;
+}
+
+static int atmel_get_auth(struct net_device *dev,
+                              struct iw_request_info *info,
+                              union iwreq_data *wrqu, char *extra)
+{
+       struct atmel_private *priv = netdev_priv(dev);
+       struct iw_param *param = &wrqu->param;
+
+       switch (param->flags & IW_AUTH_INDEX) {
+       case IW_AUTH_DROP_UNENCRYPTED:
+               param->value = priv->exclude_unencrypted;
+               break;
+
+       case IW_AUTH_80211_AUTH_ALG:
+               if (priv->exclude_unencrypted == 1)
+                       param->value = IW_AUTH_ALG_SHARED_KEY;
+               else
+                       param->value = IW_AUTH_ALG_OPEN_SYSTEM;
+               break;
+
+       case IW_AUTH_WPA_ENABLED:
+               param->value = 0;
+               break;
+
+       default:
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+
 static int atmel_get_name(struct net_device *dev,
                          struct iw_request_info *info,
                          char *cwrq,
@@ -2289,13 +2475,15 @@ static int atmel_set_wap(struct net_device *dev,
 {
        struct atmel_private *priv = netdev_priv(dev);
        int i;
-       static const u8 bcast[] = { 255, 255, 255, 255, 255, 255 };
+       static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+       static const u8 off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
        unsigned long flags;
 
        if (awrq->sa_family != ARPHRD_ETHER)
                return -EINVAL;
 
-       if (memcmp(bcast, awrq->sa_data, 6) == 0) {
+       if (!memcmp(any, awrq->sa_data, 6) ||
+           !memcmp(off, awrq->sa_data, 6)) {
                del_timer_sync(&priv->management_timer);
                spin_lock_irqsave(&priv->irqlock, flags);
                atmel_scan(priv, 1);
@@ -2378,6 +2566,15 @@ static const iw_handler atmel_handler[] =
        (iw_handler) atmel_get_encode,          /* SIOCGIWENCODE */
        (iw_handler) atmel_set_power,           /* SIOCSIWPOWER */
        (iw_handler) atmel_get_power,           /* SIOCGIWPOWER */
+       (iw_handler) NULL,                      /* -- hole -- */
+       (iw_handler) NULL,                      /* -- hole -- */
+       (iw_handler) NULL,                      /* SIOCSIWGENIE */
+       (iw_handler) NULL,                      /* SIOCGIWGENIE */
+       (iw_handler) atmel_set_auth,            /* SIOCSIWAUTH */
+       (iw_handler) atmel_get_auth,            /* SIOCGIWAUTH */
+       (iw_handler) atmel_set_encodeext,       /* SIOCSIWENCODEEXT */
+       (iw_handler) atmel_get_encodeext,       /* SIOCGIWENCODEEXT */
+       (iw_handler) NULL,                      /* SIOCSIWPMKSA */
 };
 
 static const iw_handler atmel_private_handler[] =
@@ -2924,6 +3121,8 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
        u16 ass_id = le16_to_cpu(ass_resp->ass_id);
        u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
 
+       union iwreq_data wrqu;
+
        if (frame_len < 8 + rates_len)
                return;
 
@@ -2954,6 +3153,14 @@ static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
                priv->station_is_associated = 1;
                priv->station_was_associated = 1;
                atmel_enter_state(priv, STATION_STATE_READY);
+
+               /* Send association event to userspace */
+               wrqu.data.length = 0;
+               wrqu.data.flags = 0;
+               memcpy(wrqu.ap_addr.sa_data, priv->CurrentBSSID, ETH_ALEN);
+               wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+               wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+
                return;
        }
 
@@ -3632,6 +3839,7 @@ static int reset_atmel_card(struct net_device *dev)
 
        struct atmel_private *priv = netdev_priv(dev);
        u8 configuration;
+       int old_state = priv->station_state;
 
        /* data to add to the firmware names, in priority order
           this implemenents firmware versioning */
@@ -3792,6 +4000,17 @@ static int reset_atmel_card(struct net_device *dev)
        else
                build_wep_mib(priv);
 
+       if (old_state == STATION_STATE_READY)
+       {
+               union iwreq_data wrqu;
+
+               wrqu.data.length = 0;
+               wrqu.data.flags = 0;
+               wrqu.ap_addr.sa_family = ARPHRD_ETHER;
+               memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN);
+               wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
+       }
+
        return 1;
 }
 
index 7146b69b812cff19ad6ff2f8bd80acea4a971a91..0aa14c92b5700eca800e937de7ed17e2024e83b2 100644 (file)
@@ -380,8 +380,6 @@ int __pci_register_driver(struct pci_driver *drv, struct module *owner)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &pci_bus_type;
-       drv->driver.probe = pci_device_probe;
-       drv->driver.remove = pci_device_remove;
        /* FIXME, once all of the existing PCI drivers have been fixed to set
         * the pci shutdown function, this test can go away. */
        if (!drv->driver.shutdown)
@@ -513,6 +511,8 @@ struct bus_type pci_bus_type = {
        .name           = "pci",
        .match          = pci_bus_match,
        .uevent         = pci_uevent,
+       .probe          = pci_device_probe,
+       .remove         = pci_device_remove,
        .suspend        = pci_device_suspend,
        .resume         = pci_device_resume,
        .dev_attrs      = pci_dev_attrs,
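
The hunk above (and the matching pcmcia, pnp, rapidio and s390 ccw hunks later in this series)
moves probe/remove out of each registered struct device_driver and into the owning struct
bus_type, so the driver core calls the bus and the bus dispatches to its own driver type.  A
rough sketch of the resulting pattern for an invented "foo" bus follows; all foo_* names, the
trivial match function, and the driver layout are made up for illustration.

#include <linux/device.h>

struct foo_driver {
	char		*name;
	int		(*probe)(struct device *dev);
	int		(*remove)(struct device *dev);
	struct device_driver driver;
};

#define to_foo_driver(d) container_of(d, struct foo_driver, driver)

static int foo_bus_match(struct device *dev, struct device_driver *drv)
{
	return 1;	/* a real bus would compare device and driver IDs */
}

/* bus-level methods: dispatch to the bus-specific driver operations */
static int foo_device_probe(struct device *dev)
{
	struct foo_driver *fdrv = to_foo_driver(dev->driver);

	return fdrv->probe ? fdrv->probe(dev) : 0;
}

static int foo_device_remove(struct device *dev)
{
	struct foo_driver *fdrv = to_foo_driver(dev->driver);

	return fdrv->remove ? fdrv->remove(dev) : 0;
}

struct bus_type foo_bus_type = {
	.name	= "foo",
	.match	= foo_bus_match,
	.probe	= foo_device_probe,	/* previously set on every driver */
	.remove	= foo_device_remove,
};

int foo_register_driver(struct foo_driver *fdrv)
{
	fdrv->driver.name = fdrv->name;
	fdrv->driver.bus = &foo_bus_type;
	/* no fdrv->driver.probe / .remove assignments any more */
	return driver_register(&fdrv->driver);
}

The point of the change is that the bus-specific typing lives in one place per bus instead of
being open-coded by every driver that registers with it.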
index 0252582b91cd7f6ef2d050dbf7c05d4440d3c762..0a424a4e8187f8c3a80509014acbbb86399af7d0 100644 (file)
@@ -311,8 +311,6 @@ int pcmcia_register_driver(struct pcmcia_driver *driver)
        /* initialize common fields */
        driver->drv.bus = &pcmcia_bus_type;
        driver->drv.owner = driver->owner;
-       driver->drv.probe = pcmcia_device_probe;
-       driver->drv.remove = pcmcia_device_remove;
 
        return driver_register(&driver->drv);
 }
@@ -920,6 +918,37 @@ pcmcia_device_stringattr(prod_id2, prod_id[1]);
 pcmcia_device_stringattr(prod_id3, prod_id[2]);
 pcmcia_device_stringattr(prod_id4, prod_id[3]);
 
+
+static ssize_t pcmcia_show_pm_state(struct device *dev, struct device_attribute *attr, char *buf)
+{
+       struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+
+       if (p_dev->dev.power.power_state.event != PM_EVENT_ON)
+               return sprintf(buf, "off\n");
+       else
+               return sprintf(buf, "on\n");
+}
+
+static ssize_t pcmcia_store_pm_state(struct device *dev, struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
+       int ret = 0;
+
+        if (!count)
+                return -EINVAL;
+
+       if ((p_dev->dev.power.power_state.event == PM_EVENT_ON) &&
+           (!strncmp(buf, "off", 3)))
+               ret = dpm_runtime_suspend(dev, PMSG_SUSPEND);
+       else if ((p_dev->dev.power.power_state.event != PM_EVENT_ON) &&
+                (!strncmp(buf, "on", 2)))
+               dpm_runtime_resume(dev);
+
+       return ret ? ret : count;
+}
+
+
 static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
@@ -945,8 +974,9 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
 {
        struct pcmcia_device *p_dev = to_pcmcia_dev(dev);
-        if (!count)
-                return -EINVAL;
+
+       if (!count)
+               return -EINVAL;
 
        down(&p_dev->socket->skt_sem);
        p_dev->allow_func_id_match = 1;
@@ -959,6 +989,7 @@ static ssize_t pcmcia_store_allow_func_id_match(struct device *dev,
 
 static struct device_attribute pcmcia_dev_attrs[] = {
        __ATTR(function, 0444, func_show, NULL),
+       __ATTR(pm_state, 0644, pcmcia_show_pm_state, pcmcia_store_pm_state),
        __ATTR_RO(func_id),
        __ATTR_RO(manf_id),
        __ATTR_RO(card_id),
@@ -1167,6 +1198,8 @@ struct bus_type pcmcia_bus_type = {
        .uevent = pcmcia_bus_uevent,
        .match = pcmcia_bus_match,
        .dev_attrs = pcmcia_dev_attrs,
+       .probe = pcmcia_device_probe,
+       .remove = pcmcia_device_remove,
        .suspend = pcmcia_dev_suspend,
        .resume = pcmcia_dev_resume,
 };
index 5d957dfe23d942f37c6851a2fdc963e9eff7f64e..fda06941e73062cdf9a6629dc9452153cf69f6b3 100644 (file)
@@ -171,27 +171,22 @@ static int __init mst_pcmcia_init(void)
 {
        int ret;
 
-       mst_pcmcia_device = kzalloc(sizeof(*mst_pcmcia_device), GFP_KERNEL);
+       mst_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
        if (!mst_pcmcia_device)
                return -ENOMEM;
-       mst_pcmcia_device->name = "pxa2xx-pcmcia";
+
        mst_pcmcia_device->dev.platform_data = &mst_pcmcia_ops;
 
-       ret = platform_device_register(mst_pcmcia_device);
+       ret = platform_device_add(mst_pcmcia_device);
+
        if (ret)
-               kfree(mst_pcmcia_device);
+               platform_device_put(mst_pcmcia_device);
 
        return ret;
 }
 
 static void __exit mst_pcmcia_exit(void)
 {
-       /*
-        * This call is supposed to free our mst_pcmcia_device.
-        * Unfortunately platform_device don't have a free method, and
-        * we can't assume it's free of any reference at this point so we
-        * can't free it either.
-        */
        platform_device_unregister(mst_pcmcia_device);
 }
 
index 12a7244a5ec86e4a92bfca58f422c7d4c7c69511..fd3647368955f1f68aa8dad0f99ed25994b8ed15 100644 (file)
@@ -263,30 +263,25 @@ static int __init sharpsl_pcmcia_init(void)
 {
        int ret;
 
-       sharpsl_pcmcia_ops.nr=platform_scoop_config->num_devs;
-       sharpsl_pcmcia_device = kzalloc(sizeof(*sharpsl_pcmcia_device), GFP_KERNEL);
+       sharpsl_pcmcia_ops.nr = platform_scoop_config->num_devs;
+       sharpsl_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
+
        if (!sharpsl_pcmcia_device)
                return -ENOMEM;
 
-       sharpsl_pcmcia_device->name = "pxa2xx-pcmcia";
        sharpsl_pcmcia_device->dev.platform_data = &sharpsl_pcmcia_ops;
-       sharpsl_pcmcia_device->dev.parent=platform_scoop_config->devs[0].dev;
+       sharpsl_pcmcia_device->dev.parent = platform_scoop_config->devs[0].dev;
+
+       ret = platform_device_add(sharpsl_pcmcia_device);
 
-       ret = platform_device_register(sharpsl_pcmcia_device);
        if (ret)
-               kfree(sharpsl_pcmcia_device);
+               platform_device_put(sharpsl_pcmcia_device);
 
        return ret;
 }
 
 static void __exit sharpsl_pcmcia_exit(void)
 {
-       /*
-        * This call is supposed to free our sharpsl_pcmcia_device.
-        * Unfortunately platform_device don't have a free method, and
-        * we can't assume it's free of any reference at this point so we
-        * can't free it either.
-        */
        platform_device_unregister(sharpsl_pcmcia_device);
 }
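
Both this hunk and the mainstone one above replace kzalloc() + platform_device_register() --
which left no safe way to free the device in the exit path -- with the refcounted
platform_device_alloc()/platform_device_add() pair, so platform_device_put() cleans up on
failure and platform_device_unregister() is enough on exit.  A minimal sketch of that lifetime
pattern, using an invented device name, might look like:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_device *example_pdev;

static int __init example_init(void)
{
	int ret;

	/* "example-dev" is a placeholder; the hunks above use "pxa2xx-pcmcia" */
	example_pdev = platform_device_alloc("example-dev", -1);
	if (!example_pdev)
		return -ENOMEM;

	/* board-specific platform_data would be attached here */

	ret = platform_device_add(example_pdev);
	if (ret)
		/* add failed: drop the reference from _alloc(), freeing the device */
		platform_device_put(example_pdev);

	return ret;
}
module_init(example_init);

static void __exit example_exit(void)
{
	/* once added, unregister both removes and releases the device */
	platform_device_unregister(example_pdev);
}
module_exit(example_exit);

platform_device_register() remains fine for statically allocated devices; the alloc/add form
is meant for devices created at runtime, as in these board files.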
 
index 7a7744662d54d5128eadeece889f400e82824c2f..5ab1cdef7c48cf97229c0fa7ed18708e49444cf4 100644 (file)
@@ -98,6 +98,30 @@ static ssize_t pccard_store_insert(struct class_device *dev, const char *buf, si
 }
 static CLASS_DEVICE_ATTR(card_insert, 0200, NULL, pccard_store_insert);
 
+
+static ssize_t pccard_show_card_pm_state(struct class_device *dev, char *buf)
+{
+       struct pcmcia_socket *s = to_socket(dev);
+       return sprintf(buf, "%s\n", s->state & SOCKET_SUSPEND ? "off" : "on");
+}
+
+static ssize_t pccard_store_card_pm_state(struct class_device *dev, const char *buf, size_t count)
+{
+       ssize_t ret = -EINVAL;
+       struct pcmcia_socket *s = to_socket(dev);
+
+       if (!count)
+               return -EINVAL;
+
+       if (!(s->state & SOCKET_SUSPEND) && !strncmp(buf, "off", 3))
+               ret = pcmcia_suspend_card(s);
+       else if ((s->state & SOCKET_SUSPEND) && !strncmp(buf, "on", 2))
+               ret = pcmcia_resume_card(s);
+
+       return ret ? -ENODEV : count;
+}
+static CLASS_DEVICE_ATTR(card_pm_state, 0644, pccard_show_card_pm_state, pccard_store_card_pm_state);
+
 static ssize_t pccard_store_eject(struct class_device *dev, const char *buf, size_t count)
 {
        ssize_t ret;
@@ -320,6 +344,7 @@ static struct class_device_attribute *pccard_socket_attributes[] = {
        &class_device_attr_card_vpp,
        &class_device_attr_card_vcc,
        &class_device_attr_card_insert,
+       &class_device_attr_card_pm_state,
        &class_device_attr_card_eject,
        &class_device_attr_card_irq_mask,
        &class_device_attr_available_resources_setup_done,
index 15fb758a9e526e0428fe88fe1ceb8e03d067906f..7cafacdd12b0a4dae9fea941b3b8efc0076fea85 100644 (file)
@@ -195,6 +195,8 @@ static int pnp_bus_resume(struct device *dev)
 struct bus_type pnp_bus_type = {
        .name   = "pnp",
        .match  = pnp_bus_match,
+       .probe  = pnp_device_probe,
+       .remove = pnp_device_remove,
        .suspend = pnp_bus_suspend,
        .resume = pnp_bus_resume,
 };
@@ -215,8 +217,6 @@ int pnp_register_driver(struct pnp_driver *drv)
 
        drv->driver.name = drv->name;
        drv->driver.bus = &pnp_bus_type;
-       drv->driver.probe = pnp_device_probe;
-       drv->driver.remove = pnp_device_remove;
 
        count = driver_register(&drv->driver);
 
index dc749609699aa178e5e1321942d44930378d2285..5480119ff9d36d94af3dce527dbaacac4d5811d7 100644 (file)
@@ -147,8 +147,6 @@ int rio_register_driver(struct rio_driver *rdrv)
        /* initialize common driver fields */
        rdrv->driver.name = rdrv->name;
        rdrv->driver.bus = &rio_bus_type;
-       rdrv->driver.probe = rio_device_probe;
-       rdrv->driver.remove = rio_device_remove;
 
        /* register with core */
        return driver_register(&rdrv->driver);
@@ -204,7 +202,9 @@ static struct device rio_bus = {
 struct bus_type rio_bus_type = {
        .name = "rapidio",
        .match = rio_match_bus,
-       .dev_attrs = rio_dev_attrs
+       .dev_attrs = rio_dev_attrs,
+       .probe = rio_device_probe,
+       .remove = rio_device_remove,
 };
 
 /**
index e849289d4f3c708a2029794c9f01f9349213e561..503a568e47c3655847d6b467e1568915c21f36f5 100644 (file)
@@ -52,11 +52,7 @@ ccwgroup_uevent (struct device *dev, char **envp, int num_envp, char *buffer,
        return 0;
 }
 
-static struct bus_type ccwgroup_bus_type = {
-       .name    = "ccwgroup",
-       .match   = ccwgroup_bus_match,
-       .uevent = ccwgroup_uevent,
-};
+static struct bus_type ccwgroup_bus_type;
 
 static inline void
 __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
@@ -389,6 +385,14 @@ ccwgroup_remove (struct device *dev)
        return 0;
 }
 
+static struct bus_type ccwgroup_bus_type = {
+       .name   = "ccwgroup",
+       .match  = ccwgroup_bus_match,
+       .uevent = ccwgroup_uevent,
+       .probe  = ccwgroup_probe,
+       .remove = ccwgroup_remove,
+};
+
 int
 ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
 {
@@ -396,8 +400,6 @@ ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
        cdriver->driver = (struct device_driver) {
                .bus = &ccwgroup_bus_type,
                .name = cdriver->name,
-               .probe = ccwgroup_probe,
-               .remove = ccwgroup_remove,
        };
 
        return driver_register(&cdriver->driver);
index e565193650c767038b8046a9309e38b5734bc86f..2d319fb812ca935db1ef84f520c964f52bfd6e5b 100644 (file)
@@ -542,9 +542,41 @@ css_bus_match (struct device *dev, struct device_driver *drv)
        return 0;
 }
 
+static int
+css_probe (struct device *dev)
+{
+       struct subchannel *sch;
+
+       sch = to_subchannel(dev);
+       sch->driver = container_of (dev->driver, struct css_driver, drv);
+       return (sch->driver->probe ? sch->driver->probe(sch) : 0);
+}
+
+static int
+css_remove (struct device *dev)
+{
+       struct subchannel *sch;
+
+       sch = to_subchannel(dev);
+       return (sch->driver->remove ? sch->driver->remove(sch) : 0);
+}
+
+static void
+css_shutdown (struct device *dev)
+{
+       struct subchannel *sch;
+
+       sch = to_subchannel(dev);
+       if (sch->driver->shutdown)
+               sch->driver->shutdown(sch);
+}
+
 struct bus_type css_bus_type = {
-       .name  = "css",
-       .match = &css_bus_match,
+       .name     = "css",
+       .match    = css_bus_match,
+       .probe    = css_probe,
+       .remove   = css_remove,
+       .shutdown = css_shutdown,
 };
 
 subsys_initcall(init_channel_subsystem);
index 251ebd7a7d3a260ce07ad9c77cd13cc28bd57cbc..aa5ab5d4547c29a4460f9c205851c20f38250108 100644 (file)
@@ -115,6 +115,7 @@ struct ccw_device_private {
  * Currently, we only care about I/O subchannels (type 0), these
  * have a ccw_device connected to them.
  */
+struct subchannel;
 struct css_driver {
        unsigned int subchannel_type;
        struct device_driver drv;
@@ -122,6 +123,9 @@ struct css_driver {
        int (*notify)(struct device *, int);
        void (*verify)(struct device *);
        void (*termination)(struct device *);
+       int (*probe)(struct subchannel *);
+       int (*remove)(struct subchannel *);
+       void (*shutdown)(struct subchannel *);
 };
 
 /*
index fa3e4c0a25364d2427800a7a986c621d92a39866..eb73605a05275b00973572823f4251def0832c3a 100644 (file)
@@ -107,33 +107,29 @@ ccw_uevent (struct device *dev, char **envp, int num_envp,
        return 0;
 }
 
-struct bus_type ccw_bus_type = {
-       .name  = "ccw",
-       .match = &ccw_bus_match,
-       .uevent = &ccw_uevent,
-};
+struct bus_type ccw_bus_type;
 
-static int io_subchannel_probe (struct device *);
-static int io_subchannel_remove (struct device *);
+static int io_subchannel_probe (struct subchannel *);
+static int io_subchannel_remove (struct subchannel *);
 void io_subchannel_irq (struct device *);
 static int io_subchannel_notify(struct device *, int);
 static void io_subchannel_verify(struct device *);
 static void io_subchannel_ioterm(struct device *);
-static void io_subchannel_shutdown(struct device *);
+static void io_subchannel_shutdown(struct subchannel *);
 
 struct css_driver io_subchannel_driver = {
        .subchannel_type = SUBCHANNEL_TYPE_IO,
        .drv = {
                .name = "io_subchannel",
                .bus  = &css_bus_type,
-               .probe = &io_subchannel_probe,
-               .remove = &io_subchannel_remove,
-               .shutdown = &io_subchannel_shutdown,
        },
        .irq = io_subchannel_irq,
        .notify = io_subchannel_notify,
        .verify = io_subchannel_verify,
        .termination = io_subchannel_ioterm,
+       .probe = io_subchannel_probe,
+       .remove = io_subchannel_remove,
+       .shutdown = io_subchannel_shutdown,
 };
 
 struct workqueue_struct *ccw_device_work;
@@ -803,14 +799,12 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 }
 
 static int
-io_subchannel_probe (struct device *pdev)
+io_subchannel_probe (struct subchannel *sch)
 {
-       struct subchannel *sch;
        struct ccw_device *cdev;
        int rc;
        unsigned long flags;
 
-       sch = to_subchannel(pdev);
        if (sch->dev.driver_data) {
                /*
                 * This subchannel already has an associated ccw_device.
@@ -846,7 +840,7 @@ io_subchannel_probe (struct device *pdev)
        memset(cdev->private, 0, sizeof(struct ccw_device_private));
        atomic_set(&cdev->private->onoff, 0);
        cdev->dev = (struct device) {
-               .parent = pdev,
+               .parent = &sch->dev,
                .release = ccw_device_release,
        };
        INIT_LIST_HEAD(&cdev->private->kick_work.entry);
@@ -859,7 +853,7 @@ io_subchannel_probe (struct device *pdev)
                return -ENODEV;
        }
 
-       rc = io_subchannel_recog(cdev, to_subchannel(pdev));
+       rc = io_subchannel_recog(cdev, sch);
        if (rc) {
                spin_lock_irqsave(&sch->lock, flags);
                sch->dev.driver_data = NULL;
@@ -883,17 +877,17 @@ ccw_device_unregister(void *data)
 }
 
 static int
-io_subchannel_remove (struct device *dev)
+io_subchannel_remove (struct subchannel *sch)
 {
        struct ccw_device *cdev;
        unsigned long flags;
 
-       if (!dev->driver_data)
+       if (!sch->dev.driver_data)
                return 0;
-       cdev = dev->driver_data;
+       cdev = sch->dev.driver_data;
        /* Set ccw device to not operational and drop reference. */
        spin_lock_irqsave(cdev->ccwlock, flags);
-       dev->driver_data = NULL;
+       sch->dev.driver_data = NULL;
        cdev->private->state = DEV_STATE_NOT_OPER;
        spin_unlock_irqrestore(cdev->ccwlock, flags);
        /*
@@ -948,14 +942,12 @@ io_subchannel_ioterm(struct device *dev)
 }
 
 static void
-io_subchannel_shutdown(struct device *dev)
+io_subchannel_shutdown(struct subchannel *sch)
 {
-       struct subchannel *sch;
        struct ccw_device *cdev;
        int ret;
 
-       sch = to_subchannel(dev);
-       cdev = dev->driver_data;
+       cdev = sch->dev.driver_data;
 
        if (cio_is_console(sch->schid))
                return;
@@ -1129,6 +1121,14 @@ ccw_device_remove (struct device *dev)
        return 0;
 }
 
+struct bus_type ccw_bus_type = {
+       .name   = "ccw",
+       .match  = ccw_bus_match,
+       .uevent = ccw_uevent,
+       .probe  = ccw_device_probe,
+       .remove = ccw_device_remove,
+};
+
 int
 ccw_driver_register (struct ccw_driver *cdriver)
 {
@@ -1136,8 +1136,6 @@ ccw_driver_register (struct ccw_driver *cdriver)
 
        drv->bus = &ccw_bus_type;
        drv->name = cdriver->name;
-       drv->probe = ccw_device_probe;
-       drv->remove = ccw_device_remove;
 
        return driver_register(drv);
 }
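
The cio hunks above (and the scsi_debug and superhyway hunks further down) all apply the same driver-model change: probe/remove/shutdown move off the individual struct device_driver and onto the bus, so ccw_bus_type now carries .probe/.remove, css_driver gains subchannel-typed probe/remove/shutdown hooks, and ccw_driver_register() no longer fills in drv->probe/drv->remove. A minimal sketch of that pattern, using hypothetical example_* names rather than the ccw code itself:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* bus-specific driver type: carries its own typed callbacks */
struct example_driver {
	struct device_driver drv;
	int (*probe)(struct device *dev);
	int (*remove)(struct device *dev);
};

#define to_example_driver(d) container_of(d, struct example_driver, drv)

/* bus-level callbacks forward to the bus-specific driver methods */
static int example_bus_probe(struct device *dev)
{
	struct example_driver *edrv = to_example_driver(dev->driver);

	return edrv->probe ? edrv->probe(dev) : -ENODEV;
}

static int example_bus_remove(struct device *dev)
{
	struct example_driver *edrv = to_example_driver(dev->driver);

	return edrv->remove ? edrv->remove(dev) : 0;
}

static struct bus_type example_bus_type = {
	.name   = "example",
	.probe  = example_bus_probe,	/* set once on the bus ... */
	.remove = example_bus_remove,	/* ... not on every driver */
};

int example_driver_register(struct example_driver *edrv)
{
	edrv->drv.bus = &example_bus_type;
	/* drv.probe/drv.remove are intentionally left unset now */
	return driver_register(&edrv->drv);
}

The bus itself is still registered once with bus_register() during subsystem init, as css_bus_type and ccw_bus_type already are.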
index 92e6c5639dd3b7ef0793091def7ee6ddcc827619..015db40ad8a46079607b00eb9b13ed4564707aca 100644 (file)
@@ -92,7 +92,6 @@ static struct Aurora_port aurora_port[AURORA_TNPORTS] =  {
 
 /* no longer used. static struct Aurora_board * IRQ_to_board[16] = { NULL, } ;*/
 static unsigned char * tmp_buf = NULL;
-static DECLARE_MUTEX(tmp_buf_sem);
 
 DECLARE_TASK_QUEUE(tq_aurora);
 
index b9d2bb88787a30568fde6f65f8bf77c92c2d0f38..320e765fa0cd97b873e62b6c805d07c52aab67d1 100644 (file)
@@ -45,7 +45,7 @@ obj-$(CONFIG_CYBERSTORMII_SCSI)       += NCR53C9x.o   cyberstormII.o
 obj-$(CONFIG_BLZ2060_SCSI)     += NCR53C9x.o   blz2060.o
 obj-$(CONFIG_BLZ1230_SCSI)     += NCR53C9x.o   blz1230.o
 obj-$(CONFIG_FASTLANE_SCSI)    += NCR53C9x.o   fastlane.o
-obj-$(CONFIG_OKTAGON_SCSI)     += NCR53C9x.o   oktagon_esp.o   oktagon_io.o
+obj-$(CONFIG_OKTAGON_SCSI)     += NCR53C9x.o   oktagon_esp_mod.o
 obj-$(CONFIG_ATARI_SCSI)       += atari_scsi.o
 obj-$(CONFIG_MAC_SCSI)         += mac_scsi.o
 obj-$(CONFIG_SCSI_MAC_ESP)     += mac_esp.o    NCR53C9x.o
@@ -164,6 +164,7 @@ CFLAGS_ncr53c8xx.o  := $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs  := zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs      := NCR_Q720.o ncr53c8xx.o
 libata-objs    := libata-core.o libata-scsi.o
+oktagon_esp_mod-objs   := oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
 clean-files := 53c7xx_d.h 53c700_d.h   \
index 640590bd014a10c5c1f98c3974bc42be58e447e4..c7dd0154d012b10bdeb5fc4d51a855955e0b61ac 100644 (file)
@@ -1799,6 +1799,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
                 */
                int oldphase, i = 0; /* or where we left off last time ?? esp->current_data ?? */
                int fifocnt = 0;
+               unsigned char *p = phys_to_virt((unsigned long)SCptr->SCp.ptr);
 
                oldphase = esp_read(eregs->esp_status) & ESP_STAT_PMASK;
 
@@ -1860,7 +1861,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
 
                                /* read fifo */
                                for(j=0;j<fifocnt;j++)
-                                       SCptr->SCp.ptr[i++] = esp_read(eregs->esp_fdata);
+                                       p[i++] = esp_read(eregs->esp_fdata);
 
                                ESPDATA(("(%d) ", i));
 
@@ -1882,7 +1883,7 @@ static int esp_do_data(struct NCR_ESP *esp, struct ESP_regs *eregs)
 
                                /* fill fifo */
                                for(j=0;j<this_count;j++)
-                                       esp_write(eregs->esp_fdata, SCptr->SCp.ptr[i++]);
+                                       esp_write(eregs->esp_fdata, p[i++]);
 
                                /* how many left if this goes out ?? */
                                hmuch -= this_count;
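
In these NCR53C9x hunks SCp.ptr is treated as a physical address handed to the DMA engines, so the PIO fall-back path first maps it back to a kernel virtual pointer with phys_to_virt() before touching individual bytes. A minimal sketch of the idiom, with hypothetical names (fifo_read_byte stands in for the esp_read() register access):

#include <asm/io.h>	/* phys_to_virt() */

/* drain "count" bytes from a FIFO into a buffer tracked by physical address */
static void example_pio_drain(unsigned long phys_buf, int count,
			      unsigned char (*fifo_read_byte)(void))
{
	unsigned char *p = phys_to_virt(phys_buf);	/* CPU-usable alias */
	int i;

	for (i = 0; i < count; i++)
		p[i] = fifo_read_byte();
}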
index 763e409a1ff37b96684cc9ecd2d6d6a2d17befc6..3867ac2de4c22666790c18c91fba03b3d76d6eb7 100644 (file)
@@ -224,7 +224,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_dump_state(struct NCR_ESP *esp)
 {
        ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-               custom.intreqr, custom.intenar));
+               amiga_custom.intreqr, amiga_custom.intenar));
 }
 
 void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -298,7 +298,7 @@ static int dma_irq_p(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index d72d05fffdfa45747c4abe68d5a3e757ea08a5fb..4ebe69e3275696bf8ae25fd4878f05089c13ec57 100644 (file)
@@ -190,7 +190,7 @@ static int dma_can_transfer(struct NCR_ESP *esp, Scsi_Cmnd *sp)
 static void dma_dump_state(struct NCR_ESP *esp)
 {
        ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-               custom.intreqr, custom.intenar));
+               amiga_custom.intreqr, amiga_custom.intenar));
 }
 
 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -251,7 +251,7 @@ static void dma_led_on(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index f9b940e56430c432760edc06e53613417f349bc5..a4a4fac5c0a11454473f53bf26056e40ef43c8dd 100644 (file)
@@ -223,7 +223,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
                esp->esp_id, ((struct cyber_dma_registers *)
                              (esp->dregs))->cond_reg));
        ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-               custom.intreqr, custom.intenar));
+               amiga_custom.intreqr, amiga_custom.intenar));
 }
 
 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -322,7 +322,7 @@ static void dma_led_on(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index a3caabfd7557ff08e5234b0eea90536970a09b37..3a803d73bc5fa46591c792d59de0edeea7b15de7 100644 (file)
@@ -200,7 +200,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
                esp->esp_id, ((struct cyberII_dma_registers *)
                              (esp->dregs))->cond_reg));
        ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-               custom.intreqr, custom.intenar));
+               amiga_custom.intreqr, amiga_custom.intenar));
 }
 
 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -259,7 +259,7 @@ static void dma_led_on(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index ccee68b52f7e8dc53c20cb53c6c87db9e06cbc13..8ae9c406a83b4d1727756af596feec9408f8a11f 100644 (file)
@@ -268,7 +268,7 @@ static void dma_dump_state(struct NCR_ESP *esp)
                esp->esp_id, ((struct fastlane_dma_registers *)
                              (esp->dregs))->cond_reg));
        ESPLOG(("intreq:<%04x>, intena:<%04x>\n",
-               custom.intreqr, custom.intenar));
+               amiga_custom.intreqr, amiga_custom.intenar));
 }
 
 static void dma_init_read(struct NCR_ESP *esp, __u32 addr, int length)
@@ -368,7 +368,7 @@ static void dma_led_on(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index 5d9c9ada814f16c653e062530d717168283734d2..dee426f8c07b566bd8fdbdf148483b40e4f09e06 100644 (file)
@@ -490,7 +490,7 @@ static void dma_led_on(struct NCR_ESP *esp)
 
 static int dma_ports_p(struct NCR_ESP *esp)
 {
-       return ((custom.intenar) & IF_PORTS);
+       return ((amiga_custom.intenar) & IF_PORTS);
 }
 
 static void dma_setup(struct NCR_ESP *esp, __u32 addr, int count, int write)
index 3ded9daaf4a01c21732f7e7d4b3fb67d5e19fda4..0e529f8171c4123c98253e5035ede1b79d9cb1cf 100644 (file)
@@ -221,8 +221,6 @@ static struct bus_type pseudo_lld_bus;
 static struct device_driver sdebug_driverfs_driver = {
        .name           = sdebug_proc_name,
        .bus            = &pseudo_lld_bus,
-       .probe          = sdebug_driver_probe,
-       .remove         = sdebug_driver_remove,
 };
 
 static const int check_condition_result =
@@ -1796,6 +1794,8 @@ static int pseudo_lld_bus_match(struct device *dev,
 static struct bus_type pseudo_lld_bus = {
         .name = "pseudo",
         .match = pseudo_lld_bus_match,
+       .probe = sdebug_driver_probe,
+       .remove = sdebug_driver_remove,
 };
 
 static void sdebug_release_adapter(struct device * dev)
index fd63add6a577aba8ab03c1389e0ef7497ffc789f..fb53eeaee617b62fc453058879b795ea729859ac 100644 (file)
@@ -465,7 +465,7 @@ wd33c93_execute(struct Scsi_Host *instance)
         */
 
        cmd = (struct scsi_cmnd *) hostdata->input_Q;
-       prev = 0;
+       prev = NULL;
        while (cmd) {
                if (!(hostdata->busy[cmd->device->id] & (1 << cmd->device->lun)))
                        break;
@@ -1569,7 +1569,7 @@ wd33c93_abort(struct scsi_cmnd * cmd)
  */
 
        tmp = (struct scsi_cmnd *) hostdata->input_Q;
-       prev = 0;
+       prev = NULL;
        while (tmp) {
                if (tmp == cmd) {
                        if (prev)
index 4dd5c3f98167a143b2e29ea17b991540e618843d..8cbf0fc5a225ce0ba54a8a9eeb85ce65e15bfd12 100644 (file)
@@ -143,7 +143,6 @@ static int m68328_console_cbaud   = DEFAULT_CBAUD;
  * memory if large numbers of serial ports are open.
  */
 static unsigned char tmp_buf[SERIAL_XMIT_SIZE]; /* This is cheating */
-DECLARE_MUTEX(tmp_buf_sem);
 
 static inline int serial_paranoia_check(struct m68k_serial *info,
                                        char *name, const char *routine)
index 54e5cc0dd5f8c323134531f6671c559754d10b5d..fb610c3634a4b7f49f63a9bfebd0541dad7c19d9 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/serial.h>
 #include <linux/serial_8250.h>
 #include <linux/nmi.h>
+#include <linux/mutex.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -2467,7 +2468,7 @@ static struct platform_device *serial8250_isa_devs;
  * 16x50 serial ports to be configured at run-time, to support PCMCIA
  * modems and PCI multiport cards.
  */
-static DECLARE_MUTEX(serial_sem);
+static DEFINE_MUTEX(serial_mutex);
 
 static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *port)
 {
@@ -2522,7 +2523,7 @@ int serial8250_register_port(struct uart_port *port)
        if (port->uartclk == 0)
                return -EINVAL;
 
-       down(&serial_sem);
+       mutex_lock(&serial_mutex);
 
        uart = serial8250_find_match_or_unused(port);
        if (uart) {
@@ -2544,7 +2545,7 @@ int serial8250_register_port(struct uart_port *port)
                if (ret == 0)
                        ret = uart->port.line;
        }
-       up(&serial_sem);
+       mutex_unlock(&serial_mutex);
 
        return ret;
 }
@@ -2561,7 +2562,7 @@ void serial8250_unregister_port(int line)
 {
        struct uart_8250_port *uart = &serial8250_ports[line];
 
-       down(&serial_sem);
+       mutex_lock(&serial_mutex);
        uart_remove_one_port(&serial8250_reg, &uart->port);
        if (serial8250_isa_devs) {
                uart->port.flags &= ~UPF_BOOT_AUTOCONF;
@@ -2571,7 +2572,7 @@ void serial8250_unregister_port(int line)
        } else {
                uart->port.dev = NULL;
        }
-       up(&serial_sem);
+       mutex_unlock(&serial_mutex);
 }
 EXPORT_SYMBOL(serial8250_unregister_port);
 
index 698cb76819d95d1a3851f6fc70613de38421931d..843717275d497d7445f26f75f4aa1b0a68ebf40d 100644 (file)
@@ -280,6 +280,40 @@ config SERIAL_AMBA_PL011_CONSOLE
          your boot loader (lilo or loadlin) about how to pass options to the
          kernel at boot time.)
 
+config SERIAL_AT91
+       bool "AT91RM9200 serial port support"
+       depends on ARM && ARCH_AT91RM9200
+       select SERIAL_CORE
+       help
+         This enables the driver for the on-chip UARTs of the AT91RM9200
+         processor.
+
+config SERIAL_AT91_CONSOLE
+       bool "Support for console on AT91RM9200 serial port"
+       depends on SERIAL_AT91=y
+       select SERIAL_CORE_CONSOLE
+       help
+         Say Y here if you wish to use a UART on the AT91RM9200 as the system
+         console (the system console is the device which receives all kernel
+         messages and warnings and which allows logins in single user mode).
+
+config SERIAL_AT91_TTYAT
+       bool "Install as device ttyAT0-4 instead of ttyS0-4"
+       depends on SERIAL_AT91=y
+       help
+         Say Y here if you wish to have the five internal AT91RM9200 UARTs
+         appear as /dev/ttyAT0-4 (major 240, minor 0-4) instead of the
+         normal /dev/ttyS0-4 (major 4, minor 64-68). This is necessary if
+         you also want other UARTs, such as external 8250/16C550 compatible
+         UARTs.
+         The ttySn nodes are legally reserved for the 8250 serial driver
+         but are often misused by other serial drivers.
+
+         To use this, you should create suitable ttyATn device nodes in
+         /dev/, and pass "console=ttyATn" to the kernel.
+
+         Say Y if you have an external 8250/16C550 UART.  If unsure, say N.
+
 config SERIAL_CLPS711X
        tristate "CLPS711X serial port support"
        depends on ARM && ARCH_CLPS711X
index 137148bba4faa03c19fceaf7df281544e84a45f1..24a583e482bbf149a6b1aa26acca9781bab42cbc 100644 (file)
@@ -56,3 +56,4 @@ obj-$(CONFIG_SERIAL_JSM) += jsm/
 obj-$(CONFIG_SERIAL_TXX9) += serial_txx9.o
 obj-$(CONFIG_SERIAL_VR41XX) += vr41xx_siu.o
 obj-$(CONFIG_SERIAL_SGI_IOC4) += ioc4_serial.o
+obj-$(CONFIG_SERIAL_AT91) += at91_serial.o
diff --git a/drivers/serial/at91_serial.c b/drivers/serial/at91_serial.c
new file mode 100644 (file)
index 0000000..0e20606
--- /dev/null
@@ -0,0 +1,894 @@
+/*
+ *  linux/drivers/char/at91_serial.c
+ *
+ *  Driver for Atmel AT91RM9200 Serial ports
+ *
+ *  Copyright (C) 2003 Rick Bronson
+ *
+ *  Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd.
+ *  Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/tty_flip.h>
+
+#include <asm/io.h>
+
+#include <asm/arch/at91rm9200_usart.h>
+#include <asm/mach/serial_at91rm9200.h>
+#include <asm/arch/board.h>
+#include <asm/arch/pio.h>
+
+
+#if defined(CONFIG_SERIAL_AT91_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
+#define SUPPORT_SYSRQ
+#endif
+
+#include <linux/serial_core.h>
+
+#ifdef CONFIG_SERIAL_AT91_TTYAT
+
+/* Use device name ttyAT, major 204 and minor 154-169.  This is necessary if we
+ * are to coexist with the 8250 driver, such as when we have an external 16C550
+ * UART. */
+#define SERIAL_AT91_MAJOR      204
+#define MINOR_START            154
+#define AT91_DEVICENAME                "ttyAT"
+
+#else
+
+/* Use device name ttyS, major 4, minor 64-68.  This is the usual serial port
+ * name, but it is legally reserved for the 8250 driver. */
+#define SERIAL_AT91_MAJOR      TTY_MAJOR
+#define MINOR_START            64
+#define AT91_DEVICENAME                "ttyS"
+
+#endif
+
+#define AT91_VA_BASE_DBGU      ((unsigned long) AT91_VA_BASE_SYS + AT91_DBGU)
+#define AT91_ISR_PASS_LIMIT    256
+
+#define UART_PUT_CR(port,v)    writel(v, (port)->membase + AT91_US_CR)
+#define UART_GET_MR(port)      readl((port)->membase + AT91_US_MR)
+#define UART_PUT_MR(port,v)    writel(v, (port)->membase + AT91_US_MR)
+#define UART_PUT_IER(port,v)   writel(v, (port)->membase + AT91_US_IER)
+#define UART_PUT_IDR(port,v)   writel(v, (port)->membase + AT91_US_IDR)
+#define UART_GET_IMR(port)     readl((port)->membase + AT91_US_IMR)
+#define UART_GET_CSR(port)     readl((port)->membase + AT91_US_CSR)
+#define UART_GET_CHAR(port)    readl((port)->membase + AT91_US_RHR)
+#define UART_PUT_CHAR(port,v)  writel(v, (port)->membase + AT91_US_THR)
+#define UART_GET_BRGR(port)    readl((port)->membase + AT91_US_BRGR)
+#define UART_PUT_BRGR(port,v)  writel(v, (port)->membase + AT91_US_BRGR)
+#define UART_PUT_RTOR(port,v)  writel(v, (port)->membase + AT91_US_RTOR)
+
+// #define UART_GET_CR(port)   readl((port)->membase + AT91_US_CR)             // is write-only
+
+ /* PDC registers */
+#define UART_PUT_PTCR(port,v)  writel(v, (port)->membase + AT91_PDC_PTCR)
+#define UART_PUT_RPR(port,v)   writel(v, (port)->membase + AT91_PDC_RPR)
+#define UART_PUT_RCR(port,v)   writel(v, (port)->membase + AT91_PDC_RCR)
+#define UART_GET_RCR(port)     readl((port)->membase + AT91_PDC_RCR)
+#define UART_PUT_RNPR(port,v)  writel(v, (port)->membase + AT91_PDC_RNPR)
+#define UART_PUT_RNCR(port,v)  writel(v, (port)->membase + AT91_PDC_RNCR)
+
+
+static int (*at91_open)(struct uart_port *);
+static void (*at91_close)(struct uart_port *);
+
+#ifdef SUPPORT_SYSRQ
+static struct console at91_console;
+#endif
+
+/*
+ * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty.
+ */
+static u_int at91_tx_empty(struct uart_port *port)
+{
+       return (UART_GET_CSR(port) & AT91_US_TXEMPTY) ? TIOCSER_TEMT : 0;
+}
+
+/*
+ * Set state of the modem control output lines
+ */
+static void at91_set_mctrl(struct uart_port *port, u_int mctrl)
+{
+       unsigned int control = 0;
+
+       /*
+        * Errata #39: RTS0 is not internally connected to PA21.  We need to drive
+        *  the pin manually.
+        */
+       if (port->mapbase == AT91_VA_BASE_US0) {
+               if (mctrl & TIOCM_RTS)
+                       at91_sys_write(AT91_PIOA + PIO_CODR, AT91_PA21_RTS0);
+               else
+                       at91_sys_write(AT91_PIOA + PIO_SODR, AT91_PA21_RTS0);
+       }
+
+       if (mctrl & TIOCM_RTS)
+               control |= AT91_US_RTSEN;
+       else
+               control |= AT91_US_RTSDIS;
+
+       if (mctrl & TIOCM_DTR)
+               control |= AT91_US_DTREN;
+       else
+               control |= AT91_US_DTRDIS;
+
+       UART_PUT_CR(port,control);
+}
+
+/*
+ * Get state of the modem control input lines
+ */
+static u_int at91_get_mctrl(struct uart_port *port)
+{
+       unsigned int status, ret = 0;
+
+       status = UART_GET_CSR(port);
+
+       /*
+        * The control signals are active low.
+        */
+       if (!(status & AT91_US_DCD))
+               ret |= TIOCM_CD;
+       if (!(status & AT91_US_CTS))
+               ret |= TIOCM_CTS;
+       if (!(status & AT91_US_DSR))
+               ret |= TIOCM_DSR;
+       if (!(status & AT91_US_RI))
+               ret |= TIOCM_RI;
+
+       return ret;
+}
+
+/*
+ * Stop transmitting.
+ */
+static void at91_stop_tx(struct uart_port *port)
+{
+       UART_PUT_IDR(port, AT91_US_TXRDY);
+       port->read_status_mask &= ~AT91_US_TXRDY;
+}
+
+/*
+ * Start transmitting.
+ */
+static void at91_start_tx(struct uart_port *port)
+{
+       port->read_status_mask |= AT91_US_TXRDY;
+       UART_PUT_IER(port, AT91_US_TXRDY);
+}
+
+/*
+ * Stop receiving - port is in process of being closed.
+ */
+static void at91_stop_rx(struct uart_port *port)
+{
+       UART_PUT_IDR(port, AT91_US_RXRDY);
+}
+
+/*
+ * Enable modem status interrupts
+ */
+static void at91_enable_ms(struct uart_port *port)
+{
+       port->read_status_mask |= (AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC);
+       UART_PUT_IER(port, AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC);
+}
+
+/*
+ * Control the transmission of a break signal
+ */
+static void at91_break_ctl(struct uart_port *port, int break_state)
+{
+       if (break_state != 0)
+               UART_PUT_CR(port, AT91_US_STTBRK);      /* start break */
+       else
+               UART_PUT_CR(port, AT91_US_STPBRK);      /* stop break */
+}
+
+/*
+ * Characters received (called from interrupt handler)
+ */
+static void at91_rx_chars(struct uart_port *port, struct pt_regs *regs)
+{
+       struct tty_struct *tty = port->info->tty;
+       unsigned int status, ch, flg;
+
+       status = UART_GET_CSR(port) & port->read_status_mask;
+       while (status & (AT91_US_RXRDY)) {
+               ch = UART_GET_CHAR(port);
+
+               if (tty->flip.count >= TTY_FLIPBUF_SIZE)
+                       goto ignore_char;
+               port->icount.rx++;
+
+               flg = TTY_NORMAL;
+
+               /*
+                * note that the error handling code is
+                * out of the main execution path
+                */
+               if (unlikely(status & (AT91_US_PARE | AT91_US_FRAME | AT91_US_OVRE))) {
+                       UART_PUT_CR(port, AT91_US_RSTSTA);      /* clear error */
+                       if (status & (AT91_US_PARE))
+                               port->icount.parity++;
+                       if (status & (AT91_US_FRAME))
+                               port->icount.frame++;
+                       if (status & (AT91_US_OVRE))
+                               port->icount.overrun++;
+
+                       if (status & AT91_US_PARE)
+                               flg = TTY_PARITY;
+                       else if (status & AT91_US_FRAME)
+                               flg = TTY_FRAME;
+                       if (status & AT91_US_OVRE) {
+                               /*
+                                * overrun does *not* affect the character
+                                * we read from the FIFO
+                                */
+                               tty_insert_flip_char(tty, ch, flg);
+                               ch = 0;
+                               flg = TTY_OVERRUN;
+                       }
+#ifdef SUPPORT_SYSRQ
+                       port->sysrq = 0;
+#endif
+               }
+
+               if (uart_handle_sysrq_char(port, ch, regs))
+                       goto ignore_char;
+
+               tty_insert_flip_char(tty, ch, flg);
+
+       ignore_char:
+               status = UART_GET_CSR(port) & port->read_status_mask;
+       }
+
+       tty_flip_buffer_push(tty);
+}
+
+/*
+ * Transmit characters (called from interrupt handler)
+ */
+static void at91_tx_chars(struct uart_port *port)
+{
+       struct circ_buf *xmit = &port->info->xmit;
+
+       if (port->x_char) {
+               UART_PUT_CHAR(port, port->x_char);
+               port->icount.tx++;
+               port->x_char = 0;
+               return;
+       }
+       if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+               at91_stop_tx(port);
+               return;
+       }
+
+       while (UART_GET_CSR(port) & AT91_US_TXRDY) {
+               UART_PUT_CHAR(port, xmit->buf[xmit->tail]);
+               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+               port->icount.tx++;
+               if (uart_circ_empty(xmit))
+                       break;
+       }
+
+       if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+               uart_write_wakeup(port);
+
+       if (uart_circ_empty(xmit))
+               at91_stop_tx(port);
+}
+
+/*
+ * Interrupt handler
+ */
+static irqreturn_t at91_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+       struct uart_port *port = dev_id;
+       unsigned int status, pending, pass_counter = 0;
+
+       status = UART_GET_CSR(port);
+       pending = status & port->read_status_mask;
+       if (pending) {
+               do {
+                       if (pending & AT91_US_RXRDY)
+                               at91_rx_chars(port, regs);
+
+                       /* Clear the relevant break bits */
+                       if (pending & AT91_US_RXBRK) {
+                               UART_PUT_CR(port, AT91_US_RSTSTA);
+                               port->icount.brk++;
+                               uart_handle_break(port);
+                       }
+
+                       // TODO: All reads to CSR will clear these interrupts!
+                       if (pending & AT91_US_RIIC) port->icount.rng++;
+                       if (pending & AT91_US_DSRIC) port->icount.dsr++;
+                       if (pending & AT91_US_DCDIC)
+                               uart_handle_dcd_change(port, !(status & AT91_US_DCD));
+                       if (pending & AT91_US_CTSIC)
+                               uart_handle_cts_change(port, !(status & AT91_US_CTS));
+                       if (pending & (AT91_US_RIIC | AT91_US_DSRIC | AT91_US_DCDIC | AT91_US_CTSIC))
+                               wake_up_interruptible(&port->info->delta_msr_wait);
+
+                       if (pending & AT91_US_TXRDY)
+                               at91_tx_chars(port);
+                       if (pass_counter++ > AT91_ISR_PASS_LIMIT)
+                               break;
+
+                       status = UART_GET_CSR(port);
+                       pending = status & port->read_status_mask;
+               } while (pending);
+       }
+       return IRQ_HANDLED;
+}
+
+/*
+ * Perform initialization and enable port for reception
+ */
+static int at91_startup(struct uart_port *port)
+{
+       int retval;
+
+       /*
+        * Ensure that no interrupts are enabled otherwise when
+        * request_irq() is called we could get stuck trying to
+        * handle an unexpected interrupt
+        */
+       UART_PUT_IDR(port, -1);
+
+       /*
+        * Allocate the IRQ
+        */
+       retval = request_irq(port->irq, at91_interrupt, SA_SHIRQ, "at91_serial", port);
+       if (retval) {
+               printk("at91_serial: at91_startup - Can't get irq\n");
+               return retval;
+       }
+
+       /*
+        * If there is a specific "open" function (to register
+        * control line interrupts)
+        */
+       if (at91_open) {
+               retval = at91_open(port);
+               if (retval) {
+                       free_irq(port->irq, port);
+                       return retval;
+               }
+       }
+
+       port->read_status_mask = AT91_US_RXRDY | AT91_US_TXRDY | AT91_US_OVRE
+                       | AT91_US_FRAME | AT91_US_PARE | AT91_US_RXBRK;
+       /*
+        * Finally, enable the serial port
+        */
+       UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+       UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN);         /* enable xmit & rcvr */
+       UART_PUT_IER(port, AT91_US_RXRDY);                      /* do receive only */
+       return 0;
+}
+
+/*
+ * Disable the port
+ */
+static void at91_shutdown(struct uart_port *port)
+{
+       /*
+        * Disable all interrupts, port and break condition.
+        */
+       UART_PUT_CR(port, AT91_US_RSTSTA);
+       UART_PUT_IDR(port, -1);
+
+       /*
+        * Free the interrupt
+        */
+       free_irq(port->irq, port);
+
+       /*
+        * If there is a specific "close" function (to unregister
+        * control line interrupts)
+        */
+       if (at91_close)
+               at91_close(port);
+}
+
+/*
+ * Power / Clock management.
+ */
+static void at91_serial_pm(struct uart_port *port, unsigned int state, unsigned int oldstate)
+{
+       switch (state) {
+               case 0:
+                       /*
+                        * Enable the peripheral clock for this serial port.
+                        * This is called on uart_open() or a resume event.
+                        */
+                       at91_sys_write(AT91_PMC_PCER, 1 << port->irq);
+                       break;
+               case 3:
+                       /*
+                        * Disable the peripheral clock for this serial port.
+                        * This is called on uart_close() or a suspend event.
+                        */
+                       if (port->irq != AT91_ID_SYS)                   /* is this a shared clock? */
+                               at91_sys_write(AT91_PMC_PCDR, 1 << port->irq);
+                       break;
+               default:
+                       printk(KERN_ERR "at91_serial: unknown pm %d\n", state);
+       }
+}
+
+/*
+ * Change the port parameters
+ */
+static void at91_set_termios(struct uart_port *port, struct termios * termios, struct termios * old)
+{
+       unsigned long flags;
+       unsigned int mode, imr, quot, baud;
+
+       baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16);
+       quot = uart_get_divisor(port, baud);
+
+       /* Get current mode register */
+       mode = UART_GET_MR(port) & ~(AT91_US_CHRL | AT91_US_NBSTOP | AT91_US_PAR);
+
+       /* byte size */
+       switch (termios->c_cflag & CSIZE) {
+       case CS5:
+               mode |= AT91_US_CHRL_5;
+               break;
+       case CS6:
+               mode |= AT91_US_CHRL_6;
+               break;
+       case CS7:
+               mode |= AT91_US_CHRL_7;
+               break;
+       default:
+               mode |= AT91_US_CHRL_8;
+               break;
+       }
+
+       /* stop bits */
+       if (termios->c_cflag & CSTOPB)
+               mode |= AT91_US_NBSTOP_2;
+
+       /* parity */
+       if (termios->c_cflag & PARENB) {
+               if (termios->c_cflag & CMSPAR) {                        /* Mark or Space parity */
+                       if (termios->c_cflag & PARODD)
+                               mode |= AT91_US_PAR_MARK;
+                       else
+                               mode |= AT91_US_PAR_SPACE;
+               }
+               else if (termios->c_cflag & PARODD)
+                       mode |= AT91_US_PAR_ODD;
+               else
+                       mode |= AT91_US_PAR_EVEN;
+       }
+       else
+               mode |= AT91_US_PAR_NONE;
+
+       spin_lock_irqsave(&port->lock, flags);
+
+       port->read_status_mask |= AT91_US_OVRE;
+       if (termios->c_iflag & INPCK)
+               port->read_status_mask |= AT91_US_FRAME | AT91_US_PARE;
+       if (termios->c_iflag & (BRKINT | PARMRK))
+               port->read_status_mask |= AT91_US_RXBRK;
+
+       /*
+        * Characters to ignore
+        */
+       port->ignore_status_mask = 0;
+       if (termios->c_iflag & IGNPAR)
+               port->ignore_status_mask |= (AT91_US_FRAME | AT91_US_PARE);
+       if (termios->c_iflag & IGNBRK) {
+               port->ignore_status_mask |= AT91_US_RXBRK;
+               /*
+                * If we're ignoring parity and break indicators,
+                * ignore overruns too (for real raw support).
+                */
+               if (termios->c_iflag & IGNPAR)
+                       port->ignore_status_mask |= AT91_US_OVRE;
+       }
+
+       // TODO: Ignore all characters if CREAD is set.
+
+       /* update the per-port timeout */
+       uart_update_timeout(port, termios->c_cflag, baud);
+
+       /* disable interrupts and drain transmitter */
+       imr = UART_GET_IMR(port);       /* get interrupt mask */
+       UART_PUT_IDR(port, -1);         /* disable all interrupts */
+       while (!(UART_GET_CSR(port) & AT91_US_TXEMPTY)) { barrier(); }
+
+       /* disable receiver and transmitter */
+       UART_PUT_CR(port, AT91_US_TXDIS | AT91_US_RXDIS);
+
+       /* set the parity, stop bits and data size */
+       UART_PUT_MR(port, mode);
+
+       /* set the baud rate */
+       UART_PUT_BRGR(port, quot);
+       UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+       UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN);
+
+       /* restore interrupts */
+       UART_PUT_IER(port, imr);
+
+       /* CTS flow-control and modem-status interrupts */
+       if (UART_ENABLE_MS(port, termios->c_cflag))
+               port->ops->enable_ms(port);
+
+       spin_unlock_irqrestore(&port->lock, flags);
+}
+
+/*
+ * Return string describing the specified port
+ */
+static const char *at91_type(struct uart_port *port)
+{
+       return (port->type == PORT_AT91RM9200) ? "AT91_SERIAL" : NULL;
+}
+
+/*
+ * Release the memory region(s) being used by 'port'.
+ */
+static void at91_release_port(struct uart_port *port)
+{
+       release_mem_region(port->mapbase,
+               (port->mapbase == AT91_VA_BASE_DBGU) ? 512 : SZ_16K);
+}
+
+/*
+ * Request the memory region(s) being used by 'port'.
+ */
+static int at91_request_port(struct uart_port *port)
+{
+       return request_mem_region(port->mapbase,
+               (port->mapbase == AT91_VA_BASE_DBGU) ? 512 : SZ_16K,
+               "at91_serial") != NULL ? 0 : -EBUSY;
+
+}
+
+/*
+ * Configure/autoconfigure the port.
+ */
+static void at91_config_port(struct uart_port *port, int flags)
+{
+       if (flags & UART_CONFIG_TYPE) {
+               port->type = PORT_AT91RM9200;
+               at91_request_port(port);
+       }
+}
+
+/*
+ * Verify the new serial_struct (for TIOCSSERIAL).
+ */
+static int at91_verify_port(struct uart_port *port, struct serial_struct *ser)
+{
+       int ret = 0;
+       if (ser->type != PORT_UNKNOWN && ser->type != PORT_AT91RM9200)
+               ret = -EINVAL;
+       if (port->irq != ser->irq)
+               ret = -EINVAL;
+       if (ser->io_type != SERIAL_IO_MEM)
+               ret = -EINVAL;
+       if (port->uartclk / 16 != ser->baud_base)
+               ret = -EINVAL;
+       if ((void *)port->mapbase != ser->iomem_base)
+               ret = -EINVAL;
+       if (port->iobase != ser->port)
+               ret = -EINVAL;
+       if (ser->hub6 != 0)
+               ret = -EINVAL;
+       return ret;
+}
+
+static struct uart_ops at91_pops = {
+       .tx_empty       = at91_tx_empty,
+       .set_mctrl      = at91_set_mctrl,
+       .get_mctrl      = at91_get_mctrl,
+       .stop_tx        = at91_stop_tx,
+       .start_tx       = at91_start_tx,
+       .stop_rx        = at91_stop_rx,
+       .enable_ms      = at91_enable_ms,
+       .break_ctl      = at91_break_ctl,
+       .startup        = at91_startup,
+       .shutdown       = at91_shutdown,
+       .set_termios    = at91_set_termios,
+       .type           = at91_type,
+       .release_port   = at91_release_port,
+       .request_port   = at91_request_port,
+       .config_port    = at91_config_port,
+       .verify_port    = at91_verify_port,
+       .pm             = at91_serial_pm,
+};
+
+static struct uart_port at91_ports[AT91_NR_UART];
+
+void __init at91_init_ports(void)
+{
+       static int first = 1;
+       int i;
+
+       if (!first)
+               return;
+       first = 0;
+
+       for (i = 0; i < AT91_NR_UART; i++) {
+               at91_ports[i].iotype    = UPIO_MEM;
+               at91_ports[i].flags     = UPF_BOOT_AUTOCONF;
+               at91_ports[i].uartclk   = at91_master_clock;
+               at91_ports[i].ops       = &at91_pops;
+               at91_ports[i].fifosize  = 1;
+               at91_ports[i].line      = i;
+       }
+}
+
+void __init at91_register_uart_fns(struct at91rm9200_port_fns *fns)
+{
+       if (fns->enable_ms)
+               at91_pops.enable_ms = fns->enable_ms;
+       if (fns->get_mctrl)
+               at91_pops.get_mctrl = fns->get_mctrl;
+       if (fns->set_mctrl)
+               at91_pops.set_mctrl = fns->set_mctrl;
+       at91_open          = fns->open;
+       at91_close         = fns->close;
+       at91_pops.pm       = fns->pm;
+       at91_pops.set_wake = fns->set_wake;
+}
+
+/*
+ * Setup ports.
+ */
+void __init at91_register_uart(int idx, int port)
+{
+       if ((idx < 0) || (idx >= AT91_NR_UART)) {
+               printk(KERN_ERR "%s: bad index number %d\n", __FUNCTION__, idx);
+               return;
+       }
+
+       switch (port) {
+       case 0:
+               at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US0;
+               at91_ports[idx].mapbase = AT91_VA_BASE_US0;
+               at91_ports[idx].irq     = AT91_ID_US0;
+               AT91_CfgPIO_USART0();
+               break;
+       case 1:
+               at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US1;
+               at91_ports[idx].mapbase = AT91_VA_BASE_US1;
+               at91_ports[idx].irq     = AT91_ID_US1;
+               AT91_CfgPIO_USART1();
+               break;
+       case 2:
+               at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US2;
+               at91_ports[idx].mapbase = AT91_VA_BASE_US2;
+               at91_ports[idx].irq     = AT91_ID_US2;
+               AT91_CfgPIO_USART2();
+               break;
+       case 3:
+               at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_US3;
+               at91_ports[idx].mapbase = AT91_VA_BASE_US3;
+               at91_ports[idx].irq     = AT91_ID_US3;
+               AT91_CfgPIO_USART3();
+               break;
+       case 4:
+               at91_ports[idx].membase = (void __iomem *) AT91_VA_BASE_DBGU;
+               at91_ports[idx].mapbase = AT91_VA_BASE_DBGU;
+               at91_ports[idx].irq     = AT91_ID_SYS;
+               AT91_CfgPIO_DBGU();
+               break;
+       default:
+               printk(KERN_ERR  "%s : bad port number %d\n", __FUNCTION__, port);
+       }
+}
+
+#ifdef CONFIG_SERIAL_AT91_CONSOLE
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void at91_console_write(struct console *co, const char *s, u_int count)
+{
+       struct uart_port *port = at91_ports + co->index;
+       unsigned int status, i, imr;
+
+       /*
+        *      First, save IMR and then disable interrupts
+        */
+       imr = UART_GET_IMR(port);       /* get interrupt mask */
+       UART_PUT_IDR(port, AT91_US_RXRDY | AT91_US_TXRDY);
+
+       /*
+        *      Now, do each character
+        */
+       for (i = 0; i < count; i++) {
+               do {
+                       status = UART_GET_CSR(port);
+               } while (!(status & AT91_US_TXRDY));
+               UART_PUT_CHAR(port, s[i]);
+               if (s[i] == '\n') {
+                       do {
+                               status = UART_GET_CSR(port);
+                       } while (!(status & AT91_US_TXRDY));
+                       UART_PUT_CHAR(port, '\r');
+               }
+       }
+
+       /*
+        *      Finally, wait for transmitter to become empty
+        *      and restore IMR
+        */
+       do {
+               status = UART_GET_CSR(port);
+       } while (!(status & AT91_US_TXRDY));
+       UART_PUT_IER(port, imr);        /* set interrupts back the way they were */
+}
+
+/*
+ * If the port was already initialised (eg, by a boot loader), try to determine
+ * the current setup.
+ */
+static void __init at91_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits)
+{
+       unsigned int mr, quot;
+
+// TODO: CR is a write-only register
+//     unsigned int cr;
+//
+//     cr = UART_GET_CR(port) & (AT91_US_RXEN | AT91_US_TXEN);
+//     if (cr == (AT91_US_RXEN | AT91_US_TXEN)) {
+//             /* ok, the port was enabled */
+//     }
+
+       mr = UART_GET_MR(port) & AT91_US_CHRL;
+       if (mr == AT91_US_CHRL_8)
+               *bits = 8;
+       else
+               *bits = 7;
+
+       mr = UART_GET_MR(port) & AT91_US_PAR;
+       if (mr == AT91_US_PAR_EVEN)
+               *parity = 'e';
+       else if (mr == AT91_US_PAR_ODD)
+               *parity = 'o';
+
+       quot = UART_GET_BRGR(port);
+       *baud = port->uartclk / (16 * (quot));
+}
+
+static int __init at91_console_setup(struct console *co, char *options)
+{
+       struct uart_port *port;
+       int baud = 115200;
+       int bits = 8;
+       int parity = 'n';
+       int flow = 'n';
+
+       /*
+        * Check whether an invalid uart number has been specified, and
+        * if so, search for the first available port that does have
+        * console support.
+        */
+       port = uart_get_console(at91_ports, AT91_NR_UART, co);
+
+       /*
+        * Enable the serial console, in case the boot loader did not do it.
+        */
+       at91_sys_write(AT91_PMC_PCER, 1 << port->irq);  /* enable clock */
+       UART_PUT_IDR(port, -1);                         /* disable interrupts */
+       UART_PUT_CR(port, AT91_US_RSTSTA | AT91_US_RSTRX);
+       UART_PUT_CR(port, AT91_US_TXEN | AT91_US_RXEN);
+
+       if (options)
+               uart_parse_options(options, &baud, &parity, &bits, &flow);
+       else
+               at91_console_get_options(port, &baud, &parity, &bits);
+
+       return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver at91_uart;
+
+static struct console at91_console = {
+       .name           = AT91_DEVICENAME,
+       .write          = at91_console_write,
+       .device         = uart_console_device,
+       .setup          = at91_console_setup,
+       .flags          = CON_PRINTBUFFER,
+       .index          = -1,
+       .data           = &at91_uart,
+};
+
+#define AT91_CONSOLE_DEVICE    &at91_console
+
+static int  __init at91_console_init(void)
+{
+       at91_init_ports();
+
+       at91_console.index = at91_console_port;
+       register_console(&at91_console);
+       return 0;
+}
+console_initcall(at91_console_init);
+
+#else
+#define AT91_CONSOLE_DEVICE    NULL
+#endif
+
+static struct uart_driver at91_uart = {
+       .owner                  = THIS_MODULE,
+       .driver_name            = AT91_DEVICENAME,
+       .dev_name               = AT91_DEVICENAME,
+       .devfs_name             = AT91_DEVICENAME,
+       .major                  = SERIAL_AT91_MAJOR,
+       .minor                  = MINOR_START,
+       .nr                     = AT91_NR_UART,
+       .cons                   = AT91_CONSOLE_DEVICE,
+};
+
+static int __init at91_serial_init(void)
+{
+       int ret, i;
+
+       at91_init_ports();
+
+       ret = uart_register_driver(&at91_uart);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (at91_serial_map[i] >= 0)
+                       uart_add_one_port(&at91_uart, &at91_ports[i]);
+       }
+
+       return 0;
+}
+
+static void __exit at91_serial_exit(void)
+{
+       int i;
+
+       for (i = 0; i < AT91_NR_UART; i++) {
+               if (at91_serial_map[i] >= 0)
+                       uart_remove_one_port(&at91_uart, &at91_ports[i]);
+       }
+
+       uart_unregister_driver(&at91_uart);
+}
+
+module_init(at91_serial_init);
+module_exit(at91_serial_exit);
+
+MODULE_AUTHOR("Rick Bronson");
+MODULE_DESCRIPTION("AT91 generic serial port driver");
+MODULE_LICENSE("GPL");
index 08c42c000188405f6363b3d49293dbe1e43a104b..be12623d85447348980d59240ae6244ba6305360 100644 (file)
@@ -442,6 +442,7 @@ static char *serial_version = "$Revision: 1.25 $";
 #include <linux/init.h>
 #include <asm/uaccess.h>
 #include <linux/kernel.h>
+#include <linux/mutex.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -1315,11 +1316,7 @@ static const struct control_pins e100_modem_pins[NR_PORTS] =
  * memory if large numbers of serial ports are open.
  */
 static unsigned char *tmp_buf;
-#ifdef DECLARE_MUTEX
-static DECLARE_MUTEX(tmp_buf_sem);
-#else
-static struct semaphore tmp_buf_sem = MUTEX;
-#endif
+static DEFINE_MUTEX(tmp_buf_mutex);
 
 /* Calculate the chartime depending on baudrate, numbor of bits etc. */
 static void update_char_time(struct e100_serial * info)
@@ -3661,7 +3658,7 @@ rs_raw_write(struct tty_struct * tty, int from_user,
         * design.
         */
        if (from_user) {
-               down(&tmp_buf_sem);
+               mutex_lock(&tmp_buf_mutex);
                while (1) {
                        int c1;
                        c = CIRC_SPACE_TO_END(info->xmit.head,
@@ -3692,7 +3689,7 @@ rs_raw_write(struct tty_struct * tty, int from_user,
                        count -= c;
                        ret += c;
                }
-               up(&tmp_buf_sem);
+               mutex_unlock(&tmp_buf_mutex);
        } else {
                cli();
                while (count) {
index f330d6c0e0dfa0ca0878316a8c41337acc524c83..5f52883e64d2ade773b968edd967e395121a58bd 100644 (file)
@@ -60,6 +60,7 @@
 #include <linux/pmu.h>
 #include <linux/bitops.h>
 #include <linux/sysrq.h>
+#include <linux/mutex.h>
 #include <asm/sections.h>
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -96,7 +97,7 @@ MODULE_LICENSE("GPL");
  */
 static struct uart_pmac_port   pmz_ports[MAX_ZS_PORTS];
 static int                     pmz_ports_count;
-static DECLARE_MUTEX(pmz_irq_sem);
+static DEFINE_MUTEX(pmz_irq_mutex);
 
 static struct uart_driver pmz_uart_reg = {
        .owner          =       THIS_MODULE,
@@ -922,7 +923,7 @@ static int pmz_startup(struct uart_port *port)
        if (uap->node == NULL)
                return -ENODEV;
 
-       down(&pmz_irq_sem);
+       mutex_lock(&pmz_irq_mutex);
 
        uap->flags |= PMACZILOG_FLAG_IS_OPEN;
 
@@ -940,11 +941,11 @@ static int pmz_startup(struct uart_port *port)
                dev_err(&uap->dev->ofdev.dev,
                        "Unable to register zs interrupt handler.\n");
                pmz_set_scc_power(uap, 0);
-               up(&pmz_irq_sem);
+               mutex_unlock(&pmz_irq_mutex);
                return -ENXIO;
        }
 
-       up(&pmz_irq_sem);
+       mutex_unlock(&pmz_irq_mutex);
 
        /* Right now, we deal with delay by blocking here, I'll be
         * smarter later on
@@ -981,7 +982,7 @@ static void pmz_shutdown(struct uart_port *port)
        if (uap->node == NULL)
                return;
 
-       down(&pmz_irq_sem);
+       mutex_lock(&pmz_irq_mutex);
 
        /* Release interrupt handler */
                free_irq(uap->port.irq, uap);
@@ -1002,7 +1003,7 @@ static void pmz_shutdown(struct uart_port *port)
 
        if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
                spin_unlock_irqrestore(&port->lock, flags);
-               up(&pmz_irq_sem);
+               mutex_unlock(&pmz_irq_mutex);
                return;
        }
 
@@ -1019,7 +1020,7 @@ static void pmz_shutdown(struct uart_port *port)
 
        spin_unlock_irqrestore(&port->lock, flags);
 
-       up(&pmz_irq_sem);
+       mutex_unlock(&pmz_irq_mutex);
 
        pmz_debug("pmz: shutdown() done.\n");
 }
@@ -1591,7 +1592,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
 
        state = pmz_uart_reg.state + uap->port.line;
 
-       down(&pmz_irq_sem);
+       mutex_lock(&pmz_irq_mutex);
        down(&state->sem);
 
        spin_lock_irqsave(&uap->port.lock, flags);
@@ -1624,7 +1625,7 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
        pmz_set_scc_power(uap, 0);
 
        up(&state->sem);
-       up(&pmz_irq_sem);
+       mutex_unlock(&pmz_irq_mutex);
 
        pmz_debug("suspend, switching complete\n");
 
@@ -1651,7 +1652,7 @@ static int pmz_resume(struct macio_dev *mdev)
 
        state = pmz_uart_reg.state + uap->port.line;
 
-       down(&pmz_irq_sem);
+       mutex_lock(&pmz_irq_mutex);
        down(&state->sem);
 
        spin_lock_irqsave(&uap->port.lock, flags);
@@ -1685,7 +1686,7 @@ static int pmz_resume(struct macio_dev *mdev)
 
  bail:
        up(&state->sem);
-       up(&pmz_irq_sem);
+       mutex_unlock(&pmz_irq_mutex);
 
        /* Right now, we deal with delay by blocking here, I'll be
         * smarter later on
index 9589509fc5bd7b48584ececb3933cbed97b78bd2..2ca620900bcc24cf9b6f18850853597ddbbdaab2 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/device.h>
 #include <linux/serial.h> /* for serial_state and serial_icounter_struct */
 #include <linux/delay.h>
+#include <linux/mutex.h>
 
 #include <asm/irq.h>
 #include <asm/uaccess.h>
@@ -47,7 +48,7 @@
 /*
  * This is used to lock changes in serial line configuration.
  */
-static DECLARE_MUTEX(port_sem);
+static DEFINE_MUTEX(port_mutex);
 
 #define HIGH_BITS_OFFSET       ((sizeof(long)-sizeof(int))*8)
 
@@ -1472,7 +1473,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
 {
        struct uart_state *state;
 
-       down(&port_sem);
+       mutex_lock(&port_mutex);
        state = drv->state + line;
        if (down_interruptible(&state->sem)) {
                state = ERR_PTR(-ERESTARTSYS);
@@ -1509,7 +1510,7 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
        }
 
  out:
-       up(&port_sem);
+       mutex_unlock(&port_mutex);
        return state;
 }
 
@@ -2219,7 +2220,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
 
        state = drv->state + port->line;
 
-       down(&port_sem);
+       mutex_lock(&port_mutex);
        if (state->port) {
                ret = -EINVAL;
                goto out;
@@ -2255,7 +2256,7 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *port)
                register_console(port->cons);
 
  out:
-       up(&port_sem);
+       mutex_unlock(&port_mutex);
 
        return ret;
 }
@@ -2279,7 +2280,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
                printk(KERN_ALERT "Removing wrong port: %p != %p\n",
                        state->port, port);
 
-       down(&port_sem);
+       mutex_lock(&port_mutex);
 
        /*
         * Remove the devices from devfs
@@ -2288,7 +2289,7 @@ int uart_remove_one_port(struct uart_driver *drv, struct uart_port *port)
 
        uart_unconfigure_port(drv, state);
        state->port = NULL;
-       up(&port_sem);
+       mutex_unlock(&port_mutex);
 
        return 0;
 }
index 96969cb960a929d9d8a3da42e9c86a1ec0ff2c52..c30333694fdef0dfac3347714acad9af25691861 100644 (file)
@@ -785,6 +785,8 @@ static struct pcmcia_device_id serial_ids[] = {
        PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "3CXEM556.cis"),
        PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "3CXEM556.cis"),
        PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0x0710, "SW_7xx_SER.cis"),  /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */
+       PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */
+       PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "SW_555_SER.cis"),  /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */
        PCMCIA_DEVICE_CIS_PROD_ID12("MultiTech", "PCMCIA 56K DataFax", 0x842047ee, 0xc2efcf03, "MT5634ZLX.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "COMpad4.cis"),
        PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "COMpad2.cis"),
index fdd1f1915a427d4104574b8d3deeebf6b8bffa56..ee98a867bc6dabd5604763fb7c0c26518bdb57bf 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
 #include <linux/serial.h>
+#include <linux/mutex.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -1018,7 +1019,7 @@ static void serial_txx9_resume_port(int line)
        uart_resume_port(&serial_txx9_reg, &serial_txx9_ports[line].port);
 }
 
-static DECLARE_MUTEX(serial_txx9_sem);
+static DEFINE_MUTEX(serial_txx9_mutex);
 
 /**
  *     serial_txx9_register_port - register a serial port
@@ -1037,7 +1038,7 @@ static int __devinit serial_txx9_register_port(struct uart_port *port)
        struct uart_txx9_port *uart;
        int ret = -ENOSPC;
 
-       down(&serial_txx9_sem);
+       mutex_lock(&serial_txx9_mutex);
        for (i = 0; i < UART_NR; i++) {
                uart = &serial_txx9_ports[i];
                if (uart->port.type == PORT_UNKNOWN)
@@ -1058,7 +1059,7 @@ static int __devinit serial_txx9_register_port(struct uart_port *port)
                if (ret == 0)
                        ret = uart->port.line;
        }
-       up(&serial_txx9_sem);
+       mutex_unlock(&serial_txx9_mutex);
        return ret;
 }
 
@@ -1073,7 +1074,7 @@ static void __devexit serial_txx9_unregister_port(int line)
 {
        struct uart_txx9_port *uart = &serial_txx9_ports[line];
 
-       down(&serial_txx9_sem);
+       mutex_lock(&serial_txx9_mutex);
        uart_remove_one_port(&serial_txx9_reg, &uart->port);
        uart->port.flags = 0;
        uart->port.type = PORT_UNKNOWN;
@@ -1082,7 +1083,7 @@ static void __devexit serial_txx9_unregister_port(int line)
        uart->port.membase = 0;
        uart->port.dev = NULL;
        uart_add_one_port(&serial_txx9_reg, &uart->port);
-       up(&serial_txx9_sem);
+       mutex_unlock(&serial_txx9_mutex);
 }
 
 /*
index 7bdab2a7f59c61f25f7dd8b6b44c1b10e5ff6b42..94b22903119841369dcae3779c55eaeb36fce066 100644 (file)
@@ -175,8 +175,6 @@ int superhyway_register_driver(struct superhyway_driver *drv)
 {
        drv->drv.name   = drv->name;
        drv->drv.bus    = &superhyway_bus_type;
-       drv->drv.probe  = superhyway_device_probe;
-       drv->drv.remove = superhyway_device_remove;
 
        return driver_register(&drv->drv);
 }
@@ -213,6 +211,8 @@ struct bus_type superhyway_bus_type = {
 #ifdef CONFIG_SYSFS
        .dev_attrs      = superhyway_dev_attrs,
 #endif
+       .probe          = superhyway_device_probe,
+       .remove         = superhyway_device_remove,
 };
 
 static int __init superhyway_bus_init(void)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
new file mode 100644 (file)
index 0000000..b77dbd6
--- /dev/null
@@ -0,0 +1,109 @@
+#
+# SPI driver configuration
+#
+# NOTE:  the reason this doesn't show SPI slave support is mostly that
+# nobody's needed a slave side API yet.  The master-role API is not
+# fully appropriate there, so it'd need some thought to do well.
+#
+menu "SPI support"
+
+config SPI
+       bool "SPI support"
+       help
+         The "Serial Peripheral Interface" is a low level synchronous
+         protocol.  Chips that support SPI can have data transfer rates
+         up to several tens of Mbit/sec.  Chips are addressed with a
+         controller and a chipselect.  Most SPI slaves don't support
+         dynamic device discovery; some are even write-only or read-only.
+
+         SPI is widely used by microcontrollers to talk with sensors,
+         eeprom and flash memory, codecs and various other controller
+         chips, analog to digital (and d-to-a) converters, and more.
+         MMC and SD cards can be accessed using the SPI protocol; and for
+         DataFlash cards used in MMC sockets, SPI must always be used.
+
+         SPI is one of a family of similar protocols using a four wire
+         interface (select, clock, data in, data out) including Microwire
+         (half duplex), SSP, SSI, and PSP.  This driver framework should
+         work with most such devices and controllers.
+
+config SPI_DEBUG
+       boolean "Debug support for SPI drivers"
+       depends on SPI && DEBUG_KERNEL
+       help
+         Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
+         sysfs, and debugfs support in SPI controller and protocol drivers.
+
+#
+# MASTER side ... talking to discrete SPI slave chips including microcontrollers
+#
+
+config SPI_MASTER
+#      boolean "SPI Master Support"
+       boolean
+       default SPI
+       help
+         If your system has a master-capable SPI controller (which
+         provides the clock and chipselect), you can enable that
+         controller and the protocol drivers for the SPI slave chips
+         that are connected.
+
+comment "SPI Master Controller Drivers"
+       depends on SPI_MASTER
+
+config SPI_BITBANG
+       tristate "Bitbanging SPI master"
+       depends on SPI_MASTER && EXPERIMENTAL
+       help
+         With a few GPIO pins, your system can bitbang the SPI protocol.
+         Select this to get SPI support through I/O pins (GPIO, parallel
+         port, etc).  Or, some systems' SPI master controller drivers use
+         this code to manage the per-word or per-transfer accesses to the
+         hardware shift registers.
+
+         This is library code, and is automatically selected by drivers that
+         need it.  You only need to select this explicitly to support driver
+         modules that aren't part of this kernel tree.
+
+config SPI_BUTTERFLY
+       tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
+       depends on SPI_MASTER && PARPORT && EXPERIMENTAL
+       select SPI_BITBANG
+       help
+         This uses a custom parallel port cable to connect to an AVR
+         Butterfly <http://www.atmel.com/products/avr/butterfly>, an
+         inexpensive battery powered microcontroller evaluation board.
+         This same cable can be used to flash new firmware.
+
+#
+# Add new SPI master controllers in alphabetical order above this line
+#
+
+
+#
+# There are lots of SPI device types, with sensors and memory
+# being probably the most widely used ones.
+#
+comment "SPI Protocol Masters"
+       depends on SPI_MASTER
+
+
+#
+# Add new SPI protocol masters in alphabetical order above this line
+#
+
+
+# (slave support would go here)
+
+endmenu # "SPI support"
+
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
new file mode 100644 (file)
index 0000000..c2c87e8
--- /dev/null
@@ -0,0 +1,25 @@
+#
+# Makefile for kernel SPI drivers.
+#
+
+ifeq ($(CONFIG_SPI_DEBUG),y)
+EXTRA_CFLAGS += -DDEBUG
+endif
+
+# small core, mostly translating board-specific
+# config declarations into driver model code
+obj-$(CONFIG_SPI_MASTER)               += spi.o
+
+# SPI master controller drivers (bus)
+obj-$(CONFIG_SPI_BITBANG)              += spi_bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY)            += spi_butterfly.o
+#      ... add above this line ...
+
+# SPI protocol drivers (device/link on bus)
+#      ... add above this line ...
+
+# SPI slave controller drivers (upstream link)
+#      ... add above this line ...
+
+# SPI slave drivers (protocol for that link)
+#      ... add above this line ...
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
new file mode 100644 (file)
index 0000000..791c4dc
--- /dev/null
@@ -0,0 +1,642 @@
+/*
+ * spi.c - SPI init/core code
+ *
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/spi/spi.h>
+
+
+/* SPI bustype and spi_master class are registered after board init code
+ * provides the SPI device tables, ensuring that both are present by the
+ * time controller driver registration causes spi_devices to "enumerate".
+ */
+static void spidev_release(struct device *dev)
+{
+       const struct spi_device *spi = to_spi_device(dev);
+
+       /* spi masters may cleanup for released devices */
+       if (spi->master->cleanup)
+               spi->master->cleanup(spi);
+
+       spi_master_put(spi->master);
+       kfree(dev);
+}
+
+static ssize_t
+modalias_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+       const struct spi_device *spi = to_spi_device(dev);
+
+       return snprintf(buf, BUS_ID_SIZE + 1, "%s\n", spi->modalias);
+}
+
+static struct device_attribute spi_dev_attrs[] = {
+       __ATTR_RO(modalias),
+       __ATTR_NULL,
+};
+
+/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
+ * and the sysfs version makes coldplug work too.
+ */
+
+static int spi_match_device(struct device *dev, struct device_driver *drv)
+{
+       const struct spi_device *spi = to_spi_device(dev);
+
+       return strncmp(spi->modalias, drv->name, BUS_ID_SIZE) == 0;
+}
+
+static int spi_uevent(struct device *dev, char **envp, int num_envp,
+               char *buffer, int buffer_size)
+{
+       const struct spi_device         *spi = to_spi_device(dev);
+
+       envp[0] = buffer;
+       snprintf(buffer, buffer_size, "MODALIAS=%s", spi->modalias);
+       envp[1] = NULL;
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * NOTE:  the suspend() method for an spi_master controller driver
+ * should verify that all its child devices are marked as suspended;
+ * suspend requests delivered through sysfs power/state files don't
+ * enforce such constraints.
+ */
+static int spi_suspend(struct device *dev, pm_message_t message)
+{
+       int                     value;
+       struct spi_driver       *drv = to_spi_driver(dev->driver);
+
+       if (!drv->suspend)
+               return 0;
+
+       /* suspend will stop irqs and dma; no more i/o */
+       value = drv->suspend(to_spi_device(dev), message);
+       if (value == 0)
+               dev->power.power_state = message;
+       return value;
+}
+
+static int spi_resume(struct device *dev)
+{
+       int                     value;
+       struct spi_driver       *drv = to_spi_driver(dev->driver);
+
+       if (!drv->resume)
+               return 0;
+
+       /* resume may restart the i/o queue */
+       value = drv->resume(to_spi_device(dev));
+       if (value == 0)
+               dev->power.power_state = PMSG_ON;
+       return value;
+}
+
+#else
+#define spi_suspend    NULL
+#define spi_resume     NULL
+#endif
+
+struct bus_type spi_bus_type = {
+       .name           = "spi",
+       .dev_attrs      = spi_dev_attrs,
+       .match          = spi_match_device,
+       .uevent         = spi_uevent,
+       .suspend        = spi_suspend,
+       .resume         = spi_resume,
+};
+EXPORT_SYMBOL_GPL(spi_bus_type);
+
+
+static int spi_drv_probe(struct device *dev)
+{
+       const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+
+       return sdrv->probe(to_spi_device(dev));
+}
+
+static int spi_drv_remove(struct device *dev)
+{
+       const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+
+       return sdrv->remove(to_spi_device(dev));
+}
+
+static void spi_drv_shutdown(struct device *dev)
+{
+       const struct spi_driver         *sdrv = to_spi_driver(dev->driver);
+
+       sdrv->shutdown(to_spi_device(dev));
+}
+
+int spi_register_driver(struct spi_driver *sdrv)
+{
+       sdrv->driver.bus = &spi_bus_type;
+       if (sdrv->probe)
+               sdrv->driver.probe = spi_drv_probe;
+       if (sdrv->remove)
+               sdrv->driver.remove = spi_drv_remove;
+       if (sdrv->shutdown)
+               sdrv->driver.shutdown = spi_drv_shutdown;
+       return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(spi_register_driver);
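[Editorial sketch, not part of this patch.]  Given the hooks above, a protocol
driver might register itself roughly as follows.  The "mychip" names are
invented; the sketch only assumes the struct spi_driver fields (probe, remove,
driver) that spi_register_driver() above manipulates, and that the driver name
is matched against spi_device->modalias as in spi_match_device().

        #include <linux/module.h>
        #include <linux/spi/spi.h>

        static int mychip_probe(struct spi_device *spi)
        {
                /* talk to the chip with spi_sync(), spi_write_then_read(), ... */
                return 0;
        }

        static int mychip_remove(struct spi_device *spi)
        {
                /* quiesce the chip, free any driver state */
                return 0;
        }

        static struct spi_driver mychip_driver = {
                .driver = {
                        .name   = "mychip",     /* matched against spi_device->modalias */
                        .owner  = THIS_MODULE,
                },
                .probe  = mychip_probe,
                .remove = mychip_remove,
        };

        static int __init mychip_init(void)
        {
                return spi_register_driver(&mychip_driver);
        }
        module_init(mychip_init);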
+
+/*-------------------------------------------------------------------------*/
+
+/* SPI devices should normally not be created by SPI device drivers; that
+ * would make them board-specific.  Similarly with SPI master drivers.
+ * Device registration normally goes into code like arch/.../mach.../board-YYY.c
+ * with other readonly (flashable) information about mainboard devices.
+ */
+
+struct boardinfo {
+       struct list_head        list;
+       unsigned                n_board_info;
+       struct spi_board_info   board_info[0];
+};
+
+static LIST_HEAD(board_list);
+static DECLARE_MUTEX(board_lock);
+
+
+/* On typical mainboards, this is purely internal; and it's not needed
+ * after board init creates the hard-wired devices.  Some development
+ * platforms may not be able to use spi_register_board_info though, and
+ * this is exported so that for example a USB or parport based adapter
+ * driver could add devices (which it would learn about out-of-band).
+ */
+struct spi_device *__init_or_module
+spi_new_device(struct spi_master *master, struct spi_board_info *chip)
+{
+       struct spi_device       *proxy;
+       struct device           *dev = master->cdev.dev;
+       int                     status;
+
+       /* NOTE:  caller did any chip->bus_num checks necessary */
+
+       if (!spi_master_get(master))
+               return NULL;
+
+       proxy = kzalloc(sizeof *proxy, GFP_KERNEL);
+       if (!proxy) {
+               dev_err(dev, "can't alloc dev for cs%d\n",
+                       chip->chip_select);
+               goto fail;
+       }
+       proxy->master = master;
+       proxy->chip_select = chip->chip_select;
+       proxy->max_speed_hz = chip->max_speed_hz;
+       proxy->irq = chip->irq;
+       proxy->modalias = chip->modalias;
+
+       snprintf(proxy->dev.bus_id, sizeof proxy->dev.bus_id,
+                       "%s.%u", master->cdev.class_id,
+                       chip->chip_select);
+       proxy->dev.parent = dev;
+       proxy->dev.bus = &spi_bus_type;
+       proxy->dev.platform_data = (void *) chip->platform_data;
+       proxy->controller_data = chip->controller_data;
+       proxy->controller_state = NULL;
+       proxy->dev.release = spidev_release;
+
+       /* drivers may modify this default i/o setup */
+       status = master->setup(proxy);
+       if (status < 0) {
+               dev_dbg(dev, "can't %s %s, status %d\n",
+                               "setup", proxy->dev.bus_id, status);
+               goto fail;
+       }
+
+       /* driver core catches callers that misbehave by defining
+        * devices that already exist.
+        */
+       status = device_register(&proxy->dev);
+       if (status < 0) {
+               dev_dbg(dev, "can't %s %s, status %d\n",
+                               "add", proxy->dev.bus_id, status);
+               goto fail;
+       }
+       dev_dbg(dev, "registered child %s\n", proxy->dev.bus_id);
+       return proxy;
+
+fail:
+       spi_master_put(master);
+       kfree(proxy);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(spi_new_device);
+
+/*
+ * Board-specific early init code calls this (probably during arch_initcall)
+ * with segments of the SPI device table.  Any device nodes are created later,
+ * after the relevant parent SPI controller (bus_num) is defined.  We keep
+ * this table of devices forever, so that reloading a controller driver will
+ * not make Linux forget about these hard-wired devices.
+ *
+ * Other code can also call this, e.g. a particular add-on board might provide
+ * SPI devices through its expansion connector, so code initializing that board
+ * would naturally declare its SPI devices.
+ *
+ * The board info passed can safely be __initdata ... but be careful of
+ * any embedded pointers (platform_data, etc); they're copied as-is.
+ */
+int __init
+spi_register_board_info(struct spi_board_info const *info, unsigned n)
+{
+       struct boardinfo        *bi;
+
+       bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
+       if (!bi)
+               return -ENOMEM;
+       bi->n_board_info = n;
+       memcpy(bi->board_info, info, n * sizeof *info);
+
+       down(&board_lock);
+       list_add_tail(&bi->list, &board_list);
+       up(&board_lock);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_register_board_info);
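[Editorial sketch, not part of this patch.]  A hedged illustration of the
board-file usage described above; the bus number, chip selects, IRQ, and
device names are invented, and this table would normally live in
arch/.../board-*.c alongside the other mainboard device declarations.

        #include <linux/init.h>
        #include <linux/kernel.h>
        #include <linux/spi/spi.h>

        static struct spi_board_info example_spi_devices[] __initdata = {
                {
                        .modalias       = "mtd_dataflash",      /* hypothetical wiring */
                        .max_speed_hz   = 15 * 1000 * 1000,
                        .bus_num        = 1,
                        .chip_select    = 0,
                },
                {
                        .modalias       = "mychip",             /* matches a driver name */
                        .max_speed_hz   = 2 * 1000 * 1000,
                        .bus_num        = 1,
                        .chip_select    = 1,
                        .irq            = 42,                   /* invented IRQ number */
                },
        };

        /* typically called from the board's arch_initcall()-level setup code */
        static int __init example_board_init_spi(void)
        {
                return spi_register_board_info(example_spi_devices,
                                ARRAY_SIZE(example_spi_devices));
        }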
+
+/* FIXME someone should add support for a __setup("spi", ...) that
+ * creates board info from kernel command lines
+ */
+
+static void __init_or_module
+scan_boardinfo(struct spi_master *master)
+{
+       struct boardinfo        *bi;
+       struct device           *dev = master->cdev.dev;
+
+       down(&board_lock);
+       list_for_each_entry(bi, &board_list, list) {
+               struct spi_board_info   *chip = bi->board_info;
+               unsigned                n;
+
+               for (n = bi->n_board_info; n > 0; n--, chip++) {
+                       if (chip->bus_num != master->bus_num)
+                               continue;
+                       /* some controllers only have one chip, so they
+                        * might not use chipselects.  otherwise, the
+                        * chipselects are numbered 0..max.
+                        */
+                       if (chip->chip_select >= master->num_chipselect
+                                       && master->num_chipselect) {
+                               dev_dbg(dev, "cs%d > max %d\n",
+                                       chip->chip_select,
+                                       master->num_chipselect);
+                               continue;
+                       }
+                       (void) spi_new_device(master, chip);
+               }
+       }
+       up(&board_lock);
+}
+
+/*-------------------------------------------------------------------------*/
+
+static void spi_master_release(struct class_device *cdev)
+{
+       struct spi_master *master;
+
+       master = container_of(cdev, struct spi_master, cdev);
+       kfree(master);
+}
+
+static struct class spi_master_class = {
+       .name           = "spi_master",
+       .owner          = THIS_MODULE,
+       .release        = spi_master_release,
+};
+
+
+/**
+ * spi_alloc_master - allocate SPI master controller
+ * @dev: the controller, possibly using the platform_bus
+ * @size: how much driver-private data to preallocate; the pointer to this
+ *     memory is in the class_data field of the returned class_device,
+ *     accessible with spi_master_get_devdata().
+ *
+ * This call is used only by SPI master controller drivers, which are the
+ * only ones directly touching chip registers.  It's how they allocate
+ * an spi_master structure, prior to calling spi_add_master().
+ *
+ * This must be called from context that can sleep.  It returns the SPI
+ * master structure on success, else NULL.
+ *
+ * The caller is responsible for assigning the bus number and initializing
+ * the master's methods before calling spi_add_master(); and (after errors
+ * adding the device) calling spi_master_put() to prevent a memory leak.
+ */
+struct spi_master * __init_or_module
+spi_alloc_master(struct device *dev, unsigned size)
+{
+       struct spi_master       *master;
+
+       if (!dev)
+               return NULL;
+
+       master = kzalloc(size + sizeof *master, SLAB_KERNEL);
+       if (!master)
+               return NULL;
+
+       class_device_initialize(&master->cdev);
+       master->cdev.class = &spi_master_class;
+       master->cdev.dev = get_device(dev);
+       spi_master_set_devdata(master, &master[1]);
+
+       return master;
+}
+EXPORT_SYMBOL_GPL(spi_alloc_master);
+
+/**
+ * spi_register_master - register SPI master controller
+ * @master: initialized master, originally from spi_alloc_master()
+ *
+ * SPI master controllers connect to their drivers using some non-SPI bus,
+ * such as the platform bus.  The final stage of probe() in that code
+ * includes calling spi_register_master() to hook up to this SPI bus glue.
+ *
+ * SPI controllers use board specific (often SOC specific) bus numbers,
+ * and board-specific addressing for SPI devices combines those numbers
+ * with chip select numbers.  Since SPI does not directly support dynamic
+ * device identification, boards need configuration tables telling which
+ * chip is at which address.
+ *
+ * This must be called from context that can sleep.  It returns zero on
+ * success, else a negative error code (dropping the master's refcount).
+ * After a successful return, the caller is responsible for calling
+ * spi_unregister_master().
+ */
+int __init_or_module
+spi_register_master(struct spi_master *master)
+{
+       static atomic_t         dyn_bus_id = ATOMIC_INIT(0);
+       struct device           *dev = master->cdev.dev;
+       int                     status = -ENODEV;
+       int                     dynamic = 0;
+
+       if (!dev)
+               return -ENODEV;
+
+       /* convention:  dynamically assigned bus IDs count down from the max */
+       if (master->bus_num == 0) {
+               master->bus_num = atomic_dec_return(&dyn_bus_id);
+               dynamic = 1;
+       }
+
+       /* register the device, then userspace will see it.
+        * registration fails if the bus ID is in use.
+        */
+       snprintf(master->cdev.class_id, sizeof master->cdev.class_id,
+               "spi%u", master->bus_num);
+       status = class_device_add(&master->cdev);
+       if (status < 0)
+               goto done;
+       dev_dbg(dev, "registered master %s%s\n", master->cdev.class_id,
+                       dynamic ? " (dynamic)" : "");
+
+       /* populate children from any spi device tables */
+       scan_boardinfo(master);
+       status = 0;
+done:
+       return status;
+}
+EXPORT_SYMBOL_GPL(spi_register_master);
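[Editorial sketch, not part of this patch.]  A minimal illustration of the
controller-side flow the comments above describe: a platform driver's probe()
allocates the master, fills in its addressing and methods, and registers it.
The foo_* names, the bus number, the chipselect count, and the stubbed methods
are assumptions.

        #include <linux/errno.h>
        #include <linux/platform_device.h>
        #include <linux/spi/spi.h>

        static int foo_spi_setup(struct spi_device *spi)
        {
                /* validate spi->mode, bits_per_word, max_speed_hz; set up the chip's CS */
                return 0;
        }

        static int foo_spi_transfer(struct spi_device *spi, struct spi_message *m)
        {
                /* a real driver queues the message and completes it asynchronously */
                m->status = -ENOSYS;            /* stub only */
                m->complete(m->context);
                return 0;
        }

        static int foo_spi_probe(struct platform_device *pdev)
        {
                struct spi_master       *master;
                int                     status;

                master = spi_alloc_master(&pdev->dev, 0);
                if (!master)
                        return -ENOMEM;

                master->bus_num = 1;            /* invented; board tables match on this */
                master->num_chipselect = 4;     /* invented */
                master->setup = foo_spi_setup;
                master->transfer = foo_spi_transfer;

                status = spi_register_master(master);
                if (status < 0)
                        spi_master_put(master); /* drop the reference from the alloc */
                return status;
        }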
+
+
+static int __unregister(struct device *dev, void *unused)
+{
+       /* note: before about 2.6.14-rc1 this would corrupt memory: */
+       spi_unregister_device(to_spi_device(dev));
+       return 0;
+}
+
+/**
+ * spi_unregister_master - unregister SPI master controller
+ * @master: the master being unregistered
+ *
+ * This call is used only by SPI master controller drivers, which are the
+ * only ones directly touching chip registers.
+ *
+ * This must be called from context that can sleep.
+ */
+void spi_unregister_master(struct spi_master *master)
+{
+       (void) device_for_each_child(master->cdev.dev, NULL, __unregister);
+       class_device_unregister(&master->cdev);
+       master->cdev.dev = NULL;
+}
+EXPORT_SYMBOL_GPL(spi_unregister_master);
+
+/**
+ * spi_busnum_to_master - look up master associated with bus_num
+ * @bus_num: the master's bus number
+ *
+ * This call may be used with devices that are registered after
+ * arch init time.  It returns a refcounted pointer to the relevant
+ * spi_master (which the caller must release), or NULL if there is
+ * no such master registered.
+ */
+struct spi_master *spi_busnum_to_master(u16 bus_num)
+{
+       if (bus_num) {
+               char                    name[8];
+               struct kobject          *bus;
+
+               snprintf(name, sizeof name, "spi%u", bus_num);
+               bus = kset_find_obj(&spi_master_class.subsys.kset, name);
+               if (bus)
+                       return container_of(bus, struct spi_master, cdev.kobj);
+       }
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(spi_busnum_to_master);
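[Editorial sketch, not part of this patch.]  A short, hypothetical illustration
of the lookup pattern this comment implies, for code that learns about a device
after arch init (e.g. an add-on board): find the bus, add a device, then drop
the lookup reference.

        static struct spi_device *example_add_late_device(u16 bus_num,
                        struct spi_board_info *chip)
        {
                struct spi_master       *master;
                struct spi_device       *dev;

                master = spi_busnum_to_master(bus_num);
                if (!master)
                        return NULL;

                dev = spi_new_device(master, chip);     /* NULL on failure */
                spi_master_put(master);                 /* release the lookup reference */
                return dev;
        }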
+
+
+/*-------------------------------------------------------------------------*/
+
+static void spi_complete(void *arg)
+{
+       complete(arg);
+}
+
+/**
+ * spi_sync - blocking/synchronous SPI data transfers
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ *
+ * This call may only be used from a context that may sleep.  The sleep
+ * is non-interruptible, and has no timeout.  Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * Note that the SPI device's chip select is active during the message,
+ * and then is normally disabled between messages.  Drivers for some
+ * frequently-used devices may want to minimize costs of selecting a chip,
+ * by leaving it selected in anticipation that the next message will go
+ * to the same chip.  (That may increase power usage.)
+ *
+ * Also, the caller is guaranteeing that the memory associated with the
+ * message will not be freed before this call returns.
+ *
+ * The return value is a negative error code if the message could not be
+ * submitted, else zero.  When the value is zero, then message->status is
+ * also defined:  it's the completion code for the transfer, either zero
+ * or a negative error code from the controller driver.
+ */
+int spi_sync(struct spi_device *spi, struct spi_message *message)
+{
+       DECLARE_COMPLETION(done);
+       int status;
+
+       message->complete = spi_complete;
+       message->context = &done;
+       status = spi_async(spi, message);
+       if (status == 0)
+               wait_for_completion(&done);
+       message->context = NULL;
+       return status;
+}
+EXPORT_SYMBOL_GPL(spi_sync);
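[Editorial sketch, not part of this patch.]  As an illustration of the message
API, a protocol driver could chain two transfers under one chip select and wait
for the result as below; the command and response handling is simplified, and
real callers must pass dma-safe buffers as noted above.

        #include <linux/string.h>
        #include <linux/spi/spi.h>

        static int example_cmd_then_read(struct spi_device *spi, u8 *cmd, u8 *resp)
        {
                struct spi_transfer     t[2];
                struct spi_message      m;
                int                     ret;

                spi_message_init(&m);
                memset(t, 0, sizeof t);

                t[0].tx_buf = cmd;              /* one command byte out */
                t[0].len = 1;
                spi_message_add_tail(&t[0], &m);

                t[1].rx_buf = resp;             /* two response bytes in */
                t[1].len = 2;
                spi_message_add_tail(&t[1], &m);

                ret = spi_sync(spi, &m);
                return ret ? ret : m.status;
        }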
+
+#define        SPI_BUFSIZ      (SMP_CACHE_BYTES)
+
+static u8      *buf;
+
+/**
+ * spi_write_then_read - SPI synchronous write followed by read
+ * @spi: device with which data will be exchanged
+ * @txbuf: data to be written (need not be dma-safe)
+ * @n_tx: size of txbuf, in bytes
+ * @rxbuf: buffer into which data will be read
+ * @n_rx: size of rxbuf, in bytes (need not be dma-safe)
+ *
+ * This performs a half duplex MicroWire style transaction with the
+ * device, sending txbuf and then reading rxbuf.  The return value
+ * is zero for success, else a negative errno status code.
+ * This call may only be used from a context that may sleep.
+ *
+ * Parameters to this routine are always copied using a small buffer;
+ * performance-sensitive or bulk transfer code should instead use
+ * spi_{async,sync}() calls with dma-safe buffers.
+ */
+int spi_write_then_read(struct spi_device *spi,
+               const u8 *txbuf, unsigned n_tx,
+               u8 *rxbuf, unsigned n_rx)
+{
+       static DECLARE_MUTEX(lock);
+
+       int                     status;
+       struct spi_message      message;
+       struct spi_transfer     x[2];
+       u8                      *local_buf;
+
+       /* Use preallocated DMA-safe buffer.  We can't avoid copying here,
+        * (as a pure convenience thing), but we can keep heap costs
+        * out of the hot path ...
+        */
+       if ((n_tx + n_rx) > SPI_BUFSIZ)
+               return -EINVAL;
+
+       spi_message_init(&message);
+       memset(x, 0, sizeof x);
+       if (n_tx) {
+               x[0].len = n_tx;
+               spi_message_add_tail(&x[0], &message);
+       }
+       if (n_rx) {
+               x[1].len = n_rx;
+               spi_message_add_tail(&x[1], &message);
+       }
+
+       /* ... unless someone else is using the pre-allocated buffer */
+       if (down_trylock(&lock)) {
+               local_buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
+               if (!local_buf)
+                       return -ENOMEM;
+       } else
+               local_buf = buf;
+
+       memcpy(local_buf, txbuf, n_tx);
+       x[0].tx_buf = local_buf;
+       x[1].rx_buf = local_buf + n_tx;
+
+       /* do the i/o */
+       status = spi_sync(spi, &message);
+       if (status == 0) {
+               memcpy(rxbuf, x[1].rx_buf, n_rx);
+               status = message.status;
+       }
+
+       if (x[0].tx_buf == buf)
+               up(&lock);
+       else
+               kfree(local_buf);
+
+       return status;
+}
+EXPORT_SYMBOL_GPL(spi_write_then_read);
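[Editorial sketch, not part of this patch.]  For example, in a driver that
already includes <linux/spi/spi.h>, a single status byte could be fetched with
the helper above (the opcode value is invented):

        static int example_read_status_reg(struct spi_device *spi, u8 *status)
        {
                u8      cmd = 0x05;     /* hypothetical "read status" opcode */

                return spi_write_then_read(spi, &cmd, 1, status, 1);
        }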
+
+/*-------------------------------------------------------------------------*/
+
+static int __init spi_init(void)
+{
+       int     status;
+
+       buf = kmalloc(SPI_BUFSIZ, SLAB_KERNEL);
+       if (!buf) {
+               status = -ENOMEM;
+               goto err0;
+       }
+
+       status = bus_register(&spi_bus_type);
+       if (status < 0)
+               goto err1;
+
+       status = class_register(&spi_master_class);
+       if (status < 0)
+               goto err2;
+       return 0;
+
+err2:
+       bus_unregister(&spi_bus_type);
+err1:
+       kfree(buf);
+       buf = NULL;
+err0:
+       return status;
+}
+
+/* board_info is normally registered in arch_initcall(),
+ * but even essential drivers wait till later.
+ *
+ * REVISIT only boardinfo really needs static linking.  The rest (device and
+ * driver registration) _could_ be dynamically linked (modular) ... costs
+ * include needing to have boardinfo data structures be much more public.
+ */
+subsys_initcall(spi_init);
+
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c
new file mode 100644 (file)
index 0000000..f037e55
--- /dev/null
@@ -0,0 +1,472 @@
+/*
+ * spi_bitbang.c - polling/bitbanging SPI master controller driver utilities
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * FIRST PART (OPTIONAL):  word-at-a-time spi_transfer support.
+ * Use this for GPIO or shift-register level hardware APIs.
+ *
+ * spi_bitbang_cs is in spi_device->controller_state, which is unavailable
+ * to glue code.  These bitbang setup() and cleanup() routines are always
+ * used, though maybe they're called from controller-aware code.
+ *
+ * chipselect() and friends may use spi_device->controller_data and
+ * controller registers as appropriate.
+ *
+ *
+ * NOTE:  SPI controller pins can often be used as GPIO pins instead,
+ * which means you could use a bitbang driver either to get hardware
+ * working quickly, or to test for differences that aren't speed related.
+ */
+
+struct spi_bitbang_cs {
+       unsigned        nsecs;  /* (clock cycle time)/2 */
+       u32             (*txrx_word)(struct spi_device *spi, unsigned nsecs,
+                                       u32 word, u8 bits);
+       unsigned        (*txrx_bufs)(struct spi_device *,
+                                       u32 (*txrx_word)(
+                                               struct spi_device *spi,
+                                               unsigned nsecs,
+                                               u32 word, u8 bits),
+                                       unsigned, struct spi_transfer *);
+};
+
+static unsigned bitbang_txrx_8(
+       struct spi_device       *spi,
+       u32                     (*txrx_word)(struct spi_device *spi,
+                                       unsigned nsecs,
+                                       u32 word, u8 bits),
+       unsigned                ns,
+       struct spi_transfer     *t
+) {
+       unsigned                bits = spi->bits_per_word;
+       unsigned                count = t->len;
+       const u8                *tx = t->tx_buf;
+       u8                      *rx = t->rx_buf;
+
+       while (likely(count > 0)) {
+               u8              word = 0;
+
+               if (tx)
+                       word = *tx++;
+               word = txrx_word(spi, ns, word, bits);
+               if (rx)
+                       *rx++ = word;
+               count -= 1;
+       }
+       return t->len - count;
+}
+
+static unsigned bitbang_txrx_16(
+       struct spi_device       *spi,
+       u32                     (*txrx_word)(struct spi_device *spi,
+                                       unsigned nsecs,
+                                       u32 word, u8 bits),
+       unsigned                ns,
+       struct spi_transfer     *t
+) {
+       unsigned                bits = spi->bits_per_word;
+       unsigned                count = t->len;
+       const u16               *tx = t->tx_buf;
+       u16                     *rx = t->rx_buf;
+
+       while (likely(count > 1)) {
+               u16             word = 0;
+
+               if (tx)
+                       word = *tx++;
+               word = txrx_word(spi, ns, word, bits);
+               if (rx)
+                       *rx++ = word;
+               count -= 2;
+       }
+       return t->len - count;
+}
+
+static unsigned bitbang_txrx_32(
+       struct spi_device       *spi,
+       u32                     (*txrx_word)(struct spi_device *spi,
+                                       unsigned nsecs,
+                                       u32 word, u8 bits),
+       unsigned                ns,
+       struct spi_transfer     *t
+) {
+       unsigned                bits = spi->bits_per_word;
+       unsigned                count = t->len;
+       const u32               *tx = t->tx_buf;
+       u32                     *rx = t->rx_buf;
+
+       while (likely(count > 3)) {
+               u32             word = 0;
+
+               if (tx)
+                       word = *tx++;
+               word = txrx_word(spi, ns, word, bits);
+               if (rx)
+                       *rx++ = word;
+               count -= 4;
+       }
+       return t->len - count;
+}
+
+/**
+ * spi_bitbang_setup - default setup for per-word I/O loops
+ */
+int spi_bitbang_setup(struct spi_device *spi)
+{
+       struct spi_bitbang_cs   *cs = spi->controller_state;
+       struct spi_bitbang      *bitbang;
+
+       if (!spi->max_speed_hz)
+               return -EINVAL;
+
+       if (!cs) {
+               cs = kzalloc(sizeof *cs, SLAB_KERNEL);
+               if (!cs)
+                       return -ENOMEM;
+               spi->controller_state = cs;
+       }
+       bitbang = spi_master_get_devdata(spi->master);
+
+       if (!spi->bits_per_word)
+               spi->bits_per_word = 8;
+
+       /* spi_transfer level calls that work per-word */
+       if (spi->bits_per_word <= 8)
+               cs->txrx_bufs = bitbang_txrx_8;
+       else if (spi->bits_per_word <= 16)
+               cs->txrx_bufs = bitbang_txrx_16;
+       else if (spi->bits_per_word <= 32)
+               cs->txrx_bufs = bitbang_txrx_32;
+       else
+               return -EINVAL;
+
+       /* per-word shift register access, in hardware or bitbanging */
+       cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
+       if (!cs->txrx_word)
+               return -EINVAL;
+
+       /* nsecs = (clock period)/2 */
+       cs->nsecs = (1000000000/2) / (spi->max_speed_hz);
+       if (cs->nsecs > MAX_UDELAY_MS * 1000)
+               return -EINVAL;
+
+       dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n",
+                       __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA),
+                       spi->bits_per_word, 2 * cs->nsecs);
+
+       /* NOTE we _need_ to call chipselect() early, ideally with adapter
+        * setup, unless the hardware defaults cooperate to avoid confusion
+        * between normal (active low) and inverted chipselects.
+        */
+
+       /* deselect chip (low or high) */
+       spin_lock(&bitbang->lock);
+       if (!bitbang->busy) {
+               bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+               ndelay(cs->nsecs);
+       }
+       spin_unlock(&bitbang->lock);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup);
+
+/**
+ * spi_bitbang_cleanup - default cleanup for per-word I/O loops
+ */
+void spi_bitbang_cleanup(const struct spi_device *spi)
+{
+       kfree(spi->controller_state);
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);
+
+static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+       struct spi_bitbang_cs   *cs = spi->controller_state;
+       unsigned                nsecs = cs->nsecs;
+
+       return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * SECOND PART ... simple transfer queue runner.
+ *
+ * This costs a task context per controller, running the queue by
+ * performing each transfer in sequence.  Smarter hardware can queue
+ * several DMA transfers at once, and process several controller queues
+ * in parallel; this driver doesn't match such hardware very well.
+ *
+ * Drivers can provide word-at-a-time i/o primitives, or provide
+ * transfer-at-a-time ones to leverage dma or fifo hardware.
+ */
+static void bitbang_work(void *_bitbang)
+{
+       struct spi_bitbang      *bitbang = _bitbang;
+       unsigned long           flags;
+
+       spin_lock_irqsave(&bitbang->lock, flags);
+       bitbang->busy = 1;
+       while (!list_empty(&bitbang->queue)) {
+               struct spi_message      *m;
+               struct spi_device       *spi;
+               unsigned                nsecs;
+               struct spi_transfer     *t = NULL;
+               unsigned                tmp;
+               unsigned                cs_change;
+               int                     status;
+
+               m = container_of(bitbang->queue.next, struct spi_message,
+                               queue);
+               list_del_init(&m->queue);
+               spin_unlock_irqrestore(&bitbang->lock, flags);
+
+               /* FIXME this is made-up ... the correct value is known to
+                * word-at-a-time bitbang code, and presumably chipselect()
+                * should enforce these requirements too?
+                */
+               nsecs = 100;
+
+               spi = m->spi;
+               tmp = 0;
+               cs_change = 1;
+               status = 0;
+
+               list_for_each_entry (t, &m->transfers, transfer_list) {
+                       if (bitbang->shutdown) {
+                               status = -ESHUTDOWN;
+                               break;
+                       }
+
+                       /* set up default clock polarity, and activate chip;
+                        * this implicitly updates clock and spi modes as
+                        * previously recorded for this device via setup().
+                        * (and also deselects any other chip that might be
+                        * selected ...)
+                        */
+                       if (cs_change) {
+                               bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
+                               ndelay(nsecs);
+                       }
+                       cs_change = t->cs_change;
+                       if (!t->tx_buf && !t->rx_buf && t->len) {
+                               status = -EINVAL;
+                               break;
+                       }
+
+                       /* transfer data.  the lower level code handles any
+                        * new dma mappings it needs. our caller always gave
+                        * us dma-safe buffers.
+                        */
+                       if (t->len) {
+                               /* REVISIT dma API still needs a designated
+                                * DMA_ADDR_INVALID; ~0 might be better.
+                                */
+                               if (!m->is_dma_mapped)
+                                       t->rx_dma = t->tx_dma = 0;
+                               status = bitbang->txrx_bufs(spi, t);
+                       }
+                       if (status != t->len) {
+                               if (status > 0)
+                                       status = -EMSGSIZE;
+                               break;
+                       }
+                       m->actual_length += status;
+                       status = 0;
+
+                       /* protocol tweaks before next transfer */
+                       if (t->delay_usecs)
+                               udelay(t->delay_usecs);
+
+                       if (!cs_change)
+                               continue;
+                       if (t->transfer_list.next == &m->transfers)
+                               break;
+
+                       /* sometimes a short mid-message deselect of the chip
+                        * may be needed to terminate a mode or command
+                        */
+                       ndelay(nsecs);
+                       bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+                       ndelay(nsecs);
+               }
+
+               m->status = status;
+               m->complete(m->context);
+
+               /* normally deactivate chipselect ... unless no error and
+                * cs_change has hinted that the next message will probably
+                * be for this chip too.
+                */
+               if (!(status == 0 && cs_change)) {
+                       ndelay(nsecs);
+                       bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+                       ndelay(nsecs);
+               }
+
+               spin_lock_irqsave(&bitbang->lock, flags);
+       }
+       bitbang->busy = 0;
+       spin_unlock_irqrestore(&bitbang->lock, flags);
+}
+
+/**
+ * spi_bitbang_transfer - default submit to transfer queue
+ */
+int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m)
+{
+       struct spi_bitbang      *bitbang;
+       unsigned long           flags;
+
+       m->actual_length = 0;
+       m->status = -EINPROGRESS;
+
+       bitbang = spi_master_get_devdata(spi->master);
+       if (bitbang->shutdown)
+               return -ESHUTDOWN;
+
+       spin_lock_irqsave(&bitbang->lock, flags);
+       list_add_tail(&m->queue, &bitbang->queue);
+       queue_work(bitbang->workqueue, &bitbang->work);
+       spin_unlock_irqrestore(&bitbang->lock, flags);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_transfer);
+
+/*----------------------------------------------------------------------*/
+
+/**
+ * spi_bitbang_start - start up a polled/bitbanging SPI master driver
+ * @bitbang: driver handle
+ *
+ * Caller should have zero-initialized all parts of the structure, and then
+ * provided callbacks for chip selection and I/O loops.  If the master has
+ * a transfer method, its final step should call spi_bitbang_transfer; or,
+ * that's the default if the transfer routine is not initialized.  It should
+ * also set up the bus number and number of chipselects.
+ *
+ * For i/o loops, provide callbacks either per-word (for bitbanging, or for
+ * hardware that basically exposes a shift register) or per-spi_transfer
+ * (which takes better advantage of hardware like fifos or DMA engines).
+ *
+ * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup and
+ * spi_bitbang_cleanup to handle those spi master methods.  Those methods are
+ * the defaults if the bitbang->txrx_bufs routine isn't initialized.
+ *
+ * This routine registers the spi_master, which will process requests in a
+ * dedicated task, keeping IRQs unblocked most of the time.  To stop
+ * processing those requests, call spi_bitbang_stop().
+ */
+int spi_bitbang_start(struct spi_bitbang *bitbang)
+{
+       int     status;
+
+       if (!bitbang->master || !bitbang->chipselect)
+               return -EINVAL;
+
+       INIT_WORK(&bitbang->work, bitbang_work, bitbang);
+       spin_lock_init(&bitbang->lock);
+       INIT_LIST_HEAD(&bitbang->queue);
+
+       if (!bitbang->master->transfer)
+               bitbang->master->transfer = spi_bitbang_transfer;
+       if (!bitbang->txrx_bufs) {
+               bitbang->use_dma = 0;
+               bitbang->txrx_bufs = spi_bitbang_bufs;
+               if (!bitbang->master->setup) {
+                       bitbang->master->setup = spi_bitbang_setup;
+                       bitbang->master->cleanup = spi_bitbang_cleanup;
+               }
+       } else if (!bitbang->master->setup)
+               return -EINVAL;
+
+       /* this task is the only thing to touch the SPI bits */
+       bitbang->busy = 0;
+       bitbang->workqueue = create_singlethread_workqueue(
+                       bitbang->master->cdev.dev->bus_id);
+       if (bitbang->workqueue == NULL) {
+               status = -EBUSY;
+               goto err1;
+       }
+
+       /* driver may get busy before register() returns, especially
+        * if someone registered boardinfo for devices
+        */
+       status = spi_register_master(bitbang->master);
+       if (status < 0)
+               goto err2;
+
+       return status;
+
+err2:
+       destroy_workqueue(bitbang->workqueue);
+err1:
+       return status;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_start);
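[Editorial sketch, not part of this patch; the spi_butterfly driver below is
the real worked example.]  A hedged illustration of the hookup described above
for a controller that moves whole spi_transfers itself, so it supplies
txrx_bufs() and its own setup() instead of the per-word defaults; all foo_*
names are invented.

        #include <linux/spi/spi.h>
        #include <linux/spi/spi_bitbang.h>

        static void foo_chipselect(struct spi_device *spi, int is_active)
        {
                /* drive the CS line for spi->chip_select (mind the polarity) */
        }

        static int foo_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
        {
                /* program FIFO/DMA for this transfer; return bytes moved */
                return t->len;
        }

        static int foo_setup(struct spi_device *spi)
        {
                /* required when txrx_bufs is provided; validate mode and speed */
                return 0;
        }

        static int foo_hookup(struct spi_bitbang *bb, struct spi_master *master)
        {
                bb->master = spi_master_get(master);
                bb->chipselect = foo_chipselect;
                bb->txrx_bufs = foo_txrx_bufs;
                master->setup = foo_setup;
                return spi_bitbang_start(bb);
        }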
+
+/**
+ * spi_bitbang_stop - stops the task providing spi communication
+ */
+int spi_bitbang_stop(struct spi_bitbang *bitbang)
+{
+       unsigned        limit = 500;
+
+       spin_lock_irq(&bitbang->lock);
+       bitbang->shutdown = 0;
+       while (!list_empty(&bitbang->queue) && limit--) {
+               spin_unlock_irq(&bitbang->lock);
+
+               dev_dbg(bitbang->master->cdev.dev, "wait for queue\n");
+               msleep(10);
+
+               spin_lock_irq(&bitbang->lock);
+       }
+       spin_unlock_irq(&bitbang->lock);
+       if (!list_empty(&bitbang->queue)) {
+               dev_err(bitbang->master->cdev.dev, "queue didn't empty\n");
+               return -EBUSY;
+       }
+
+       destroy_workqueue(bitbang->workqueue);
+
+       spi_unregister_master(bitbang->master);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_stop);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c
new file mode 100644 (file)
index 0000000..79a3c59
--- /dev/null
@@ -0,0 +1,423 @@
+/*
+ * spi_butterfly.c - parport-to-butterfly adapter
+ *
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/parport.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/partitions.h>
+
+
+/*
+ * This uses SPI to talk with an "AVR Butterfly", which is a $US20 card
+ * with a battery powered AVR microcontroller and lots of goodies.  You
+ * can use GCC to develop firmware for this.
+ *
+ * See Documentation/spi/butterfly for information about how to build
+ * and use this custom parallel port cable.
+ */
+
+#undef HAVE_USI        /* nyet */
+
+
+/* DATA output bits (pins 2..9 == D0..D7) */
+#define        butterfly_nreset (1 << 1)               /* pin 3 */
+
+#define        spi_sck_bit     (1 << 0)                /* pin 2 */
+#define        spi_mosi_bit    (1 << 7)                /* pin 9 */
+
+#define        usi_sck_bit     (1 << 3)                /* pin 5 */
+#define        usi_mosi_bit    (1 << 4)                /* pin 6 */
+
+#define        vcc_bits        ((1 << 6) | (1 << 5))   /* pins 7, 8 */
+
+/* STATUS input bits */
+#define        spi_miso_bit    PARPORT_STATUS_BUSY     /* pin 11 */
+
+#define        usi_miso_bit    PARPORT_STATUS_PAPEROUT /* pin 12 */
+
+/* CONTROL output bits */
+#define        spi_cs_bit      PARPORT_CONTROL_SELECT  /* pin 17 */
+/* USI uses no chipselect */
+
+
+
+static inline struct butterfly *spidev_to_pp(struct spi_device *spi)
+{
+       return spi->controller_data;
+}
+
+static inline int is_usidev(struct spi_device *spi)
+{
+#ifdef HAVE_USI
+       return spi->chip_select != 1;
+#else
+       return 0;
+#endif
+}
+
+
+struct butterfly {
+       /* REVISIT ... for now, this must be first */
+       struct spi_bitbang      bitbang;
+
+       struct parport          *port;
+       struct pardevice        *pd;
+
+       u8                      lastbyte;
+
+       struct spi_device       *dataflash;
+       struct spi_device       *butterfly;
+       struct spi_board_info   info[2];
+
+};
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * these routines may be slower than necessary because they're hiding
+ * the fact that there are two different SPI busses on this cable: one
+ * to the DataFlash chip (or AVR SPI controller), the other to the
+ * AVR USI controller.
+ */
+
+static inline void
+setsck(struct spi_device *spi, int is_on)
+{
+       struct butterfly        *pp = spidev_to_pp(spi);
+       u8                      bit, byte = pp->lastbyte;
+
+       if (is_usidev(spi))
+               bit = usi_sck_bit;
+       else
+               bit = spi_sck_bit;
+
+       if (is_on)
+               byte |= bit;
+       else
+               byte &= ~bit;
+       parport_write_data(pp->port, byte);
+       pp->lastbyte = byte;
+}
+
+static inline void
+setmosi(struct spi_device *spi, int is_on)
+{
+       struct butterfly        *pp = spidev_to_pp(spi);
+       u8                      bit, byte = pp->lastbyte;
+
+       if (is_usidev(spi))
+               bit = usi_mosi_bit;
+       else
+               bit = spi_mosi_bit;
+
+       if (is_on)
+               byte |= bit;
+       else
+               byte &= ~bit;
+       parport_write_data(pp->port, byte);
+       pp->lastbyte = byte;
+}
+
+static inline int getmiso(struct spi_device *spi)
+{
+       struct butterfly        *pp = spidev_to_pp(spi);
+       int                     value;
+       u8                      bit;
+
+       if (is_usidev(spi))
+               bit = usi_miso_bit;
+       else
+               bit = spi_miso_bit;
+
+       /* only STATUS_BUSY is NOT negated */
+       value = !(parport_read_status(pp->port) & bit);
+       return (bit == PARPORT_STATUS_BUSY) ? value : !value;
+}
+
+static void butterfly_chipselect(struct spi_device *spi, int value)
+{
+       struct butterfly        *pp = spidev_to_pp(spi);
+
+       /* set default clock polarity */
+       if (value)
+               setsck(spi, spi->mode & SPI_CPOL);
+
+       /* no chipselect on this USI link config */
+       if (is_usidev(spi))
+               return;
+
+       /* here, value == "activate or not" */
+
+       /* most PARPORT_CONTROL_* bits are negated */
+       if (spi_cs_bit == PARPORT_CONTROL_INIT)
+               value = !value;
+
+       /* here, value == "bit value to write in control register"  */
+
+       parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0);
+}
+
+
+/* we only needed to implement one mode here, and choose SPI_MODE_0 */
+
+#define        spidelay(X)     do{}while(0)
+//#define      spidelay        ndelay
+
+#define        EXPAND_BITBANG_TXRX
+#include <linux/spi/spi_bitbang.h>
+
+static u32
+butterfly_txrx_word_mode0(struct spi_device *spi,
+               unsigned nsecs,
+               u32 word, u8 bits)
+{
+       return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* override default partitioning with cmdlinepart */
+static struct mtd_partition partitions[] = { {
+       /* JFFS2 wants partitions of 4*N blocks for this device ... */
+
+       /* sector 0 = 8 pages * 264 bytes/page (1 block)
+        * sector 1 = 248 pages * 264 bytes/page
+        */
+       .name           = "bookkeeping",        // 66 KB
+       .offset         = 0,
+       .size           = (8 + 248) * 264,
+//     .mask_flags     = MTD_WRITEABLE,
+}, {
+       /* sector 2 = 256 pages * 264 bytes/page
+        * sectors 3-5 = 512 pages * 264 bytes/page
+        */
+       .name           = "filesystem",         // 462 KB
+       .offset         = MTDPART_OFS_APPEND,
+       .size           = MTDPART_SIZ_FULL,
+} };
+
+static struct flash_platform_data flash = {
+       .name           = "butterflash",
+       .parts          = partitions,
+       .nr_parts       = ARRAY_SIZE(partitions),
+};
+
+
+/* REVISIT remove this ugly global and its "only one" limitation */
+static struct butterfly *butterfly;
+
+static void butterfly_attach(struct parport *p)
+{
+       struct pardevice        *pd;
+       int                     status;
+       struct butterfly        *pp;
+       struct spi_master       *master;
+       struct platform_device  *pdev;
+
+       if (butterfly)
+               return;
+
+       /* REVISIT:  this just _assumes_ a butterfly is there ... no probe,
+        * and no way to be selective about what it binds to.
+        */
+
+       /* FIXME where should master->cdev.dev come from?
+        * e.g. /sys/bus/pnp0/00:0b, some PCI thing, etc
+        * setting up a platform device like this is an ugly kluge...
+        */
+       pdev = platform_device_register_simple("butterfly", -1, NULL, 0);
+
+       master = spi_alloc_master(&pdev->dev, sizeof *pp);
+       if (!master) {
+               status = -ENOMEM;
+               goto done;
+       }
+       pp = spi_master_get_devdata(master);
+
+       /*
+        * SPI and bitbang hookup
+        *
+        * use default setup(), cleanup(), and transfer() methods; and
+        * only bother implementing mode 0.  Start it later.
+        */
+       master->bus_num = 42;
+       master->num_chipselect = 2;
+
+       pp->bitbang.master = spi_master_get(master);
+       pp->bitbang.chipselect = butterfly_chipselect;
+       pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
+
+       /*
+        * parport hookup
+        */
+       pp->port = p;
+       pd = parport_register_device(p, "spi_butterfly",
+                       NULL, NULL, NULL,
+                       0 /* FLAGS */, pp);
+       if (!pd) {
+               status = -ENOMEM;
+               goto clean0;
+       }
+       pp->pd = pd;
+
+       status = parport_claim(pd);
+       if (status < 0)
+               goto clean1;
+
+       /*
+        * Butterfly reset, powerup, run firmware
+        */
+       pr_debug("%s: powerup/reset Butterfly\n", p->name);
+
+       /* nCS for dataflash (this bit is inverted on output) */
+       parport_frob_control(pp->port, spi_cs_bit, 0);
+
+       /* stabilize power with chip in reset (nRESET), and
+        * both spi_sck_bit and usi_sck_bit clear (CPOL=0)
+        */
+       pp->lastbyte |= vcc_bits;
+       parport_write_data(pp->port, pp->lastbyte);
+       msleep(5);
+
+       /* take it out of reset; assume long reset delay */
+       pp->lastbyte |= butterfly_nreset;
+       parport_write_data(pp->port, pp->lastbyte);
+       msleep(100);
+
+
+       /*
+        * Start SPI ... for now, hide that we're two physical busses.
+        */
+       status = spi_bitbang_start(&pp->bitbang);
+       if (status < 0)
+               goto clean2;
+
+       /* Bus 1 lets us talk to at45db041b (firmware disables AVR)
+        * or AVR (firmware resets at45, acts as spi slave)
+        */
+       pp->info[0].max_speed_hz = 15 * 1000 * 1000;
+       strcpy(pp->info[0].modalias, "mtd_dataflash");
+       pp->info[0].platform_data = &flash;
+       pp->info[0].chip_select = 1;
+       pp->info[0].controller_data = pp;
+       pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
+       if (pp->dataflash)
+               pr_debug("%s: dataflash at %s\n", p->name,
+                               pp->dataflash->dev.bus_id);
+
+#ifdef HAVE_USI
+       /* even more custom AVR firmware */
+       pp->info[1].max_speed_hz = 10 /* ?? */ * 1000 * 1000;
+       strcpy(pp->info[1].modalias, "butterfly");
+       // pp->info[1].platform_data = ... TBD ... ;
+       pp->info[1].chip_select = 2,
+       pp->info[1].controller_data = pp;
+       pp->butterfly = spi_new_device(pp->bitbang.master, &pp->info[1]);
+       if (pp->butterfly)
+               pr_debug("%s: butterfly at %s\n", p->name,
+                               pp->butterfly->dev.bus_id);
+
+       /* FIXME setup ACK for the IRQ line ...  */
+#endif
+
+       // dev_info(_what?_, ...)
+       pr_info("%s: AVR Butterfly\n", p->name);
+       butterfly = pp;
+       return;
+
+clean2:
+       /* turn off VCC */
+       parport_write_data(pp->port, 0);
+
+       parport_release(pp->pd);
+clean1:
+       parport_unregister_device(pd);
+clean0:
+       (void) spi_master_put(pp->bitbang.master);
+done:
+       platform_device_unregister(pdev);
+       pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
+}
+
+static void butterfly_detach(struct parport *p)
+{
+       struct butterfly        *pp;
+       struct platform_device  *pdev;
+       int                     status;
+
+       /* FIXME this global is ugly ... but, how to quickly get from
+        * the parport to the "struct butterfly" associated with it?
+        * "old school" driver-internal device lists?
+        */
+       if (!butterfly || butterfly->port != p)
+               return;
+       pp = butterfly;
+       butterfly = NULL;
+
+#ifdef HAVE_USI
+       spi_unregister_device(pp->butterfly);
+       pp->butterfly = NULL;
+#endif
+       spi_unregister_device(pp->dataflash);
+       pp->dataflash = NULL;
+
+       status = spi_bitbang_stop(&pp->bitbang);
+
+       /* turn off VCC */
+       parport_write_data(pp->port, 0);
+       msleep(10);
+
+       parport_release(pp->pd);
+       parport_unregister_device(pp->pd);
+
+       pdev = to_platform_device(pp->bitbang.master->cdev.dev);
+
+       (void) spi_master_put(pp->bitbang.master);
+
+       platform_device_unregister(pdev);
+}
+
+static struct parport_driver butterfly_driver = {
+       .name =         "spi_butterfly",
+       .attach =       butterfly_attach,
+       .detach =       butterfly_detach,
+};
+
+
+static int __init butterfly_init(void)
+{
+       return parport_register_driver(&butterfly_driver);
+}
+device_initcall(butterfly_init);
+
+static void __exit butterfly_exit(void)
+{
+       parport_unregister_driver(&butterfly_driver);
+}
+module_exit(butterfly_exit);
+
+MODULE_LICENSE("GPL");
index 8f402f85e1ca71f09373866151fb51efa6322e2d..afc84cfb61f996fc484308473552deb880839734 100644 (file)
@@ -2534,9 +2534,6 @@ static struct usb_gadget_driver eth_driver = {
        .driver         = {
                .name           = (char *) shortname,
                .owner          = THIS_MODULE,
-               // .shutdown = ...
-               // .suspend = ...
-               // .resume = ...
        },
 };
 
index c6c279de832e40920fd58d0a36ba403b93fbb541..9a4edc5657aa695cb7f98030084433f6369435c9 100644 (file)
@@ -1738,9 +1738,6 @@ static struct usb_gadget_driver gadgetfs_driver = {
 
        .driver         = {
                .name           = (char *) shortname,
-               // .shutdown = ...
-               // .suspend = ...
-               // .resume = ...
        },
 };
 
index 2e6926b33455b0197bb58179c922acd42dadf76c..ba9acd5310247909145ac4493fee7d308650b7ea 100644 (file)
@@ -374,9 +374,6 @@ static struct usb_gadget_driver gs_gadget_driver = {
        .disconnect =           gs_disconnect,
        .driver = {
                .name =         GS_SHORT_NAME,
-               /* .shutdown = ... */
-               /* .suspend = ...  */
-               /* .resume = ...   */
        },
 };
 
index 6c58636e914b697d9dea954d77a48feb31b124ec..2fc110d3ad5ab05b960578280f108de6304b9502 100644 (file)
@@ -1303,9 +1303,6 @@ static struct usb_gadget_driver zero_driver = {
        .driver         = {
                .name           = (char *) shortname,
                .owner          = THIS_MODULE,
-               // .shutdown = ...
-               // .suspend = ...
-               // .resume = ...
        },
 };
 
index 509dd0a04c54f5e8e5bf9d4164a635133095c9ce..5246b35301de9d4e9333d1f14e15e51bea0919b5 100644 (file)
@@ -37,6 +37,16 @@ config USB_HIDINPUT
 
          If unsure, say Y.
 
+config USB_HIDINPUT_POWERBOOK
+       bool "Enable support for iBook/PowerBook special keys"
+       default n
+       depends on USB_HIDINPUT
+       help
+         Say Y here if you want support for the special keys (Fn, Numlock) on
+         Apple iBooks and PowerBooks.
+
+         If unsure, say N.
+
 config HID_FF
        bool "Force feedback support (EXPERIMENTAL)"
        depends on USB_HIDINPUT && EXPERIMENTAL
index 5f52979af1c736905ba0ed08f52dab03135a5d76..a91e72c41415c213b8ece5824c16f4873161dc73 100644 (file)
@@ -1450,6 +1450,9 @@ void hid_init_reports(struct hid_device *hid)
 #define USB_VENDOR_ID_APPLE            0x05ac
 #define USB_DEVICE_ID_APPLE_POWERMOUSE 0x0304
 
+#define USB_VENDOR_ID_CHERRY           0x046a
+#define USB_DEVICE_ID_CHERRY_CYMOTION  0x0023
+
 /*
  * Alphabetically sorted blacklist by quirk type.
  */
@@ -1580,6 +1583,16 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
        { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
 
+       { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
+
+       { USB_VENDOR_ID_APPLE, 0x020E, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x020F, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x0214, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x0215, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x0216, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x030A, HID_QUIRK_POWERBOOK_HAS_FN },
+       { USB_VENDOR_ID_APPLE, 0x030B, HID_QUIRK_POWERBOOK_HAS_FN },
+
        { 0, 0 }
 };
 
@@ -1626,6 +1639,20 @@ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
                usb_buffer_free(dev, hid->bufsize, hid->ctrlbuf, hid->ctrlbuf_dma);
 }
 
+/*
+ * Cherry Cymotion keyboards have an invalid HID report descriptor
+ * that needs fixing before we can parse it.
+ */
+
+static void hid_fixup_cymotion_descriptor(char *rdesc, int rsize)
+{
+       if (rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
+               info("Fixing up Cherry Cymotion report descriptor");
+               rdesc[11] = rdesc[16] = 0xff;
+               rdesc[12] = rdesc[17] = 0x03;
+       }
+}
+
 static struct hid_device *usb_hid_configure(struct usb_interface *intf)
 {
        struct usb_host_interface *interface = intf->cur_altsetting;
@@ -1673,6 +1700,9 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
                return NULL;
        }
 
+       if ((quirks & HID_QUIRK_CYMOTION))
+               hid_fixup_cymotion_descriptor(rdesc, rsize);
+
 #ifdef DEBUG_DATA
        printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n);
        for (n = 0; n < rsize; n++)
index 192a03b2897145ba00155ebe8d5e74f887ae2cdd..cb0d80f492520eeb42e5592cccd203ac81032a6a 100644 (file)
@@ -73,6 +73,160 @@ static const struct {
 #define map_key_clear(c)       do { map_key(c); clear_bit(c, bit); } while (0)
 #define map_ff_effect(c)       do { set_bit(c, input->ffbit); } while (0)
 
+#ifdef CONFIG_USB_HIDINPUT_POWERBOOK
+
+struct hidinput_key_translation {
+       u16 from;
+       u16 to;
+       u8 flags;
+};
+
+#define POWERBOOK_FLAG_FKEY 0x01
+
+static struct hidinput_key_translation powerbook_fn_keys[] = {
+       { KEY_BACKSPACE, KEY_DELETE },
+       { KEY_F1,       KEY_BRIGHTNESSDOWN,     POWERBOOK_FLAG_FKEY },
+       { KEY_F2,       KEY_BRIGHTNESSUP,       POWERBOOK_FLAG_FKEY },
+       { KEY_F3,       KEY_MUTE,               POWERBOOK_FLAG_FKEY },
+       { KEY_F4,       KEY_VOLUMEDOWN,         POWERBOOK_FLAG_FKEY },
+       { KEY_F5,       KEY_VOLUMEUP,           POWERBOOK_FLAG_FKEY },
+       { KEY_F6,       KEY_NUMLOCK,            POWERBOOK_FLAG_FKEY },
+       { KEY_F7,       KEY_SWITCHVIDEOMODE,    POWERBOOK_FLAG_FKEY },
+       { KEY_F8,       KEY_KBDILLUMTOGGLE,     POWERBOOK_FLAG_FKEY },
+       { KEY_F9,       KEY_KBDILLUMDOWN,       POWERBOOK_FLAG_FKEY },
+       { KEY_F10,      KEY_KBDILLUMUP,         POWERBOOK_FLAG_FKEY },
+       { KEY_UP,       KEY_PAGEUP },
+       { KEY_DOWN,     KEY_PAGEDOWN },
+       { KEY_LEFT,     KEY_HOME },
+       { KEY_RIGHT,    KEY_END },
+       { }
+};
+
+static struct hidinput_key_translation powerbook_numlock_keys[] = {
+       { KEY_J,        KEY_KP1 },
+       { KEY_K,        KEY_KP2 },
+       { KEY_L,        KEY_KP3 },
+       { KEY_U,        KEY_KP4 },
+       { KEY_I,        KEY_KP5 },
+       { KEY_O,        KEY_KP6 },
+       { KEY_7,        KEY_KP7 },
+       { KEY_8,        KEY_KP8 },
+       { KEY_9,        KEY_KP9 },
+       { KEY_M,        KEY_KP0 },
+       { KEY_DOT,      KEY_KPDOT },
+       { KEY_SLASH,    KEY_KPPLUS },
+       { KEY_SEMICOLON, KEY_KPMINUS },
+       { KEY_P,        KEY_KPASTERISK },
+       { KEY_MINUS,    KEY_KPEQUAL },
+       { KEY_0,        KEY_KPSLASH },
+       { KEY_F6,       KEY_NUMLOCK },
+       { KEY_KPENTER,  KEY_KPENTER },
+       { KEY_BACKSPACE, KEY_BACKSPACE },
+       { }
+};
+
+static int usbhid_pb_fnmode = 1;
+module_param_named(pb_fnmode, usbhid_pb_fnmode, int, 0644);
+MODULE_PARM_DESC(pb_fnmode,
+       "Mode of fn key on PowerBooks (0 = disabled, 1 = fkeyslast, 2 = fkeysfirst)");
+
+static struct hidinput_key_translation *find_translation(struct hidinput_key_translation *table, u16 from)
+{
+       struct hidinput_key_translation *trans;
+
+       /* Look for the translation */
+       for (trans = table; trans->from; trans++)
+               if (trans->from == from)
+                       return trans;
+
+       return NULL;
+}
+
+static int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
+                            struct hid_usage *usage, __s32 value)
+{
+       struct hidinput_key_translation *trans;
+
+       if (usage->code == KEY_FN) {
+               if (value) hid->quirks |=  HID_QUIRK_POWERBOOK_FN_ON;
+               else       hid->quirks &= ~HID_QUIRK_POWERBOOK_FN_ON;
+
+               input_event(input, usage->type, usage->code, value);
+
+               return 1;
+       }
+
+       if (usbhid_pb_fnmode) {
+               int do_translate;
+
+               trans = find_translation(powerbook_fn_keys, usage->code);
+               if (trans) {
+                       if (test_bit(usage->code, hid->pb_pressed_fn))
+                               do_translate = 1;
+                       else if (trans->flags & POWERBOOK_FLAG_FKEY)
+                               do_translate =
+                                       (usbhid_pb_fnmode == 2 &&  (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON)) ||
+                                       (usbhid_pb_fnmode == 1 && !(hid->quirks & HID_QUIRK_POWERBOOK_FN_ON));
+                       else
+                               do_translate = (hid->quirks & HID_QUIRK_POWERBOOK_FN_ON);
+
+                       if (do_translate) {
+                               if (value)
+                                       set_bit(usage->code, hid->pb_pressed_fn);
+                               else
+                                       clear_bit(usage->code, hid->pb_pressed_fn);
+
+                               input_event(input, usage->type, trans->to, value);
+
+                               return 1;
+                       }
+               }
+
+               if (test_bit(usage->code, hid->pb_pressed_numlock) ||
+                   test_bit(LED_NUML, input->led)) {
+                       trans = find_translation(powerbook_numlock_keys, usage->code);
+
+                       if (trans) {
+                               if (value)
+                                       set_bit(usage->code, hid->pb_pressed_numlock);
+                               else
+                                       clear_bit(usage->code, hid->pb_pressed_numlock);
+
+                               input_event(input, usage->type, trans->to, value);
+                       }
+
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+static void hidinput_pb_setup(struct input_dev *input)
+{
+       struct hidinput_key_translation *trans;
+
+       set_bit(KEY_NUMLOCK, input->keybit);
+
+       /* Enable all needed keys */
+       for (trans = powerbook_fn_keys; trans->from; trans++)
+               set_bit(trans->to, input->keybit);
+
+       for (trans = powerbook_numlock_keys; trans->from; trans++)
+               set_bit(trans->to, input->keybit);
+}
+#else
+static inline int hidinput_pb_event(struct hid_device *hid, struct input_dev *input,
+                                   struct hid_usage *usage, __s32 value)
+{
+       return 0;
+}
+
+static inline void hidinput_pb_setup(struct input_dev *input)
+{
+}
+#endif
+
 static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
                                     struct hid_usage *usage)
 {
@@ -135,8 +289,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                case HID_UP_SIMULATION:
 
                        switch (usage->hid & 0xffff) {
-                               case 0xba: map_abs(ABS_RUDDER); break;
+                               case 0xba: map_abs(ABS_RUDDER);   break;
                                case 0xbb: map_abs(ABS_THROTTLE); break;
+                               case 0xc4: map_abs(ABS_GAS);      break;
+                               case 0xc5: map_abs(ABS_BRAKE);    break;
+                               case 0xc8: map_abs(ABS_WHEEL);    break;
                                default:   goto ignore;
                        }
                        break;
@@ -289,11 +446,19 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
                                case 0x226: map_key_clear(KEY_STOP);            break;
                                case 0x227: map_key_clear(KEY_REFRESH);         break;
                                case 0x22a: map_key_clear(KEY_BOOKMARKS);       break;
+                               case 0x233: map_key_clear(KEY_SCROLLUP);        break;
+                               case 0x234: map_key_clear(KEY_SCROLLDOWN);      break;
                                case 0x238: map_rel(REL_HWHEEL);                break;
                                case 0x279: map_key_clear(KEY_REDO);            break;
                                case 0x289: map_key_clear(KEY_REPLY);           break;
                                case 0x28b: map_key_clear(KEY_FORWARDMAIL);     break;
                                case 0x28c: map_key_clear(KEY_SEND);            break;
+
+                               /* Reported on a Cherry Cymotion keyboard */
+                               case 0x301: map_key_clear(KEY_PROG1);           break;
+                               case 0x302: map_key_clear(KEY_PROG2);           break;
+                               case 0x303: map_key_clear(KEY_PROG3);           break;
+
                                default:    goto ignore;
                        }
                        break;
@@ -325,7 +490,12 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
 
                        set_bit(EV_REP, input->evbit);
                        switch(usage->hid & HID_USAGE) {
-                               case 0x003: map_key_clear(KEY_FN);              break;
+                               case 0x003:
+                                       /* The fn key on Apple PowerBooks */
+                                       map_key_clear(KEY_FN);
+                                       hidinput_pb_setup(input);
+                                       break;
+
                                default:    goto ignore;
                        }
                        break;
@@ -482,6 +652,9 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
                return;
        }
 
+       if ((hid->quirks & HID_QUIRK_POWERBOOK_HAS_FN) && hidinput_pb_event(hid, input, usage, value))
+               return;
+
        if (usage->hat_min < usage->hat_max || usage->hat_dir) {
                int hat_dir = usage->hat_dir;
                if (!hat_dir)
@@ -524,7 +697,7 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
                return;
        }
 
-       if((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
+       if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
                return;
 
        input_event(input, usage->type, usage->code, value);
index ee48a227610420ec42c6c2334430bfb3a2053bfd..8b0d4346ce9c569b67e92036995b88bc62038902 100644 (file)
@@ -235,17 +235,20 @@ struct hid_item {
  * HID device quirks.
  */
 
-#define HID_QUIRK_INVERT                       0x001
-#define HID_QUIRK_NOTOUCH                      0x002
-#define HID_QUIRK_IGNORE                       0x004
-#define HID_QUIRK_NOGET                                0x008
-#define HID_QUIRK_HIDDEV                       0x010
-#define HID_QUIRK_BADPAD                       0x020
-#define HID_QUIRK_MULTI_INPUT                  0x040
-#define HID_QUIRK_2WHEEL_MOUSE_HACK_7          0x080
-#define HID_QUIRK_2WHEEL_MOUSE_HACK_5          0x100
-#define HID_QUIRK_2WHEEL_MOUSE_HACK_ON         0x200
-#define HID_QUIRK_2WHEEL_POWERMOUSE            0x400
+#define HID_QUIRK_INVERT                       0x00000001
+#define HID_QUIRK_NOTOUCH                      0x00000002
+#define HID_QUIRK_IGNORE                       0x00000004
+#define HID_QUIRK_NOGET                                0x00000008
+#define HID_QUIRK_HIDDEV                       0x00000010
+#define HID_QUIRK_BADPAD                       0x00000020
+#define HID_QUIRK_MULTI_INPUT                  0x00000040
+#define HID_QUIRK_2WHEEL_MOUSE_HACK_7          0x00000080
+#define HID_QUIRK_2WHEEL_MOUSE_HACK_5          0x00000100
+#define HID_QUIRK_2WHEEL_MOUSE_HACK_ON         0x00000200
+#define HID_QUIRK_2WHEEL_POWERMOUSE            0x00000400
+#define HID_QUIRK_CYMOTION                     0x00000800
+#define HID_QUIRK_POWERBOOK_HAS_FN             0x00001000
+#define HID_QUIRK_POWERBOOK_FN_ON              0x00002000
 
 /*
  * This is the global environment of the parser. This information is
@@ -431,6 +434,11 @@ struct hid_device {                                                        /* device report descriptor */
        void (*ff_exit)(struct hid_device*);                            /* Called by hid_exit_ff(hid) */
        int (*ff_event)(struct hid_device *hid, struct input_dev *input,
                        unsigned int type, unsigned int code, int value);
+
+#ifdef CONFIG_USB_HIDINPUT_POWERBOOK
+       unsigned long pb_pressed_fn[NBITS(KEY_MAX)];
+       unsigned long pb_pressed_numlock[NBITS(KEY_MAX)];
+#endif
 };
 
 #define HID_GLOBAL_STACK_SIZE 4
index 19e015d171aab767a7df15582dded3958b7b8feb..d9d9f656b8c9ea26f576a11feb451798a886cd4a 100644 (file)
@@ -259,7 +259,7 @@ static int hid_pid_upload_effect(struct input_dev *dev,
 int hid_pid_init(struct hid_device *hid)
 {
        struct hid_ff_pid *private;
-       struct hid_input *hidinput = list_entry(&hid->inputs, struct hid_input, list);
+       struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list);
        struct input_dev *input_dev = hidinput->input;
 
        private = hid->ff_private = kzalloc(sizeof(struct hid_ff_pid), GFP_KERNEL);
index 48df4cfd5a42274b7a756f37ce6869c646a590e3..d3e15df9e815ec32d819aa57b9c42544193cae7b 100644 (file)
@@ -95,7 +95,7 @@ MODULE_LICENSE(DRIVER_LICENSE);
 enum {
        PENPARTNER = 0,
        GRAPHIRE,
-       G4,
+       WACOM_G4,
        PL,
        INTUOS,
        INTUOS3,
@@ -373,7 +373,7 @@ static void wacom_graphire_irq(struct urb *urb, struct pt_regs *regs)
 
                        case 2: /* Mouse with wheel */
                                input_report_key(dev, BTN_MIDDLE, data[1] & 0x04);
-                               if (wacom->features->type == G4) {
+                               if (wacom->features->type == WACOM_G4) {
                                        rw = data[7] & 0x04 ? -(data[7] & 0x03) : (data[7] & 0x03);
                                        input_report_rel(dev, REL_WHEEL, rw);
                                } else
@@ -385,7 +385,7 @@ static void wacom_graphire_irq(struct urb *urb, struct pt_regs *regs)
                                id = CURSOR_DEVICE_ID;
                                input_report_key(dev, BTN_LEFT, data[1] & 0x01);
                                input_report_key(dev, BTN_RIGHT, data[1] & 0x02);
-                               if (wacom->features->type == G4)
+                               if (wacom->features->type == WACOM_G4)
                                        input_report_abs(dev, ABS_DISTANCE, data[6]);
                                else
                                        input_report_abs(dev, ABS_DISTANCE, data[7]);
@@ -410,7 +410,7 @@ static void wacom_graphire_irq(struct urb *urb, struct pt_regs *regs)
        input_sync(dev);
 
        /* send pad data */
-       if (wacom->features->type == G4) {
+       if (wacom->features->type == WACOM_G4) {
                /* first time sending pad data */
                if (wacom->tool[1] != BTN_TOOL_FINGER) {
                        wacom->id[1] = 0;
@@ -713,8 +713,8 @@ static struct wacom_features wacom_features[] = {
        { "Wacom Graphire2 5x7", 8,  13918, 10206,  511, 32, GRAPHIRE,   wacom_graphire_irq },
        { "Wacom Graphire3",     8,  10208,  7424,  511, 32, GRAPHIRE,   wacom_graphire_irq },
        { "Wacom Graphire3 6x8", 8,  16704, 12064,  511, 32, GRAPHIRE,   wacom_graphire_irq },
-       { "Wacom Graphire4 4x5", 8,  10208,  7424,  511, 32, G4,         wacom_graphire_irq },
-       { "Wacom Graphire4 6x8", 8,  16704, 12064,  511, 32, G4,         wacom_graphire_irq },
+       { "Wacom Graphire4 4x5", 8,  10208,  7424,  511, 32, WACOM_G4,   wacom_graphire_irq },
+       { "Wacom Graphire4 6x8", 8,  16704, 12064,  511, 32, WACOM_G4,   wacom_graphire_irq },
        { "Wacom Volito",        8,   5104,  3712,  511, 32, GRAPHIRE,   wacom_graphire_irq },
        { "Wacom PenStation2",   8,   3250,  2320,  255, 32, GRAPHIRE,   wacom_graphire_irq },
        { "Wacom Volito2 4x5",   8,   5104,  3712,  511, 32, GRAPHIRE,   wacom_graphire_irq },
@@ -859,7 +859,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
        input_set_abs_params(input_dev, ABS_PRESSURE, 0, wacom->features->pressure_max, 0, 0);
 
        switch (wacom->features->type) {
-               case G4:
+               case WACOM_G4:
                        input_dev->evbit[0] |= BIT(EV_MSC);
                        input_dev->mscbit[0] |= BIT(MSC_SERIAL);
                        input_dev->keybit[LONG(BTN_DIGI)] |= BIT(BTN_TOOL_FINGER);
index 664139afcfa9a7a28b09de9fac830592be169fd3..e9f9f4bafa17568eecd17b030e014790d792909b 100644 (file)
@@ -37,11 +37,6 @@ static int usb_serial_device_match (struct device *dev, struct device_driver *dr
        return 0;
 }
 
-struct bus_type usb_serial_bus_type = {
-       .name =         "usb-serial",
-       .match =        usb_serial_device_match,
-};
-
 static int usb_serial_device_probe (struct device *dev)
 {
        struct usb_serial_driver *driver;
@@ -109,14 +104,18 @@ exit:
        return retval;
 }
 
+struct bus_type usb_serial_bus_type = {
+       .name =         "usb-serial",
+       .match =        usb_serial_device_match,
+       .probe =        usb_serial_device_probe,
+       .remove =       usb_serial_device_remove,
+};
+
 int usb_serial_bus_register(struct usb_serial_driver *driver)
 {
        int retval;
 
        driver->driver.bus = &usb_serial_bus_type;
-       driver->driver.probe = usb_serial_device_probe;
-       driver->driver.remove = usb_serial_device_remove;
-
        retval = driver_register(&driver->driver);
 
        return retval;
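
This hunk moves probe and remove from each usb_serial_driver onto usb_serial_bus_type itself, following the driver-core convention that a bus-level probe, when present, is preferred over the per-driver one. A tiny userspace sketch of that dispatch order (structure names are simplified stand-ins, not the real driver core):

/*
 * Sketch of the probe dispatch: when the bus supplies a probe, the core
 * calls it; individual drivers no longer set driver.probe themselves.
 */
#include <stdio.h>

struct driver { int (*probe)(void); };
struct bus    { int (*probe)(void); };

static int usb_serial_device_probe(void) { puts("bus probe"); return 0; }

static int really_probe(struct bus *bus, struct driver *drv)
{
	if (bus->probe)
		return bus->probe();	/* bus-level probe wins */
	if (drv->probe)
		return drv->probe();
	return 0;
}

int main(void)
{
	struct bus usb_serial_bus = { .probe = usb_serial_device_probe };
	struct driver generic = { .probe = NULL };	/* drivers no longer set it */

	return really_probe(&usb_serial_bus, &generic);
}
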
index 9ffff1938239c97bc878160c73f3e890a28d2d36..0eb883f44adaee3bfde6c2879ca63a7e98ae668a 100644 (file)
@@ -43,8 +43,6 @@ static int debug;
 #define PL2303_BUF_SIZE                1024
 #define PL2303_TMP_BUF_SIZE    1024
 
-static DECLARE_MUTEX(pl2303_tmp_buf_sem);
-
 struct pl2303_buf {
        unsigned int    buf_size;
        char            *buf_buf;
index d549e215f3c57fe944c39583baa6718d9554ee1b..2c42a812655a14a977e963a7aeb7bb0898afec01 100644 (file)
@@ -590,6 +590,8 @@ static u_short maxfmode, chipset;
 #define highw(x)       ((u_long)(x)>>16 & 0xffff)
 #define loww(x)                ((u_long)(x) & 0xffff)
 
+#define custom         amiga_custom
+
 #define VBlankOn()     custom.intena = IF_SETCLR|IF_COPER
 #define VBlankOff()    custom.intena = IF_COPER
 
@@ -1164,8 +1166,8 @@ static void ami_update_display(void);
 static void ami_init_display(void);
 static void ami_do_blank(void);
 static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data);
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
 static int ami_get_cursorstate(struct fb_cursorstate *state);
 static int ami_set_cursorstate(struct fb_cursorstate *state);
 static void ami_set_sprite(void);
@@ -2179,6 +2181,7 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
                struct fb_var_cursorinfo var;
                struct fb_cursorstate state;
        } crsr;
+       void __user *argp = (void __user *)arg;
        int i;
 
        switch (cmd) {
@@ -2186,33 +2189,32 @@ static int amifb_ioctl(struct inode *inode, struct file *file,
                        i = ami_get_fix_cursorinfo(&crsr.fix);
                        if (i)
                                return i;
-                       return copy_to_user((void *)arg, &crsr.fix,
+                       return copy_to_user(argp, &crsr.fix,
                                            sizeof(crsr.fix)) ? -EFAULT : 0;
 
                case FBIOGET_VCURSORINFO:
                        i = ami_get_var_cursorinfo(&crsr.var,
-                               ((struct fb_var_cursorinfo *)arg)->data);
+                               ((struct fb_var_cursorinfo __user *)arg)->data);
                        if (i)
                                return i;
-                       return copy_to_user((void *)arg, &crsr.var,
+                       return copy_to_user(argp, &crsr.var,
                                            sizeof(crsr.var)) ? -EFAULT : 0;
 
                case FBIOPUT_VCURSORINFO:
-                       if (copy_from_user(&crsr.var, (void *)arg,
-                                          sizeof(crsr.var)))
+                       if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
                                return -EFAULT;
                        return ami_set_var_cursorinfo(&crsr.var,
-                               ((struct fb_var_cursorinfo *)arg)->data);
+                               ((struct fb_var_cursorinfo __user *)arg)->data);
 
                case FBIOGET_CURSORSTATE:
                        i = ami_get_cursorstate(&crsr.state);
                        if (i)
                                return i;
-                       return copy_to_user((void *)arg, &crsr.state,
+                       return copy_to_user(argp, &crsr.state,
                                            sizeof(crsr.state)) ? -EFAULT : 0;
 
                case FBIOPUT_CURSORSTATE:
-                       if (copy_from_user(&crsr.state, (void *)arg,
+                       if (copy_from_user(&crsr.state, argp,
                                           sizeof(crsr.state)))
                                return -EFAULT;
                        return ami_set_cursorstate(&crsr.state);
@@ -3325,7 +3327,7 @@ static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
        return 0;
 }
 
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
 {
        struct amifb_par *par = &currentpar;
        register u_short *lspr, *sspr;
@@ -3347,14 +3349,14 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
        var->yspot = par->crsr.spot_y;
        if (size > var->height*var->width)
                return -ENAMETOOLONG;
-       if (!access_ok(VERIFY_WRITE, (void *)data, size))
+       if (!access_ok(VERIFY_WRITE, data, size))
                return -EFAULT;
        delta = 1<<par->crsr.fmode;
        lspr = lofsprite + (delta<<1);
        if (par->bplcon0 & BPC0_LACE)
                sspr = shfsprite + (delta<<1);
        else
-               sspr = 0;
+               sspr = NULL;
        for (height = (short)var->height-1; height >= 0; height--) {
                bits = 0; words = delta; datawords = 0;
                for (width = (short)var->width-1; width >= 0; width--) {
@@ -3400,7 +3402,7 @@ static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
        return 0;
 }
 
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
 {
        struct amifb_par *par = &currentpar;
        register u_short *lspr, *sspr;
@@ -3427,7 +3429,7 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
                return -EINVAL;
        if (!var->height)
                return -EINVAL;
-       if (!access_ok(VERIFY_READ, (void *)data, var->width*var->height))
+       if (!access_ok(VERIFY_READ, data, var->width*var->height))
                return -EFAULT;
        delta = 1<<fmode;
        lofsprite = shfsprite = (u_short *)spritememory;
@@ -3442,13 +3444,13 @@ static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char *data)
                if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
                        return -EINVAL;
                memset(lspr, 0, (var->height+2)<<fmode<<2);
-               sspr = 0;
+               sspr = NULL;
        }
        for (height = (short)var->height-1; height >= 0; height--) {
                bits = 16; words = delta; datawords = 0;
                for (width = (short)var->width-1; width >= 0; width--) {
                        unsigned long tdata = 0;
-                       get_user(tdata, (char *)data);
+                       get_user(tdata, data);
                        data++;
 #ifdef __mc68000__
                        asm volatile (
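
The amifb changes above are largely about __user annotations: the ioctl argument is cast once to a void __user *argp and then handed to copy_to_user()/copy_from_user(), and sparse can flag any direct dereference of such a pointer. A userspace sketch of what the annotation expands to and how the single argp cast is reused; copy_to_user_sketch() and the simplified fb_cursorstate layout here are stand-ins, not the kernel definitions.

/*
 * Sketch of the __user annotation and the "cast arg once" pattern.
 * Under sparse (__CHECKER__), __user marks a separate address space;
 * otherwise it expands to nothing.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct fb_cursorstate { int xoffset, yoffset, mode; };	/* simplified */

/* stand-in: the real copy_to_user() validates the user pointer */
static long copy_to_user_sketch(void __user *to, const void *from, size_t n)
{
	memcpy((void *)to, from, n);
	return 0;
}

static int fb_ioctl_sketch(unsigned long arg)
{
	void __user *argp = (void __user *)arg;	/* cast once, reuse below */
	struct fb_cursorstate state = { 1, 2, 3 };

	return copy_to_user_sketch(argp, &state, sizeof(state)) ? -EFAULT : 0;
}

int main(void)
{
	struct fb_cursorstate out;

	printf("ret = %d\n", fb_ioctl_sketch((unsigned long)&out));
	printf("xoffset = %d\n", out.xoffset);
	return 0;
}
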
index e370125e4fbc8ebe5a1cf192544d902bdda1e2a6..ed81005cbdba51cdf2fd06c318083fa063cb839c 100644 (file)
@@ -3501,7 +3501,7 @@ err_release_mem:
 
 static int __devinit atyfb_atari_probe(void)
 {
-       struct aty_par *par;
+       struct atyfb_par *par;
        struct fb_info *info;
        int m64_num;
        u32 clock_r;
index cfc748e9427260037a075a0ad78ebdc8e90c4560..e6cbd9de944acdaa0ab28f9bc88a76263289fd11 100644 (file)
@@ -609,18 +609,19 @@ void __init macfb_setup(char *options)
        }
 }
 
-void __init macfb_init(void)
+static int __init macfb_init(void)
 {
        int video_cmap_len, video_is_nubus = 0;
        struct nubus_dev* ndev = NULL;
        char *option = NULL;
+       int err;
 
        if (fb_get_options("macfb", &option))
                return -ENODEV;
        macfb_setup(option);
 
        if (!MACH_IS_MAC) 
-               return;
+               return -ENODEV;
 
        /* There can only be one internal video controller anyway so
           we're not too worried about this */
@@ -958,11 +959,11 @@ void __init macfb_init(void)
 
        fb_alloc_cmap(&fb_info.cmap, video_cmap_len, 0);
        
-       if (register_framebuffer(&fb_info) < 0)
-               return;
-
-       printk("fb%d: %s frame buffer device\n",
-              fb_info.node, fb_info.fix.id);
+       err = register_framebuffer(&fb_info);
+       if (!err)
+               printk("fb%d: %s frame buffer device\n",
+                      fb_info.node, fb_info.fix.id);
+       return err;
 }
 
 module_init(macfb_init);
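
macfb_init() is converted from a void __init function to a static int __init one because module_init() expects an initcall that returns 0 or a negative errno; otherwise failures such as running on non-Mac hardware or a failed register_framebuffer() are silently swallowed. A minimal sketch of that convention (fake_register_framebuffer() is a stand-in, not the kernel API):

/*
 * Sketch of the initcall return convention: 0 on success, negative
 * errno on failure, so the caller can see what went wrong.
 */
#include <stdio.h>
#include <errno.h>

static int fake_register_framebuffer(void) { return 0; }	/* stand-in */

static int macfb_init_sketch(int mach_is_mac)
{
	int err;

	if (!mach_is_mac)
		return -ENODEV;			/* not our hardware */

	err = fake_register_framebuffer();
	if (!err)
		printf("fb0: frame buffer device\n");
	return err;				/* 0 or negative errno */
}

int main(void)
{
	printf("on non-Mac: %d\n", macfb_init_sketch(0));
	printf("on Mac:     %d\n", macfb_init_sketch(1));
	return 0;
}
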
index 1a409c2c320c800fef3935dd54bbf4d717036e15..7aa2d3de6d37d3378a96378c520c2ac5fef276a2 100644 (file)
@@ -45,7 +45,7 @@ proc_bus_zorro_lseek(struct file *file, loff_t off, int whence)
 }
 
 static ssize_t
-proc_bus_zorro_read(struct file *file, char *buf, size_t nbytes, loff_t *ppos)
+proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
 {
        struct inode *ino = file->f_dentry->d_inode;
        struct proc_dir_entry *dp = PDE(ino);
index ccba227676f274f4321163f7df396f8a1ac4e710..fcbee748c59227b2f344ab483be2558d93e749c5 100644 (file)
@@ -77,7 +77,6 @@ int zorro_register_driver(struct zorro_driver *drv)
        /* initialize common driver fields */
        drv->driver.name = drv->name;
        drv->driver.bus = &zorro_bus_type;
-       drv->driver.probe = zorro_device_probe;
 
        /* register with core */
        count = driver_register(&drv->driver);
@@ -132,7 +131,8 @@ static int zorro_bus_match(struct device *dev, struct device_driver *drv)
 
 struct bus_type zorro_bus_type = {
        .name   = "zorro",
-       .match  = zorro_bus_match
+       .match  = zorro_bus_match,
+       .probe  = zorro_device_probe,
 };
 
 
index f0b7256b2f87f3310c54b5aef9fe5bda25a48f93..5dd0207ffd46607f947aadb9b1c7c1d8e5e39890 100644 (file)
 #include <linux/dvb/dmx.h>
 #include <linux/dvb/frontend.h>
 #include <linux/dvb/video.h>
+#include <linux/lp.h>
 
 /* Aiee. Someone does not find a difference between int and long */
 #define EXT2_IOC32_GETFLAGS               _IOR('f', 1, int)
@@ -2735,6 +2736,20 @@ static int do_ncp_setprivatedata(unsigned int fd, unsigned int cmd, unsigned lon
 }
 #endif
 
+static int
+lp_timeout_trans(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+       struct compat_timeval *tc = (struct compat_timeval *)arg;
+       struct timeval *tn = compat_alloc_user_space(sizeof(struct timeval));
+       struct timeval ts;
+       if (get_user(ts.tv_sec, &tc->tv_sec) ||
+           get_user(ts.tv_usec, &tc->tv_usec) ||
+           put_user(ts.tv_sec, &tn->tv_sec) ||
+           put_user(ts.tv_usec, &tn->tv_usec))
+               return -EFAULT;
+       return sys_ioctl(fd, cmd, (unsigned long)tn);
+}
+
 #define HANDLE_IOCTL(cmd,handler) \
        { (cmd), (ioctl_trans_handler_t)(handler) },
 
@@ -2962,6 +2977,20 @@ HANDLE_IOCTL(DMX_GET_EVENT, do_dmx_get_event)
 HANDLE_IOCTL(VIDEO_GET_EVENT, do_video_get_event)
 HANDLE_IOCTL(VIDEO_STILLPICTURE, do_video_stillpicture)
 HANDLE_IOCTL(VIDEO_SET_SPU_PALETTE, do_video_set_spu_palette)
+
+/* parport */
+COMPATIBLE_IOCTL(LPTIME)
+COMPATIBLE_IOCTL(LPCHAR)
+COMPATIBLE_IOCTL(LPABORTOPEN)
+COMPATIBLE_IOCTL(LPCAREFUL)
+COMPATIBLE_IOCTL(LPWAIT)
+COMPATIBLE_IOCTL(LPSETIRQ)
+COMPATIBLE_IOCTL(LPGETSTATUS)
+COMPATIBLE_IOCTL(LPRESET)
+/* LPGETSTATS not implemented, but no kernels seem to compile it in anyway */
+COMPATIBLE_IOCTL(LPGETFLAGS)
+HANDLE_IOCTL(LPSETTIMEOUT, lp_timeout_trans)
 };
 
 int ioctl_table_size = ARRAY_SIZE(ioctl_start);
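
Most of the parport ioctls above can be marked COMPATIBLE because their arguments are plain integers, but LPSETTIMEOUT passes a struct timeval, whose fields are 32-bit in compat userspace and long-sized in a 64-bit kernel, so lp_timeout_trans() repacks the structure via compat_alloc_user_space(). A small sketch of the two layouts; the struct names here are illustrative, not the kernel typedefs.

/*
 * Why LPSETTIMEOUT needs a translation handler: the 32-bit and 64-bit
 * timeval layouts differ in size, so the raw argument cannot simply be
 * passed through.
 */
#include <stdio.h>
#include <stdint.h>

struct compat_timeval_sketch {	/* what 32-bit userspace passes */
	int32_t tv_sec;
	int32_t tv_usec;
};

struct native_timeval_sketch {	/* what a 64-bit kernel expects */
	int64_t tv_sec;
	int64_t tv_usec;
};

int main(void)
{
	printf("compat: %zu bytes, native: %zu bytes\n",
	       sizeof(struct compat_timeval_sketch),
	       sizeof(struct native_timeval_sketch));
	return 0;
}
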
index fb117b74809eca5169b8805a1c83437cd5033105..9bdd077d6f55a020f06e43d93a5f7540b214b523 100644 (file)
@@ -81,6 +81,30 @@ void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop
        __proc_device_tree_add_prop(pde, prop);
 }
 
+void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+                                 struct property *prop)
+{
+       remove_proc_entry(prop->name, pde);
+}
+
+void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+                                 struct property *newprop,
+                                 struct property *oldprop)
+{
+       struct proc_dir_entry *ent;
+
+       for (ent = pde->subdir; ent != NULL; ent = ent->next)
+               if (ent->data == oldprop)
+                       break;
+       if (ent == NULL) {
+               printk(KERN_WARNING "device-tree: property \"%s\" "
+                      "does not exist\n", oldprop->name);
+       } else {
+               ent->data = newprop;
+               ent->size = newprop->length;
+       }
+}
+
 /*
  * Process a node, adding entries for its children and its properties.
  */
index a9f4421ddb6f6e4b93975c72eb234039f48bd91a..3ada9dcf55b8bd86046a95812d6ad918b9a00f2f 100644 (file)
@@ -49,7 +49,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
        
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        
        UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
        
@@ -81,8 +81,9 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
        for (i = bit; i < end_bit; i++) {
                if (ubh_isclr (UCPI_UBH, ucpi->c_freeoff, i))
                        ubh_setbit (UCPI_UBH, ucpi->c_freeoff, i);
-               else ufs_error (sb, "ufs_free_fragments",
-                       "bit already cleared for fragment %u", i);
+               else 
+                       ufs_error (sb, "ufs_free_fragments",
+                                  "bit already cleared for fragment %u", i);
        }
        
        DQUOT_FREE_BLOCK (inode, count);
@@ -143,7 +144,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
        
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
 
        UFSD(("ENTER, fragment %u, count %u\n", fragment, count))
        
@@ -247,7 +248,7 @@ unsigned ufs_new_fragments (struct inode * inode, __fs32 * p, unsigned fragment,
        
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        *err = -ENOSPC;
 
        lock_super (sb);
@@ -407,7 +408,7 @@ ufs_add_fragments (struct inode * inode, unsigned fragment,
        
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first (USPI_UBH);
+       usb1 = ubh_get_usb_first (uspi);
        count = newcount - oldcount;
        
        cgno = ufs_dtog(fragment);
@@ -490,7 +491,7 @@ static unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
 
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        oldcg = cgno;
        
        /*
@@ -606,7 +607,7 @@ static unsigned ufs_alloccg_block (struct inode * inode,
 
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        ucg = ubh_get_ucg(UCPI_UBH);
 
        if (goal == 0) {
@@ -663,7 +664,7 @@ static unsigned ufs_bitmap_search (struct super_block * sb,
        UFSD(("ENTER, cg %u, goal %u, count %u\n", ucpi->c_cgx, goal, count))
 
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first (USPI_UBH);
+       usb1 = ubh_get_usb_first (uspi);
        ucg = ubh_get_ucg(UCPI_UBH);
 
        if (goal)
index 0938945b9cbc2c628750061a05f772ebeee670e5..c7a47ed4f430d28ab0613b1d8b3986f79e0b528a 100644 (file)
@@ -72,7 +72,7 @@ void ufs_free_inode (struct inode * inode)
 
        sb = inode->i_sb;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        
        ino = inode->i_ino;
 
@@ -167,7 +167,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
        ufsi = UFS_I(inode);
        sbi = UFS_SB(sb);
        uspi = sbi->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
 
        lock_super (sb);
 
index 55f4aa16e3fc930259964fabbb0dce8c905fccbb..e0c04e36a0518a42548d4b4aad82743f7572bb82 100644 (file)
@@ -61,7 +61,7 @@ static int ufs_block_to_path(struct inode *inode, sector_t i_block, sector_t off
        int n = 0;
 
 
-       UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%d \n",ptrs,double_blocks));
+       UFSD(("ptrs=uspi->s_apb = %d,double_blocks=%ld \n",ptrs,double_blocks));
        if (i_block < 0) {
                ufs_warning(inode->i_sb, "ufs_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
@@ -104,7 +104,7 @@ u64  ufs_frag_map(struct inode *inode, sector_t frag)
        unsigned flags = UFS_SB(sb)->s_flags;
        u64 temp = 0L;
 
-       UFSD((": frag = %lu  depth = %d\n",frag,depth));
+       UFSD((": frag = %llu  depth = %d\n", (unsigned long long)frag, depth));
        UFSD((": uspi->s_fpbshift = %d ,uspi->s_apbmask = %x, mask=%llx\n",uspi->s_fpbshift,uspi->s_apbmask,mask));
 
        if (depth == 0)
@@ -365,9 +365,10 @@ repeat:
                sync_dirty_buffer(bh);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
+       UFSD(("result %u\n", tmp + blockoff));
 out:
        brelse (bh);
-       UFSD(("EXIT, result %u\n", tmp + blockoff))
+       UFSD(("EXIT\n"));
        return result;
 }
 
@@ -386,7 +387,7 @@ static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buf
        
        if (!create) {
                phys64 = ufs_frag_map(inode, fragment);
-               UFSD(("phys64 = %lu \n",phys64));
+               UFSD(("phys64 = %llu \n",phys64));
                if (phys64)
                        map_bh(bh_result, sb, phys64);
                return 0;
@@ -401,7 +402,7 @@ static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buf
 
        lock_kernel();
 
-       UFSD(("ENTER, ino %lu, fragment %u\n", inode->i_ino, fragment))
+       UFSD(("ENTER, ino %lu, fragment %llu\n", inode->i_ino, (unsigned long long)fragment))
        if (fragment < 0)
                goto abort_negative;
        if (fragment >
index e9a42c711a9e386df42f329855e4683600eda25d..d4aacee593ffbb9a2daec491518ff9c4bf3dfec0 100644 (file)
@@ -221,7 +221,7 @@ void ufs_error (struct super_block * sb, const char * function,
        va_list args;
 
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        
        if (!(sb->s_flags & MS_RDONLY)) {
                usb1->fs_clean = UFS_FSBAD;
@@ -253,7 +253,7 @@ void ufs_panic (struct super_block * sb, const char * function,
        va_list args;
        
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
        
        if (!(sb->s_flags & MS_RDONLY)) {
                usb1->fs_clean = UFS_FSBAD;
@@ -420,21 +420,18 @@ static int ufs_read_cylinder_structures (struct super_block *sb) {
                if (i + uspi->s_fpb > blks)
                        size = (blks - i) * uspi->s_fsize;
 
-               if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
+               if ((flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) 
                        ubh = ubh_bread(sb,
                                fs64_to_cpu(sb, usb->fs_u11.fs_u2.fs_csaddr) + i, size);
-                       if (!ubh)
-                               goto failed;
-                       ubh_ubhcpymem (space, ubh, size);
-                       sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
-               }
-               else {
+               else 
                        ubh = ubh_bread(sb, uspi->s_csaddr + i, size);
-                       if (!ubh)
-                               goto failed;
-                       ubh_ubhcpymem(space, ubh, size);
-                       sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
-               }
+               
+               if (!ubh)
+                       goto failed;
+
+               ubh_ubhcpymem (space, ubh, size);
+               sbi->s_csp[ufs_fragstoblks(i)]=(struct ufs_csum *)space;
+
                space += size;
                ubh_brelse (ubh);
                ubh = NULL;
@@ -539,6 +536,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        struct inode *inode;
        unsigned block_size, super_block_size;
        unsigned flags;
+       unsigned super_block_offset;
 
        uspi = NULL;
        ubh = NULL;
@@ -586,10 +584,11 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
        if (!uspi)
                goto failed;
 
+       super_block_offset=UFS_SBLOCK;
+
        /* Keep 2Gig file limit. Some UFS variants need to override 
           this but as I don't know which I'll let those in the know loosen
           the rules */
-          
        switch (sbi->s_mount_opt & UFS_MOUNT_UFSTYPE) {
        case UFS_MOUNT_UFSTYPE_44BSD:
                UFSD(("ufstype=44bsd\n"))
@@ -601,7 +600,8 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent)
                flags |= UFS_DE_44BSD | UFS_UID_44BSD | UFS_ST_44BSD | UFS_CG_44BSD;
                break;
        case UFS_MOUNT_UFSTYPE_UFS2:
-               UFSD(("ufstype=ufs2\n"))
+               UFSD(("ufstype=ufs2\n"));
+               super_block_offset=SBLOCK_UFS2;
                uspi->s_fsize = block_size = 512;
                uspi->s_fmask = ~(512 - 1);
                uspi->s_fshift = 9;
@@ -725,19 +725,16 @@ again:
        /*
         * read ufs super block from device
         */
-       if ( (flags & UFS_TYPE_MASK) == UFS_TYPE_UFS2) {
-               ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + SBLOCK_UFS2/block_size, super_block_size);
-       }
-       else {
-               ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + UFS_SBLOCK/block_size, super_block_size);
-       }
+
+       ubh = ubh_bread_uspi(uspi, sb, uspi->s_sbbase + super_block_offset/block_size, super_block_size);
+       
        if (!ubh) 
             goto failed;
 
        
-       usb1 = ubh_get_usb_first(USPI_UBH);
-       usb2 = ubh_get_usb_second(USPI_UBH);
-       usb3 = ubh_get_usb_third(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
+       usb2 = ubh_get_usb_second(uspi);
+       usb3 = ubh_get_usb_third(uspi);
        usb  = (struct ufs_super_block *)
                ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
 
@@ -1006,8 +1003,8 @@ static void ufs_write_super (struct super_block *sb) {
        UFSD(("ENTER\n"))
        flags = UFS_SB(sb)->s_flags;
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first(USPI_UBH);
-       usb3 = ubh_get_usb_third(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
+       usb3 = ubh_get_usb_third(uspi);
 
        if (!(sb->s_flags & MS_RDONLY)) {
                usb1->fs_time = cpu_to_fs32(sb, get_seconds());
@@ -1049,8 +1046,8 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data)
        
        uspi = UFS_SB(sb)->s_uspi;
        flags = UFS_SB(sb)->s_flags;
-       usb1 = ubh_get_usb_first(USPI_UBH);
-       usb3 = ubh_get_usb_third(USPI_UBH);
+       usb1 = ubh_get_usb_first(uspi);
+       usb3 = ubh_get_usb_third(uspi);
        
        /*
         * Allow the "check" option to be passed as a remount option.
@@ -1124,7 +1121,7 @@ static int ufs_statfs (struct super_block *sb, struct kstatfs *buf)
        lock_kernel();
 
        uspi = UFS_SB(sb)->s_uspi;
-       usb1 = ubh_get_usb_first (USPI_UBH);
+       usb1 = ubh_get_usb_first (uspi);
        usb  = (struct ufs_super_block *)
                ((struct ufs_buffer_head *)uspi)->bh[0]->b_data ;
        
index b2640076679a655769de7a8a3a8f3a255d61f9b3..48d6d9bcc15770ce05cd8a8c8bb08e3e5f88e26e 100644 (file)
@@ -249,18 +249,28 @@ extern void _ubh_memcpyubh_(struct ufs_sb_private_info *, struct ufs_buffer_head
 
 
 /*
- * macros to get important structures from ufs_buffer_head
+ * macros and an inline function to get important structures from ufs_sb_private_info
  */
-#define ubh_get_usb_first(ubh) \
-       ((struct ufs_super_block_first *)((ubh)->bh[0]->b_data))
 
-#define ubh_get_usb_second(ubh) \
-       ((struct ufs_super_block_second *)(ubh)-> \
-       bh[UFS_SECTOR_SIZE >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE & ~uspi->s_fmask))
+static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
+                                  unsigned int offset)
+{
+       unsigned int index;
+       
+       index = offset >> uspi->s_fshift;
+       offset &= ~uspi->s_fmask;
+       return uspi->s_ubh.bh[index]->b_data + offset;
+}
+
+#define ubh_get_usb_first(uspi) \
+       ((struct ufs_super_block_first *)get_usb_offset((uspi), 0))
+
+#define ubh_get_usb_second(uspi) \
+       ((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))
+
+#define ubh_get_usb_third(uspi)        \
+       ((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))
 
-#define ubh_get_usb_third(ubh) \
-       ((struct ufs_super_block_third *)((ubh)-> \
-       bh[UFS_SECTOR_SIZE*2 >> uspi->s_fshift]->b_data + (UFS_SECTOR_SIZE*2 & ~uspi->s_fmask)))
 
 #define ubh_get_ucg(ubh) \
        ((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))
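
get_usb_offset() above splits a byte offset into a buffer-head index (offset >> s_fshift) and an offset inside that fragment (offset & ~s_fmask, where s_fmask is the complement of fragment_size - 1). A quick userspace sketch of the arithmetic, assuming a 1024-byte fragment size; the real values come from the on-disk superblock.

/*
 * Worked example of the index/offset split done by get_usb_offset(),
 * with s_fshift = 10 (1024-byte fragments).  ubh_get_usb_second() then
 * resolves to bh[0] + 512, ubh_get_usb_third() to bh[1] + 0.
 */
#include <stdio.h>

#define UFS_SECTOR_SIZE 512u

int main(void)
{
	unsigned s_fshift = 10;			/* log2(fragment size), assumed */
	unsigned s_fmask  = ~((1u << s_fshift) - 1);
	unsigned offsets[] = { 0, UFS_SECTOR_SIZE, 2 * UFS_SECTOR_SIZE };

	for (unsigned i = 0; i < 3; i++) {
		unsigned off   = offsets[i];
		unsigned index = off >> s_fshift;	/* which buffer head */
		unsigned rest  = off & ~s_fmask;	/* offset inside it  */

		printf("offset %4u -> bh[%u] + %u\n", off, index, rest);
	}
	return 0;
}
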
index 94d3cdfbf9b8054a502b976fb8c7825837bbf85c..d1db8c17a74e8eb7b5ccd0a0a971808d1960606f 100644 (file)
 #include "xfs_rw.h"
 #include "xfs_iomap.h"
 #include <linux/mpage.h>
+#include <linux/pagevec.h>
 #include <linux/writeback.h>
 
 STATIC void xfs_count_page_state(struct page *, int *, int *, int *);
-STATIC void xfs_convert_page(struct inode *, struct page *, xfs_iomap_t *,
-               struct writeback_control *wbc, void *, int, int);
 
 #if defined(XFS_RW_TRACE)
 void
@@ -55,17 +54,15 @@ xfs_page_trace(
        int             mask)
 {
        xfs_inode_t     *ip;
-       bhv_desc_t      *bdp;
        vnode_t         *vp = LINVFS_GET_VP(inode);
        loff_t          isize = i_size_read(inode);
-       loff_t          offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
+       loff_t          offset = page_offset(page);
        int             delalloc = -1, unmapped = -1, unwritten = -1;
 
        if (page_has_buffers(page))
                xfs_count_page_state(page, &delalloc, &unmapped, &unwritten);
 
-       bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-       ip = XFS_BHVTOI(bdp);
+       ip = xfs_vtoi(vp);
        if (!ip->i_rwtrace)
                return;
 
@@ -103,15 +100,56 @@ xfs_finish_ioend(
                queue_work(xfsdatad_workqueue, &ioend->io_work);
 }
 
+/*
+ * We're now finished for good with this ioend structure.
+ * Update the page state via the associated buffer_heads,
+ * release holds on the inode and bio, and finally free
+ * up memory.  Do not use the ioend after this.
+ */
 STATIC void
 xfs_destroy_ioend(
        xfs_ioend_t             *ioend)
 {
+       struct buffer_head      *bh, *next;
+
+       for (bh = ioend->io_buffer_head; bh; bh = next) {
+               next = bh->b_private;
+               bh->b_end_io(bh, ioend->io_uptodate);
+       }
+
        vn_iowake(ioend->io_vnode);
        mempool_free(ioend, xfs_ioend_pool);
 }
 
 /*
+ * Buffered IO write completion for delayed allocate extents.
+ * TODO: Update ondisk isize now that we know the file data
+ * has been flushed (i.e. the notorious "NULL file" problem).
+ */
+STATIC void
+xfs_end_bio_delalloc(
+       void                    *data)
+{
+       xfs_ioend_t             *ioend = data;
+
+       xfs_destroy_ioend(ioend);
+}
+
+/*
+ * Buffered IO write completion for regular, written extents.
+ */
+STATIC void
+xfs_end_bio_written(
+       void                    *data)
+{
+       xfs_ioend_t             *ioend = data;
+
+       xfs_destroy_ioend(ioend);
+}
+
+/*
+ * IO write completion for unwritten extents.
+ *
  * Issue transactions to convert a buffer range from unwritten
  * to written extents.
  */
@@ -123,21 +161,10 @@ xfs_end_bio_unwritten(
        vnode_t                 *vp = ioend->io_vnode;
        xfs_off_t               offset = ioend->io_offset;
        size_t                  size = ioend->io_size;
-       struct buffer_head      *bh, *next;
        int                     error;
 
        if (ioend->io_uptodate)
                VOP_BMAP(vp, offset, size, BMAPI_UNWRITTEN, NULL, NULL, error);
-
-       /* ioend->io_buffer_head is only non-NULL for buffered I/O */
-       for (bh = ioend->io_buffer_head; bh; bh = next) {
-               next = bh->b_private;
-
-               bh->b_end_io = NULL;
-               clear_buffer_unwritten(bh);
-               end_buffer_async_write(bh, ioend->io_uptodate);
-       }
-
        xfs_destroy_ioend(ioend);
 }
 
@@ -149,7 +176,8 @@ xfs_end_bio_unwritten(
  */
 STATIC xfs_ioend_t *
 xfs_alloc_ioend(
-       struct inode            *inode)
+       struct inode            *inode,
+       unsigned int            type)
 {
        xfs_ioend_t             *ioend;
 
@@ -162,45 +190,25 @@ xfs_alloc_ioend(
         */
        atomic_set(&ioend->io_remaining, 1);
        ioend->io_uptodate = 1; /* cleared if any I/O fails */
+       ioend->io_list = NULL;
+       ioend->io_type = type;
        ioend->io_vnode = LINVFS_GET_VP(inode);
        ioend->io_buffer_head = NULL;
+       ioend->io_buffer_tail = NULL;
        atomic_inc(&ioend->io_vnode->v_iocount);
        ioend->io_offset = 0;
        ioend->io_size = 0;
 
-       INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+       if (type == IOMAP_UNWRITTEN)
+               INIT_WORK(&ioend->io_work, xfs_end_bio_unwritten, ioend);
+       else if (type == IOMAP_DELAY)
+               INIT_WORK(&ioend->io_work, xfs_end_bio_delalloc, ioend);
+       else
+               INIT_WORK(&ioend->io_work, xfs_end_bio_written, ioend);
 
        return ioend;
 }
 
-void
-linvfs_unwritten_done(
-       struct buffer_head      *bh,
-       int                     uptodate)
-{
-       xfs_ioend_t             *ioend = bh->b_private;
-       static spinlock_t       unwritten_done_lock = SPIN_LOCK_UNLOCKED;
-       unsigned long           flags;
-
-       ASSERT(buffer_unwritten(bh));
-       bh->b_end_io = NULL;
-
-       if (!uptodate)
-               ioend->io_uptodate = 0;
-
-       /*
-        * Deep magic here.  We reuse b_private in the buffer_heads to build
-        * a chain for completing the I/O from user context after we've issued
-        * a transaction to convert the unwritten extent.
-        */
-       spin_lock_irqsave(&unwritten_done_lock, flags);
-       bh->b_private = ioend->io_buffer_head;
-       ioend->io_buffer_head = bh;
-       spin_unlock_irqrestore(&unwritten_done_lock, flags);
-
-       xfs_finish_ioend(ioend);
-}
-
 STATIC int
 xfs_map_blocks(
        struct inode            *inode,
@@ -218,138 +226,260 @@ xfs_map_blocks(
        return -error;
 }
 
+STATIC inline int
+xfs_iomap_valid(
+       xfs_iomap_t             *iomapp,
+       loff_t                  offset)
+{
+       return offset >= iomapp->iomap_offset &&
+               offset < iomapp->iomap_offset + iomapp->iomap_bsize;
+}
+
 /*
- * Finds the corresponding mapping in block @map array of the
- * given @offset within a @page.
+ * BIO completion handler for buffered IO.
  */
-STATIC xfs_iomap_t *
-xfs_offset_to_map(
+STATIC int
+xfs_end_bio(
+       struct bio              *bio,
+       unsigned int            bytes_done,
+       int                     error)
+{
+       xfs_ioend_t             *ioend = bio->bi_private;
+
+       if (bio->bi_size)
+               return 1;
+
+       ASSERT(ioend);
+       ASSERT(atomic_read(&bio->bi_cnt) >= 1);
+
+       /* Toss bio and pass work off to an xfsdatad thread */
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
+               ioend->io_uptodate = 0;
+       bio->bi_private = NULL;
+       bio->bi_end_io = NULL;
+
+       bio_put(bio);
+       xfs_finish_ioend(ioend);
+       return 0;
+}
+
+STATIC void
+xfs_submit_ioend_bio(
+       xfs_ioend_t     *ioend,
+       struct bio      *bio)
+{
+       atomic_inc(&ioend->io_remaining);
+
+       bio->bi_private = ioend;
+       bio->bi_end_io = xfs_end_bio;
+
+       submit_bio(WRITE, bio);
+       ASSERT(!bio_flagged(bio, BIO_EOPNOTSUPP));
+       bio_put(bio);
+}
+
+STATIC struct bio *
+xfs_alloc_ioend_bio(
+       struct buffer_head      *bh)
+{
+       struct bio              *bio;
+       int                     nvecs = bio_get_nr_vecs(bh->b_bdev);
+
+       do {
+               bio = bio_alloc(GFP_NOIO, nvecs);
+               nvecs >>= 1;
+       } while (!bio);
+
+       ASSERT(bio->bi_private == NULL);
+       bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+       bio->bi_bdev = bh->b_bdev;
+       bio_get(bio);
+       return bio;
+}
+
+STATIC void
+xfs_start_buffer_writeback(
+       struct buffer_head      *bh)
+{
+       ASSERT(buffer_mapped(bh));
+       ASSERT(buffer_locked(bh));
+       ASSERT(!buffer_delay(bh));
+       ASSERT(!buffer_unwritten(bh));
+
+       mark_buffer_async_write(bh);
+       set_buffer_uptodate(bh);
+       clear_buffer_dirty(bh);
+}
+
+STATIC void
+xfs_start_page_writeback(
        struct page             *page,
-       xfs_iomap_t             *iomapp,
-       unsigned long           offset)
+       struct writeback_control *wbc,
+       int                     clear_dirty,
+       int                     buffers)
+{
+       ASSERT(PageLocked(page));
+       ASSERT(!PageWriteback(page));
+       set_page_writeback(page);
+       if (clear_dirty)
+               clear_page_dirty(page);
+       unlock_page(page);
+       if (!buffers) {
+               end_page_writeback(page);
+               wbc->pages_skipped++;   /* We didn't write this page */
+       }
+}
+
+static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
+{
+       return bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));
+}
+
+/*
+ * Submit all of the bios for all of the ioends we have saved up,
+ * covering the initial writepage page and also any probed pages.
+ */
+STATIC void
+xfs_submit_ioend(
+       xfs_ioend_t             *ioend)
+{
+       xfs_ioend_t             *next;
+       struct buffer_head      *bh;
+       struct bio              *bio;
+       sector_t                lastblock = 0;
+
+       do {
+               next = ioend->io_list;
+               bio = NULL;
+
+               for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
+                       xfs_start_buffer_writeback(bh);
+
+                       if (!bio) {
+ retry:
+                               bio = xfs_alloc_ioend_bio(bh);
+                       } else if (bh->b_blocknr != lastblock + 1) {
+                               xfs_submit_ioend_bio(ioend, bio);
+                               goto retry;
+                       }
+
+                       if (bio_add_buffer(bio, bh) != bh->b_size) {
+                               xfs_submit_ioend_bio(ioend, bio);
+                               goto retry;
+                       }
+
+                       lastblock = bh->b_blocknr;
+               }
+               if (bio)
+                       xfs_submit_ioend_bio(ioend, bio);
+               xfs_finish_ioend(ioend);
+       } while ((ioend = next) != NULL);
+}
+
+/*
+ * Cancel submission of all buffer_heads so far in this endio.
+ * Toss the endio too.  Only ever called for the initial page
+ * in a writepage request, so only ever one page.
+ */
+STATIC void
+xfs_cancel_ioend(
+       xfs_ioend_t             *ioend)
+{
+       xfs_ioend_t             *next;
+       struct buffer_head      *bh, *next_bh;
+
+       do {
+               next = ioend->io_list;
+               bh = ioend->io_buffer_head;
+               do {
+                       next_bh = bh->b_private;
+                       clear_buffer_async_write(bh);
+                       unlock_buffer(bh);
+               } while ((bh = next_bh) != NULL);
+
+               vn_iowake(ioend->io_vnode);
+               mempool_free(ioend, xfs_ioend_pool);
+       } while ((ioend = next) != NULL);
+}
+
+/*
+ * Test to see if we've been building up a completion structure for
+ * earlier buffers -- if so, we try to append to this ioend if we
+ * can, otherwise we finish off any current ioend and start another.
+ * The current ioend is passed back through *result.
+ */
+STATIC void
+xfs_add_to_ioend(
+       struct inode            *inode,
+       struct buffer_head      *bh,
+       xfs_off_t               offset,
+       unsigned int            type,
+       xfs_ioend_t             **result,
+       int                     need_ioend)
 {
-       loff_t                  full_offset;    /* offset from start of file */
+       xfs_ioend_t             *ioend = *result;
 
-       ASSERT(offset < PAGE_CACHE_SIZE);
+       if (!ioend || need_ioend || type != ioend->io_type) {
+               xfs_ioend_t     *previous = *result;
 
-       full_offset = page->index;              /* NB: using 64bit number */
-       full_offset <<= PAGE_CACHE_SHIFT;       /* offset from file start */
-       full_offset += offset;                  /* offset from page start */
+               ioend = xfs_alloc_ioend(inode, type);
+               ioend->io_offset = offset;
+               ioend->io_buffer_head = bh;
+               ioend->io_buffer_tail = bh;
+               if (previous)
+                       previous->io_list = ioend;
+               *result = ioend;
+       } else {
+               ioend->io_buffer_tail->b_private = bh;
+               ioend->io_buffer_tail = bh;
+       }
 
-       if (full_offset < iomapp->iomap_offset)
-               return NULL;
-       if (iomapp->iomap_offset + (iomapp->iomap_bsize -1) >= full_offset)
-               return iomapp;
-       return NULL;
+       bh->b_private = NULL;
+       ioend->io_size += bh->b_size;
 }
 
 STATIC void
 xfs_map_at_offset(
-       struct page             *page,
        struct buffer_head      *bh,
-       unsigned long           offset,
+       loff_t                  offset,
        int                     block_bits,
        xfs_iomap_t             *iomapp)
 {
        xfs_daddr_t             bn;
-       loff_t                  delta;
        int                     sector_shift;
 
        ASSERT(!(iomapp->iomap_flags & IOMAP_HOLE));
        ASSERT(!(iomapp->iomap_flags & IOMAP_DELAY));
        ASSERT(iomapp->iomap_bn != IOMAP_DADDR_NULL);
 
-       delta = page->index;
-       delta <<= PAGE_CACHE_SHIFT;
-       delta += offset;
-       delta -= iomapp->iomap_offset;
-       delta >>= block_bits;
-
        sector_shift = block_bits - BBSHIFT;
-       bn = iomapp->iomap_bn >> sector_shift;
-       bn += delta;
-       BUG_ON(!bn && !(iomapp->iomap_flags & IOMAP_REALTIME));
+       bn = (iomapp->iomap_bn >> sector_shift) +
+             ((offset - iomapp->iomap_offset) >> block_bits);
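+       /*
+        * Note: iomap_bn is in 512 byte basic blocks; shifting by
+        * sector_shift (block_bits - BBSHIFT) converts it to filesystem
+        * blocks before adding the block offset within the mapping.
+        * E.g. with 4k blocks, sector_shift is 3 and a buffer 8k past
+        * iomap_offset lands two blocks beyond iomap_bn >> 3.
+        */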
+
+       ASSERT(bn || (iomapp->iomap_flags & IOMAP_REALTIME));
        ASSERT((bn << sector_shift) >= iomapp->iomap_bn);
 
        lock_buffer(bh);
        bh->b_blocknr = bn;
-       bh->b_bdev = iomapp->iomap_target->pbr_bdev;
+       bh->b_bdev = iomapp->iomap_target->bt_bdev;
        set_buffer_mapped(bh);
        clear_buffer_delay(bh);
+       clear_buffer_unwritten(bh);
 }
 
 /*
- * Look for a page at index which is unlocked and contains our
- * unwritten extent flagged buffers at its head.  Returns page
- * locked and with an extra reference count, and length of the
- * unwritten extent component on this page that we can write,
- * in units of filesystem blocks.
- */
-STATIC struct page *
-xfs_probe_unwritten_page(
-       struct address_space    *mapping,
-       pgoff_t                 index,
-       xfs_iomap_t             *iomapp,
-       xfs_ioend_t             *ioend,
-       unsigned long           max_offset,
-       unsigned long           *fsbs,
-       unsigned int            bbits)
-{
-       struct page             *page;
-
-       page = find_trylock_page(mapping, index);
-       if (!page)
-               return NULL;
-       if (PageWriteback(page))
-               goto out;
-
-       if (page->mapping && page_has_buffers(page)) {
-               struct buffer_head      *bh, *head;
-               unsigned long           p_offset = 0;
-
-               *fsbs = 0;
-               bh = head = page_buffers(page);
-               do {
-                       if (!buffer_unwritten(bh) || !buffer_uptodate(bh))
-                               break;
-                       if (!xfs_offset_to_map(page, iomapp, p_offset))
-                               break;
-                       if (p_offset >= max_offset)
-                               break;
-                       xfs_map_at_offset(page, bh, p_offset, bbits, iomapp);
-                       set_buffer_unwritten_io(bh);
-                       bh->b_private = ioend;
-                       p_offset += bh->b_size;
-                       (*fsbs)++;
-               } while ((bh = bh->b_this_page) != head);
-
-               if (p_offset)
-                       return page;
-       }
-
-out:
-       unlock_page(page);
-       return NULL;
-}
-
-/*
- * Look for a page at index which is unlocked and not mapped
- * yet - clustering for mmap write case.
+ * Look for a page at index that is suitable for clustering.
  */
 STATIC unsigned int
-xfs_probe_unmapped_page(
-       struct address_space    *mapping,
-       pgoff_t                 index,
-       unsigned int            pg_offset)
+xfs_probe_page(
+       struct page             *page,
+       unsigned int            pg_offset,
+       int                     mapped)
 {
-       struct page             *page;
        int                     ret = 0;
 
-       page = find_trylock_page(mapping, index);
-       if (!page)
-               return 0;
        if (PageWriteback(page))
-               goto out;
+               return 0;
 
        if (page->mapping && PageDirty(page)) {
                if (page_has_buffers(page)) {
@@ -357,79 +487,101 @@ xfs_probe_unmapped_page(
 
                        bh = head = page_buffers(page);
                        do {
-                               if (buffer_mapped(bh) || !buffer_uptodate(bh))
+                               if (!buffer_uptodate(bh))
+                                       break;
+                               if (mapped != buffer_mapped(bh))
                                        break;
                                ret += bh->b_size;
                                if (ret >= pg_offset)
                                        break;
                        } while ((bh = bh->b_this_page) != head);
                } else
-                       ret = PAGE_CACHE_SIZE;
+                       ret = mapped ? 0 : PAGE_CACHE_SIZE;
        }
 
-out:
-       unlock_page(page);
        return ret;
 }
 
-STATIC unsigned int
-xfs_probe_unmapped_cluster(
+STATIC size_t
+xfs_probe_cluster(
        struct inode            *inode,
        struct page             *startpage,
        struct buffer_head      *bh,
-       struct buffer_head      *head)
+       struct buffer_head      *head,
+       int                     mapped)
 {
+       struct pagevec          pvec;
        pgoff_t                 tindex, tlast, tloff;
-       unsigned int            pg_offset, len, total = 0;
-       struct address_space    *mapping = inode->i_mapping;
+       size_t                  total = 0;
+       int                     done = 0, i;
 
        /* First sum forwards in this page */
        do {
-               if (buffer_mapped(bh))
-                       break;
+               if (mapped != buffer_mapped(bh))
+                       return total;
                total += bh->b_size;
        } while ((bh = bh->b_this_page) != head);
 
-       /* If we reached the end of the page, sum forwards in
-        * following pages.
-        */
-       if (bh == head) {
-               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-               /* Prune this back to avoid pathological behavior */
-               tloff = min(tlast, startpage->index + 64);
-               for (tindex = startpage->index + 1; tindex < tloff; tindex++) {
-                       len = xfs_probe_unmapped_page(mapping, tindex,
-                                                       PAGE_CACHE_SIZE);
-                       if (!len)
-                               return total;
+       /* if we reached the end of the page, sum forwards in following pages */
+       tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
+       tindex = startpage->index + 1;
+
+       /* Prune this back to avoid pathological behavior */
+       tloff = min(tlast, startpage->index + 64);
+
+       pagevec_init(&pvec, 0);
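+       /*
+        * Probe forward one pagevec batch at a time; stop at the first
+        * page that is missing, already locked, or fails xfs_probe_page.
+        */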
+       while (!done && tindex <= tloff) {
+               unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+               if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
+                       break;
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       struct page *page = pvec.pages[i];
+                       size_t pg_offset, len = 0;
+
+                       if (tindex == tlast) {
+                               pg_offset =
+                                   i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
+                               if (!pg_offset) {
+                                       done = 1;
+                                       break;
+                               }
+                       } else
+                               pg_offset = PAGE_CACHE_SIZE;
+
+                       if (page->index == tindex && !TestSetPageLocked(page)) {
+                               len = xfs_probe_page(page, pg_offset, mapped);
+                               unlock_page(page);
+                       }
+
+                       if (!len) {
+                               done = 1;
+                               break;
+                       }
+
                        total += len;
+                       tindex++;
                }
-               if (tindex == tlast &&
-                   (pg_offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1))) {
-                       total += xfs_probe_unmapped_page(mapping,
-                                                       tindex, pg_offset);
-               }
+
+               pagevec_release(&pvec);
+               cond_resched();
        }
+
        return total;
 }
 
 /*
- * Probe for a given page (index) in the inode and test if it is delayed
- * and without unwritten buffers.  Returns page locked and with an extra
- * reference count.
+ * Test if a given page is suitable for writing as part of an unwritten
+ * or delayed allocate extent.
  */
-STATIC struct page *
-xfs_probe_delalloc_page(
-       struct inode            *inode,
-       pgoff_t                 index)
+STATIC int
+xfs_is_delayed_page(
+       struct page             *page,
+       unsigned int            type)
 {
-       struct page             *page;
-
-       page = find_trylock_page(inode->i_mapping, index);
-       if (!page)
-               return NULL;
        if (PageWriteback(page))
-               goto out;
+               return 0;
 
        if (page->mapping && page_has_buffers(page)) {
                struct buffer_head      *bh, *head;
@@ -437,243 +589,156 @@ xfs_probe_delalloc_page(
 
                bh = head = page_buffers(page);
                do {
-                       if (buffer_unwritten(bh)) {
-                               acceptable = 0;
+                       if (buffer_unwritten(bh))
+                               acceptable = (type == IOMAP_UNWRITTEN);
+                       else if (buffer_delay(bh))
+                               acceptable = (type == IOMAP_DELAY);
+                       else if (buffer_mapped(bh))
+                               acceptable = (type == 0);
+                       else
                                break;
-                       } else if (buffer_delay(bh)) {
-                               acceptable = 1;
-                       }
                } while ((bh = bh->b_this_page) != head);
 
                if (acceptable)
-                       return page;
-       }
-
-out:
-       unlock_page(page);
-       return NULL;
-}
-
-STATIC int
-xfs_map_unwritten(
-       struct inode            *inode,
-       struct page             *start_page,
-       struct buffer_head      *head,
-       struct buffer_head      *curr,
-       unsigned long           p_offset,
-       int                     block_bits,
-       xfs_iomap_t             *iomapp,
-       struct writeback_control *wbc,
-       int                     startio,
-       int                     all_bh)
-{
-       struct buffer_head      *bh = curr;
-       xfs_iomap_t             *tmp;
-       xfs_ioend_t             *ioend;
-       loff_t                  offset;
-       unsigned long           nblocks = 0;
-
-       offset = start_page->index;
-       offset <<= PAGE_CACHE_SHIFT;
-       offset += p_offset;
-
-       ioend = xfs_alloc_ioend(inode);
-
-       /* First map forwards in the page consecutive buffers
-        * covering this unwritten extent
-        */
-       do {
-               if (!buffer_unwritten(bh))
-                       break;
-               tmp = xfs_offset_to_map(start_page, iomapp, p_offset);
-               if (!tmp)
-                       break;
-               xfs_map_at_offset(start_page, bh, p_offset, block_bits, iomapp);
-               set_buffer_unwritten_io(bh);
-               bh->b_private = ioend;
-               p_offset += bh->b_size;
-               nblocks++;
-       } while ((bh = bh->b_this_page) != head);
-
-       atomic_add(nblocks, &ioend->io_remaining);
-
-       /* If we reached the end of the page, map forwards in any
-        * following pages which are also covered by this extent.
-        */
-       if (bh == head) {
-               struct address_space    *mapping = inode->i_mapping;
-               pgoff_t                 tindex, tloff, tlast;
-               unsigned long           bs;
-               unsigned int            pg_offset, bbits = inode->i_blkbits;
-               struct page             *page;
-
-               tlast = i_size_read(inode) >> PAGE_CACHE_SHIFT;
-               tloff = (iomapp->iomap_offset + iomapp->iomap_bsize) >> PAGE_CACHE_SHIFT;
-               tloff = min(tlast, tloff);
-               for (tindex = start_page->index + 1; tindex < tloff; tindex++) {
-                       page = xfs_probe_unwritten_page(mapping,
-                                               tindex, iomapp, ioend,
-                                               PAGE_CACHE_SIZE, &bs, bbits);
-                       if (!page)
-                               break;
-                       nblocks += bs;
-                       atomic_add(bs, &ioend->io_remaining);
-                       xfs_convert_page(inode, page, iomapp, wbc, ioend,
-                                                       startio, all_bh);
-                       /* stop if converting the next page might add
-                        * enough blocks that the corresponding byte
-                        * count won't fit in our ulong page buf length */
-                       if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-                               goto enough;
-               }
-
-               if (tindex == tlast &&
-                   (pg_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1)))) {
-                       page = xfs_probe_unwritten_page(mapping,
-                                                       tindex, iomapp, ioend,
-                                                       pg_offset, &bs, bbits);
-                       if (page) {
-                               nblocks += bs;
-                               atomic_add(bs, &ioend->io_remaining);
-                               xfs_convert_page(inode, page, iomapp, wbc, ioend,
-                                                       startio, all_bh);
-                               if (nblocks >= ((ULONG_MAX - PAGE_SIZE) >> block_bits))
-                                       goto enough;
-                       }
-               }
+                       return 1;
        }
 
-enough:
-       ioend->io_size = (xfs_off_t)nblocks << block_bits;
-       ioend->io_offset = offset;
-       xfs_finish_ioend(ioend);
        return 0;
 }
 
-STATIC void
-xfs_submit_page(
-       struct page             *page,
-       struct writeback_control *wbc,
-       struct buffer_head      *bh_arr[],
-       int                     bh_count,
-       int                     probed_page,
-       int                     clear_dirty)
-{
-       struct buffer_head      *bh;
-       int                     i;
-
-       BUG_ON(PageWriteback(page));
-       if (bh_count)
-               set_page_writeback(page);
-       if (clear_dirty)
-               clear_page_dirty(page);
-       unlock_page(page);
-
-       if (bh_count) {
-               for (i = 0; i < bh_count; i++) {
-                       bh = bh_arr[i];
-                       mark_buffer_async_write(bh);
-                       if (buffer_unwritten(bh))
-                               set_buffer_unwritten_io(bh);
-                       set_buffer_uptodate(bh);
-                       clear_buffer_dirty(bh);
-               }
-
-               for (i = 0; i < bh_count; i++)
-                       submit_bh(WRITE, bh_arr[i]);
-
-               if (probed_page && clear_dirty)
-                       wbc->nr_to_write--;     /* Wrote an "extra" page */
-       }
-}
-
 /*
  * Allocate & map buffers for page given the extent map. Write it out.
  * Except for the original page of a writepage, this is called on
  * delalloc/unwritten pages only; for the original page it is possible
  * that the page has no mapping at all.
  */
-STATIC void
+STATIC int
 xfs_convert_page(
        struct inode            *inode,
        struct page             *page,
-       xfs_iomap_t             *iomapp,
+       loff_t                  tindex,
+       xfs_iomap_t             *mp,
+       xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
-       void                    *private,
        int                     startio,
        int                     all_bh)
 {
-       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-       xfs_iomap_t             *mp = iomapp, *tmp;
-       unsigned long           offset, end_offset;
-       int                     index = 0;
+       struct buffer_head      *bh, *head;
+       xfs_off_t               end_offset;
+       unsigned long           p_offset;
+       unsigned int            type;
        int                     bbits = inode->i_blkbits;
        int                     len, page_dirty;
+       int                     count = 0, done = 0, uptodate = 1;
+       xfs_off_t               offset = page_offset(page);
 
-       end_offset = (i_size_read(inode) & (PAGE_CACHE_SIZE - 1));
+       if (page->index != tindex)
+               goto fail;
+       if (TestSetPageLocked(page))
+               goto fail;
+       if (PageWriteback(page))
+               goto fail_unlock_page;
+       if (page->mapping != inode->i_mapping)
+               goto fail_unlock_page;
+       if (!xfs_is_delayed_page(page, (*ioendp)->io_type))
+               goto fail_unlock_page;
 
        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
+        *
+        * Derivation:
+        *
+        * End offset is the highest offset that this page should represent.
+        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+        * hence give us the correct page_dirty count. On any other page,
+        * it will be zero and in that case we need page_dirty to be the
+        * count of buffers on the page.
         */
+       end_offset = min_t(unsigned long long,
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
+                       i_size_read(inode));
+
        len = 1 << inode->i_blkbits;
-       end_offset = max(end_offset, PAGE_CACHE_SIZE);
-       end_offset = roundup(end_offset, len);
-       page_dirty = end_offset / len;
+       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+                                       PAGE_CACHE_SIZE);
+       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
+       page_dirty = p_offset / len;
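+       /*
+        * Illustration (assuming 4k pages, 1k blocks, i_size 10000): on
+        * the last page (index 2) end_offset is 10000, p_offset rounds
+        * up from 1808 to 2048 and page_dirty is 2; on any earlier page
+        * p_offset falls back to PAGE_CACHE_SIZE and page_dirty is 4.
+        */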
 
-       offset = 0;
        bh = head = page_buffers(page);
        do {
                if (offset >= end_offset)
                        break;
-               if (!(PageUptodate(page) || buffer_uptodate(bh)))
+               if (!buffer_uptodate(bh))
+                       uptodate = 0;
+               if (!(PageUptodate(page) || buffer_uptodate(bh))) {
+                       done = 1;
                        continue;
-               if (buffer_mapped(bh) && all_bh &&
-                   !(buffer_unwritten(bh) || buffer_delay(bh))) {
+               }
+
+               if (buffer_unwritten(bh) || buffer_delay(bh)) {
+                       if (buffer_unwritten(bh))
+                               type = IOMAP_UNWRITTEN;
+                       else
+                               type = IOMAP_DELAY;
+
+                       if (!xfs_iomap_valid(mp, offset)) {
+                               done = 1;
+                               continue;
+                       }
+
+                       ASSERT(!(mp->iomap_flags & IOMAP_HOLE));
+                       ASSERT(!(mp->iomap_flags & IOMAP_DELAY));
+
+                       xfs_map_at_offset(bh, offset, bbits, mp);
                        if (startio) {
+                               xfs_add_to_ioend(inode, bh, offset,
+                                               type, ioendp, done);
+                       } else {
+                               set_buffer_dirty(bh);
+                               unlock_buffer(bh);
+                               mark_buffer_dirty(bh);
+                       }
+                       page_dirty--;
+                       count++;
+               } else {
+                       type = 0;
+                       if (buffer_mapped(bh) && all_bh && startio) {
                                lock_buffer(bh);
-                               bh_arr[index++] = bh;
+                               xfs_add_to_ioend(inode, bh, offset,
+                                               type, ioendp, done);
+                               count++;
                                page_dirty--;
+                       } else {
+                               done = 1;
                        }
-                       continue;
                }
-               tmp = xfs_offset_to_map(page, mp, offset);
-               if (!tmp)
-                       continue;
-               ASSERT(!(tmp->iomap_flags & IOMAP_HOLE));
-               ASSERT(!(tmp->iomap_flags & IOMAP_DELAY));
+       } while (offset += len, (bh = bh->b_this_page) != head);
 
-               /* If this is a new unwritten extent buffer (i.e. one
-                * that we haven't passed in private data for, we must
-                * now map this buffer too.
-                */
-               if (buffer_unwritten(bh) && !bh->b_end_io) {
-                       ASSERT(tmp->iomap_flags & IOMAP_UNWRITTEN);
-                       xfs_map_unwritten(inode, page, head, bh, offset,
-                                       bbits, tmp, wbc, startio, all_bh);
-               } else if (! (buffer_unwritten(bh) && buffer_locked(bh))) {
-                       xfs_map_at_offset(page, bh, offset, bbits, tmp);
-                       if (buffer_unwritten(bh)) {
-                               set_buffer_unwritten_io(bh);
-                               bh->b_private = private;
-                               ASSERT(private);
+       if (uptodate && bh == head)
+               SetPageUptodate(page);
+
+       if (startio) {
+               if (count) {
+                       struct backing_dev_info *bdi;
+
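+                       /*
+                        * Throttle further clustering if the backing device
+                        * is write-congested or this call's nr_to_write
+                        * budget has been used up.
+                        */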
+                       bdi = inode->i_mapping->backing_dev_info;
+                       if (bdi_write_congested(bdi)) {
+                               wbc->encountered_congestion = 1;
+                               done = 1;
+                       } else if (--wbc->nr_to_write <= 0) {
+                               done = 1;
                        }
                }
-               if (startio) {
-                       bh_arr[index++] = bh;
-               } else {
-                       set_buffer_dirty(bh);
-                       unlock_buffer(bh);
-                       mark_buffer_dirty(bh);
-               }
-               page_dirty--;
-       } while (offset += len, (bh = bh->b_this_page) != head);
-
-       if (startio && index) {
-               xfs_submit_page(page, wbc, bh_arr, index, 1, !page_dirty);
-       } else {
-               unlock_page(page);
+               xfs_start_page_writeback(page, wbc, !page_dirty, count);
        }
+
+       return done;
+ fail_unlock_page:
+       unlock_page(page);
+ fail:
+       return 1;
 }
 
 /*
@@ -685,19 +750,31 @@ xfs_cluster_write(
        struct inode            *inode,
        pgoff_t                 tindex,
        xfs_iomap_t             *iomapp,
+       xfs_ioend_t             **ioendp,
        struct writeback_control *wbc,
        int                     startio,
        int                     all_bh,
        pgoff_t                 tlast)
 {
-       struct page             *page;
+       struct pagevec          pvec;
+       int                     done = 0, i;
 
-       for (; tindex <= tlast; tindex++) {
-               page = xfs_probe_delalloc_page(inode, tindex);
-               if (!page)
+       pagevec_init(&pvec, 0);
+       while (!done && tindex <= tlast) {
+               unsigned len = min_t(pgoff_t, PAGEVEC_SIZE, tlast - tindex + 1);
+
+               if (!pagevec_lookup(&pvec, inode->i_mapping, tindex, len))
                        break;
-               xfs_convert_page(inode, page, iomapp, wbc, NULL,
-                               startio, all_bh);
+
+               for (i = 0; i < pagevec_count(&pvec); i++) {
+                       done = xfs_convert_page(inode, pvec.pages[i], tindex++,
+                                       iomapp, ioendp, wbc, startio, all_bh);
+                       if (done)
+                               break;
+               }
+
+               pagevec_release(&pvec);
+               cond_resched();
        }
 }
 
@@ -728,18 +805,22 @@ xfs_page_state_convert(
        int             startio,
        int             unmapped) /* also implies page uptodate */
 {
-       struct buffer_head      *bh_arr[MAX_BUF_PER_PAGE], *bh, *head;
-       xfs_iomap_t             *iomp, iomap;
+       struct buffer_head      *bh, *head;
+       xfs_iomap_t             iomap;
+       xfs_ioend_t             *ioend = NULL, *iohead = NULL;
        loff_t                  offset;
        unsigned long           p_offset = 0;
+       unsigned int            type;
        __uint64_t              end_offset;
        pgoff_t                 end_index, last_index, tlast;
-       int                     len, err, i, cnt = 0, uptodate = 1;
-       int                     flags;
-       int                     page_dirty;
+       ssize_t                 size, len;
+       int                     flags, err, iomap_valid = 0, uptodate = 1;
+       int                     page_dirty, count = 0, trylock_flag = 0;
+       int                     all_bh = unmapped;
 
        /* wait for other IO threads? */
-       flags = (startio && wbc->sync_mode != WB_SYNC_NONE) ? 0 : BMAPI_TRYLOCK;
+       if (startio && (wbc->sync_mode == WB_SYNC_NONE && wbc->nonblocking))
+               trylock_flag |= BMAPI_TRYLOCK;
 
        /* Is this page beyond the end of the file? */
        offset = i_size_read(inode);
@@ -754,161 +835,173 @@ xfs_page_state_convert(
                }
        }
 
-       end_offset = min_t(unsigned long long,
-                       (loff_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
-       offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
-
        /*
         * page_dirty is initially a count of buffers on the page before
         * EOF and is decremented as we move each into a cleanable state.
-        */
+        *
+        * Derivation:
+        *
+        * End offset is the highest offset that this page should represent.
+        * If we are on the last page, (end_offset & (PAGE_CACHE_SIZE - 1))
+        * will evaluate non-zero and be less than PAGE_CACHE_SIZE and
+        * hence give us the correct page_dirty count. On any other page,
+        * it will be zero and in that case we need page_dirty to be the
+        * count of buffers on the page.
+        */
+       end_offset = min_t(unsigned long long,
+                       (xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT, offset);
        len = 1 << inode->i_blkbits;
-       p_offset = max(p_offset, PAGE_CACHE_SIZE);
-       p_offset = roundup(p_offset, len);
+       p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
+                                       PAGE_CACHE_SIZE);
+       p_offset = p_offset ? roundup(p_offset, len) : PAGE_CACHE_SIZE;
        page_dirty = p_offset / len;
 
-       iomp = NULL;
-       p_offset = 0;
        bh = head = page_buffers(page);
+       offset = page_offset(page);
+       flags = -1;
+       type = 0;
+
+       /* TODO: cleanup count and page_dirty */
 
        do {
                if (offset >= end_offset)
                        break;
                if (!buffer_uptodate(bh))
                        uptodate = 0;
-               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio)
+               if (!(PageUptodate(page) || buffer_uptodate(bh)) && !startio) {
+                       /*
+                        * the iomap is actually still valid, but the ioend
+                        * isn't.  shouldn't happen too often.
+                        */
+                       iomap_valid = 0;
                        continue;
-
-               if (iomp) {
-                       iomp = xfs_offset_to_map(page, &iomap, p_offset);
                }
 
+               if (iomap_valid)
+                       iomap_valid = xfs_iomap_valid(&iomap, offset);
+
                /*
                 * First case, map an unwritten extent and prepare for
                 * extent state conversion transaction on completion.
-                */
-               if (buffer_unwritten(bh)) {
-                       if (!startio)
-                               continue;
-                       if (!iomp) {
-                               err = xfs_map_blocks(inode, offset, len, &iomap,
-                                               BMAPI_WRITE|BMAPI_IGNSTATE);
-                               if (err) {
-                                       goto error;
-                               }
-                               iomp = xfs_offset_to_map(page, &iomap,
-                                                               p_offset);
+                *
+                * Second case, allocate space for a delalloc buffer.
+                * We can return EAGAIN here in the release page case.
+                *
+                * Third case, an unmapped buffer was found, and we are
+                * in a path where we need to write the whole page out.
+                */
+               if (buffer_unwritten(bh) || buffer_delay(bh) ||
+                   ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                    !buffer_mapped(bh) && (unmapped || startio))) {
+                       /*
+                        * Make sure we don't use a read-only iomap
+                        */
+                       if (flags == BMAPI_READ)
+                               iomap_valid = 0;
+
+                       if (buffer_unwritten(bh)) {
+                               type = IOMAP_UNWRITTEN;
+                               flags = BMAPI_WRITE|BMAPI_IGNSTATE;
+                       } else if (buffer_delay(bh)) {
+                               type = IOMAP_DELAY;
+                               flags = BMAPI_ALLOCATE;
+                               if (!startio)
+                                       flags |= trylock_flag;
+                       } else {
+                               type = IOMAP_NEW;
+                               flags = BMAPI_WRITE|BMAPI_MMAP;
                        }
-                       if (iomp) {
-                               if (!bh->b_end_io) {
-                                       err = xfs_map_unwritten(inode, page,
-                                                       head, bh, p_offset,
-                                                       inode->i_blkbits, iomp,
-                                                       wbc, startio, unmapped);
-                                       if (err) {
-                                               goto error;
-                                       }
+
+                       if (!iomap_valid) {
+                               if (type == IOMAP_NEW) {
+                                       size = xfs_probe_cluster(inode,
+                                                       page, bh, head, 0);
                                } else {
-                                       set_bit(BH_Lock, &bh->b_state);
+                                       size = len;
                                }
-                               BUG_ON(!buffer_locked(bh));
-                               bh_arr[cnt++] = bh;
-                               page_dirty--;
-                       }
-               /*
-                * Second case, allocate space for a delalloc buffer.
-                * We can return EAGAIN here in the release page case.
-                */
-               } else if (buffer_delay(bh)) {
-                       if (!iomp) {
-                               err = xfs_map_blocks(inode, offset, len, &iomap,
-                                               BMAPI_ALLOCATE | flags);
-                               if (err) {
+
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
                                        goto error;
-                               }
-                               iomp = xfs_offset_to_map(page, &iomap,
-                                                               p_offset);
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
-                       if (iomp) {
-                               xfs_map_at_offset(page, bh, p_offset,
-                                               inode->i_blkbits, iomp);
+                       if (iomap_valid) {
+                               xfs_map_at_offset(bh, offset,
+                                               inode->i_blkbits, &iomap);
                                if (startio) {
-                                       bh_arr[cnt++] = bh;
+                                       xfs_add_to_ioend(inode, bh, offset,
+                                                       type, &ioend,
+                                                       !iomap_valid);
                                } else {
                                        set_buffer_dirty(bh);
                                        unlock_buffer(bh);
                                        mark_buffer_dirty(bh);
                                }
                                page_dirty--;
+                               count++;
+                       }
+               } else if (buffer_uptodate(bh) && startio) {
+                       /*
+                        * we got here because the buffer is already mapped.
+                        * That means it must already have extents allocated
+                        * underneath it. Map the extent by reading it.
+                        */
+                       if (!iomap_valid || type != 0) {
+                               flags = BMAPI_READ;
+                               size = xfs_probe_cluster(inode, page, bh,
+                                                               head, 1);
+                               err = xfs_map_blocks(inode, offset, size,
+                                               &iomap, flags);
+                               if (err)
+                                       goto error;
+                               iomap_valid = xfs_iomap_valid(&iomap, offset);
                        }
-               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
-                          (unmapped || startio)) {
 
-                       if (!buffer_mapped(bh)) {
-                               int     size;
-
-                               /*
-                                * Getting here implies an unmapped buffer
-                                * was found, and we are in a path where we
-                                * need to write the whole page out.
-                                */
-                               if (!iomp) {
-                                       size = xfs_probe_unmapped_cluster(
-                                                       inode, page, bh, head);
-                                       err = xfs_map_blocks(inode, offset,
-                                                       size, &iomap,
-                                                       BMAPI_WRITE|BMAPI_MMAP);
-                                       if (err) {
-                                               goto error;
-                                       }
-                                       iomp = xfs_offset_to_map(page, &iomap,
-                                                                    p_offset);
-                               }
-                               if (iomp) {
-                                       xfs_map_at_offset(page,
-                                                       bh, p_offset,
-                                                       inode->i_blkbits, iomp);
-                                       if (startio) {
-                                               bh_arr[cnt++] = bh;
-                                       } else {
-                                               set_buffer_dirty(bh);
-                                               unlock_buffer(bh);
-                                               mark_buffer_dirty(bh);
-                                       }
-                                       page_dirty--;
-                               }
-                       } else if (startio) {
-                               if (buffer_uptodate(bh) &&
-                                   !test_and_set_bit(BH_Lock, &bh->b_state)) {
-                                       bh_arr[cnt++] = bh;
-                                       page_dirty--;
-                               }
+                       type = 0;
+                       if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+                               ASSERT(buffer_mapped(bh));
+                               if (iomap_valid)
+                                       all_bh = 1;
+                               xfs_add_to_ioend(inode, bh, offset, type,
+                                               &ioend, !iomap_valid);
+                               page_dirty--;
+                               count++;
+                       } else {
+                               iomap_valid = 0;
                        }
+               } else if ((buffer_uptodate(bh) || PageUptodate(page)) &&
+                          (unmapped || startio)) {
+                       iomap_valid = 0;
                }
-       } while (offset += len, p_offset += len,
-               ((bh = bh->b_this_page) != head));
+
+               if (!iohead)
+                       iohead = ioend;
+
+       } while (offset += len, ((bh = bh->b_this_page) != head));
 
        if (uptodate && bh == head)
                SetPageUptodate(page);
 
-       if (startio) {
-               xfs_submit_page(page, wbc, bh_arr, cnt, 0, !page_dirty);
-       }
+       if (startio)
+               xfs_start_page_writeback(page, wbc, 1, count);
 
-       if (iomp) {
-               offset = (iomp->iomap_offset + iomp->iomap_bsize - 1) >>
+       if (ioend && iomap_valid) {
+               offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
                                        PAGE_CACHE_SHIFT;
                tlast = min_t(pgoff_t, offset, last_index);
-               xfs_cluster_write(inode, page->index + 1, iomp, wbc,
-                                       startio, unmapped, tlast);
+               xfs_cluster_write(inode, page->index + 1, &iomap, &ioend,
+                                       wbc, startio, all_bh, tlast);
        }
 
+       if (iohead)
+               xfs_submit_ioend(iohead);
+
        return page_dirty;
 
 error:
-       for (i = 0; i < cnt; i++) {
-               unlock_buffer(bh_arr[i]);
-       }
+       if (iohead)
+               xfs_cancel_ioend(iohead);
 
        /*
         * If it's delalloc and we have nowhere to put it,
@@ -916,9 +1009,8 @@ error:
         * us to try again.
         */
        if (err != -EAGAIN) {
-               if (!unmapped) {
+               if (!unmapped)
                        block_invalidatepage(page, 0);
-               }
                ClearPageUptodate(page);
        }
        return err;
@@ -982,7 +1074,7 @@ __linvfs_get_block(
        }
 
        /* If this is a realtime file, data might be on a new device */
-       bh_result->b_bdev = iomap.iomap_target->pbr_bdev;
+       bh_result->b_bdev = iomap.iomap_target->bt_bdev;
 
        /* If we previously allocated a block out beyond eof and
         * we are now coming back to use it then we will need to
@@ -1094,10 +1186,10 @@ linvfs_direct_IO(
        if (error)
                return -error;
 
-       iocb->private = xfs_alloc_ioend(inode);
+       iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
 
        ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-               iomap.iomap_target->pbr_bdev,
+               iomap.iomap_target->bt_bdev,
                iov, offset, nr_segs,
                linvfs_get_blocks_direct,
                linvfs_end_io_direct);
index 4720758a9adebe1ec7671cbd53492f07ea7e667a..55339dd5a30dd196b19e99a55a27deed61eac73d 100644 (file)
@@ -23,14 +23,24 @@ extern mempool_t *xfs_ioend_pool;
 
 typedef void (*xfs_ioend_func_t)(void *);
 
+/*
+ * xfs_ioend struct manages large extent writes for XFS.
+ * It can manage several multi-page bio's at once.
+ */
 typedef struct xfs_ioend {
+       struct xfs_ioend        *io_list;       /* next ioend in chain */
+       unsigned int            io_type;        /* delalloc / unwritten */
        unsigned int            io_uptodate;    /* I/O status register */
        atomic_t                io_remaining;   /* hold count */
        struct vnode            *io_vnode;      /* file being written to */
        struct buffer_head      *io_buffer_head;/* buffer linked list head */
+       struct buffer_head      *io_buffer_tail;/* buffer linked list tail */
        size_t                  io_size;        /* size of the extent */
        xfs_off_t               io_offset;      /* offset in the file */
        struct work_struct      io_work;        /* xfsdatad work queue */
 } xfs_ioend_t;
 
+extern struct address_space_operations linvfs_aops;
+extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
+
 #endif /* __XFS_IOPS_H__ */
index 6fe21d2b884705d05b741cec4ee3e2cee1f03857..e44b7c1a3a36d432617edfd66131c30a259359f9 100644 (file)
 #include <linux/kthread.h>
 #include "xfs_linux.h"
 
-STATIC kmem_cache_t *pagebuf_zone;
-STATIC kmem_shaker_t pagebuf_shake;
+STATIC kmem_zone_t *xfs_buf_zone;
+STATIC kmem_shaker_t xfs_buf_shake;
+STATIC int xfsbufd(void *);
 STATIC int xfsbufd_wakeup(int, gfp_t);
-STATIC void pagebuf_delwri_queue(xfs_buf_t *, int);
+STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
 
 STATIC struct workqueue_struct *xfslogd_workqueue;
 struct workqueue_struct *xfsdatad_workqueue;
 
-#ifdef PAGEBUF_TRACE
+#ifdef XFS_BUF_TRACE
 void
-pagebuf_trace(
-       xfs_buf_t       *pb,
+xfs_buf_trace(
+       xfs_buf_t       *bp,
        char            *id,
        void            *data,
        void            *ra)
 {
-       ktrace_enter(pagebuf_trace_buf,
-               pb, id,
-               (void *)(unsigned long)pb->pb_flags,
-               (void *)(unsigned long)pb->pb_hold.counter,
-               (void *)(unsigned long)pb->pb_sema.count.counter,
+       ktrace_enter(xfs_buf_trace_buf,
+               bp, id,
+               (void *)(unsigned long)bp->b_flags,
+               (void *)(unsigned long)bp->b_hold.counter,
+               (void *)(unsigned long)bp->b_sema.count.counter,
                (void *)current,
                data, ra,
-               (void *)(unsigned long)((pb->pb_file_offset>>32) & 0xffffffff),
-               (void *)(unsigned long)(pb->pb_file_offset & 0xffffffff),
-               (void *)(unsigned long)pb->pb_buffer_length,
+               (void *)(unsigned long)((bp->b_file_offset>>32) & 0xffffffff),
+               (void *)(unsigned long)(bp->b_file_offset & 0xffffffff),
+               (void *)(unsigned long)bp->b_buffer_length,
                NULL, NULL, NULL, NULL, NULL);
 }
-ktrace_t *pagebuf_trace_buf;
-#define PAGEBUF_TRACE_SIZE     4096
-#define PB_TRACE(pb, id, data) \
-       pagebuf_trace(pb, id, (void *)data, (void *)__builtin_return_address(0))
+ktrace_t *xfs_buf_trace_buf;
+#define XFS_BUF_TRACE_SIZE     4096
+#define XB_TRACE(bp, id, data) \
+       xfs_buf_trace(bp, id, (void *)data, (void *)__builtin_return_address(0))
 #else
-#define PB_TRACE(pb, id, data) do { } while (0)
+#define XB_TRACE(bp, id, data) do { } while (0)
 #endif
 
-#ifdef PAGEBUF_LOCK_TRACKING
-# define PB_SET_OWNER(pb)      ((pb)->pb_last_holder = current->pid)
-# define PB_CLEAR_OWNER(pb)    ((pb)->pb_last_holder = -1)
-# define PB_GET_OWNER(pb)      ((pb)->pb_last_holder)
+#ifdef XFS_BUF_LOCK_TRACKING
+# define XB_SET_OWNER(bp)      ((bp)->b_last_holder = current->pid)
+# define XB_CLEAR_OWNER(bp)    ((bp)->b_last_holder = -1)
+# define XB_GET_OWNER(bp)      ((bp)->b_last_holder)
 #else
-# define PB_SET_OWNER(pb)      do { } while (0)
-# define PB_CLEAR_OWNER(pb)    do { } while (0)
-# define PB_GET_OWNER(pb)      do { } while (0)
+# define XB_SET_OWNER(bp)      do { } while (0)
+# define XB_CLEAR_OWNER(bp)    do { } while (0)
+# define XB_GET_OWNER(bp)      do { } while (0)
 #endif
 
-#define pb_to_gfp(flags) \
-       ((((flags) & PBF_READ_AHEAD) ? __GFP_NORETRY : \
-         ((flags) & PBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
+#define xb_to_gfp(flags) \
+       ((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
+         ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
 
-#define pb_to_km(flags) \
-        (((flags) & PBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
+#define xb_to_km(flags) \
+        (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
 
-#define pagebuf_allocate(flags) \
-       kmem_zone_alloc(pagebuf_zone, pb_to_km(flags))
-#define pagebuf_deallocate(pb) \
-       kmem_zone_free(pagebuf_zone, (pb));
+#define xfs_buf_allocate(flags) \
+       kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
+#define xfs_buf_deallocate(bp) \
+       kmem_zone_free(xfs_buf_zone, (bp));
 
 /*
- * Page Region interfaces.
+ *     Page Region interfaces.
  *
- * For pages in filesystems where the blocksize is smaller than the
- * pagesize, we use the page->private field (long) to hold a bitmap
- * of uptodate regions within the page.
+ *     For pages in filesystems where the blocksize is smaller than the
+ *     pagesize, we use the page->private field (long) to hold a bitmap
+ *     of uptodate regions within the page.
  *
- * Each such region is "bytes per page / bits per long" bytes long.
+ *     Each such region is "bytes per page / bits per long" bytes long.
  *
- * NBPPR == number-of-bytes-per-page-region
- * BTOPR == bytes-to-page-region (rounded up)
- * BTOPRT == bytes-to-page-region-truncated (rounded down)
+ *     NBPPR == number-of-bytes-per-page-region
+ *     BTOPR == bytes-to-page-region (rounded up)
+ *     BTOPRT == bytes-to-page-region-truncated (rounded down)
  */
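+/*
+ *     For example (assuming 4096 byte pages and 32-bit longs), each page
+ *     region covers 4096 / 32 == 128 bytes, so a 512 byte filesystem
+ *     block is represented by four bits of page->private.
+ */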
 #if (BITS_PER_LONG == 32)
 #define PRSHIFT                (PAGE_CACHE_SHIFT - 5)  /* (32 == 1<<5) */
@@ -159,7 +160,7 @@ test_page_region(
 }
 
 /*
- * Mapping of multi-page buffers into contiguous virtual space
+ *     Mapping of multi-page buffers into contiguous virtual space
  */
 
 typedef struct a_list {
@@ -172,7 +173,7 @@ STATIC int          as_list_len;
 STATIC DEFINE_SPINLOCK(as_lock);
 
 /*
- * Try to batch vunmaps because they are costly.
+ *     Try to batch vunmaps because they are costly.
  */
 STATIC void
 free_address(
@@ -215,83 +216,83 @@ purge_addresses(void)
 }
 
 /*
- *     Internal pagebuf object manipulation
+ *     Internal xfs_buf_t object manipulation
  */
 
 STATIC void
-_pagebuf_initialize(
-       xfs_buf_t               *pb,
+_xfs_buf_initialize(
+       xfs_buf_t               *bp,
        xfs_buftarg_t           *target,
-       loff_t                  range_base,
+       xfs_off_t               range_base,
        size_t                  range_length,
-       page_buf_flags_t        flags)
+       xfs_buf_flags_t         flags)
 {
        /*
-        * We don't want certain flags to appear in pb->pb_flags.
+        * We don't want certain flags to appear in b_flags.
         */
-       flags &= ~(PBF_LOCK|PBF_MAPPED|PBF_DONT_BLOCK|PBF_READ_AHEAD);
-
-       memset(pb, 0, sizeof(xfs_buf_t));
-       atomic_set(&pb->pb_hold, 1);
-       init_MUTEX_LOCKED(&pb->pb_iodonesema);
-       INIT_LIST_HEAD(&pb->pb_list);
-       INIT_LIST_HEAD(&pb->pb_hash_list);
-       init_MUTEX_LOCKED(&pb->pb_sema); /* held, no waiters */
-       PB_SET_OWNER(pb);
-       pb->pb_target = target;
-       pb->pb_file_offset = range_base;
+       flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);
+
+       memset(bp, 0, sizeof(xfs_buf_t));
+       atomic_set(&bp->b_hold, 1);
+       init_MUTEX_LOCKED(&bp->b_iodonesema);
+       INIT_LIST_HEAD(&bp->b_list);
+       INIT_LIST_HEAD(&bp->b_hash_list);
+       init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
+       XB_SET_OWNER(bp);
+       bp->b_target = target;
+       bp->b_file_offset = range_base;
        /*
         * Set buffer_length and count_desired to the same value initially.
         * I/O routines should use count_desired, which will be the same in
         * most cases but may be reset (e.g. XFS recovery).
         */
-       pb->pb_buffer_length = pb->pb_count_desired = range_length;
-       pb->pb_flags = flags;
-       pb->pb_bn = XFS_BUF_DADDR_NULL;
-       atomic_set(&pb->pb_pin_count, 0);
-       init_waitqueue_head(&pb->pb_waiters);
-
-       XFS_STATS_INC(pb_create);
-       PB_TRACE(pb, "initialize", target);
+       bp->b_buffer_length = bp->b_count_desired = range_length;
+       bp->b_flags = flags;
+       bp->b_bn = XFS_BUF_DADDR_NULL;
+       atomic_set(&bp->b_pin_count, 0);
+       init_waitqueue_head(&bp->b_waiters);
+
+       XFS_STATS_INC(xb_create);
+       XB_TRACE(bp, "initialize", target);
 }
 
 /*
- * Allocate a page array capable of holding a specified number
- * of pages, and point the page buf at it.
+ *     Allocate a page array capable of holding a specified number
+ *     of pages, and point the page buf at it.
  */
 STATIC int
-_pagebuf_get_pages(
-       xfs_buf_t               *pb,
+_xfs_buf_get_pages(
+       xfs_buf_t               *bp,
        int                     page_count,
-       page_buf_flags_t        flags)
+       xfs_buf_flags_t         flags)
 {
        /* Make sure that we have a page list */
-       if (pb->pb_pages == NULL) {
-               pb->pb_offset = page_buf_poff(pb->pb_file_offset);
-               pb->pb_page_count = page_count;
-               if (page_count <= PB_PAGES) {
-                       pb->pb_pages = pb->pb_page_array;
+       if (bp->b_pages == NULL) {
+               bp->b_offset = xfs_buf_poff(bp->b_file_offset);
+               bp->b_page_count = page_count;
+               if (page_count <= XB_PAGES) {
+                       bp->b_pages = bp->b_page_array;
                } else {
-                       pb->pb_pages = kmem_alloc(sizeof(struct page *) *
-                                       page_count, pb_to_km(flags));
-                       if (pb->pb_pages == NULL)
+                       bp->b_pages = kmem_alloc(sizeof(struct page *) *
+                                       page_count, xb_to_km(flags));
+                       if (bp->b_pages == NULL)
                                return -ENOMEM;
                }
-               memset(pb->pb_pages, 0, sizeof(struct page *) * page_count);
+               memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
        }
        return 0;
 }
 
 /*
- *     Frees pb_pages if it was malloced.
+ *     Frees b_pages if it was allocated.
  */
 STATIC void
-_pagebuf_free_pages(
+_xfs_buf_free_pages(
        xfs_buf_t       *bp)
 {
-       if (bp->pb_pages != bp->pb_page_array) {
-               kmem_free(bp->pb_pages,
-                         bp->pb_page_count * sizeof(struct page *));
+       if (bp->b_pages != bp->b_page_array) {
+               kmem_free(bp->b_pages,
+                         bp->b_page_count * sizeof(struct page *));
        }
 }
 
@@ -299,79 +300,79 @@ _pagebuf_free_pages(
  *     Releases the specified buffer.
  *
  *     The modification state of any associated pages is left unchanged.
- *     The buffer most not be on any hash - use pagebuf_rele instead for
+ *     The buffer must not be on any hash - use xfs_buf_rele instead for
  *     hashed and refcounted buffers
  */
 void
-pagebuf_free(
+xfs_buf_free(
        xfs_buf_t               *bp)
 {
-       PB_TRACE(bp, "free", 0);
+       XB_TRACE(bp, "free", 0);
 
-       ASSERT(list_empty(&bp->pb_hash_list));
+       ASSERT(list_empty(&bp->b_hash_list));
 
-       if (bp->pb_flags & _PBF_PAGE_CACHE) {
+       if (bp->b_flags & _XBF_PAGE_CACHE) {
                uint            i;
 
-               if ((bp->pb_flags & PBF_MAPPED) && (bp->pb_page_count > 1))
-                       free_address(bp->pb_addr - bp->pb_offset);
+               if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
+                       free_address(bp->b_addr - bp->b_offset);
 
-               for (i = 0; i < bp->pb_page_count; i++)
-                       page_cache_release(bp->pb_pages[i]);
-               _pagebuf_free_pages(bp);
-       } else if (bp->pb_flags & _PBF_KMEM_ALLOC) {
+               for (i = 0; i < bp->b_page_count; i++)
+                       page_cache_release(bp->b_pages[i]);
+               _xfs_buf_free_pages(bp);
+       } else if (bp->b_flags & _XBF_KMEM_ALLOC) {
                 /*
-                 * XXX(hch): bp->pb_count_desired might be incorrect (see
-                 * pagebuf_associate_memory for details), but fortunately
+                 * XXX(hch): bp->b_count_desired might be incorrect (see
+                 * xfs_buf_associate_memory for details), but fortunately
                  * the Linux version of kmem_free ignores the len argument..
                  */
-               kmem_free(bp->pb_addr, bp->pb_count_desired);
-               _pagebuf_free_pages(bp);
+               kmem_free(bp->b_addr, bp->b_count_desired);
+               _xfs_buf_free_pages(bp);
        }
 
-       pagebuf_deallocate(bp);
+       xfs_buf_deallocate(bp);
 }
 
 /*
  *     Finds all pages for the buffer in question and builds its page list.
  */
 STATIC int
-_pagebuf_lookup_pages(
+_xfs_buf_lookup_pages(
        xfs_buf_t               *bp,
        uint                    flags)
 {
-       struct address_space    *mapping = bp->pb_target->pbr_mapping;
-       size_t                  blocksize = bp->pb_target->pbr_bsize;
-       size_t                  size = bp->pb_count_desired;
+       struct address_space    *mapping = bp->b_target->bt_mapping;
+       size_t                  blocksize = bp->b_target->bt_bsize;
+       size_t                  size = bp->b_count_desired;
        size_t                  nbytes, offset;
-       gfp_t                   gfp_mask = pb_to_gfp(flags);
+       gfp_t                   gfp_mask = xb_to_gfp(flags);
        unsigned short          page_count, i;
        pgoff_t                 first;
-       loff_t                  end;
+       xfs_off_t               end;
        int                     error;
 
-       end = bp->pb_file_offset + bp->pb_buffer_length;
-       page_count = page_buf_btoc(end) - page_buf_btoct(bp->pb_file_offset);
+       end = bp->b_file_offset + bp->b_buffer_length;
+       page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
 
-       error = _pagebuf_get_pages(bp, page_count, flags);
+       error = _xfs_buf_get_pages(bp, page_count, flags);
        if (unlikely(error))
                return error;
-       bp->pb_flags |= _PBF_PAGE_CACHE;
+       bp->b_flags |= _XBF_PAGE_CACHE;
 
-       offset = bp->pb_offset;
-       first = bp->pb_file_offset >> PAGE_CACHE_SHIFT;
+       offset = bp->b_offset;
+       first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
 
-       for (i = 0; i < bp->pb_page_count; i++) {
+       for (i = 0; i < bp->b_page_count; i++) {
                struct page     *page;
                uint            retries = 0;
 
              retry:
                page = find_or_create_page(mapping, first + i, gfp_mask);
                if (unlikely(page == NULL)) {
-                       if (flags & PBF_READ_AHEAD) {
-                               bp->pb_page_count = i;
-                               for (i = 0; i < bp->pb_page_count; i++)
-                                       unlock_page(bp->pb_pages[i]);
+                       if (flags & XBF_READ_AHEAD) {
+                               bp->b_page_count = i;
+                               for (i = 0; i < bp->b_page_count; i++)
+                                       unlock_page(bp->b_pages[i]);
                                return -ENOMEM;
                        }
 
@@ -387,13 +388,13 @@ _pagebuf_lookup_pages(
                                        "deadlock in %s (mode:0x%x)\n",
                                        __FUNCTION__, gfp_mask);
 
-                       XFS_STATS_INC(pb_page_retries);
+                       XFS_STATS_INC(xb_page_retries);
                        xfsbufd_wakeup(0, gfp_mask);
                        blk_congestion_wait(WRITE, HZ/50);
                        goto retry;
                }
 
-               XFS_STATS_INC(pb_page_found);
+               XFS_STATS_INC(xb_page_found);
 
                nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
                size -= nbytes;
@@ -401,27 +402,27 @@ _pagebuf_lookup_pages(
                if (!PageUptodate(page)) {
                        page_count--;
                        if (blocksize >= PAGE_CACHE_SIZE) {
-                               if (flags & PBF_READ)
-                                       bp->pb_locked = 1;
+                               if (flags & XBF_READ)
+                                       bp->b_locked = 1;
                        } else if (!PagePrivate(page)) {
                                if (test_page_region(page, offset, nbytes))
                                        page_count++;
                        }
                }
 
-               bp->pb_pages[i] = page;
+               bp->b_pages[i] = page;
                offset = 0;
        }
 
-       if (!bp->pb_locked) {
-               for (i = 0; i < bp->pb_page_count; i++)
-                       unlock_page(bp->pb_pages[i]);
+       if (!bp->b_locked) {
+               for (i = 0; i < bp->b_page_count; i++)
+                       unlock_page(bp->b_pages[i]);
        }
 
-       if (page_count == bp->pb_page_count)
-               bp->pb_flags |= PBF_DONE;
+       if (page_count == bp->b_page_count)
+               bp->b_flags |= XBF_DONE;
 
-       PB_TRACE(bp, "lookup_pages", (long)page_count);
+       XB_TRACE(bp, "lookup_pages", (long)page_count);
        return error;
 }
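
The retry path above gives up immediately for read-ahead, but otherwise
wakes the flusher, waits out congestion and tries again.  A minimal
userspace sketch of that pattern (made-up helper names and delays, not
kernel code) might look like:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Stand-in for find_or_create_page(): fails for the first few attempts. */
static void *alloc_page_like(int attempt)
{
        return (attempt < 2) ? NULL : malloc(4096);
}

/* Optional (read-ahead style) callers bail out; required callers retry
 * after a short back-off, warning if it starts to look like a deadlock. */
static void *get_page(int is_readahead)
{
        int retries = 0;
        void *p;

        while ((p = alloc_page_like(retries)) == NULL) {
                if (is_readahead)
                        return NULL;
                if (++retries % 100 == 0)
                        fprintf(stderr, "possible deadlock after %d retries\n",
                                retries);
                usleep(20000);          /* roughly the HZ/50 wait used above */
        }
        return p;
}

int main(void)
{
        void *p = get_page(0);

        printf("got page at %p\n", p);
        free(p);
        return 0;
}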
 
@@ -429,23 +430,23 @@ _pagebuf_lookup_pages(
  *     Map buffer into kernel address-space if necessary.
  */
 STATIC int
-_pagebuf_map_pages(
+_xfs_buf_map_pages(
        xfs_buf_t               *bp,
        uint                    flags)
 {
        /* A single page buffer is always mappable */
-       if (bp->pb_page_count == 1) {
-               bp->pb_addr = page_address(bp->pb_pages[0]) + bp->pb_offset;
-               bp->pb_flags |= PBF_MAPPED;
-       } else if (flags & PBF_MAPPED) {
+       if (bp->b_page_count == 1) {
+               bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
+               bp->b_flags |= XBF_MAPPED;
+       } else if (flags & XBF_MAPPED) {
                if (as_list_len > 64)
                        purge_addresses();
-               bp->pb_addr = vmap(bp->pb_pages, bp->pb_page_count,
-                               VM_MAP, PAGE_KERNEL);
-               if (unlikely(bp->pb_addr == NULL))
+               bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
+                                       VM_MAP, PAGE_KERNEL);
+               if (unlikely(bp->b_addr == NULL))
                        return -ENOMEM;
-               bp->pb_addr += bp->pb_offset;
-               bp->pb_flags |= PBF_MAPPED;
+               bp->b_addr += bp->b_offset;
+               bp->b_flags |= XBF_MAPPED;
        }
 
        return 0;
@@ -456,9 +457,7 @@ _pagebuf_map_pages(
  */
 
 /*
- *     _pagebuf_find
- *
- *     Looks up, and creates if absent, a lockable buffer for
+ *     Looks up, and creates if absent, a lockable buffer for
  *     a given range of an inode.  The buffer is returned
  *     locked.  If other overlapping buffers exist, they are
  *     released before the new buffer is created and locked,
@@ -466,55 +465,55 @@ _pagebuf_map_pages(
  *     are unlocked.  No I/O is implied by this call.
  */
 xfs_buf_t *
-_pagebuf_find(
+_xfs_buf_find(
        xfs_buftarg_t           *btp,   /* block device target          */
-       loff_t                  ioff,   /* starting offset of range     */
+       xfs_off_t               ioff,   /* starting offset of range     */
        size_t                  isize,  /* length of range              */
-       page_buf_flags_t        flags,  /* PBF_TRYLOCK                  */
-       xfs_buf_t               *new_pb)/* newly allocated buffer       */
+       xfs_buf_flags_t         flags,
+       xfs_buf_t               *new_bp)
 {
-       loff_t                  range_base;
+       xfs_off_t               range_base;
        size_t                  range_length;
        xfs_bufhash_t           *hash;
-       xfs_buf_t               *pb, *n;
+       xfs_buf_t               *bp, *n;
 
        range_base = (ioff << BBSHIFT);
        range_length = (isize << BBSHIFT);
 
        /* Check for IOs smaller than the sector size / not sector aligned */
-       ASSERT(!(range_length < (1 << btp->pbr_sshift)));
-       ASSERT(!(range_base & (loff_t)btp->pbr_smask));
+       ASSERT(!(range_length < (1 << btp->bt_sshift)));
+       ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
 
        hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];
 
        spin_lock(&hash->bh_lock);
 
-       list_for_each_entry_safe(pb, n, &hash->bh_list, pb_hash_list) {
-               ASSERT(btp == pb->pb_target);
-               if (pb->pb_file_offset == range_base &&
-                   pb->pb_buffer_length == range_length) {
+       list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+               ASSERT(btp == bp->b_target);
+               if (bp->b_file_offset == range_base &&
+                   bp->b_buffer_length == range_length) {
                        /*
-                        * If we look at something bring it to the
+                        * If we look at something, bring it to the
                         * front of the list for next time.
                         */
-                       atomic_inc(&pb->pb_hold);
-                       list_move(&pb->pb_hash_list, &hash->bh_list);
+                       atomic_inc(&bp->b_hold);
+                       list_move(&bp->b_hash_list, &hash->bh_list);
                        goto found;
                }
        }
 
        /* No match found */
-       if (new_pb) {
-               _pagebuf_initialize(new_pb, btp, range_base,
+       if (new_bp) {
+               _xfs_buf_initialize(new_bp, btp, range_base,
                                range_length, flags);
-               new_pb->pb_hash = hash;
-               list_add(&new_pb->pb_hash_list, &hash->bh_list);
+               new_bp->b_hash = hash;
+               list_add(&new_bp->b_hash_list, &hash->bh_list);
        } else {
-               XFS_STATS_INC(pb_miss_locked);
+               XFS_STATS_INC(xb_miss_locked);
        }
 
        spin_unlock(&hash->bh_lock);
-       return new_pb;
+       return new_bp;
 
 found:
        spin_unlock(&hash->bh_lock);
@@ -523,74 +522,72 @@ found:
         * if this does not work then we need to drop the
         * spinlock and do a hard attempt on the semaphore.
         */
-       if (down_trylock(&pb->pb_sema)) {
-               if (!(flags & PBF_TRYLOCK)) {
+       if (down_trylock(&bp->b_sema)) {
+               if (!(flags & XBF_TRYLOCK)) {
                        /* wait for buffer ownership */
-                       PB_TRACE(pb, "get_lock", 0);
-                       pagebuf_lock(pb);
-                       XFS_STATS_INC(pb_get_locked_waited);
+                       XB_TRACE(bp, "get_lock", 0);
+                       xfs_buf_lock(bp);
+                       XFS_STATS_INC(xb_get_locked_waited);
                } else {
                        /* We asked for a trylock and failed, no need
                         * to look at file offset and length here, we
-                        * know that this pagebuf at least overlaps our
-                        * pagebuf and is locked, therefore our buffer
-                        * either does not exist, or is this buffer
+                        * know that this buffer at least overlaps our
+                        * buffer and is locked, therefore our buffer
+                        * either does not exist, or is this buffer.
                         */
-
-                       pagebuf_rele(pb);
-                       XFS_STATS_INC(pb_busy_locked);
-                       return (NULL);
+                       xfs_buf_rele(bp);
+                       XFS_STATS_INC(xb_busy_locked);
+                       return NULL;
                }
        } else {
                /* trylock worked */
-               PB_SET_OWNER(pb);
+               XB_SET_OWNER(bp);
        }
 
-       if (pb->pb_flags & PBF_STALE) {
-               ASSERT((pb->pb_flags & _PBF_DELWRI_Q) == 0);
-               pb->pb_flags &= PBF_MAPPED;
+       if (bp->b_flags & XBF_STALE) {
+               ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
+               bp->b_flags &= XBF_MAPPED;
        }
-       PB_TRACE(pb, "got_lock", 0);
-       XFS_STATS_INC(pb_get_locked);
-       return (pb);
+       XB_TRACE(bp, "got_lock", 0);
+       XFS_STATS_INC(xb_get_locked);
+       return bp;
 }
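
The tail of _xfs_buf_find chooses between blocking on the buffer lock and
failing fast when XBF_TRYLOCK was requested.  A rough userspace analogue
of that decision, with a pthread mutex and a deliberately simplified hold
count, is:

#include <pthread.h>
#include <stddef.h>

#define TRYLOCK 0x1

struct buf {
        pthread_mutex_t lock;
        int             hold;           /* simplified reference count */
};

/* Return the buffer locked, or NULL when it is busy and the caller asked
 * for a trylock only (after giving back the hold taken by the lookup). */
static struct buf *lock_found_buf(struct buf *bp, unsigned flags)
{
        if (pthread_mutex_trylock(&bp->lock) == 0)
                return bp;                      /* trylock worked */

        if (!(flags & TRYLOCK)) {
                pthread_mutex_lock(&bp->lock);  /* wait for buffer ownership */
                return bp;
        }

        bp->hold--;                             /* busy: give up our hold */
        return NULL;
}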
 
 /*
- *     xfs_buf_get_flags assembles a buffer covering the specified range.
- *
+ *     Assembles a buffer covering the specified range.
  *     Storage in memory for all portions of the buffer will be allocated,
  *     although backing storage may not be.
  */
 xfs_buf_t *
-xfs_buf_get_flags(                     /* allocate a buffer            */
+xfs_buf_get_flags(
        xfs_buftarg_t           *target,/* target for buffer            */
-       loff_t                  ioff,   /* starting offset of range     */
+       xfs_off_t               ioff,   /* starting offset of range     */
        size_t                  isize,  /* length of range              */
-       page_buf_flags_t        flags)  /* PBF_TRYLOCK                  */
+       xfs_buf_flags_t         flags)
 {
-       xfs_buf_t               *pb, *new_pb;
+       xfs_buf_t               *bp, *new_bp;
        int                     error = 0, i;
 
-       new_pb = pagebuf_allocate(flags);
-       if (unlikely(!new_pb))
+       new_bp = xfs_buf_allocate(flags);
+       if (unlikely(!new_bp))
                return NULL;
 
-       pb = _pagebuf_find(target, ioff, isize, flags, new_pb);
-       if (pb == new_pb) {
-               error = _pagebuf_lookup_pages(pb, flags);
+       bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
+       if (bp == new_bp) {
+               error = _xfs_buf_lookup_pages(bp, flags);
                if (error)
                        goto no_buffer;
        } else {
-               pagebuf_deallocate(new_pb);
-               if (unlikely(pb == NULL))
+               xfs_buf_deallocate(new_bp);
+               if (unlikely(bp == NULL))
                        return NULL;
        }
 
-       for (i = 0; i < pb->pb_page_count; i++)
-               mark_page_accessed(pb->pb_pages[i]);
+       for (i = 0; i < bp->b_page_count; i++)
+               mark_page_accessed(bp->b_pages[i]);
 
-       if (!(pb->pb_flags & PBF_MAPPED)) {
-               error = _pagebuf_map_pages(pb, flags);
+       if (!(bp->b_flags & XBF_MAPPED)) {
+               error = _xfs_buf_map_pages(bp, flags);
                if (unlikely(error)) {
                        printk(KERN_WARNING "%s: failed to map pages\n",
                                        __FUNCTION__);
@@ -598,97 +595,97 @@ xfs_buf_get_flags(                        /* allocate a buffer            */
                }
        }
 
-       XFS_STATS_INC(pb_get);
+       XFS_STATS_INC(xb_get);
 
        /*
         * Always fill in the block number now, the mapped cases can do
         * their own overlay of this later.
         */
-       pb->pb_bn = ioff;
-       pb->pb_count_desired = pb->pb_buffer_length;
+       bp->b_bn = ioff;
+       bp->b_count_desired = bp->b_buffer_length;
 
-       PB_TRACE(pb, "get", (unsigned long)flags);
-       return pb;
+       XB_TRACE(bp, "get", (unsigned long)flags);
+       return bp;
 
  no_buffer:
-       if (flags & (PBF_LOCK | PBF_TRYLOCK))
-               pagebuf_unlock(pb);
-       pagebuf_rele(pb);
+       if (flags & (XBF_LOCK | XBF_TRYLOCK))
+               xfs_buf_unlock(bp);
+       xfs_buf_rele(bp);
        return NULL;
 }
 
 xfs_buf_t *
 xfs_buf_read_flags(
        xfs_buftarg_t           *target,
-       loff_t                  ioff,
+       xfs_off_t               ioff,
        size_t                  isize,
-       page_buf_flags_t        flags)
+       xfs_buf_flags_t         flags)
 {
-       xfs_buf_t               *pb;
-
-       flags |= PBF_READ;
-
-       pb = xfs_buf_get_flags(target, ioff, isize, flags);
-       if (pb) {
-               if (!XFS_BUF_ISDONE(pb)) {
-                       PB_TRACE(pb, "read", (unsigned long)flags);
-                       XFS_STATS_INC(pb_get_read);
-                       pagebuf_iostart(pb, flags);
-               } else if (flags & PBF_ASYNC) {
-                       PB_TRACE(pb, "read_async", (unsigned long)flags);
+       xfs_buf_t               *bp;
+
+       flags |= XBF_READ;
+
+       bp = xfs_buf_get_flags(target, ioff, isize, flags);
+       if (bp) {
+               if (!XFS_BUF_ISDONE(bp)) {
+                       XB_TRACE(bp, "read", (unsigned long)flags);
+                       XFS_STATS_INC(xb_get_read);
+                       xfs_buf_iostart(bp, flags);
+               } else if (flags & XBF_ASYNC) {
+                       XB_TRACE(bp, "read_async", (unsigned long)flags);
                        /*
                         * Read ahead call which is already satisfied,
                         * drop the buffer
                         */
                        goto no_buffer;
                } else {
-                       PB_TRACE(pb, "read_done", (unsigned long)flags);
+                       XB_TRACE(bp, "read_done", (unsigned long)flags);
                        /* We do not want read in the flags */
-                       pb->pb_flags &= ~PBF_READ;
+                       bp->b_flags &= ~XBF_READ;
                }
        }
 
-       return pb;
+       return bp;
 
  no_buffer:
-       if (flags & (PBF_LOCK | PBF_TRYLOCK))
-               pagebuf_unlock(pb);
-       pagebuf_rele(pb);
+       if (flags & (XBF_LOCK | XBF_TRYLOCK))
+               xfs_buf_unlock(bp);
+       xfs_buf_rele(bp);
        return NULL;
 }
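
Not part of this patch, but as an illustration of the renamed interfaces
above: a hypothetical caller doing a plain synchronous read might look
roughly like the fragment below (the target and block arguments are
placeholders, the error handling is simplified, and it is not meant to
compile on its own).

xfs_buf_t *
example_read_block(
        xfs_buftarg_t           *target,        /* an existing buftarg */
        xfs_off_t               blkno,
        size_t                  numblks)
{
        xfs_buf_t               *bp;

        bp = xfs_buf_read_flags(target, blkno, numblks, XBF_LOCK);
        if (!bp)
                return NULL;
        if (bp->b_error) {
                xfs_buf_relse(bp);              /* unlock and drop our hold */
                return NULL;
        }
        return bp;                              /* caller must xfs_buf_relse() */
}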
 
 /*
- * If we are not low on memory then do the readahead in a deadlock
- * safe manner.
+ *     If we are not low on memory then do the readahead in a deadlock
+ *     safe manner.
  */
 void
-pagebuf_readahead(
+xfs_buf_readahead(
        xfs_buftarg_t           *target,
-       loff_t                  ioff,
+       xfs_off_t               ioff,
        size_t                  isize,
-       page_buf_flags_t        flags)
+       xfs_buf_flags_t         flags)
 {
        struct backing_dev_info *bdi;
 
-       bdi = target->pbr_mapping->backing_dev_info;
+       bdi = target->bt_mapping->backing_dev_info;
        if (bdi_read_congested(bdi))
                return;
 
-       flags |= (PBF_TRYLOCK|PBF_ASYNC|PBF_READ_AHEAD);
+       flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
        xfs_buf_read_flags(target, ioff, isize, flags);
 }
 
 xfs_buf_t *
-pagebuf_get_empty(
+xfs_buf_get_empty(
        size_t                  len,
        xfs_buftarg_t           *target)
 {
-       xfs_buf_t               *pb;
+       xfs_buf_t               *bp;
 
-       pb = pagebuf_allocate(0);
-       if (pb)
-               _pagebuf_initialize(pb, target, 0, len, 0);
-       return pb;
+       bp = xfs_buf_allocate(0);
+       if (bp)
+               _xfs_buf_initialize(bp, target, 0, len, 0);
+       return bp;
 }
 
 static inline struct page *
@@ -704,8 +701,8 @@ mem_to_page(
 }
 
 int
-pagebuf_associate_memory(
-       xfs_buf_t               *pb,
+xfs_buf_associate_memory(
+       xfs_buf_t               *bp,
        void                    *mem,
        size_t                  len)
 {
@@ -722,40 +719,40 @@ pagebuf_associate_memory(
                page_count++;
 
        /* Free any previous set of page pointers */
-       if (pb->pb_pages)
-               _pagebuf_free_pages(pb);
+       if (bp->b_pages)
+               _xfs_buf_free_pages(bp);
 
-       pb->pb_pages = NULL;
-       pb->pb_addr = mem;
+       bp->b_pages = NULL;
+       bp->b_addr = mem;
 
-       rval = _pagebuf_get_pages(pb, page_count, 0);
+       rval = _xfs_buf_get_pages(bp, page_count, 0);
        if (rval)
                return rval;
 
-       pb->pb_offset = offset;
+       bp->b_offset = offset;
        ptr = (size_t) mem & PAGE_CACHE_MASK;
        end = PAGE_CACHE_ALIGN((size_t) mem + len);
        end_cur = end;
        /* set up first page */
-       pb->pb_pages[0] = mem_to_page(mem);
+       bp->b_pages[0] = mem_to_page(mem);
 
        ptr += PAGE_CACHE_SIZE;
-       pb->pb_page_count = ++i;
+       bp->b_page_count = ++i;
        while (ptr < end) {
-               pb->pb_pages[i] = mem_to_page((void *)ptr);
-               pb->pb_page_count = ++i;
+               bp->b_pages[i] = mem_to_page((void *)ptr);
+               bp->b_page_count = ++i;
                ptr += PAGE_CACHE_SIZE;
        }
-       pb->pb_locked = 0;
+       bp->b_locked = 0;
 
-       pb->pb_count_desired = pb->pb_buffer_length = len;
-       pb->pb_flags |= PBF_MAPPED;
+       bp->b_count_desired = bp->b_buffer_length = len;
+       bp->b_flags |= XBF_MAPPED;
 
        return 0;
 }
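
The loop above records one page pointer for every page touched by an
arbitrary memory range.  The page-count arithmetic behind it can be
sketched in self-contained userspace C (4096-byte pages assumed):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u

/* Number of pages covered by the byte range [mem, mem + len). */
static unsigned pages_covered(uintptr_t mem, size_t len)
{
        uintptr_t start = mem & ~(uintptr_t)(PAGE_SZ - 1);
        uintptr_t end   = (mem + len + PAGE_SZ - 1) & ~(uintptr_t)(PAGE_SZ - 1);

        return (end - start) / PAGE_SZ;
}

int main(void)
{
        /* A 100-byte range straddling a page boundary covers two pages. */
        printf("%u\n", pages_covered(4090, 100));
        return 0;
}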
 
 xfs_buf_t *
-pagebuf_get_no_daddr(
+xfs_buf_get_noaddr(
        size_t                  len,
        xfs_buftarg_t           *target)
 {
@@ -764,10 +761,10 @@ pagebuf_get_no_daddr(
        void                    *data;
        int                     error;
 
-       bp = pagebuf_allocate(0);
+       bp = xfs_buf_allocate(0);
        if (unlikely(bp == NULL))
                goto fail;
-       _pagebuf_initialize(bp, target, 0, len, 0);
+       _xfs_buf_initialize(bp, target, 0, len, 0);
 
  try_again:
        data = kmem_alloc(malloc_len, KM_SLEEP | KM_MAYFAIL);
@@ -776,78 +773,73 @@ pagebuf_get_no_daddr(
 
        /* check whether alignment matches.. */
        if ((__psunsigned_t)data !=
-           ((__psunsigned_t)data & ~target->pbr_smask)) {
+           ((__psunsigned_t)data & ~target->bt_smask)) {
                /* .. else double the size and try again */
                kmem_free(data, malloc_len);
                malloc_len <<= 1;
                goto try_again;
        }
 
-       error = pagebuf_associate_memory(bp, data, len);
+       error = xfs_buf_associate_memory(bp, data, len);
        if (error)
                goto fail_free_mem;
-       bp->pb_flags |= _PBF_KMEM_ALLOC;
+       bp->b_flags |= _XBF_KMEM_ALLOC;
 
-       pagebuf_unlock(bp);
+       xfs_buf_unlock(bp);
 
-       PB_TRACE(bp, "no_daddr", data);
+       XB_TRACE(bp, "no_daddr", data);
        return bp;
  fail_free_mem:
        kmem_free(data, malloc_len);
  fail_free_buf:
-       pagebuf_free(bp);
+       xfs_buf_free(bp);
  fail:
        return NULL;
 }
 
 /*
- *     pagebuf_hold
- *
  *     Increment reference count on buffer, to hold the buffer concurrently
  *     with another thread which may release (free) the buffer asynchronously.
- *
  *     Must hold the buffer already to call this function.
  */
 void
-pagebuf_hold(
-       xfs_buf_t               *pb)
+xfs_buf_hold(
+       xfs_buf_t               *bp)
 {
-       atomic_inc(&pb->pb_hold);
-       PB_TRACE(pb, "hold", 0);
+       atomic_inc(&bp->b_hold);
+       XB_TRACE(bp, "hold", 0);
 }
 
 /*
- *     pagebuf_rele
- *
- *     pagebuf_rele releases a hold on the specified buffer.  If the
- *     the hold count is 1, pagebuf_rele calls pagebuf_free.
+ *     Releases a hold on the specified buffer.  If the
+ *     hold count is 1, calls xfs_buf_free.
  */
 void
-pagebuf_rele(
-       xfs_buf_t               *pb)
+xfs_buf_rele(
+       xfs_buf_t               *bp)
 {
-       xfs_bufhash_t           *hash = pb->pb_hash;
+       xfs_bufhash_t           *hash = bp->b_hash;
 
-       PB_TRACE(pb, "rele", pb->pb_relse);
+       XB_TRACE(bp, "rele", bp->b_relse);
 
-       if (atomic_dec_and_lock(&pb->pb_hold, &hash->bh_lock)) {
-               if (pb->pb_relse) {
-                       atomic_inc(&pb->pb_hold);
+       if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
+               if (bp->b_relse) {
+                       atomic_inc(&bp->b_hold);
                        spin_unlock(&hash->bh_lock);
-                       (*(pb->pb_relse)) (pb);
-               } else if (pb->pb_flags & PBF_FS_MANAGED) {
+                       (*(bp->b_relse)) (bp);
+               } else if (bp->b_flags & XBF_FS_MANAGED) {
                        spin_unlock(&hash->bh_lock);
                } else {
-                       ASSERT(!(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)));
-                       list_del_init(&pb->pb_hash_list);
+                       ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
+                       list_del_init(&bp->b_hash_list);
                        spin_unlock(&hash->bh_lock);
-                       pagebuf_free(pb);
+                       xfs_buf_free(bp);
                }
        } else {
                /*
                 * Catch reference count leaks
                 */
-               ASSERT(atomic_read(&pb->pb_hold) >= 0);
+               ASSERT(atomic_read(&bp->b_hold) >= 0);
        }
 }
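
xfs_buf_rele depends on atomic_dec_and_lock(): the final reference drop
happens with the hash lock held, so it cannot race against a lookup that
takes a new hold under the same lock.  A simplified userspace sketch of
that primitive (C11 atomics plus a pthread mutex, not the kernel
implementation) is:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

/* Decrement the count; if it reaches zero, return true with the lock
 * held so the caller can unhash and free the object safely. */
static bool dec_and_lock(atomic_int *cnt, pthread_mutex_t *lk)
{
        int old = atomic_load(cnt);

        /* Fast path: clearly not the last reference. */
        while (old > 1) {
                if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                        return false;
        }

        /* Slow path: do the final decrement under the lock. */
        pthread_mutex_lock(lk);
        if (atomic_fetch_sub(cnt, 1) == 1)
                return true;
        pthread_mutex_unlock(lk);
        return false;
}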
 
@@ -863,168 +855,122 @@ pagebuf_rele(
  */
 
 /*
- *     pagebuf_cond_lock
- *
- *     pagebuf_cond_lock locks a buffer object, if it is not already locked.
- *     Note that this in no way
- *     locks the underlying pages, so it is only useful for synchronizing
- *     concurrent use of page buffer objects, not for synchronizing independent
- *     access to the underlying pages.
+ *     Locks a buffer object, if it is not already locked.
+ *     Note that this in no way locks the underlying pages, so it is only
+ *     useful for synchronizing concurrent use of buffer objects, not for
+ *     synchronizing independent access to the underlying pages.
  */
 int
-pagebuf_cond_lock(                     /* lock buffer, if not locked   */
-                                       /* returns -EBUSY if locked)    */
-       xfs_buf_t               *pb)
+xfs_buf_cond_lock(
+       xfs_buf_t               *bp)
 {
        int                     locked;
 
-       locked = down_trylock(&pb->pb_sema) == 0;
+       locked = down_trylock(&bp->b_sema) == 0;
        if (locked) {
-               PB_SET_OWNER(pb);
+               XB_SET_OWNER(bp);
        }
-       PB_TRACE(pb, "cond_lock", (long)locked);
-       return(locked ? 0 : -EBUSY);
+       XB_TRACE(bp, "cond_lock", (long)locked);
+       return locked ? 0 : -EBUSY;
 }
 
 #if defined(DEBUG) || defined(XFS_BLI_TRACE)
-/*
- *     pagebuf_lock_value
- *
- *     Return lock value for a pagebuf
- */
 int
-pagebuf_lock_value(
-       xfs_buf_t               *pb)
+xfs_buf_lock_value(
+       xfs_buf_t               *bp)
 {
-       return(atomic_read(&pb->pb_sema.count));
+       return atomic_read(&bp->b_sema.count);
 }
 #endif
 
 /*
- *     pagebuf_lock
- *
- *     pagebuf_lock locks a buffer object.  Note that this in no way
- *     locks the underlying pages, so it is only useful for synchronizing
- *     concurrent use of page buffer objects, not for synchronizing independent
- *     access to the underlying pages.
+ *     Locks a buffer object.
+ *     Note that this in no way locks the underlying pages, so it is only
+ *     useful for synchronizing concurrent use of buffer objects, not for
+ *     synchronizing independent access to the underlying pages.
  */
-int
-pagebuf_lock(
-       xfs_buf_t               *pb)
+void
+xfs_buf_lock(
+       xfs_buf_t               *bp)
 {
-       PB_TRACE(pb, "lock", 0);
-       if (atomic_read(&pb->pb_io_remaining))
-               blk_run_address_space(pb->pb_target->pbr_mapping);
-       down(&pb->pb_sema);
-       PB_SET_OWNER(pb);
-       PB_TRACE(pb, "locked", 0);
-       return 0;
+       XB_TRACE(bp, "lock", 0);
+       if (atomic_read(&bp->b_io_remaining))
+               blk_run_address_space(bp->b_target->bt_mapping);
+       down(&bp->b_sema);
+       XB_SET_OWNER(bp);
+       XB_TRACE(bp, "locked", 0);
 }
 
 /*
- *     pagebuf_unlock
- *
- *     pagebuf_unlock releases the lock on the buffer object created by
- *     pagebuf_lock or pagebuf_cond_lock (not any pinning of underlying pages
- *     created by pagebuf_pin).
- *
+ *     Releases the lock on the buffer object.
  *     If the buffer is marked delwri but is not queued, do so before we
- *     unlock the buffer as we need to set flags correctly. We also need to
+ *     unlock the buffer as we need to set flags correctly.  We also need to
  *     take a reference for the delwri queue because the unlocker is going to
  *     drop theirs and they don't know we just queued it.
  */
 void
-pagebuf_unlock(                                /* unlock buffer                */
-       xfs_buf_t               *pb)    /* buffer to unlock             */
+xfs_buf_unlock(
+       xfs_buf_t               *bp)
 {
-       if ((pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q)) == PBF_DELWRI) {
-               atomic_inc(&pb->pb_hold);
-               pb->pb_flags |= PBF_ASYNC;
-               pagebuf_delwri_queue(pb, 0);
+       if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
+               atomic_inc(&bp->b_hold);
+               bp->b_flags |= XBF_ASYNC;
+               xfs_buf_delwri_queue(bp, 0);
        }
 
-       PB_CLEAR_OWNER(pb);
-       up(&pb->pb_sema);
-       PB_TRACE(pb, "unlock", 0);
+       XB_CLEAR_OWNER(bp);
+       up(&bp->b_sema);
+       XB_TRACE(bp, "unlock", 0);
 }
 
 
 /*
  *     Pinning Buffer Storage in Memory
- */
-
-/*
- *     pagebuf_pin
- *
- *     pagebuf_pin locks all of the memory represented by a buffer in
- *     memory.  Multiple calls to pagebuf_pin and pagebuf_unpin, for
- *     the same or different buffers affecting a given page, will
- *     properly count the number of outstanding "pin" requests.  The
- *     buffer may be released after the pagebuf_pin and a different
- *     buffer used when calling pagebuf_unpin, if desired.
- *     pagebuf_pin should be used by the file system when it wants be
- *     assured that no attempt will be made to force the affected
- *     memory to disk.  It does not assure that a given logical page
- *     will not be moved to a different physical page.
+ *     Ensure that no attempt to force a buffer to disk will succeed.
  */
 void
-pagebuf_pin(
-       xfs_buf_t               *pb)
+xfs_buf_pin(
+       xfs_buf_t               *bp)
 {
-       atomic_inc(&pb->pb_pin_count);
-       PB_TRACE(pb, "pin", (long)pb->pb_pin_count.counter);
+       atomic_inc(&bp->b_pin_count);
+       XB_TRACE(bp, "pin", (long)bp->b_pin_count.counter);
 }
 
-/*
- *     pagebuf_unpin
- *
- *     pagebuf_unpin reverses the locking of memory performed by
- *     pagebuf_pin.  Note that both functions affected the logical
- *     pages associated with the buffer, not the buffer itself.
- */
 void
-pagebuf_unpin(
-       xfs_buf_t               *pb)
+xfs_buf_unpin(
+       xfs_buf_t               *bp)
 {
-       if (atomic_dec_and_test(&pb->pb_pin_count)) {
-               wake_up_all(&pb->pb_waiters);
-       }
-       PB_TRACE(pb, "unpin", (long)pb->pb_pin_count.counter);
+       if (atomic_dec_and_test(&bp->b_pin_count))
+               wake_up_all(&bp->b_waiters);
+       XB_TRACE(bp, "unpin", (long)bp->b_pin_count.counter);
 }
 
 int
-pagebuf_ispin(
-       xfs_buf_t               *pb)
+xfs_buf_ispin(
+       xfs_buf_t               *bp)
 {
-       return atomic_read(&pb->pb_pin_count);
+       return atomic_read(&bp->b_pin_count);
 }
 
-/*
- *     pagebuf_wait_unpin
- *
- *     pagebuf_wait_unpin waits until all of the memory associated
- *     with the buffer is not longer locked in memory.  It returns
- *     immediately if none of the affected pages are locked.
- */
-static inline void
-_pagebuf_wait_unpin(
-       xfs_buf_t               *pb)
+STATIC void
+xfs_buf_wait_unpin(
+       xfs_buf_t               *bp)
 {
        DECLARE_WAITQUEUE       (wait, current);
 
-       if (atomic_read(&pb->pb_pin_count) == 0)
+       if (atomic_read(&bp->b_pin_count) == 0)
                return;
 
-       add_wait_queue(&pb->pb_waiters, &wait);
+       add_wait_queue(&bp->b_waiters, &wait);
        for (;;) {
                set_current_state(TASK_UNINTERRUPTIBLE);
-               if (atomic_read(&pb->pb_pin_count) == 0)
+               if (atomic_read(&bp->b_pin_count) == 0)
                        break;
-               if (atomic_read(&pb->pb_io_remaining))
-                       blk_run_address_space(pb->pb_target->pbr_mapping);
+               if (atomic_read(&bp->b_io_remaining))
+                       blk_run_address_space(bp->b_target->bt_mapping);
                schedule();
        }
-       remove_wait_queue(&pb->pb_waiters, &wait);
+       remove_wait_queue(&bp->b_waiters, &wait);
        set_current_state(TASK_RUNNING);
 }
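
xfs_buf_wait_unpin sleeps on the buffer's wait queue until the pin count
reaches zero, kicking the block device queues while it waits.  The same
idea expressed with a userspace condition variable, as a rough sketch, is:

#include <pthread.h>

struct pin_wait {
        pthread_mutex_t lock;
        pthread_cond_t  unpinned;
        int             pin_count;
};

/* Block until nothing holds the object pinned any more. */
static void wait_unpin(struct pin_wait *p)
{
        pthread_mutex_lock(&p->lock);
        while (p->pin_count != 0)
                pthread_cond_wait(&p->unpinned, &p->lock);
        pthread_mutex_unlock(&p->lock);
}

/* Drop one pin and wake all waiters when the last pin goes away. */
static void unpin(struct pin_wait *p)
{
        pthread_mutex_lock(&p->lock);
        if (--p->pin_count == 0)
                pthread_cond_broadcast(&p->unpinned);
        pthread_mutex_unlock(&p->lock);
}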
 
@@ -1032,241 +978,216 @@ _pagebuf_wait_unpin(
  *     Buffer Utility Routines
  */
 
-/*
- *     pagebuf_iodone
- *
- *     pagebuf_iodone marks a buffer for which I/O is in progress
- *     done with respect to that I/O.  The pb_iodone routine, if
- *     present, will be called as a side-effect.
- */
 STATIC void
-pagebuf_iodone_work(
+xfs_buf_iodone_work(
        void                    *v)
 {
        xfs_buf_t               *bp = (xfs_buf_t *)v;
 
-       if (bp->pb_iodone)
-               (*(bp->pb_iodone))(bp);
-       else if (bp->pb_flags & PBF_ASYNC)
+       if (bp->b_iodone)
+               (*(bp->b_iodone))(bp);
+       else if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
 }
 
 void
-pagebuf_iodone(
-       xfs_buf_t               *pb,
+xfs_buf_ioend(
+       xfs_buf_t               *bp,
        int                     schedule)
 {
-       pb->pb_flags &= ~(PBF_READ | PBF_WRITE);
-       if (pb->pb_error == 0)
-               pb->pb_flags |= PBF_DONE;
+       bp->b_flags &= ~(XBF_READ | XBF_WRITE);
+       if (bp->b_error == 0)
+               bp->b_flags |= XBF_DONE;
 
-       PB_TRACE(pb, "iodone", pb->pb_iodone);
+       XB_TRACE(bp, "iodone", bp->b_iodone);
 
-       if ((pb->pb_iodone) || (pb->pb_flags & PBF_ASYNC)) {
+       if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
                if (schedule) {
-                       INIT_WORK(&pb->pb_iodone_work, pagebuf_iodone_work, pb);
-                       queue_work(xfslogd_workqueue, &pb->pb_iodone_work);
+                       INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work, bp);
+                       queue_work(xfslogd_workqueue, &bp->b_iodone_work);
                } else {
-                       pagebuf_iodone_work(pb);
+                       xfs_buf_iodone_work(bp);
                }
        } else {
-               up(&pb->pb_iodonesema);
+               up(&bp->b_iodonesema);
        }
 }
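
xfs_buf_ioend either runs the submitter's completion callback (possibly
deferred to the xfslogd workqueue) or wakes a synchronous waiter blocked
on b_iodonesema.  A stripped-down userspace sketch of that dispatch, with
invented names, is:

#include <semaphore.h>
#include <stdlib.h>

struct io_req {
        void    (*iodone)(struct io_req *);     /* optional callback */
        int     async;                          /* no synchronous waiter */
        sem_t   iodonesema;                     /* posted for waiters */
};

static void io_done_work(struct io_req *rq)
{
        if (rq->iodone)
                rq->iodone(rq);
        else if (rq->async)
                free(rq);       /* nobody will wait: drop the request */
}

static void io_end(struct io_req *rq)
{
        if (rq->iodone || rq->async)
                io_done_work(rq);       /* the kernel may defer this to a worker */
        else
                sem_post(&rq->iodonesema);
}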
 
-/*
- *     pagebuf_ioerror
- *
- *     pagebuf_ioerror sets the error code for a buffer.
- */
 void
-pagebuf_ioerror(                       /* mark/clear buffer error flag */
-       xfs_buf_t               *pb,    /* buffer to mark               */
-       int                     error)  /* error to store (0 if none)   */
+xfs_buf_ioerror(
+       xfs_buf_t               *bp,
+       int                     error)
 {
        ASSERT(error >= 0 && error <= 0xffff);
-       pb->pb_error = (unsigned short)error;
-       PB_TRACE(pb, "ioerror", (unsigned long)error);
+       bp->b_error = (unsigned short)error;
+       XB_TRACE(bp, "ioerror", (unsigned long)error);
 }
 
 /*
- *     pagebuf_iostart
- *
- *     pagebuf_iostart initiates I/O on a buffer, based on the flags supplied.
- *     If necessary, it will arrange for any disk space allocation required,
- *     and it will break up the request if the block mappings require it.
- *     The pb_iodone routine in the buffer supplied will only be called
+ *     Initiate I/O on a buffer, based on the flags supplied.
+ *     The b_iodone routine in the buffer supplied will only be called
  *     when all of the subsidiary I/O requests, if any, have been completed.
- *     pagebuf_iostart calls the pagebuf_ioinitiate routine or
- *     pagebuf_iorequest, if the former routine is not defined, to start
- *     the I/O on a given low-level request.
  */
 int
-pagebuf_iostart(                       /* start I/O on a buffer          */
-       xfs_buf_t               *pb,    /* buffer to start                */
-       page_buf_flags_t        flags)  /* PBF_LOCK, PBF_ASYNC, PBF_READ, */
-                                       /* PBF_WRITE, PBF_DELWRI,         */
-                                       /* PBF_DONT_BLOCK                 */
+xfs_buf_iostart(
+       xfs_buf_t               *bp,
+       xfs_buf_flags_t         flags)
 {
        int                     status = 0;
 
-       PB_TRACE(pb, "iostart", (unsigned long)flags);
+       XB_TRACE(bp, "iostart", (unsigned long)flags);
 
-       if (flags & PBF_DELWRI) {
-               pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC);
-               pb->pb_flags |= flags & (PBF_DELWRI | PBF_ASYNC);
-               pagebuf_delwri_queue(pb, 1);
+       if (flags & XBF_DELWRI) {
+               bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC);
+               bp->b_flags |= flags & (XBF_DELWRI | XBF_ASYNC);
+               xfs_buf_delwri_queue(bp, 1);
                return status;
        }
 
-       pb->pb_flags &= ~(PBF_READ | PBF_WRITE | PBF_ASYNC | PBF_DELWRI | \
-                       PBF_READ_AHEAD | _PBF_RUN_QUEUES);
-       pb->pb_flags |= flags & (PBF_READ | PBF_WRITE | PBF_ASYNC | \
-                       PBF_READ_AHEAD | _PBF_RUN_QUEUES);
+       bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
+       bp->b_flags |= flags & (XBF_READ | XBF_WRITE | XBF_ASYNC | \
+                       XBF_READ_AHEAD | _XBF_RUN_QUEUES);
 
-       BUG_ON(pb->pb_bn == XFS_BUF_DADDR_NULL);
+       BUG_ON(bp->b_bn == XFS_BUF_DADDR_NULL);
 
        /* For writes allow an alternate strategy routine to precede
         * the actual I/O request (which may not be issued at all in
         * a shutdown situation, for example).
         */
-       status = (flags & PBF_WRITE) ?
-               pagebuf_iostrategy(pb) : pagebuf_iorequest(pb);
+       status = (flags & XBF_WRITE) ?
+               xfs_buf_iostrategy(bp) : xfs_buf_iorequest(bp);
 
        /* Wait for I/O if we are not an async request.
         * Note: async I/O request completion will release the buffer,
         * and that can already be done by this point.  So using the
         * buffer pointer from here on, after async I/O, is invalid.
         */
-       if (!status && !(flags & PBF_ASYNC))
-               status = pagebuf_iowait(pb);
+       if (!status && !(flags & XBF_ASYNC))
+               status = xfs_buf_iowait(bp);
 
        return status;
 }
 
-/*
- * Helper routine for pagebuf_iorequest
- */
-
 STATIC __inline__ int
-_pagebuf_iolocked(
-       xfs_buf_t               *pb)
+_xfs_buf_iolocked(
+       xfs_buf_t               *bp)
 {
-       ASSERT(pb->pb_flags & (PBF_READ|PBF_WRITE));
-       if (pb->pb_flags & PBF_READ)
-               return pb->pb_locked;
+       ASSERT(bp->b_flags & (XBF_READ | XBF_WRITE));
+       if (bp->b_flags & XBF_READ)
+               return bp->b_locked;
        return 0;
 }
 
 STATIC __inline__ void
-_pagebuf_iodone(
-       xfs_buf_t               *pb,
+_xfs_buf_ioend(
+       xfs_buf_t               *bp,
        int                     schedule)
 {
-       if (atomic_dec_and_test(&pb->pb_io_remaining) == 1) {
-               pb->pb_locked = 0;
-               pagebuf_iodone(pb, schedule);
+       if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+               bp->b_locked = 0;
+               xfs_buf_ioend(bp, schedule);
        }
 }
 
 STATIC int
-bio_end_io_pagebuf(
+xfs_buf_bio_end_io(
        struct bio              *bio,
        unsigned int            bytes_done,
        int                     error)
 {
-       xfs_buf_t               *pb = (xfs_buf_t *)bio->bi_private;
-       unsigned int            blocksize = pb->pb_target->pbr_bsize;
+       xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
+       unsigned int            blocksize = bp->b_target->bt_bsize;
        struct bio_vec          *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
 
        if (bio->bi_size)
                return 1;
 
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-               pb->pb_error = EIO;
+               bp->b_error = EIO;
 
        do {
                struct page     *page = bvec->bv_page;
 
-               if (unlikely(pb->pb_error)) {
-                       if (pb->pb_flags & PBF_READ)
+               if (unlikely(bp->b_error)) {
+                       if (bp->b_flags & XBF_READ)
                                ClearPageUptodate(page);
                        SetPageError(page);
-               } else if (blocksize == PAGE_CACHE_SIZE) {
+               } else if (blocksize >= PAGE_CACHE_SIZE) {
                        SetPageUptodate(page);
                } else if (!PagePrivate(page) &&
-                               (pb->pb_flags & _PBF_PAGE_CACHE)) {
+                               (bp->b_flags & _XBF_PAGE_CACHE)) {
                        set_page_region(page, bvec->bv_offset, bvec->bv_len);
                }
 
                if (--bvec >= bio->bi_io_vec)
                        prefetchw(&bvec->bv_page->flags);
 
-               if (_pagebuf_iolocked(pb)) {
+               if (_xfs_buf_iolocked(bp)) {
                        unlock_page(page);
                }
        } while (bvec >= bio->bi_io_vec);
 
-       _pagebuf_iodone(pb, 1);
+       _xfs_buf_ioend(bp, 1);
        bio_put(bio);
        return 0;
 }
 
 STATIC void
-_pagebuf_ioapply(
-       xfs_buf_t               *pb)
+_xfs_buf_ioapply(
+       xfs_buf_t               *bp)
 {
        int                     i, rw, map_i, total_nr_pages, nr_pages;
        struct bio              *bio;
-       int                     offset = pb->pb_offset;
-       int                     size = pb->pb_count_desired;
-       sector_t                sector = pb->pb_bn;
-       unsigned int            blocksize = pb->pb_target->pbr_bsize;
-       int                     locking = _pagebuf_iolocked(pb);
+       int                     offset = bp->b_offset;
+       int                     size = bp->b_count_desired;
+       sector_t                sector = bp->b_bn;
+       unsigned int            blocksize = bp->b_target->bt_bsize;
+       int                     locking = _xfs_buf_iolocked(bp);
 
-       total_nr_pages = pb->pb_page_count;
+       total_nr_pages = bp->b_page_count;
        map_i = 0;
 
-       if (pb->pb_flags & _PBF_RUN_QUEUES) {
-               pb->pb_flags &= ~_PBF_RUN_QUEUES;
-               rw = (pb->pb_flags & PBF_READ) ? READ_SYNC : WRITE_SYNC;
+       if (bp->b_flags & _XBF_RUN_QUEUES) {
+               bp->b_flags &= ~_XBF_RUN_QUEUES;
+               rw = (bp->b_flags & XBF_READ) ? READ_SYNC : WRITE_SYNC;
        } else {
-               rw = (pb->pb_flags & PBF_READ) ? READ : WRITE;
+               rw = (bp->b_flags & XBF_READ) ? READ : WRITE;
        }
 
-       if (pb->pb_flags & PBF_ORDERED) {
-               ASSERT(!(pb->pb_flags & PBF_READ));
+       if (bp->b_flags & XBF_ORDERED) {
+               ASSERT(!(bp->b_flags & XBF_READ));
                rw = WRITE_BARRIER;
        }
 
-       /* Special code path for reading a sub page size pagebuf in --
+       /* Special code path for reading a sub page size buffer in --
         * we populate up the whole page, and hence the other metadata
         * in the same page.  This optimization is only valid when the
-        * filesystem block size and the page size are equal.
+        * filesystem block size is not smaller than the page size.
         */
-       if ((pb->pb_buffer_length < PAGE_CACHE_SIZE) &&
-           (pb->pb_flags & PBF_READ) && locking &&
-           (blocksize == PAGE_CACHE_SIZE)) {
+       if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
+           (bp->b_flags & XBF_READ) && locking &&
+           (blocksize >= PAGE_CACHE_SIZE)) {
                bio = bio_alloc(GFP_NOIO, 1);
 
-               bio->bi_bdev = pb->pb_target->pbr_bdev;
+               bio->bi_bdev = bp->b_target->bt_bdev;
                bio->bi_sector = sector - (offset >> BBSHIFT);
-               bio->bi_end_io = bio_end_io_pagebuf;
-               bio->bi_private = pb;
+               bio->bi_end_io = xfs_buf_bio_end_io;
+               bio->bi_private = bp;
 
-               bio_add_page(bio, pb->pb_pages[0], PAGE_CACHE_SIZE, 0);
+               bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
                size = 0;
 
-               atomic_inc(&pb->pb_io_remaining);
+               atomic_inc(&bp->b_io_remaining);
 
                goto submit_io;
        }
 
        /* Lock down the pages which we need to for the request */
-       if (locking && (pb->pb_flags & PBF_WRITE) && (pb->pb_locked == 0)) {
+       if (locking && (bp->b_flags & XBF_WRITE) && (bp->b_locked == 0)) {
                for (i = 0; size; i++) {
                        int             nbytes = PAGE_CACHE_SIZE - offset;
-                       struct page     *page = pb->pb_pages[i];
+                       struct page     *page = bp->b_pages[i];
 
                        if (nbytes > size)
                                nbytes = size;
@@ -1276,30 +1197,30 @@ _pagebuf_ioapply(
                        size -= nbytes;
                        offset = 0;
                }
-               offset = pb->pb_offset;
-               size = pb->pb_count_desired;
+               offset = bp->b_offset;
+               size = bp->b_count_desired;
        }
 
 next_chunk:
-       atomic_inc(&pb->pb_io_remaining);
+       atomic_inc(&bp->b_io_remaining);
        nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
        if (nr_pages > total_nr_pages)
                nr_pages = total_nr_pages;
 
        bio = bio_alloc(GFP_NOIO, nr_pages);
-       bio->bi_bdev = pb->pb_target->pbr_bdev;
+       bio->bi_bdev = bp->b_target->bt_bdev;
        bio->bi_sector = sector;
-       bio->bi_end_io = bio_end_io_pagebuf;
-       bio->bi_private = pb;
+       bio->bi_end_io = xfs_buf_bio_end_io;
+       bio->bi_private = bp;
 
        for (; size && nr_pages; nr_pages--, map_i++) {
-               int     nbytes = PAGE_CACHE_SIZE - offset;
+               int     rbytes, nbytes = PAGE_CACHE_SIZE - offset;
 
                if (nbytes > size)
                        nbytes = size;
 
-               if (bio_add_page(bio, pb->pb_pages[map_i],
-                                       nbytes, offset) < nbytes)
+               rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
+               if (rbytes < nbytes)
                        break;
 
                offset = 0;
@@ -1315,107 +1236,102 @@ submit_io:
                        goto next_chunk;
        } else {
                bio_put(bio);
-               pagebuf_ioerror(pb, EIO);
+               xfs_buf_ioerror(bp, EIO);
        }
 }
 
-/*
- *     pagebuf_iorequest -- the core I/O request routine.
- */
 int
-pagebuf_iorequest(                     /* start real I/O               */
-       xfs_buf_t               *pb)    /* buffer to convey to device   */
+xfs_buf_iorequest(
+       xfs_buf_t               *bp)
 {
-       PB_TRACE(pb, "iorequest", 0);
+       XB_TRACE(bp, "iorequest", 0);
 
-       if (pb->pb_flags & PBF_DELWRI) {
-               pagebuf_delwri_queue(pb, 1);
+       if (bp->b_flags & XBF_DELWRI) {
+               xfs_buf_delwri_queue(bp, 1);
                return 0;
        }
 
-       if (pb->pb_flags & PBF_WRITE) {
-               _pagebuf_wait_unpin(pb);
+       if (bp->b_flags & XBF_WRITE) {
+               xfs_buf_wait_unpin(bp);
        }
 
-       pagebuf_hold(pb);
+       xfs_buf_hold(bp);
 
        /* Set the count to 1 initially, this will stop an I/O
         * completion callout which happens before we have started
-        * all the I/O from calling pagebuf_iodone too early.
+        * all the I/O from calling xfs_buf_ioend too early.
         */
-       atomic_set(&pb->pb_io_remaining, 1);
-       _pagebuf_ioapply(pb);
-       _pagebuf_iodone(pb, 0);
+       atomic_set(&bp->b_io_remaining, 1);
+       _xfs_buf_ioapply(bp);
+       _xfs_buf_ioend(bp, 0);
 
-       pagebuf_rele(pb);
+       xfs_buf_rele(bp);
        return 0;
 }
 
 /*
- *     pagebuf_iowait
- *
- *     pagebuf_iowait waits for I/O to complete on the buffer supplied.
- *     It returns immediately if no I/O is pending.  In any case, it returns
- *     the error code, if any, or 0 if there is no error.
+ *     Waits for I/O to complete on the buffer supplied.
+ *     It returns immediately if no I/O is pending.
+ *     It returns the I/O error code, if any, or 0 if there was no error.
  */
 int
-pagebuf_iowait(
-       xfs_buf_t               *pb)
+xfs_buf_iowait(
+       xfs_buf_t               *bp)
 {
-       PB_TRACE(pb, "iowait", 0);
-       if (atomic_read(&pb->pb_io_remaining))
-               blk_run_address_space(pb->pb_target->pbr_mapping);
-       down(&pb->pb_iodonesema);
-       PB_TRACE(pb, "iowaited", (long)pb->pb_error);
-       return pb->pb_error;
+       XB_TRACE(bp, "iowait", 0);
+       if (atomic_read(&bp->b_io_remaining))
+               blk_run_address_space(bp->b_target->bt_mapping);
+       down(&bp->b_iodonesema);
+       XB_TRACE(bp, "iowaited", (long)bp->b_error);
+       return bp->b_error;
 }
 
-caddr_t
-pagebuf_offset(
-       xfs_buf_t               *pb,
+xfs_caddr_t
+xfs_buf_offset(
+       xfs_buf_t               *bp,
        size_t                  offset)
 {
        struct page             *page;
 
-       offset += pb->pb_offset;
+       if (bp->b_flags & XBF_MAPPED)
+               return XFS_BUF_PTR(bp) + offset;
 
-       page = pb->pb_pages[offset >> PAGE_CACHE_SHIFT];
-       return (caddr_t) page_address(page) + (offset & (PAGE_CACHE_SIZE - 1));
+       offset += bp->b_offset;
+       page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
+       return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
 }
 
 /*
- *     pagebuf_iomove
- *
  *     Move data into or out of a buffer.
  */
 void
-pagebuf_iomove(
-       xfs_buf_t               *pb,    /* buffer to process            */
+xfs_buf_iomove(
+       xfs_buf_t               *bp,    /* buffer to process            */
        size_t                  boff,   /* starting buffer offset       */
        size_t                  bsize,  /* length to copy               */
        caddr_t                 data,   /* data address                 */
-       page_buf_rw_t           mode)   /* read/write flag              */
+       xfs_buf_rw_t            mode)   /* read/write/zero flag         */
 {
        size_t                  bend, cpoff, csize;
        struct page             *page;
 
        bend = boff + bsize;
        while (boff < bend) {
-               page = pb->pb_pages[page_buf_btoct(boff + pb->pb_offset)];
-               cpoff = page_buf_poff(boff + pb->pb_offset);
+               page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
+               cpoff = xfs_buf_poff(boff + bp->b_offset);
                csize = min_t(size_t,
-                             PAGE_CACHE_SIZE-cpoff, pb->pb_count_desired-boff);
+                             PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
 
                ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));
 
                switch (mode) {
-               case PBRW_ZERO:
+               case XBRW_ZERO:
                        memset(page_address(page) + cpoff, 0, csize);
                        break;
-               case PBRW_READ:
+               case XBRW_READ:
                        memcpy(data, page_address(page) + cpoff, csize);
                        break;
-               case PBRW_WRITE:
+               case XBRW_WRITE:
                        memcpy(page_address(page) + cpoff, data, csize);
                }
 
@@ -1425,12 +1341,12 @@ pagebuf_iomove(
 }
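
xfs_buf_iomove walks the buffer page by page so that no single memset or
memcpy crosses a page boundary.  A self-contained userspace version of the
same loop (4096-byte pages assumed) could read:

#include <string.h>
#include <stddef.h>

#define PAGE_SZ 4096u

enum rw_mode { RW_ZERO, RW_READ, RW_WRITE };

/* Zero, copy out of, or copy into a paged buffer one piece at a time. */
static void iomove(char **pages, size_t buf_off, size_t boff, size_t bsize,
                   char *data, enum rw_mode mode)
{
        size_t bend = boff + bsize;

        while (boff < bend) {
                size_t off   = boff + buf_off;
                char  *page  = pages[off / PAGE_SZ];
                size_t cpoff = off % PAGE_SZ;
                size_t csize = PAGE_SZ - cpoff;

                if (csize > bend - boff)
                        csize = bend - boff;

                switch (mode) {
                case RW_ZERO:
                        memset(page + cpoff, 0, csize);
                        break;
                case RW_READ:
                        memcpy(data, page + cpoff, csize);
                        break;
                case RW_WRITE:
                        memcpy(page + cpoff, data, csize);
                        break;
                }
                boff += csize;
                data += csize;
        }
}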
 
 /*
- *     Handling of buftargs.
+ *     Handling of buffer targets (buftargs).
  */
 
 /*
- * Wait for any bufs with callbacks that have been submitted but
- * have not yet returned... walk the hash list for the target.
+ *     Wait for any bufs with callbacks that have been submitted but
+ *     have not yet returned... walk the hash list for the target.
  */
 void
 xfs_wait_buftarg(
@@ -1444,15 +1360,15 @@ xfs_wait_buftarg(
                hash = &btp->bt_hash[i];
 again:
                spin_lock(&hash->bh_lock);
-               list_for_each_entry_safe(bp, n, &hash->bh_list, pb_hash_list) {
-                       ASSERT(btp == bp->pb_target);
-                       if (!(bp->pb_flags & PBF_FS_MANAGED)) {
+               list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
+                       ASSERT(btp == bp->b_target);
+                       if (!(bp->b_flags & XBF_FS_MANAGED)) {
                                spin_unlock(&hash->bh_lock);
                                /*
                                 * Catch superblock reference count leaks
                                 * immediately
                                 */
-                               BUG_ON(bp->pb_bn == 0);
+                               BUG_ON(bp->b_bn == 0);
                                delay(100);
                                goto again;
                        }
@@ -1462,9 +1378,9 @@ again:
 }
 
 /*
- * Allocate buffer hash table for a given target.
- * For devices containing metadata (i.e. not the log/realtime devices)
- * we need to allocate a much larger hash table.
+ *     Allocate buffer hash table for a given target.
+ *     For devices containing metadata (i.e. not the log/realtime devices)
+ *     we need to allocate a much larger hash table.
  */
 STATIC void
 xfs_alloc_bufhash(
@@ -1487,11 +1403,34 @@ STATIC void
 xfs_free_bufhash(
        xfs_buftarg_t           *btp)
 {
-       kmem_free(btp->bt_hash,
-                       (1 << btp->bt_hashshift) * sizeof(xfs_bufhash_t));
+       kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
        btp->bt_hash = NULL;
 }
 
+/*
+ *     buftarg list for delwrite queue processing
+ */
+STATIC LIST_HEAD(xfs_buftarg_list);
+STATIC DEFINE_SPINLOCK(xfs_buftarg_lock);
+
+STATIC void
+xfs_register_buftarg(
+       xfs_buftarg_t           *btp)
+{
+       spin_lock(&xfs_buftarg_lock);
+       list_add(&btp->bt_list, &xfs_buftarg_list);
+       spin_unlock(&xfs_buftarg_lock);
+}
+
+STATIC void
+xfs_unregister_buftarg(
+       xfs_buftarg_t           *btp)
+{
+       spin_lock(&xfs_buftarg_lock);
+       list_del(&btp->bt_list);
+       spin_unlock(&xfs_buftarg_lock);
+}
+
 void
 xfs_free_buftarg(
        xfs_buftarg_t           *btp,
@@ -1499,9 +1438,16 @@ xfs_free_buftarg(
 {
        xfs_flush_buftarg(btp, 1);
        if (external)
-               xfs_blkdev_put(btp->pbr_bdev);
+               xfs_blkdev_put(btp->bt_bdev);
        xfs_free_bufhash(btp);
-       iput(btp->pbr_mapping->host);
+       iput(btp->bt_mapping->host);
+
+       /* Unregister the buftarg first so that we don't get a
+        * wakeup finding a non-existent task
+        */
+       xfs_unregister_buftarg(btp);
+       kthread_stop(btp->bt_task);
+
        kmem_free(btp, sizeof(*btp));
 }
 
@@ -1512,11 +1458,11 @@ xfs_setsize_buftarg_flags(
        unsigned int            sectorsize,
        int                     verbose)
 {
-       btp->pbr_bsize = blocksize;
-       btp->pbr_sshift = ffs(sectorsize) - 1;
-       btp->pbr_smask = sectorsize - 1;
+       btp->bt_bsize = blocksize;
+       btp->bt_sshift = ffs(sectorsize) - 1;
+       btp->bt_smask = sectorsize - 1;
 
-       if (set_blocksize(btp->pbr_bdev, sectorsize)) {
+       if (set_blocksize(btp->bt_bdev, sectorsize)) {
                printk(KERN_WARNING
                        "XFS: Cannot set_blocksize to %u on device %s\n",
                        sectorsize, XFS_BUFTARG_NAME(btp));
@@ -1536,10 +1482,10 @@ xfs_setsize_buftarg_flags(
 }
 
 /*
-* When allocating the initial buffer target we have not yet
-* read in the superblock, so don't know what sized sectors
-* are being used is at this early stage.  Play safe.
-*/
+ *     When allocating the initial buffer target we have not yet
+ *     read in the superblock, so we don't know what size sectors
+ *     are being used at this early stage.  Play safe.
+ */
 STATIC int
 xfs_setsize_buftarg_early(
        xfs_buftarg_t           *btp,
@@ -1587,10 +1533,30 @@ xfs_mapping_buftarg(
        mapping->a_ops = &mapping_aops;
        mapping->backing_dev_info = bdi;
        mapping_set_gfp_mask(mapping, GFP_NOFS);
-       btp->pbr_mapping = mapping;
+       btp->bt_mapping = mapping;
        return 0;
 }
 
+STATIC int
+xfs_alloc_delwrite_queue(
+       xfs_buftarg_t           *btp)
+{
+       int     error = 0;
+
+       INIT_LIST_HEAD(&btp->bt_list);
+       INIT_LIST_HEAD(&btp->bt_delwrite_queue);
+       spinlock_init(&btp->bt_delwrite_lock, "delwri_lock");
+       btp->bt_flags = 0;
+       btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
+       if (IS_ERR(btp->bt_task)) {
+               error = PTR_ERR(btp->bt_task);
+               goto out_error;
+       }
+       xfs_register_buftarg(btp);
+out_error:
+       return error;
+}
+
 xfs_buftarg_t *
 xfs_alloc_buftarg(
        struct block_device     *bdev,
@@ -1600,12 +1566,14 @@ xfs_alloc_buftarg(
 
        btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
 
-       btp->pbr_dev =  bdev->bd_dev;
-       btp->pbr_bdev = bdev;
+       btp->bt_dev =  bdev->bd_dev;
+       btp->bt_bdev = bdev;
        if (xfs_setsize_buftarg_early(btp, bdev))
                goto error;
        if (xfs_mapping_buftarg(btp, bdev))
                goto error;
+       if (xfs_alloc_delwrite_queue(btp))
+               goto error;
        xfs_alloc_bufhash(btp, external);
        return btp;
 
@@ -1616,83 +1584,81 @@ error:
 
 
 /*
- * Pagebuf delayed write buffer handling
+ *     Delayed write buffer handling
  */
-
-STATIC LIST_HEAD(pbd_delwrite_queue);
-STATIC DEFINE_SPINLOCK(pbd_delwrite_lock);
-
 STATIC void
-pagebuf_delwri_queue(
-       xfs_buf_t               *pb,
+xfs_buf_delwri_queue(
+       xfs_buf_t               *bp,
        int                     unlock)
 {
-       PB_TRACE(pb, "delwri_q", (long)unlock);
-       ASSERT((pb->pb_flags & (PBF_DELWRI|PBF_ASYNC)) ==
-                                       (PBF_DELWRI|PBF_ASYNC));
+       struct list_head        *dwq = &bp->b_target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
+
+       XB_TRACE(bp, "delwri_q", (long)unlock);
+       ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
 
-       spin_lock(&pbd_delwrite_lock);
+       spin_lock(dwlk);
        /* If already in the queue, dequeue and place at tail */
-       if (!list_empty(&pb->pb_list)) {
-               ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
-               if (unlock) {
-                       atomic_dec(&pb->pb_hold);
-               }
-               list_del(&pb->pb_list);
+       if (!list_empty(&bp->b_list)) {
+               ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+               if (unlock)
+                       atomic_dec(&bp->b_hold);
+               list_del(&bp->b_list);
        }
 
-       pb->pb_flags |= _PBF_DELWRI_Q;
-       list_add_tail(&pb->pb_list, &pbd_delwrite_queue);
-       pb->pb_queuetime = jiffies;
-       spin_unlock(&pbd_delwrite_lock);
+       bp->b_flags |= _XBF_DELWRI_Q;
+       list_add_tail(&bp->b_list, dwq);
+       bp->b_queuetime = jiffies;
+       spin_unlock(dwlk);
 
        if (unlock)
-               pagebuf_unlock(pb);
+               xfs_buf_unlock(bp);
 }
 
 void
-pagebuf_delwri_dequeue(
-       xfs_buf_t               *pb)
+xfs_buf_delwri_dequeue(
+       xfs_buf_t               *bp)
 {
+       spinlock_t              *dwlk = &bp->b_target->bt_delwrite_lock;
        int                     dequeued = 0;
 
-       spin_lock(&pbd_delwrite_lock);
-       if ((pb->pb_flags & PBF_DELWRI) && !list_empty(&pb->pb_list)) {
-               ASSERT(pb->pb_flags & _PBF_DELWRI_Q);
-               list_del_init(&pb->pb_list);
+       spin_lock(dwlk);
+       if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
+               ASSERT(bp->b_flags & _XBF_DELWRI_Q);
+               list_del_init(&bp->b_list);
                dequeued = 1;
        }
-       pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-       spin_unlock(&pbd_delwrite_lock);
+       bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+       spin_unlock(dwlk);
 
        if (dequeued)
-               pagebuf_rele(pb);
+               xfs_buf_rele(bp);
 
-       PB_TRACE(pb, "delwri_dq", (long)dequeued);
+       XB_TRACE(bp, "delwri_dq", (long)dequeued);
 }
 
 STATIC void
-pagebuf_runall_queues(
+xfs_buf_runall_queues(
        struct workqueue_struct *queue)
 {
        flush_workqueue(queue);
 }
 
-/* Defines for pagebuf daemon */
-STATIC struct task_struct *xfsbufd_task;
-STATIC int xfsbufd_force_flush;
-STATIC int xfsbufd_force_sleep;
-
 STATIC int
 xfsbufd_wakeup(
        int                     priority,
        gfp_t                   mask)
 {
-       if (xfsbufd_force_sleep)
-               return 0;
-       xfsbufd_force_flush = 1;
-       barrier();
-       wake_up_process(xfsbufd_task);
+       xfs_buftarg_t           *btp;
+
+       spin_lock(&xfs_buftarg_lock);
+       list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
+               if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
+                       continue;
+               set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
+               wake_up_process(btp->bt_task);
+       }
+       spin_unlock(&xfs_buftarg_lock);
        return 0;
 }
 
@@ -1702,67 +1668,70 @@ xfsbufd(
 {
        struct list_head        tmp;
        unsigned long           age;
-       xfs_buftarg_t           *target;
-       xfs_buf_t               *pb, *n;
+       xfs_buftarg_t           *target = (xfs_buftarg_t *)data;
+       xfs_buf_t               *bp, *n;
+       struct list_head        *dwq = &target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &target->bt_delwrite_lock;
 
        current->flags |= PF_MEMALLOC;
 
        INIT_LIST_HEAD(&tmp);
        do {
                if (unlikely(freezing(current))) {
-                       xfsbufd_force_sleep = 1;
+                       set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
                        refrigerator();
                } else {
-                       xfsbufd_force_sleep = 0;
+                       clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
                }
 
                schedule_timeout_interruptible(
                        xfs_buf_timer_centisecs * msecs_to_jiffies(10));
 
                age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
-               spin_lock(&pbd_delwrite_lock);
-               list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-                       PB_TRACE(pb, "walkq1", (long)pagebuf_ispin(pb));
-                       ASSERT(pb->pb_flags & PBF_DELWRI);
-
-                       if (!pagebuf_ispin(pb) && !pagebuf_cond_lock(pb)) {
-                               if (!xfsbufd_force_flush &&
+               spin_lock(dwlk);
+               list_for_each_entry_safe(bp, n, dwq, b_list) {
+                       XB_TRACE(bp, "walkq1", (long)xfs_buf_ispin(bp));
+                       ASSERT(bp->b_flags & XBF_DELWRI);
+
+                       if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
+                               if (!test_bit(XBT_FORCE_FLUSH,
+                                               &target->bt_flags) &&
                                    time_before(jiffies,
-                                               pb->pb_queuetime + age)) {
-                                       pagebuf_unlock(pb);
+                                               bp->b_queuetime + age)) {
+                                       xfs_buf_unlock(bp);
                                        break;
                                }
 
-                               pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-                               pb->pb_flags |= PBF_WRITE;
-                               list_move(&pb->pb_list, &tmp);
+                               bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+                               bp->b_flags |= XBF_WRITE;
+                               list_move(&bp->b_list, &tmp);
                        }
                }
-               spin_unlock(&pbd_delwrite_lock);
+               spin_unlock(dwlk);
 
                while (!list_empty(&tmp)) {
-                       pb = list_entry(tmp.next, xfs_buf_t, pb_list);
-                       target = pb->pb_target;
+                       bp = list_entry(tmp.next, xfs_buf_t, b_list);
+                       ASSERT(target == bp->b_target);
 
-                       list_del_init(&pb->pb_list);
-                       pagebuf_iostrategy(pb);
+                       list_del_init(&bp->b_list);
+                       xfs_buf_iostrategy(bp);
 
-                       blk_run_address_space(target->pbr_mapping);
+                       blk_run_address_space(target->bt_mapping);
                }
 
                if (as_list_len > 0)
                        purge_addresses();
 
-               xfsbufd_force_flush = 0;
+               clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
        } while (!kthread_should_stop());
 
        return 0;
 }
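xfsbufd() is now one kthread per buffer target walking only that target's bt_delwrite_queue, which is why the old per-buffer target lookup in the writeback loop turns into an ASSERT. The flush interval and buffer age stay in centiseconds; since a centisecond is 10ms, the conversions above multiply by msecs_to_jiffies(10). Illustrative arithmetic only, assuming HZ=1000 and the usual tunable defaults (neither is shown in this excerpt):

	age   = xfs_buf_age_centisecs * msecs_to_jiffies(10);
	/* e.g. 1500 centisecs * 10 jiffies/centisec = 15000 jiffies = 15s */
	timer = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
	/* e.g. 100 centisecs * 10 jiffies/centisec = 1000 jiffies = 1s */
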
 
 /*
- * Go through all incore buffers, and release buffers if they belong to
- * the given device. This is used in filesystem error handling to
- * preserve the consistency of its metadata.
+ *     Go through all incore buffers, and release buffers if they belong to
+ *     the given device. This is used in filesystem error handling to
+ *     preserve the consistency of its metadata.
  */
 int
 xfs_flush_buftarg(
@@ -1770,73 +1739,72 @@ xfs_flush_buftarg(
        int                     wait)
 {
        struct list_head        tmp;
-       xfs_buf_t               *pb, *n;
+       xfs_buf_t               *bp, *n;
        int                     pincount = 0;
+       struct list_head        *dwq = &target->bt_delwrite_queue;
+       spinlock_t              *dwlk = &target->bt_delwrite_lock;
 
-       pagebuf_runall_queues(xfsdatad_workqueue);
-       pagebuf_runall_queues(xfslogd_workqueue);
+       xfs_buf_runall_queues(xfsdatad_workqueue);
+       xfs_buf_runall_queues(xfslogd_workqueue);
 
        INIT_LIST_HEAD(&tmp);
-       spin_lock(&pbd_delwrite_lock);
-       list_for_each_entry_safe(pb, n, &pbd_delwrite_queue, pb_list) {
-
-               if (pb->pb_target != target)
-                       continue;
-
-               ASSERT(pb->pb_flags & (PBF_DELWRI|_PBF_DELWRI_Q));
-               PB_TRACE(pb, "walkq2", (long)pagebuf_ispin(pb));
-               if (pagebuf_ispin(pb)) {
+       spin_lock(dwlk);
+       list_for_each_entry_safe(bp, n, dwq, b_list) {
+               ASSERT(bp->b_target == target);
+               ASSERT(bp->b_flags & (XBF_DELWRI | _XBF_DELWRI_Q));
+               XB_TRACE(bp, "walkq2", (long)xfs_buf_ispin(bp));
+               if (xfs_buf_ispin(bp)) {
                        pincount++;
                        continue;
                }
 
-               list_move(&pb->pb_list, &tmp);
+               list_move(&bp->b_list, &tmp);
        }
-       spin_unlock(&pbd_delwrite_lock);
+       spin_unlock(dwlk);
 
        /*
         * Dropped the delayed write list lock, now walk the temporary list
         */
-       list_for_each_entry_safe(pb, n, &tmp, pb_list) {
-               pagebuf_lock(pb);
-               pb->pb_flags &= ~(PBF_DELWRI|_PBF_DELWRI_Q);
-               pb->pb_flags |= PBF_WRITE;
+       list_for_each_entry_safe(bp, n, &tmp, b_list) {
+               xfs_buf_lock(bp);
+               bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
+               bp->b_flags |= XBF_WRITE;
                if (wait)
-                       pb->pb_flags &= ~PBF_ASYNC;
+                       bp->b_flags &= ~XBF_ASYNC;
                else
-                       list_del_init(&pb->pb_list);
+                       list_del_init(&bp->b_list);
 
-               pagebuf_iostrategy(pb);
+               xfs_buf_iostrategy(bp);
        }
 
        /*
         * Remaining list items must be flushed before returning
         */
        while (!list_empty(&tmp)) {
-               pb = list_entry(tmp.next, xfs_buf_t, pb_list);
+               bp = list_entry(tmp.next, xfs_buf_t, b_list);
 
-               list_del_init(&pb->pb_list);
-               xfs_iowait(pb);
-               xfs_buf_relse(pb);
+               list_del_init(&bp->b_list);
+               xfs_iowait(bp);
+               xfs_buf_relse(bp);
        }
 
        if (wait)
-               blk_run_address_space(target->pbr_mapping);
+               blk_run_address_space(target->bt_mapping);
 
        return pincount;
 }
 
 int __init
-pagebuf_init(void)
+xfs_buf_init(void)
 {
        int             error = -ENOMEM;
 
-#ifdef PAGEBUF_TRACE
-       pagebuf_trace_buf = ktrace_alloc(PAGEBUF_TRACE_SIZE, KM_SLEEP);
+#ifdef XFS_BUF_TRACE
+       xfs_buf_trace_buf = ktrace_alloc(XFS_BUF_TRACE_SIZE, KM_SLEEP);
 #endif
 
-       pagebuf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
-       if (!pagebuf_zone)
+       xfs_buf_zone = kmem_zone_init(sizeof(xfs_buf_t), "xfs_buf");
+       if (!xfs_buf_zone)
                goto out_free_trace_buf;
 
        xfslogd_workqueue = create_workqueue("xfslogd");
@@ -1847,42 +1815,33 @@ pagebuf_init(void)
        if (!xfsdatad_workqueue)
                goto out_destroy_xfslogd_workqueue;
 
-       xfsbufd_task = kthread_run(xfsbufd, NULL, "xfsbufd");
-       if (IS_ERR(xfsbufd_task)) {
-               error = PTR_ERR(xfsbufd_task);
+       xfs_buf_shake = kmem_shake_register(xfsbufd_wakeup);
+       if (!xfs_buf_shake)
                goto out_destroy_xfsdatad_workqueue;
-       }
-
-       pagebuf_shake = kmem_shake_register(xfsbufd_wakeup);
-       if (!pagebuf_shake)
-               goto out_stop_xfsbufd;
 
        return 0;
 
- out_stop_xfsbufd:
-       kthread_stop(xfsbufd_task);
  out_destroy_xfsdatad_workqueue:
        destroy_workqueue(xfsdatad_workqueue);
  out_destroy_xfslogd_workqueue:
        destroy_workqueue(xfslogd_workqueue);
  out_free_buf_zone:
-       kmem_zone_destroy(pagebuf_zone);
+       kmem_zone_destroy(xfs_buf_zone);
  out_free_trace_buf:
-#ifdef PAGEBUF_TRACE
-       ktrace_free(pagebuf_trace_buf);
+#ifdef XFS_BUF_TRACE
+       ktrace_free(xfs_buf_trace_buf);
 #endif
        return error;
 }
 
 void
-pagebuf_terminate(void)
+xfs_buf_terminate(void)
 {
-       kmem_shake_deregister(pagebuf_shake);
-       kthread_stop(xfsbufd_task);
+       kmem_shake_deregister(xfs_buf_shake);
        destroy_workqueue(xfsdatad_workqueue);
        destroy_workqueue(xfslogd_workqueue);
-       kmem_zone_destroy(pagebuf_zone);
-#ifdef PAGEBUF_TRACE
-       ktrace_free(pagebuf_trace_buf);
+       kmem_zone_destroy(xfs_buf_zone);
+#ifdef XFS_BUF_TRACE
+       ktrace_free(xfs_buf_trace_buf);
 #endif
 }
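With the flusher now started per target in xfs_alloc_delwrite_queue(), the kthread_stop() that used to live in pagebuf_terminate() has to move to buftarg teardown. xfs_free_buftarg() is only declared in the header below, so this is a minimal sketch of the per-target side, assuming an unregister counterpart to xfs_register_buftarg():

	STATIC void
	xfs_free_delwrite_queue(
		xfs_buftarg_t		*btp)
	{
		xfs_flush_buftarg(btp, 1);	/* drain bt_delwrite_queue */
		xfs_unregister_buftarg(btp);	/* drop off xfs_buftarg_list */
		kthread_stop(btp->bt_task);	/* stop this target's xfsbufd */
	}
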
index 237a35b915d1ecba283f11aa53ae6df8484bb3de..4dd6592d5a4cbf0c9f3ebdfa7a95fcc5870d253d 100644 (file)
  *     Base types
  */
 
-#define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL))
-
-#define page_buf_ctob(pp)      ((pp) * PAGE_CACHE_SIZE)
-#define page_buf_btoc(dd)      (((dd) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT)
-#define page_buf_btoct(dd)     ((dd) >> PAGE_CACHE_SHIFT)
-#define page_buf_poff(aa)      ((aa) & ~PAGE_CACHE_MASK)
-
-typedef enum page_buf_rw_e {
-       PBRW_READ = 1,                  /* transfer into target memory */
-       PBRW_WRITE = 2,                 /* transfer from target memory */
-       PBRW_ZERO = 3                   /* Zero target memory */
-} page_buf_rw_t;
-
-
-typedef enum page_buf_flags_e {                /* pb_flags values */
-       PBF_READ = (1 << 0),    /* buffer intended for reading from device */
-       PBF_WRITE = (1 << 1),   /* buffer intended for writing to device   */
-       PBF_MAPPED = (1 << 2),  /* buffer mapped (pb_addr valid)           */
-       PBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
-       PBF_DONE = (1 << 5),    /* all pages in the buffer uptodate        */
-       PBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
-       PBF_STALE = (1 << 7),   /* buffer has been staled, do not find it  */
-       PBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
-       PBF_ORDERED = (1 << 11),    /* use ordered writes                  */
-       PBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
+#define XFS_BUF_DADDR_NULL     ((xfs_daddr_t) (-1LL))
+
+#define xfs_buf_ctob(pp)       ((pp) * PAGE_CACHE_SIZE)
+#define xfs_buf_btoc(dd)       (((dd) + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_btoct(dd)      ((dd) >> PAGE_CACHE_SHIFT)
+#define xfs_buf_poff(aa)       ((aa) & ~PAGE_CACHE_MASK)
+
+typedef enum {
+       XBRW_READ = 1,                  /* transfer into target memory */
+       XBRW_WRITE = 2,                 /* transfer from target memory */
+       XBRW_ZERO = 3,                  /* Zero target memory */
+} xfs_buf_rw_t;
+
+typedef enum {
+       XBF_READ = (1 << 0),    /* buffer intended for reading from device */
+       XBF_WRITE = (1 << 1),   /* buffer intended for writing to device   */
+       XBF_MAPPED = (1 << 2),  /* buffer mapped (b_addr valid)            */
+       XBF_ASYNC = (1 << 4),   /* initiator will not wait for completion  */
+       XBF_DONE = (1 << 5),    /* all pages in the buffer uptodate        */
+       XBF_DELWRI = (1 << 6),  /* buffer has dirty pages                  */
+       XBF_STALE = (1 << 7),   /* buffer has been staled, do not find it  */
+       XBF_FS_MANAGED = (1 << 8),  /* filesystem controls freeing memory  */
+       XBF_ORDERED = (1 << 11),    /* use ordered writes                  */
+       XBF_READ_AHEAD = (1 << 12), /* asynchronous read-ahead             */
 
        /* flags used only as arguments to access routines */
-       PBF_LOCK = (1 << 14),       /* lock requested                      */
-       PBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
-       PBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
+       XBF_LOCK = (1 << 14),       /* lock requested                      */
+       XBF_TRYLOCK = (1 << 15),    /* lock requested, but do not wait     */
+       XBF_DONT_BLOCK = (1 << 16), /* do not block in current thread      */
 
        /* flags used only internally */
-       _PBF_PAGE_CACHE = (1 << 17),/* backed by pagecache                 */
-       _PBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()              */
-       _PBF_RUN_QUEUES = (1 << 19),/* run block device task queue         */
-       _PBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue             */
-} page_buf_flags_t;
+       _XBF_PAGE_CACHE = (1 << 17),/* backed by pagecache                 */
+       _XBF_KMEM_ALLOC = (1 << 18),/* backed by kmem_alloc()              */
+       _XBF_RUN_QUEUES = (1 << 19),/* run block device task queue         */
+       _XBF_DELWRI_Q = (1 << 21),   /* buffer on delwri queue             */
+} xfs_buf_flags_t;
 
+typedef enum {
+       XBT_FORCE_SLEEP = (0 << 1),
+       XBT_FORCE_FLUSH = (1 << 1),
+} xfs_buftarg_flags_t;
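Note that the xfs_buftarg_flags_t values are used as bit numbers, not masks: xfsbufd() and xfsbufd_wakeup() above pass them to set_bit()/test_bit()/clear_bit() on bt_flags, so (0 << 1) and (1 << 1) select bits 0 and 2 respectively. The two bits are distinct, so the code behaves correctly; written as plain bit indices the enum would be equivalent to:

	enum {
		XBT_FORCE_SLEEP	= 0,	/* (0 << 1) picks bit 0 of bt_flags */
		XBT_FORCE_FLUSH	= 2,	/* (1 << 1) picks bit 2 of bt_flags */
	};
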
 
 typedef struct xfs_bufhash {
        struct list_head        bh_list;
@@ -77,477 +80,350 @@ typedef struct xfs_bufhash {
 } xfs_bufhash_t;
 
 typedef struct xfs_buftarg {
-       dev_t                   pbr_dev;
-       struct block_device     *pbr_bdev;
-       struct address_space    *pbr_mapping;
-       unsigned int            pbr_bsize;
-       unsigned int            pbr_sshift;
-       size_t                  pbr_smask;
-
-       /* per-device buffer hash table */
+       dev_t                   bt_dev;
+       struct block_device     *bt_bdev;
+       struct address_space    *bt_mapping;
+       unsigned int            bt_bsize;
+       unsigned int            bt_sshift;
+       size_t                  bt_smask;
+
+       /* per device buffer hash table */
        uint                    bt_hashmask;
        uint                    bt_hashshift;
        xfs_bufhash_t           *bt_hash;
+
+       /* per device delwri queue */
+       struct task_struct      *bt_task;
+       struct list_head        bt_list;
+       struct list_head        bt_delwrite_queue;
+       spinlock_t              bt_delwrite_lock;
+       unsigned long           bt_flags;
 } xfs_buftarg_t;
 
 /*
- *     xfs_buf_t:  Buffer structure for page cache-based buffers
+ *     xfs_buf_t:  Buffer structure for pagecache-based buffers
+ *
+ * This buffer structure is used by the pagecache buffer management routines
+ * to refer to an assembly of pages forming a logical buffer.
  *
- * This buffer structure is used by the page cache buffer management routines
- * to refer to an assembly of pages forming a logical buffer.  The actual I/O
- * is performed with buffer_head structures, as required by drivers.
- * 
- * The buffer structure is used on temporary basis only, and discarded when
- * released.  The real data storage is recorded in the page cache.  Metadata is
+ * The buffer structure is used on a temporary basis only, and discarded when
+ * released.  The real data storage is recorded in the pagecache. Buffers are
  * hashed to the block device on which the file system resides.
  */
 
 struct xfs_buf;
+typedef void (*xfs_buf_iodone_t)(struct xfs_buf *);
+typedef void (*xfs_buf_relse_t)(struct xfs_buf *);
+typedef int (*xfs_buf_bdstrat_t)(struct xfs_buf *);
 
-/* call-back function on I/O completion */
-typedef void (*page_buf_iodone_t)(struct xfs_buf *);
-/* call-back function on I/O completion */
-typedef void (*page_buf_relse_t)(struct xfs_buf *);
-/* pre-write function */
-typedef int (*page_buf_bdstrat_t)(struct xfs_buf *);
-
-#define PB_PAGES       2
+#define XB_PAGES       2
 
 typedef struct xfs_buf {
-       struct semaphore        pb_sema;        /* semaphore for lockables  */
-       unsigned long           pb_queuetime;   /* time buffer was queued   */
-       atomic_t                pb_pin_count;   /* pin count                */
-       wait_queue_head_t       pb_waiters;     /* unpin waiters            */
-       struct list_head        pb_list;
-       page_buf_flags_t        pb_flags;       /* status flags */
-       struct list_head        pb_hash_list;   /* hash table list */
-       xfs_bufhash_t           *pb_hash;       /* hash table list start */
-       xfs_buftarg_t           *pb_target;     /* buffer target (device) */
-       atomic_t                pb_hold;        /* reference count */
-       xfs_daddr_t             pb_bn;          /* block number for I/O */
-       loff_t                  pb_file_offset; /* offset in file */
-       size_t                  pb_buffer_length; /* size of buffer in bytes */
-       size_t                  pb_count_desired; /* desired transfer size */
-       void                    *pb_addr;       /* virtual address of buffer */
-       struct work_struct      pb_iodone_work;
-       atomic_t                pb_io_remaining;/* #outstanding I/O requests */
-       page_buf_iodone_t       pb_iodone;      /* I/O completion function */
-       page_buf_relse_t        pb_relse;       /* releasing function */
-       page_buf_bdstrat_t      pb_strat;       /* pre-write function */
-       struct semaphore        pb_iodonesema;  /* Semaphore for I/O waiters */
-       void                    *pb_fspriv;
-       void                    *pb_fspriv2;
-       void                    *pb_fspriv3;
-       unsigned short          pb_error;       /* error code on I/O */
-       unsigned short          pb_locked;      /* page array is locked */
-       unsigned int            pb_page_count;  /* size of page array */
-       unsigned int            pb_offset;      /* page offset in first page */
-       struct page             **pb_pages;     /* array of page pointers */
-       struct page             *pb_page_array[PB_PAGES]; /* inline pages */
-#ifdef PAGEBUF_LOCK_TRACKING
-       int                     pb_last_holder;
+       struct semaphore        b_sema;         /* semaphore for lockables */
+       unsigned long           b_queuetime;    /* time buffer was queued */
+       atomic_t                b_pin_count;    /* pin count */
+       wait_queue_head_t       b_waiters;      /* unpin waiters */
+       struct list_head        b_list;
+       xfs_buf_flags_t         b_flags;        /* status flags */
+       struct list_head        b_hash_list;    /* hash table list */
+       xfs_bufhash_t           *b_hash;        /* hash table list start */
+       xfs_buftarg_t           *b_target;      /* buffer target (device) */
+       atomic_t                b_hold;         /* reference count */
+       xfs_daddr_t             b_bn;           /* block number for I/O */
+       xfs_off_t               b_file_offset;  /* offset in file */
+       size_t                  b_buffer_length;/* size of buffer in bytes */
+       size_t                  b_count_desired;/* desired transfer size */
+       void                    *b_addr;        /* virtual address of buffer */
+       struct work_struct      b_iodone_work;
+       atomic_t                b_io_remaining; /* #outstanding I/O requests */
+       xfs_buf_iodone_t        b_iodone;       /* I/O completion function */
+       xfs_buf_relse_t         b_relse;        /* releasing function */
+       xfs_buf_bdstrat_t       b_strat;        /* pre-write function */
+       struct semaphore        b_iodonesema;   /* Semaphore for I/O waiters */
+       void                    *b_fspriv;
+       void                    *b_fspriv2;
+       void                    *b_fspriv3;
+       unsigned short          b_error;        /* error code on I/O */
+       unsigned short          b_locked;       /* page array is locked */
+       unsigned int            b_page_count;   /* size of page array */
+       unsigned int            b_offset;       /* page offset in first page */
+       struct page             **b_pages;      /* array of page pointers */
+       struct page             *b_page_array[XB_PAGES]; /* inline pages */
+#ifdef XFS_BUF_LOCK_TRACKING
+       int                     b_last_holder;
 #endif
 } xfs_buf_t;
 
 
 /* Finding and Reading Buffers */
-
-extern xfs_buf_t *_pagebuf_find(       /* find buffer for block if     */
-                                       /* the block is in memory       */
-               xfs_buftarg_t *,        /* inode for block              */
-               loff_t,                 /* starting offset of range     */
-               size_t,                 /* length of range              */
-               page_buf_flags_t,       /* PBF_LOCK                     */
-               xfs_buf_t *);           /* newly allocated buffer       */
-
+extern xfs_buf_t *_xfs_buf_find(xfs_buftarg_t *, xfs_off_t, size_t,
+                               xfs_buf_flags_t, xfs_buf_t *);
 #define xfs_incore(buftarg,blkno,len,lockit) \
-       _pagebuf_find(buftarg, blkno ,len, lockit, NULL)
-
-extern xfs_buf_t *xfs_buf_get_flags(   /* allocate a buffer            */
-               xfs_buftarg_t *,        /* inode for buffer             */
-               loff_t,                 /* starting offset of range     */
-               size_t,                 /* length of range              */
-               page_buf_flags_t);      /* PBF_LOCK, PBF_READ,          */
-                                       /* PBF_ASYNC                    */
+       _xfs_buf_find(buftarg, blkno ,len, lockit, NULL)
 
+extern xfs_buf_t *xfs_buf_get_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+                               xfs_buf_flags_t);
 #define xfs_buf_get(target, blkno, len, flags) \
-       xfs_buf_get_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-
-extern xfs_buf_t *xfs_buf_read_flags(  /* allocate and read a buffer   */
-               xfs_buftarg_t *,        /* inode for buffer             */
-               loff_t,                 /* starting offset of range     */
-               size_t,                 /* length of range              */
-               page_buf_flags_t);      /* PBF_LOCK, PBF_ASYNC          */
+       xfs_buf_get_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
 
+extern xfs_buf_t *xfs_buf_read_flags(xfs_buftarg_t *, xfs_off_t, size_t,
+                               xfs_buf_flags_t);
 #define xfs_buf_read(target, blkno, len, flags) \
-       xfs_buf_read_flags((target), (blkno), (len), PBF_LOCK | PBF_MAPPED)
-
-extern xfs_buf_t *pagebuf_get_empty(   /* allocate pagebuf struct with */
-                                       /*  no memory or disk address   */
-               size_t len,
-               xfs_buftarg_t *);       /* mount point "fake" inode     */
-
-extern xfs_buf_t *pagebuf_get_no_daddr(/* allocate pagebuf struct      */
-                                       /* without disk address         */
-               size_t len,
-               xfs_buftarg_t *);       /* mount point "fake" inode     */
-
-extern int pagebuf_associate_memory(
-               xfs_buf_t *,
-               void *,
-               size_t);
-
-extern void pagebuf_hold(              /* increment reference count    */
-               xfs_buf_t *);           /* buffer to hold               */
+       xfs_buf_read_flags((target), (blkno), (len), XBF_LOCK | XBF_MAPPED)
 
-extern void pagebuf_readahead(         /* read ahead into cache        */
-               xfs_buftarg_t  *,       /* target for buffer (or NULL)  */
-               loff_t,                 /* starting offset of range     */
-               size_t,                 /* length of range              */
-               page_buf_flags_t);      /* additional read flags        */
+extern xfs_buf_t *xfs_buf_get_empty(size_t, xfs_buftarg_t *);
+extern xfs_buf_t *xfs_buf_get_noaddr(size_t, xfs_buftarg_t *);
+extern int xfs_buf_associate_memory(xfs_buf_t *, void *, size_t);
+extern void xfs_buf_hold(xfs_buf_t *);
+extern void xfs_buf_readahead(xfs_buftarg_t *, xfs_off_t, size_t,
+                               xfs_buf_flags_t);
 
 /* Releasing Buffers */
-
-extern void pagebuf_free(              /* deallocate a buffer          */
-               xfs_buf_t *);           /* buffer to deallocate         */
-
-extern void pagebuf_rele(              /* release hold on a buffer     */
-               xfs_buf_t *);           /* buffer to release            */
+extern void xfs_buf_free(xfs_buf_t *);
+extern void xfs_buf_rele(xfs_buf_t *);
 
 /* Locking and Unlocking Buffers */
-
-extern int pagebuf_cond_lock(          /* lock buffer, if not locked   */
-                                       /* (returns -EBUSY if locked)   */
-               xfs_buf_t *);           /* buffer to lock               */
-
-extern int pagebuf_lock_value(         /* return count on lock         */
-               xfs_buf_t *);          /* buffer to check              */
-
-extern int pagebuf_lock(               /* lock buffer                  */
-               xfs_buf_t *);          /* buffer to lock               */
-
-extern void pagebuf_unlock(            /* unlock buffer                */
-               xfs_buf_t *);           /* buffer to unlock             */
+extern int xfs_buf_cond_lock(xfs_buf_t *);
+extern int xfs_buf_lock_value(xfs_buf_t *);
+extern void xfs_buf_lock(xfs_buf_t *);
+extern void xfs_buf_unlock(xfs_buf_t *);
 
 /* Buffer Read and Write Routines */
-
-extern void pagebuf_iodone(            /* mark buffer I/O complete     */
-               xfs_buf_t *,            /* buffer to mark               */
-               int);                   /* run completion locally, or in
-                                        * a helper thread.             */
-
-extern void pagebuf_ioerror(           /* mark buffer in error (or not) */
-               xfs_buf_t *,            /* buffer to mark               */
-               int);                   /* error to store (0 if none)   */
-
-extern int pagebuf_iostart(            /* start I/O on a buffer        */
-               xfs_buf_t *,            /* buffer to start              */
-               page_buf_flags_t);      /* PBF_LOCK, PBF_ASYNC,         */
-                                       /* PBF_READ, PBF_WRITE,         */
-                                       /* PBF_DELWRI                   */
-
-extern int pagebuf_iorequest(          /* start real I/O               */
-               xfs_buf_t *);           /* buffer to convey to device   */
-
-extern int pagebuf_iowait(             /* wait for buffer I/O done     */
-               xfs_buf_t *);           /* buffer to wait on            */
-
-extern void pagebuf_iomove(            /* move data in/out of pagebuf  */
-               xfs_buf_t *,            /* buffer to manipulate         */
-               size_t,                 /* starting buffer offset       */
-               size_t,                 /* length in buffer             */
-               caddr_t,                /* data pointer                 */
-               page_buf_rw_t);         /* direction                    */
-
-static inline int pagebuf_iostrategy(xfs_buf_t *pb)
+extern void xfs_buf_ioend(xfs_buf_t *, int);
+extern void xfs_buf_ioerror(xfs_buf_t *, int);
+extern int xfs_buf_iostart(xfs_buf_t *, xfs_buf_flags_t);
+extern int xfs_buf_iorequest(xfs_buf_t *);
+extern int xfs_buf_iowait(xfs_buf_t *);
+extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, xfs_caddr_t,
+                               xfs_buf_rw_t);
+
+static inline int xfs_buf_iostrategy(xfs_buf_t *bp)
 {
-       return pb->pb_strat ? pb->pb_strat(pb) : pagebuf_iorequest(pb);
+       return bp->b_strat ? bp->b_strat(bp) : xfs_buf_iorequest(bp);
 }
 
-static inline int pagebuf_geterror(xfs_buf_t *pb)
+static inline int xfs_buf_geterror(xfs_buf_t *bp)
 {
-       return pb ? pb->pb_error : ENOMEM;
+       return bp ? bp->b_error : ENOMEM;
 }
 
 /* Buffer Utility Routines */
-
-extern caddr_t pagebuf_offset(         /* pointer at offset in buffer  */
-               xfs_buf_t *,            /* buffer to offset into        */
-               size_t);                /* offset                       */
+extern xfs_caddr_t xfs_buf_offset(xfs_buf_t *, size_t);
 
 /* Pinning Buffer Storage in Memory */
-
-extern void pagebuf_pin(               /* pin buffer in memory         */
-               xfs_buf_t *);           /* buffer to pin                */
-
-extern void pagebuf_unpin(             /* unpin buffered data          */
-               xfs_buf_t *);           /* buffer to unpin              */
-
-extern int pagebuf_ispin(              /* check if buffer is pinned    */
-               xfs_buf_t *);           /* buffer to check              */
+extern void xfs_buf_pin(xfs_buf_t *);
+extern void xfs_buf_unpin(xfs_buf_t *);
+extern int xfs_buf_ispin(xfs_buf_t *);
 
 /* Delayed Write Buffer Routines */
-
-extern void pagebuf_delwri_dequeue(xfs_buf_t *);
+extern void xfs_buf_delwri_dequeue(xfs_buf_t *);
 
 /* Buffer Daemon Setup Routines */
+extern int xfs_buf_init(void);
+extern void xfs_buf_terminate(void);
 
-extern int pagebuf_init(void);
-extern void pagebuf_terminate(void);
-
-
-#ifdef PAGEBUF_TRACE
-extern ktrace_t *pagebuf_trace_buf;
-extern void pagebuf_trace(
-               xfs_buf_t *,            /* buffer being traced          */
-               char *,                 /* description of operation     */
-               void *,                 /* arbitrary diagnostic value   */
-               void *);                /* return address               */
+#ifdef XFS_BUF_TRACE
+extern ktrace_t *xfs_buf_trace_buf;
+extern void xfs_buf_trace(xfs_buf_t *, char *, void *, void *);
 #else
-# define pagebuf_trace(pb, id, ptr, ra)        do { } while (0)
+#define xfs_buf_trace(bp,id,ptr,ra)    do { } while (0)
 #endif
 
-#define pagebuf_target_name(target)    \
-       ({ char __b[BDEVNAME_SIZE]; bdevname((target)->pbr_bdev, __b); __b; })
+#define xfs_buf_target_name(target)    \
+       ({ char __b[BDEVNAME_SIZE]; bdevname((target)->bt_bdev, __b); __b; })
 
 
+#define XFS_B_ASYNC            XBF_ASYNC
+#define XFS_B_DELWRI           XBF_DELWRI
+#define XFS_B_READ             XBF_READ
+#define XFS_B_WRITE            XBF_WRITE
+#define XFS_B_STALE            XBF_STALE
 
-/* These are just for xfs_syncsub... it sets an internal variable
- * then passes it to VOP_FLUSH_PAGES or adds the flags to a newly gotten buf_t
- */
-#define XFS_B_ASYNC            PBF_ASYNC
-#define XFS_B_DELWRI           PBF_DELWRI
-#define XFS_B_READ             PBF_READ
-#define XFS_B_WRITE            PBF_WRITE
-#define XFS_B_STALE            PBF_STALE
-
-#define XFS_BUF_TRYLOCK                PBF_TRYLOCK
-#define XFS_INCORE_TRYLOCK     PBF_TRYLOCK
-#define XFS_BUF_LOCK           PBF_LOCK
-#define XFS_BUF_MAPPED         PBF_MAPPED
-
-#define BUF_BUSY               PBF_DONT_BLOCK
-
-#define XFS_BUF_BFLAGS(x)      ((x)->pb_flags)
-#define XFS_BUF_ZEROFLAGS(x)   \
-       ((x)->pb_flags &= ~(PBF_READ|PBF_WRITE|PBF_ASYNC|PBF_DELWRI))
-
-#define XFS_BUF_STALE(x)       ((x)->pb_flags |= XFS_B_STALE)
-#define XFS_BUF_UNSTALE(x)     ((x)->pb_flags &= ~XFS_B_STALE)
-#define XFS_BUF_ISSTALE(x)     ((x)->pb_flags & XFS_B_STALE)
-#define XFS_BUF_SUPER_STALE(x) do {                            \
-                                       XFS_BUF_STALE(x);       \
-                                       pagebuf_delwri_dequeue(x);      \
-                                       XFS_BUF_DONE(x);        \
-                               } while (0)
+#define XFS_BUF_TRYLOCK                XBF_TRYLOCK
+#define XFS_INCORE_TRYLOCK     XBF_TRYLOCK
+#define XFS_BUF_LOCK           XBF_LOCK
+#define XFS_BUF_MAPPED         XBF_MAPPED
 
-#define XFS_BUF_MANAGE         PBF_FS_MANAGED
-#define XFS_BUF_UNMANAGE(x)    ((x)->pb_flags &= ~PBF_FS_MANAGED)
-
-#define XFS_BUF_DELAYWRITE(x)   ((x)->pb_flags |= PBF_DELWRI)
-#define XFS_BUF_UNDELAYWRITE(x)         pagebuf_delwri_dequeue(x)
-#define XFS_BUF_ISDELAYWRITE(x)         ((x)->pb_flags & PBF_DELWRI)
-
-#define XFS_BUF_ERROR(x,no)     pagebuf_ioerror(x,no)
-#define XFS_BUF_GETERROR(x)     pagebuf_geterror(x)
-#define XFS_BUF_ISERROR(x)      (pagebuf_geterror(x)?1:0)
-
-#define XFS_BUF_DONE(x)                 ((x)->pb_flags |= PBF_DONE)
-#define XFS_BUF_UNDONE(x)       ((x)->pb_flags &= ~PBF_DONE)
-#define XFS_BUF_ISDONE(x)       ((x)->pb_flags & PBF_DONE)
-
-#define XFS_BUF_BUSY(x)                 do { } while (0)
-#define XFS_BUF_UNBUSY(x)       do { } while (0)
-#define XFS_BUF_ISBUSY(x)       (1)
-
-#define XFS_BUF_ASYNC(x)        ((x)->pb_flags |= PBF_ASYNC)
-#define XFS_BUF_UNASYNC(x)      ((x)->pb_flags &= ~PBF_ASYNC)
-#define XFS_BUF_ISASYNC(x)      ((x)->pb_flags & PBF_ASYNC)
-
-#define XFS_BUF_ORDERED(x)      ((x)->pb_flags |= PBF_ORDERED)
-#define XFS_BUF_UNORDERED(x)    ((x)->pb_flags &= ~PBF_ORDERED)
-#define XFS_BUF_ISORDERED(x)    ((x)->pb_flags & PBF_ORDERED)
-
-#define XFS_BUF_SHUT(x)                 printk("XFS_BUF_SHUT not implemented yet\n")
-#define XFS_BUF_UNSHUT(x)       printk("XFS_BUF_UNSHUT not implemented yet\n")
-#define XFS_BUF_ISSHUT(x)       (0)
-
-#define XFS_BUF_HOLD(x)                pagebuf_hold(x)
-#define XFS_BUF_READ(x)                ((x)->pb_flags |= PBF_READ)
-#define XFS_BUF_UNREAD(x)      ((x)->pb_flags &= ~PBF_READ)
-#define XFS_BUF_ISREAD(x)      ((x)->pb_flags & PBF_READ)
-
-#define XFS_BUF_WRITE(x)       ((x)->pb_flags |= PBF_WRITE)
-#define XFS_BUF_UNWRITE(x)     ((x)->pb_flags &= ~PBF_WRITE)
-#define XFS_BUF_ISWRITE(x)     ((x)->pb_flags & PBF_WRITE)
-
-#define XFS_BUF_ISUNINITIAL(x)  (0)
-#define XFS_BUF_UNUNINITIAL(x)  (0)
-
-#define XFS_BUF_BP_ISMAPPED(bp)         1
-
-#define XFS_BUF_IODONE_FUNC(buf)       (buf)->pb_iodone
-#define XFS_BUF_SET_IODONE_FUNC(buf, func)     \
-                       (buf)->pb_iodone = (func)
-#define XFS_BUF_CLR_IODONE_FUNC(buf)           \
-                       (buf)->pb_iodone = NULL
-#define XFS_BUF_SET_BDSTRAT_FUNC(buf, func)    \
-                       (buf)->pb_strat = (func)
-#define XFS_BUF_CLR_BDSTRAT_FUNC(buf)          \
-                       (buf)->pb_strat = NULL
-
-#define XFS_BUF_FSPRIVATE(buf, type)           \
-                       ((type)(buf)->pb_fspriv)
-#define XFS_BUF_SET_FSPRIVATE(buf, value)      \
-                       (buf)->pb_fspriv = (void *)(value)
-#define XFS_BUF_FSPRIVATE2(buf, type)          \
-                       ((type)(buf)->pb_fspriv2)
-#define XFS_BUF_SET_FSPRIVATE2(buf, value)     \
-                       (buf)->pb_fspriv2 = (void *)(value)
-#define XFS_BUF_FSPRIVATE3(buf, type)          \
-                       ((type)(buf)->pb_fspriv3)
-#define XFS_BUF_SET_FSPRIVATE3(buf, value)     \
-                       (buf)->pb_fspriv3  = (void *)(value)
-#define XFS_BUF_SET_START(buf)
-
-#define XFS_BUF_SET_BRELSE_FUNC(buf, value) \
-                       (buf)->pb_relse = (value)
-
-#define XFS_BUF_PTR(bp)                (xfs_caddr_t)((bp)->pb_addr)
-
-static inline xfs_caddr_t xfs_buf_offset(xfs_buf_t *bp, size_t offset)
-{
-       if (bp->pb_flags & PBF_MAPPED)
-               return XFS_BUF_PTR(bp) + offset;
-       return (xfs_caddr_t) pagebuf_offset(bp, offset);
-}
+#define BUF_BUSY               XBF_DONT_BLOCK
+
+#define XFS_BUF_BFLAGS(bp)     ((bp)->b_flags)
+#define XFS_BUF_ZEROFLAGS(bp)  \
+       ((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC|XBF_DELWRI))
+
+#define XFS_BUF_STALE(bp)      ((bp)->b_flags |= XFS_B_STALE)
+#define XFS_BUF_UNSTALE(bp)    ((bp)->b_flags &= ~XFS_B_STALE)
+#define XFS_BUF_ISSTALE(bp)    ((bp)->b_flags & XFS_B_STALE)
+#define XFS_BUF_SUPER_STALE(bp)        do {                            \
+                                       XFS_BUF_STALE(bp);      \
+                                       xfs_buf_delwri_dequeue(bp);     \
+                                       XFS_BUF_DONE(bp);       \
+                               } while (0)
 
-#define XFS_BUF_SET_PTR(bp, val, count)                \
-                               pagebuf_associate_memory(bp, val, count)
-#define XFS_BUF_ADDR(bp)       ((bp)->pb_bn)
-#define XFS_BUF_SET_ADDR(bp, blk)              \
-                       ((bp)->pb_bn = (xfs_daddr_t)(blk))
-#define XFS_BUF_OFFSET(bp)     ((bp)->pb_file_offset)
-#define XFS_BUF_SET_OFFSET(bp, off)            \
-                       ((bp)->pb_file_offset = (off))
-#define XFS_BUF_COUNT(bp)      ((bp)->pb_count_desired)
-#define XFS_BUF_SET_COUNT(bp, cnt)             \
-                       ((bp)->pb_count_desired = (cnt))
-#define XFS_BUF_SIZE(bp)       ((bp)->pb_buffer_length)
-#define XFS_BUF_SET_SIZE(bp, cnt)              \
-                       ((bp)->pb_buffer_length = (cnt))
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-#define XFS_BUF_ISPINNED(bp)   pagebuf_ispin(bp)
-
-#define XFS_BUF_VALUSEMA(bp)   pagebuf_lock_value(bp)
-#define XFS_BUF_CPSEMA(bp)     (pagebuf_cond_lock(bp) == 0)
-#define XFS_BUF_VSEMA(bp)      pagebuf_unlock(bp)
-#define XFS_BUF_PSEMA(bp,x)    pagebuf_lock(bp)
-#define XFS_BUF_V_IODONESEMA(bp) up(&bp->pb_iodonesema);
-
-/* setup the buffer target from a buftarg structure */
-#define XFS_BUF_SET_TARGET(bp, target) \
-               (bp)->pb_target = (target)
-#define XFS_BUF_TARGET(bp)     ((bp)->pb_target)
-#define XFS_BUFTARG_NAME(target)       \
-               pagebuf_target_name(target)
-
-#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)
-#define XFS_BUF_SET_VTYPE(bp, type)
-#define XFS_BUF_SET_REF(bp, ref)
-
-static inline int      xfs_bawrite(void *mp, xfs_buf_t *bp)
+#define XFS_BUF_MANAGE         XBF_FS_MANAGED
+#define XFS_BUF_UNMANAGE(bp)   ((bp)->b_flags &= ~XBF_FS_MANAGED)
+
+#define XFS_BUF_DELAYWRITE(bp)         ((bp)->b_flags |= XBF_DELWRI)
+#define XFS_BUF_UNDELAYWRITE(bp)       xfs_buf_delwri_dequeue(bp)
+#define XFS_BUF_ISDELAYWRITE(bp)       ((bp)->b_flags & XBF_DELWRI)
+
+#define XFS_BUF_ERROR(bp,no)   xfs_buf_ioerror(bp,no)
+#define XFS_BUF_GETERROR(bp)   xfs_buf_geterror(bp)
+#define XFS_BUF_ISERROR(bp)    (xfs_buf_geterror(bp) ? 1 : 0)
+
+#define XFS_BUF_DONE(bp)       ((bp)->b_flags |= XBF_DONE)
+#define XFS_BUF_UNDONE(bp)     ((bp)->b_flags &= ~XBF_DONE)
+#define XFS_BUF_ISDONE(bp)     ((bp)->b_flags & XBF_DONE)
+
+#define XFS_BUF_BUSY(bp)       do { } while (0)
+#define XFS_BUF_UNBUSY(bp)     do { } while (0)
+#define XFS_BUF_ISBUSY(bp)     (1)
+
+#define XFS_BUF_ASYNC(bp)      ((bp)->b_flags |= XBF_ASYNC)
+#define XFS_BUF_UNASYNC(bp)    ((bp)->b_flags &= ~XBF_ASYNC)
+#define XFS_BUF_ISASYNC(bp)    ((bp)->b_flags & XBF_ASYNC)
+
+#define XFS_BUF_ORDERED(bp)    ((bp)->b_flags |= XBF_ORDERED)
+#define XFS_BUF_UNORDERED(bp)  ((bp)->b_flags &= ~XBF_ORDERED)
+#define XFS_BUF_ISORDERED(bp)  ((bp)->b_flags & XBF_ORDERED)
+
+#define XFS_BUF_SHUT(bp)       do { } while (0)
+#define XFS_BUF_UNSHUT(bp)     do { } while (0)
+#define XFS_BUF_ISSHUT(bp)     (0)
+
+#define XFS_BUF_HOLD(bp)       xfs_buf_hold(bp)
+#define XFS_BUF_READ(bp)       ((bp)->b_flags |= XBF_READ)
+#define XFS_BUF_UNREAD(bp)     ((bp)->b_flags &= ~XBF_READ)
+#define XFS_BUF_ISREAD(bp)     ((bp)->b_flags & XBF_READ)
+
+#define XFS_BUF_WRITE(bp)      ((bp)->b_flags |= XBF_WRITE)
+#define XFS_BUF_UNWRITE(bp)    ((bp)->b_flags &= ~XBF_WRITE)
+#define XFS_BUF_ISWRITE(bp)    ((bp)->b_flags & XBF_WRITE)
+
+#define XFS_BUF_ISUNINITIAL(bp)        (0)
+#define XFS_BUF_UNUNINITIAL(bp)        (0)
+
+#define XFS_BUF_BP_ISMAPPED(bp)        (1)
+
+#define XFS_BUF_IODONE_FUNC(bp)                        ((bp)->b_iodone)
+#define XFS_BUF_SET_IODONE_FUNC(bp, func)      ((bp)->b_iodone = (func))
+#define XFS_BUF_CLR_IODONE_FUNC(bp)            ((bp)->b_iodone = NULL)
+#define XFS_BUF_SET_BDSTRAT_FUNC(bp, func)     ((bp)->b_strat = (func))
+#define XFS_BUF_CLR_BDSTRAT_FUNC(bp)           ((bp)->b_strat = NULL)
+
+#define XFS_BUF_FSPRIVATE(bp, type)            ((type)(bp)->b_fspriv)
+#define XFS_BUF_SET_FSPRIVATE(bp, val)         ((bp)->b_fspriv = (void*)(val))
+#define XFS_BUF_FSPRIVATE2(bp, type)           ((type)(bp)->b_fspriv2)
+#define XFS_BUF_SET_FSPRIVATE2(bp, val)                ((bp)->b_fspriv2 = (void*)(val))
+#define XFS_BUF_FSPRIVATE3(bp, type)           ((type)(bp)->b_fspriv3)
+#define XFS_BUF_SET_FSPRIVATE3(bp, val)                ((bp)->b_fspriv3 = (void*)(val))
+#define XFS_BUF_SET_START(bp)                  do { } while (0)
+#define XFS_BUF_SET_BRELSE_FUNC(bp, func)      ((bp)->b_relse = (func))
+
+#define XFS_BUF_PTR(bp)                        (xfs_caddr_t)((bp)->b_addr)
+#define XFS_BUF_SET_PTR(bp, val, cnt)  xfs_buf_associate_memory(bp, val, cnt)
+#define XFS_BUF_ADDR(bp)               ((bp)->b_bn)
+#define XFS_BUF_SET_ADDR(bp, bno)      ((bp)->b_bn = (xfs_daddr_t)(bno))
+#define XFS_BUF_OFFSET(bp)             ((bp)->b_file_offset)
+#define XFS_BUF_SET_OFFSET(bp, off)    ((bp)->b_file_offset = (off))
+#define XFS_BUF_COUNT(bp)              ((bp)->b_count_desired)
+#define XFS_BUF_SET_COUNT(bp, cnt)     ((bp)->b_count_desired = (cnt))
+#define XFS_BUF_SIZE(bp)               ((bp)->b_buffer_length)
+#define XFS_BUF_SET_SIZE(bp, cnt)      ((bp)->b_buffer_length = (cnt))
+
+#define XFS_BUF_SET_VTYPE_REF(bp, type, ref)   do { } while (0)
+#define XFS_BUF_SET_VTYPE(bp, type)            do { } while (0)
+#define XFS_BUF_SET_REF(bp, ref)               do { } while (0)
+
+#define XFS_BUF_ISPINNED(bp)   xfs_buf_ispin(bp)
+
+#define XFS_BUF_VALUSEMA(bp)   xfs_buf_lock_value(bp)
+#define XFS_BUF_CPSEMA(bp)     (xfs_buf_cond_lock(bp) == 0)
+#define XFS_BUF_VSEMA(bp)      xfs_buf_unlock(bp)
+#define XFS_BUF_PSEMA(bp,x)    xfs_buf_lock(bp)
+#define XFS_BUF_V_IODONESEMA(bp) up(&bp->b_iodonesema);
+
+#define XFS_BUF_SET_TARGET(bp, target) ((bp)->b_target = (target))
+#define XFS_BUF_TARGET(bp)             ((bp)->b_target)
+#define XFS_BUFTARG_NAME(target)       xfs_buf_target_name(target)
+
+static inline int xfs_bawrite(void *mp, xfs_buf_t *bp)
 {
-       bp->pb_fspriv3 = mp;
-       bp->pb_strat = xfs_bdstrat_cb;
-       pagebuf_delwri_dequeue(bp);
-       return pagebuf_iostart(bp, PBF_WRITE | PBF_ASYNC | _PBF_RUN_QUEUES);
+       bp->b_fspriv3 = mp;
+       bp->b_strat = xfs_bdstrat_cb;
+       xfs_buf_delwri_dequeue(bp);
+       return xfs_buf_iostart(bp, XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
 }
 
-static inline void     xfs_buf_relse(xfs_buf_t *bp)
+static inline void xfs_buf_relse(xfs_buf_t *bp)
 {
-       if (!bp->pb_relse)
-               pagebuf_unlock(bp);
-       pagebuf_rele(bp);
+       if (!bp->b_relse)
+               xfs_buf_unlock(bp);
+       xfs_buf_rele(bp);
 }
 
-#define xfs_bpin(bp)           pagebuf_pin(bp)
-#define xfs_bunpin(bp)         pagebuf_unpin(bp)
+#define xfs_bpin(bp)           xfs_buf_pin(bp)
+#define xfs_bunpin(bp)         xfs_buf_unpin(bp)
 
 #define xfs_buftrace(id, bp)   \
-           pagebuf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
+           xfs_buf_trace(bp, id, NULL, (void *)__builtin_return_address(0))
 
-#define xfs_biodone(pb)                    \
-           pagebuf_iodone(pb, 0)
+#define xfs_biodone(bp)                xfs_buf_ioend(bp, 0)
 
-#define xfs_biomove(pb, off, len, data, rw) \
-           pagebuf_iomove((pb), (off), (len), (data), \
-               ((rw) == XFS_B_WRITE) ? PBRW_WRITE : PBRW_READ)
+#define xfs_biomove(bp, off, len, data, rw) \
+           xfs_buf_iomove((bp), (off), (len), (data), \
+               ((rw) == XFS_B_WRITE) ? XBRW_WRITE : XBRW_READ)
 
-#define xfs_biozero(pb, off, len) \
-           pagebuf_iomove((pb), (off), (len), NULL, PBRW_ZERO)
+#define xfs_biozero(bp, off, len) \
+           xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
 
 
-static inline int      XFS_bwrite(xfs_buf_t *pb)
+static inline int XFS_bwrite(xfs_buf_t *bp)
 {
-       int     iowait = (pb->pb_flags & PBF_ASYNC) == 0;
+       int     iowait = (bp->b_flags & XBF_ASYNC) == 0;
        int     error = 0;
 
        if (!iowait)
-               pb->pb_flags |= _PBF_RUN_QUEUES;
+               bp->b_flags |= _XBF_RUN_QUEUES;
 
-       pagebuf_delwri_dequeue(pb);
-       pagebuf_iostrategy(pb);
+       xfs_buf_delwri_dequeue(bp);
+       xfs_buf_iostrategy(bp);
        if (iowait) {
-               error = pagebuf_iowait(pb);
-               xfs_buf_relse(pb);
+               error = xfs_buf_iowait(bp);
+               xfs_buf_relse(bp);
        }
        return error;
 }
 
-#define XFS_bdwrite(pb)                     \
-           pagebuf_iostart(pb, PBF_DELWRI | PBF_ASYNC)
+#define XFS_bdwrite(bp)                xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC)
 
 static inline int xfs_bdwrite(void *mp, xfs_buf_t *bp)
 {
-       bp->pb_strat = xfs_bdstrat_cb;
-       bp->pb_fspriv3 = mp;
-
-       return pagebuf_iostart(bp, PBF_DELWRI | PBF_ASYNC);
+       bp->b_strat = xfs_bdstrat_cb;
+       bp->b_fspriv3 = mp;
+       return xfs_buf_iostart(bp, XBF_DELWRI | XBF_ASYNC);
 }
 
-#define XFS_bdstrat(bp) pagebuf_iorequest(bp)
+#define XFS_bdstrat(bp) xfs_buf_iorequest(bp)
 
-#define xfs_iowait(pb) pagebuf_iowait(pb)
+#define xfs_iowait(bp) xfs_buf_iowait(bp)
 
 #define xfs_baread(target, rablkno, ralen)  \
-       pagebuf_readahead((target), (rablkno), (ralen), PBF_DONT_BLOCK)
-
-#define xfs_buf_get_empty(len, target) pagebuf_get_empty((len), (target))
-#define xfs_buf_get_noaddr(len, target)        pagebuf_get_no_daddr((len), (target))
-#define xfs_buf_free(bp)               pagebuf_free(bp)
+       xfs_buf_readahead((target), (rablkno), (ralen), XBF_DONT_BLOCK)
 
 
 /*
  *     Handling of buftargs.
  */
-
 extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
 extern void xfs_free_buftarg(xfs_buftarg_t *, int);
 extern void xfs_wait_buftarg(xfs_buftarg_t *);
 extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
 extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
 
-#define xfs_getsize_buftarg(buftarg) \
-       block_size((buftarg)->pbr_bdev)
-#define xfs_readonly_buftarg(buftarg) \
-       bdev_read_only((buftarg)->pbr_bdev)
-#define xfs_binval(buftarg) \
-       xfs_flush_buftarg(buftarg, 1)
-#define XFS_bflush(buftarg) \
-       xfs_flush_buftarg(buftarg, 1)
+#define xfs_getsize_buftarg(buftarg)   block_size((buftarg)->bt_bdev)
+#define xfs_readonly_buftarg(buftarg)  bdev_read_only((buftarg)->bt_bdev)
+
+#define xfs_binval(buftarg)            xfs_flush_buftarg(buftarg, 1)
+#define XFS_bflush(buftarg)            xfs_flush_buftarg(buftarg, 1)
 
 #endif /* __XFS_BUF_H__ */
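To make the rename concrete, here is a minimal, purely illustrative read of a metadata buffer with the new names (mp, blkno and len are placeholders, and note that the xfs_buf_read() wrapper above supplies XBF_LOCK | XBF_MAPPED itself):

	xfs_buf_t	*bp;
	int		error;

	bp = xfs_buf_read(mp->m_ddev_targp, blkno, len, 0);
	if (!bp)
		return XFS_ERROR(ENOMEM);
	error = XFS_BUF_GETERROR(bp);
	if (error) {
		xfs_buf_relse(bp);		/* unlock and drop the hold */
		return error;
	}
	/* contents are at XFS_BUF_PTR(bp) for XBF_MAPPED buffers */
	xfs_buf_relse(bp);

The XFS_BUF_* and XFS_B_* compatibility macros keep the rest of XFS building unchanged while the underlying fields move from pb_* to b_*.
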
index 06111d0bbae4b2fff01bfd220092c4d396aa9f5b..ced4404339c7efd214255aed73ba6110d33d0eaf 100644 (file)
@@ -509,16 +509,14 @@ linvfs_open_exec(
        vnode_t         *vp = LINVFS_GET_VP(inode);
        xfs_mount_t     *mp = XFS_VFSTOM(vp->v_vfsp);
        int             error = 0;
-       bhv_desc_t      *bdp;
        xfs_inode_t     *ip;
 
        if (vp->v_vfsp->vfs_flag & VFS_DMI) {
-               bdp = vn_bhv_lookup(VN_BHV_HEAD(vp), &xfs_vnodeops);
-               if (!bdp) {
+               ip = xfs_vtoi(vp);
+               if (!ip) {
                        error = -EINVAL;
                        goto open_exec_out;
                }
-               ip = XFS_BHVTOI(bdp);
                if (DM_EVENT_ENABLED(vp->v_vfsp, ip, DM_EVENT_READ)) {
                        error = -XFS_SEND_DATA(mp, DM_EVENT_READ, vp,
                                               0, 0, 0, NULL);
index 21667ba6dcd590c6a3a726f27184f948578c4dc0..4db47790415c86f5fb043bf866f20a665a54e2b6 100644 (file)
@@ -146,13 +146,10 @@ xfs_find_handle(
 
        if (cmd != XFS_IOC_PATH_TO_FSHANDLE) {
                xfs_inode_t     *ip;
-               bhv_desc_t      *bhv;
                int             lock_mode;
 
                /* need to get access to the xfs_inode to read the generation */
-               bhv = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
-               ASSERT(bhv);
-               ip = XFS_BHVTOI(bhv);
+               ip = xfs_vtoi(vp);
                ASSERT(ip);
                lock_mode = xfs_ilock_map_shared(ip);
 
@@ -751,9 +748,8 @@ xfs_ioctl(
                        (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                        mp->m_rtdev_targp : mp->m_ddev_targp;
 
-               da.d_mem = da.d_miniosz = 1 << target->pbr_sshift;
-               /* The size dio will do in one go */
-               da.d_maxiosz = 64 * PAGE_CACHE_SIZE;
+               da.d_mem = da.d_miniosz = 1 << target->bt_sshift;
+               da.d_maxiosz = INT_MAX & ~(da.d_miniosz - 1);
 
                if (copy_to_user(arg, &da, sizeof(da)))
                        return -XFS_ERROR(EFAULT);
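The XFS_IOC_DIOINFO geometry changes meaning here: d_maxiosz is no longer a fixed 64 pages but the largest int-sized multiple of the device sector size. A worked example, assuming a 512-byte sector target (bt_sshift == 9) and 4k pages for the old value:

	da.d_mem = da.d_miniosz = 1 << 9;	/* 512 bytes */
	da.d_maxiosz = INT_MAX & ~(512 - 1);	/* 0x7ffffe00 = 2147483136 bytes,
						 * the largest int multiple of 512;
						 * previously 64 * 4096 = 256k */
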
index 9b8ee3470ecc6448b6527db7de1f4aba10dec775..4bd3d03b23edf6c0ba22bda785336ad0a2e54268 100644 (file)
 #include <linux/capability.h>
 #include <linux/xattr.h>
 #include <linux/namei.h>
+#include <linux/security.h>
 
 #define IS_NOATIME(inode) ((inode->i_sb->s_flags & MS_NOATIME) ||      \
        (S_ISDIR(inode->i_mode) && inode->i_sb->s_flags & MS_NODIRATIME))
 
+/*
+ * Get an XFS inode from a given vnode.
+ */
+xfs_inode_t *
+xfs_vtoi(
+       struct vnode    *vp)
+{
+       bhv_desc_t      *bdp;
+
+       bdp = bhv_lookup_range(VN_BHV_HEAD(vp),
+                       VNODE_POSITION_XFS, VNODE_POSITION_XFS);
+       if (unlikely(bdp == NULL))
+               return NULL;
+       return XFS_BHVTOI(bdp);
+}
+
+/*
+ * Bring the atime in the XFS inode uptodate.
+ * Used before logging the inode to disk or when the Linux inode goes away.
+ */
+void
+xfs_synchronize_atime(
+       xfs_inode_t     *ip)
+{
+       vnode_t         *vp;
+
+       vp = XFS_ITOV_NULL(ip);
+       if (vp) {
+               struct inode *inode = &vp->v_inode;
+               ip->i_d.di_atime.t_sec = (__int32_t)inode->i_atime.tv_sec;
+               ip->i_d.di_atime.t_nsec = (__int32_t)inode->i_atime.tv_nsec;
+       }
+}
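xfs_synchronize_atime() exists because read-side atime updates are left to the generic VFS from here on (the XFS_ICHGTIME_ACC paths are removed in the hunks below); di_atime only needs to be refreshed from the Linux inode at the points where the on-disk value actually matters. A hypothetical call site, for illustration only, since the real ones (e.g. the inode flush path) are outside this excerpt:

	/* before the inode core is logged or written back */
	xfs_synchronize_atime(ip);
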
+
 /*
  * Change the requested timestamp in the given inode.
  * We don't lock across timestamp updates, and we don't log them but
@@ -77,23 +112,6 @@ xfs_ichgtime(
        struct inode    *inode = LINVFS_GET_IP(XFS_ITOV(ip));
        timespec_t      tv;
 
-       /*
-        * We're not supposed to change timestamps in readonly-mounted
-        * filesystems.  Throw it away if anyone asks us.
-        */
-       if (unlikely(IS_RDONLY(inode)))
-               return;
-
-       /*
-        * Don't update access timestamps on reads if mounted "noatime".
-        * Throw it away if anyone asks us.
-        */
-       if (unlikely(
-           (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-           (flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-                       XFS_ICHGTIME_ACC))
-               return;
-
        nanotime(&tv);
        if (flags & XFS_ICHGTIME_MOD) {
                inode->i_mtime = tv;
@@ -130,8 +148,6 @@ xfs_ichgtime(
  * Variant on the above which avoids querying the system clock
  * in situations where we know the Linux inode timestamps have
  * just been updated (and so we can update our inode cheaply).
- * We also skip the readonly and noatime checks here, they are
- * also catered for already.
  */
 void
 xfs_ichgtime_fast(
@@ -142,20 +158,16 @@ xfs_ichgtime_fast(
        timespec_t      *tvp;
 
        /*
-        * We're not supposed to change timestamps in readonly-mounted
-        * filesystems.  Throw it away if anyone asks us.
+        * Atime updates for read() & friends are handled lazily now, and
+        * explicit updates must go through xfs_ichgtime()
         */
-       if (unlikely(IS_RDONLY(inode)))
-               return;
+       ASSERT((flags & XFS_ICHGTIME_ACC) == 0);
 
        /*
-        * Don't update access timestamps on reads if mounted "noatime".
-        * Throw it away if anyone asks us.
+        * We're not supposed to change timestamps in readonly-mounted
+        * filesystems.  Throw it away if anyone asks us.
         */
-       if (unlikely(
-           (ip->i_mount->m_flags & XFS_MOUNT_NOATIME || IS_NOATIME(inode)) &&
-           ((flags & (XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD|XFS_ICHGTIME_CHG)) ==
-                       XFS_ICHGTIME_ACC)))
+       if (unlikely(IS_RDONLY(inode)))
                return;
 
        if (flags & XFS_ICHGTIME_MOD) {
@@ -163,11 +175,6 @@ xfs_ichgtime_fast(
                ip->i_d.di_mtime.t_sec = (__int32_t)tvp->tv_sec;
                ip->i_d.di_mtime.t_nsec = (__int32_t)tvp->tv_nsec;
        }
-       if (flags & XFS_ICHGTIME_ACC) {
-               tvp = &inode->i_atime;
-               ip->i_d.di_atime.t_sec = (__int32_t)tvp->tv_sec;
-               ip->i_d.di_atime.t_nsec = (__int32_t)tvp->tv_nsec;
-       }
        if (flags & XFS_ICHGTIME_CHG) {
                tvp = &inode->i_ctime;
                ip->i_d.di_ctime.t_sec = (__int32_t)tvp->tv_sec;
@@ -213,6 +220,39 @@ validate_fields(
        }
 }
 
+/*
+ * Hook in SELinux.  This is not quite correct yet; what we really need
+ * here (as we do for default ACLs) is a mechanism by which creation of
+ * these attrs can be journalled at inode creation time (along with the
+ * inode, of course, such that log replay can't cause these to be lost).
+ */
+STATIC int
+linvfs_init_security(
+       struct vnode    *vp,
+       struct inode    *dir)
+{
+       struct inode    *ip = LINVFS_GET_IP(vp);
+       size_t          length;
+       void            *value;
+       char            *name;
+       int             error;
+
+       error = security_inode_init_security(ip, dir, &name, &value, &length);
+       if (error) {
+               if (error == -EOPNOTSUPP)
+                       return 0;
+               return -error;
+       }
+
+       VOP_ATTR_SET(vp, name, value, length, ATTR_SECURE, NULL, error);
+       if (!error)
+               VMODIFY(vp);
+
+       kfree(name);
+       kfree(value);
+       return error;
+}
+
 /*
  * Determine whether a process has a valid fs_struct (kernel daemons
  * like knfsd don't have an fs_struct).
@@ -278,6 +318,9 @@ linvfs_mknod(
                break;
        }
 
+       if (!error)
+               error = linvfs_init_security(vp, dir);
+
        if (default_acl) {
                if (!error) {
                        error = _ACL_INHERIT(vp, &va, default_acl);
@@ -294,8 +337,6 @@ linvfs_mknod(
                                teardown.d_inode = ip = LINVFS_GET_IP(vp);
                                teardown.d_name = dentry->d_name;
 
-                               vn_mark_bad(vp);
-                               
                                if (S_ISDIR(mode))
                                        VOP_RMDIR(dvp, &teardown, NULL, err2);
                                else
@@ -506,7 +547,7 @@ linvfs_follow_link(
        ASSERT(dentry);
        ASSERT(nd);
 
-       link = (char *)kmalloc(MAXNAMELEN+1, GFP_KERNEL);
+       link = (char *)kmalloc(MAXPATHLEN+1, GFP_KERNEL);
        if (!link) {
                nd_set_link(nd, ERR_PTR(-ENOMEM));
                return NULL;
@@ -522,12 +563,12 @@ linvfs_follow_link(
        vp = LINVFS_GET_VP(dentry->d_inode);
 
        iov.iov_base = link;
-       iov.iov_len = MAXNAMELEN;
+       iov.iov_len = MAXPATHLEN;
 
        uio->uio_iov = &iov;
        uio->uio_offset = 0;
        uio->uio_segflg = UIO_SYSSPACE;
-       uio->uio_resid = MAXNAMELEN;
+       uio->uio_resid = MAXPATHLEN;
        uio->uio_iovcnt = 1;
 
        VOP_READLINK(vp, uio, 0, NULL, error);
@@ -535,7 +576,7 @@ linvfs_follow_link(
                kfree(link);
                link = ERR_PTR(-error);
        } else {
-               link[MAXNAMELEN - uio->uio_resid] = '\0';
+               link[MAXPATHLEN - uio->uio_resid] = '\0';
        }
        kfree(uio);
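
The switch from MAXNAMELEN to MAXPATHLEN reflects that a symlink target is a
full path, not a single name component, so the read buffer has to be sized for
a path and NUL-terminated by hand from the residual count.  A userspace
analogue of the same buffer-sizing rule, using readlink(2) and PATH_MAX, might
look like this:

#include <limits.h>
#include <stdio.h>
#include <unistd.h>

/* print the target of a symbolic link; returns 0 on success, -1 on error */
static int print_link_target(const char *path)
{
        char buf[PATH_MAX + 1];
        ssize_t n;

        n = readlink(path, buf, PATH_MAX);
        if (n < 0)
                return -1;
        buf[n] = '\0';          /* readlink() does not NUL-terminate */
        printf("%s -> %s\n", path, buf);
        return 0;
}
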
 
index ee784b63acbfcca64d36918359bad96651d02e51..6899a6b4a50acb0fc7428b209bf397f7291b8c64 100644 (file)
@@ -26,11 +26,6 @@ extern struct file_operations linvfs_file_operations;
 extern struct file_operations linvfs_invis_file_operations;
 extern struct file_operations linvfs_dir_operations;
 
-extern struct address_space_operations linvfs_aops;
-
-extern int linvfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
-extern void linvfs_unwritten_done(struct buffer_head *, int);
-
 extern int xfs_ioctl(struct bhv_desc *, struct inode *, struct file *,
                         int, unsigned int, void __user *);
 
index d8e21ba0cccc4df561857cf82bb1d2c0c48c27c2..67389b745526d1e5e4928738623bcbccf83e44ea 100644 (file)
  * delalloc and these ondisk-uninitialised buffers.
  */
 BUFFER_FNS(PrivateStart, unwritten);
-static inline void set_buffer_unwritten_io(struct buffer_head *bh)
-{
-       bh->b_end_io = linvfs_unwritten_done;
-}
 
 #define restricted_chown       xfs_params.restrict_chown.val
 #define irix_sgid_inherit      xfs_params.sgid_inherit.val
@@ -232,7 +228,7 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_itruncate_data(ip, off)    \
        (-vmtruncate(LINVFS_GET_IP(XFS_ITOV(ip)), (off)))
 #define xfs_statvfs_fsid(statp, mp)    \
-       ({ u64 id = huge_encode_dev((mp)->m_dev);       \
+       ({ u64 id = huge_encode_dev((mp)->m_ddev_targp->bt_dev); \
           __kernel_fsid_t *fsid = &(statp)->f_fsid;    \
        (fsid->val[0] = (u32)id, fsid->val[1] = (u32)(id >> 32)); })
 
index 885dfafeabeee6fb6359c167d203091476e741a9..e0ab45fbfebd4e0cbe6fbc2b5c992ae9deeb952b 100644 (file)
@@ -233,8 +233,8 @@ xfs_read(
                xfs_buftarg_t   *target =
                        (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
-               if ((*offset & target->pbr_smask) ||
-                   (size & target->pbr_smask)) {
+               if ((*offset & target->bt_smask) ||
+                   (size & target->bt_smask)) {
                        if (*offset == ip->i_d.di_size) {
                                return (0);
                        }
@@ -281,9 +281,6 @@ xfs_read(
 
        xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
-       if (likely(!(ioflags & IO_INVIS)))
-               xfs_ichgtime_fast(ip, inode, XFS_ICHGTIME_ACC);
-
 unlock_isem:
        if (unlikely(ioflags & IO_ISDIRECT))
                mutex_unlock(&inode->i_mutex);
@@ -346,9 +343,6 @@ xfs_sendfile(
        if (ret > 0)
                XFS_STATS_ADD(xs_read_bytes, ret);
 
-       if (likely(!(ioflags & IO_INVIS)))
-               xfs_ichgtime_fast(ip, LINVFS_GET_IP(vp), XFS_ICHGTIME_ACC);
-
        return ret;
 }
 
@@ -362,7 +356,6 @@ STATIC int                          /* error (positive) */
 xfs_zero_last_block(
        struct inode    *ip,
        xfs_iocore_t    *io,
-       xfs_off_t       offset,
        xfs_fsize_t     isize,
        xfs_fsize_t     end_size)
 {
@@ -371,19 +364,16 @@ xfs_zero_last_block(
        int             nimaps;
        int             zero_offset;
        int             zero_len;
-       int             isize_fsb_offset;
        int             error = 0;
        xfs_bmbt_irec_t imap;
        loff_t          loff;
-       size_t          lsize;
 
        ASSERT(ismrlocked(io->io_lock, MR_UPDATE) != 0);
-       ASSERT(offset > isize);
 
        mp = io->io_mount;
 
-       isize_fsb_offset = XFS_B_FSB_OFFSET(mp, isize);
-       if (isize_fsb_offset == 0) {
+       zero_offset = XFS_B_FSB_OFFSET(mp, isize);
+       if (zero_offset == 0) {
                /*
                 * There are no extra bytes in the last block on disk to
                 * zero, so return.
@@ -413,10 +403,8 @@ xfs_zero_last_block(
         */
        XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL| XFS_EXTSIZE_RD);
        loff = XFS_FSB_TO_B(mp, last_fsb);
-       lsize = XFS_FSB_TO_B(mp, 1);
 
-       zero_offset = isize_fsb_offset;
-       zero_len = mp->m_sb.sb_blocksize - isize_fsb_offset;
+       zero_len = mp->m_sb.sb_blocksize - zero_offset;
 
        error = xfs_iozero(ip, loff + zero_offset, zero_len, end_size);
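
The simplified code above boils down to computing where the old end-of-file
sits inside its filesystem block (XFS_B_FSB_OFFSET()) and zeroing from there
to the end of that block.  A standalone sketch of just that arithmetic, with a
hypothetical 4 KiB block size:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t blocksize = 4096;        /* example fs block size */
        const uint64_t isize = 10000;           /* example old end-of-file */
        uint64_t zero_offset, zero_len;

        zero_offset = isize % blocksize;        /* cf. XFS_B_FSB_OFFSET() */
        if (zero_offset == 0) {
                printf("EOF is block aligned, nothing to zero\n");
                return 0;
        }
        zero_len = blocksize - zero_offset;

        /* here: zero 2288 bytes starting at the old EOF (offset 10000) */
        printf("zero %llu bytes starting at offset %llu\n",
               (unsigned long long)zero_len, (unsigned long long)isize);
        return 0;
}
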
 
@@ -447,20 +435,17 @@ xfs_zero_eof(
        struct inode    *ip = LINVFS_GET_IP(vp);
        xfs_fileoff_t   start_zero_fsb;
        xfs_fileoff_t   end_zero_fsb;
-       xfs_fileoff_t   prev_zero_fsb;
        xfs_fileoff_t   zero_count_fsb;
        xfs_fileoff_t   last_fsb;
        xfs_extlen_t    buf_len_fsb;
-       xfs_extlen_t    prev_zero_count;
        xfs_mount_t     *mp;
        int             nimaps;
        int             error = 0;
        xfs_bmbt_irec_t imap;
-       loff_t          loff;
-       size_t          lsize;
 
        ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
        ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
+       ASSERT(offset > isize);
 
        mp = io->io_mount;
 
@@ -468,7 +453,7 @@ xfs_zero_eof(
         * First handle zeroing the block on which isize resides.
         * We only zero a part of that block so it is handled specially.
         */
-       error = xfs_zero_last_block(ip, io, offset, isize, end_size);
+       error = xfs_zero_last_block(ip, io, isize, end_size);
        if (error) {
                ASSERT(ismrlocked(io->io_lock, MR_UPDATE));
                ASSERT(ismrlocked(io->io_iolock, MR_UPDATE));
@@ -496,8 +481,6 @@ xfs_zero_eof(
        }
 
        ASSERT(start_zero_fsb <= end_zero_fsb);
-       prev_zero_fsb = NULLFILEOFF;
-       prev_zero_count = 0;
        while (start_zero_fsb <= end_zero_fsb) {
                nimaps = 1;
                zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
@@ -519,10 +502,7 @@ xfs_zero_eof(
                         * that sits on a hole and sets the page as P_HOLE
                         * and calls remapf if it is a mapped file.
                         */
-                       prev_zero_fsb = NULLFILEOFF;
-                       prev_zero_count = 0;
-                       start_zero_fsb = imap.br_startoff +
-                                        imap.br_blockcount;
+                       start_zero_fsb = imap.br_startoff + imap.br_blockcount;
                        ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
                        continue;
                }
@@ -543,17 +523,15 @@ xfs_zero_eof(
                 */
                XFS_IUNLOCK(mp, io, XFS_ILOCK_EXCL|XFS_EXTSIZE_RD);
 
-               loff = XFS_FSB_TO_B(mp, start_zero_fsb);
-               lsize = XFS_FSB_TO_B(mp, buf_len_fsb);
-
-               error = xfs_iozero(ip, loff, lsize, end_size);
+               error = xfs_iozero(ip,
+                                  XFS_FSB_TO_B(mp, start_zero_fsb),
+                                  XFS_FSB_TO_B(mp, buf_len_fsb),
+                                  end_size);
 
                if (error) {
                        goto out_lock;
                }
 
-               prev_zero_fsb = start_zero_fsb;
-               prev_zero_count = buf_len_fsb;
                start_zero_fsb = imap.br_startoff + buf_len_fsb;
                ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
 
@@ -640,7 +618,7 @@ xfs_write(
                        (xip->i_d.di_flags & XFS_DIFLAG_REALTIME) ?
                                mp->m_rtdev_targp : mp->m_ddev_targp;
 
-               if ((pos & target->pbr_smask) || (count & target->pbr_smask))
+               if ((pos & target->bt_smask) || (count & target->bt_smask))
                        return XFS_ERROR(-EINVAL);
 
                if (!VN_CACHED(vp) && pos < i_size_read(inode))
@@ -831,6 +809,10 @@ retry:
                goto retry;
        }
 
+       isize = i_size_read(inode);
+       if (unlikely(ret < 0 && ret != -EFAULT && *offset > isize))
+               *offset = isize;
+
        if (*offset > xip->i_d.di_size) {
                xfs_ilock(xip, XFS_ILOCK_EXCL);
                if (*offset > xip->i_d.di_size) {
@@ -956,7 +938,7 @@ xfs_bdstrat_cb(struct xfs_buf *bp)
 
        mp = XFS_BUF_FSPRIVATE3(bp, xfs_mount_t *);
        if (!XFS_FORCED_SHUTDOWN(mp)) {
-               pagebuf_iorequest(bp);
+               xfs_buf_iorequest(bp);
                return 0;
        } else {
                xfs_buftrace("XFS__BDSTRAT IOERROR", bp);
@@ -1009,7 +991,7 @@ xfsbdstrat(
                 * if (XFS_BUF_IS_GRIO(bp)) {
                 */
 
-               pagebuf_iorequest(bp);
+               xfs_buf_iorequest(bp);
                return 0;
        }
 
index 6c40a74be7c8d5f3eb206782412c3c36cea549e6..8955720a2c6b0685bde93bdfeff24e1006355771 100644 (file)
@@ -34,7 +34,7 @@ xfs_read_xfsstats(
        __uint64_t      xs_write_bytes = 0;
        __uint64_t      xs_read_bytes = 0;
 
-       static struct xstats_entry {
+       static const struct xstats_entry {
                char    *desc;
                int     endpoint;
        } xstats[] = {
index 50027c4a561867a6771d2eed31cdc0517090cda3..8ba7a2fa6c1d643fb5d35afa2e3605dadd680ef8 100644 (file)
@@ -109,15 +109,15 @@ struct xfsstats {
        __uint32_t              vn_remove;      /* # times vn_remove called */
        __uint32_t              vn_free;        /* # times vn_free called */
 #define XFSSTAT_END_BUF                        (XFSSTAT_END_VNODE_OPS+9)
-       __uint32_t              pb_get;
-       __uint32_t              pb_create;
-       __uint32_t              pb_get_locked;
-       __uint32_t              pb_get_locked_waited;
-       __uint32_t              pb_busy_locked;
-       __uint32_t              pb_miss_locked;
-       __uint32_t              pb_page_retries;
-       __uint32_t              pb_page_found;
-       __uint32_t              pb_get_read;
+       __uint32_t              xb_get;
+       __uint32_t              xb_create;
+       __uint32_t              xb_get_locked;
+       __uint32_t              xb_get_locked_waited;
+       __uint32_t              xb_busy_locked;
+       __uint32_t              xb_miss_locked;
+       __uint32_t              xb_page_retries;
+       __uint32_t              xb_page_found;
+       __uint32_t              xb_get_read;
 /* Extra precision counters */
        __uint64_t              xs_xstrat_bytes;
        __uint64_t              xs_write_bytes;
index 6116b5bf433ef772386bb7513f0e0ea91dc5335a..f22e426d9e4293731be1bdecf43c015c88e7bbdd 100644 (file)
@@ -306,13 +306,15 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported with external log device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
+               return;
        }
 
-       if (mp->m_ddev_targp->pbr_bdev->bd_disk->queue->ordered ==
+       if (mp->m_ddev_targp->bt_bdev->bd_disk->queue->ordered ==
                                        QUEUE_ORDERED_NONE) {
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, not supported by the underlying device");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
+               return;
        }
 
        error = xfs_barrier_test(mp);
@@ -320,6 +322,7 @@ xfs_mountfs_check_barriers(xfs_mount_t *mp)
                xfs_fs_cmn_err(CE_NOTE, mp,
                  "Disabling barriers, trial barrier write failed");
                mp->m_flags &= ~XFS_MOUNT_BARRIER;
+               return;
        }
 }
 
@@ -327,7 +330,7 @@ void
 xfs_blkdev_issue_flush(
        xfs_buftarg_t           *buftarg)
 {
-       blkdev_issue_flush(buftarg->pbr_bdev, NULL);
+       blkdev_issue_flush(buftarg->bt_bdev, NULL);
 }
 
 STATIC struct inode *
@@ -576,7 +579,7 @@ xfssyncd(
                timeleft = schedule_timeout_interruptible(timeleft);
                /* swsusp */
                try_to_freeze();
-               if (kthread_should_stop())
+               if (kthread_should_stop() && list_empty(&vfsp->vfs_sync_list))
                        break;
 
                spin_lock(&vfsp->vfs_sync_lock);
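
The added list_empty() test makes the sync daemon drain its queued work before
honouring a stop request instead of exiting with items still pending.  A
minimal sketch of that drain-before-stop kthread shape, with a hypothetical
work list and handler:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static LIST_HEAD(work_list);                    /* hypothetical work queue */
static DEFINE_SPINLOCK(work_lock);

/* hypothetical: handle one queued item */
static void process_one(struct list_head *item)
{
        /* ... filesystem-specific work ... */
}

static int my_syncd(void *arg)
{
        for (;;) {
                schedule_timeout_interruptible(HZ);

                /* exit only once a stop was requested AND the list is empty */
                if (kthread_should_stop() && list_empty(&work_list))
                        break;

                spin_lock(&work_lock);
                while (!list_empty(&work_list)) {
                        struct list_head *item = work_list.next;

                        list_del(item);
                        spin_unlock(&work_lock);
                        process_one(item);
                        spin_lock(&work_lock);
                }
                spin_unlock(&work_lock);
        }
        return 0;
}
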
@@ -966,9 +969,9 @@ init_xfs_fs( void )
        if (error < 0)
                goto undo_zones;
 
-       error = pagebuf_init();
+       error = xfs_buf_init();
        if (error < 0)
-               goto undo_pagebuf;
+               goto undo_buffers;
 
        vn_init();
        xfs_init();
@@ -982,9 +985,9 @@ init_xfs_fs( void )
        return 0;
 
 undo_register:
-       pagebuf_terminate();
+       xfs_buf_terminate();
 
-undo_pagebuf:
+undo_buffers:
        linvfs_destroy_zones();
 
 undo_zones:
@@ -998,7 +1001,7 @@ exit_xfs_fs( void )
        XFS_DM_EXIT(&xfs_fs_type);
        unregister_filesystem(&xfs_fs_type);
        xfs_cleanup();
-       pagebuf_terminate();
+       xfs_buf_terminate();
        linvfs_destroy_zones();
        ktrace_uninit();
 }
index e9bbcb4d62430cd98c8c9de443b8a9217896ac71..260dd8415dd7dc78716895340fa4e0b76b1ae058 100644 (file)
@@ -106,7 +106,6 @@ vn_revalidate_core(
        inode->i_blocks     = vap->va_nblocks;
        inode->i_mtime      = vap->va_mtime;
        inode->i_ctime      = vap->va_ctime;
-       inode->i_atime      = vap->va_atime;
        inode->i_blksize    = vap->va_blocksize;
        if (vap->va_xflags & XFS_XFLAG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
index f2bbb327c08147c9d5e5e6caa55f39a2891fcc44..0fe2419461d69021f3858dd2e9e2e8268f11bc98 100644 (file)
@@ -565,6 +565,25 @@ static inline int VN_BAD(struct vnode *vp)
        return is_bad_inode(LINVFS_GET_IP(vp));
 }
 
+/*
+ * Extracting atime values in various formats
+ */
+static inline void vn_atime_to_bstime(struct vnode *vp, xfs_bstime_t *bs_atime)
+{
+       bs_atime->tv_sec = vp->v_inode.i_atime.tv_sec;
+       bs_atime->tv_nsec = vp->v_inode.i_atime.tv_nsec;
+}
+
+static inline void vn_atime_to_timespec(struct vnode *vp, struct timespec *ts)
+{
+       *ts = vp->v_inode.i_atime;
+}
+
+static inline void vn_atime_to_time_t(struct vnode *vp, time_t *tt)
+{
+       *tt = vp->v_inode.i_atime.tv_sec;
+}
+
 /*
  * Some useful predicates.
  */
index 2f69822344e5e501110a0c74302cf136b9dd7a58..2ec6b441849ccde4cd463b4a721142f506c3252d 100644 (file)
@@ -239,7 +239,7 @@ xfs_qm_dquot_logitem_pushbuf(
         * trying to duplicate our effort.
         */
        ASSERT(qip->qli_pushbuf_flag != 0);
-       ASSERT(qip->qli_push_owner == get_thread_id());
+       ASSERT(qip->qli_push_owner == current_pid());
 
        /*
         * If flushlock isn't locked anymore, chances are that the
@@ -333,7 +333,7 @@ xfs_qm_dquot_logitem_trylock(
                        qip->qli_pushbuf_flag = 1;
                        ASSERT(qip->qli_format.qlf_blkno == dqp->q_blkno);
 #ifdef DEBUG
-                       qip->qli_push_owner = get_thread_id();
+                       qip->qli_push_owner = current_pid();
 #endif
                        /*
                         * The dquot is left locked.
index bb6991a7a617b306c120536395a5543bb2ba7836..7dcdd0640c32ef9f6dea4ce87b960a35f5b5f518 100644 (file)
@@ -1392,11 +1392,12 @@ xfs_qm_qino_alloc(
 {
        xfs_trans_t     *tp;
        int             error;
-       unsigned long s;
+       unsigned long   s;
        cred_t          zerocr;
+       xfs_inode_t     zeroino;
        int             committed;
 
-       tp = xfs_trans_alloc(mp,XFS_TRANS_QM_QINOCREATE);
+       tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
        if ((error = xfs_trans_reserve(tp,
                                      XFS_QM_QINOCREATE_SPACE_RES(mp),
                                      XFS_CREATE_LOG_RES(mp), 0,
@@ -1406,8 +1407,9 @@ xfs_qm_qino_alloc(
                return (error);
        }
        memset(&zerocr, 0, sizeof(zerocr));
+       memset(&zeroino, 0, sizeof(zeroino));
 
-       if ((error = xfs_dir_ialloc(&tp, mp->m_rootip, S_IFREG, 1, 0,
+       if ((error = xfs_dir_ialloc(&tp, &zeroino, S_IFREG, 1, 0,
                                   &zerocr, 0, 1, ip, &committed))) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
@@ -1918,9 +1920,7 @@ xfs_qm_quotacheck(
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
-               xfs_qm_dqpurge_all(mp,
-                                  XFS_QMOPT_UQUOTA|XFS_QMOPT_GQUOTA|
-                                  XFS_QMOPT_PQUOTA|XFS_QMOPT_QUOTAOFF);
+               xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_QUOTAOFF);
                goto error_return;
        }
        /*
@@ -2743,6 +2743,7 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(udqp);
                ASSERT(ip->i_udquot == NULL);
                ip->i_udquot = udqp;
+               ASSERT(XFS_IS_UQUOTA_ON(tp->t_mountp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
@@ -2752,7 +2753,10 @@ xfs_qm_vop_dqattach_and_dqmod_newinode(
                xfs_dqunlock(gdqp);
                ASSERT(ip->i_gdquot == NULL);
                ip->i_gdquot = gdqp;
-               ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
+               ASSERT(XFS_IS_OQUOTA_ON(tp->t_mountp));
+               ASSERT((XFS_IS_GQUOTA_ON(tp->t_mountp) ?
+                       ip->i_d.di_gid : ip->i_d.di_projid) ==
+                               be32_to_cpu(gdqp->q_core.d_id));
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
 }
index bb6dc91ea261390a426f61ba958f214c34394fa6..b08b3d9345b7660972cb341fb876ac1ba77cb52c 100644 (file)
@@ -27,44 +27,11 @@ static DEFINE_SPINLOCK(xfs_err_lock);
 /* Translate from CE_FOO to KERN_FOO, err_level(CE_FOO) == KERN_FOO */
 #define XFS_MAX_ERR_LEVEL      7
 #define XFS_ERR_MASK           ((1 << 3) - 1)
-static char            *err_level[XFS_MAX_ERR_LEVEL+1] =
+static const char * const      err_level[XFS_MAX_ERR_LEVEL+1] =
                                        {KERN_EMERG, KERN_ALERT, KERN_CRIT,
                                         KERN_ERR, KERN_WARNING, KERN_NOTICE,
                                         KERN_INFO, KERN_DEBUG};
 
-void
-assfail(char *a, char *f, int l)
-{
-    printk("XFS assertion failed: %s, file: %s, line: %d\n", a, f, l);
-    BUG();
-}
-
-#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
-
-unsigned long
-random(void)
-{
-       static unsigned long    RandomValue = 1;
-       /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
-       register long   rv = RandomValue;
-       register long   lo;
-       register long   hi;
-
-       hi = rv / 127773;
-       lo = rv % 127773;
-       rv = 16807 * lo - 2836 * hi;
-       if( rv <= 0 ) rv += 2147483647;
-       return( RandomValue = rv );
-}
-
-int
-get_thread_id(void)
-{
-       return current->pid;
-}
-
-#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
-
 void
 cmn_err(register int level, char *fmt, ...)
 {
@@ -90,7 +57,6 @@ cmn_err(register int level, char *fmt, ...)
                BUG();
 }
 
-
 void
 icmn_err(register int level, char *fmt, va_list ap)
 {
@@ -109,3 +75,27 @@ icmn_err(register int level, char *fmt, va_list ap)
        if (level == CE_PANIC)
                BUG();
 }
+
+void
+assfail(char *expr, char *file, int line)
+{
+       printk("Assertion failed: %s, file: %s, line: %d\n", expr, file, line);
+       BUG();
+}
+
+#if ((defined(DEBUG) || defined(INDUCE_IO_ERRROR)) && !defined(NO_WANT_RANDOM))
+unsigned long random(void)
+{
+       static unsigned long    RandomValue = 1;
+       /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
+       register long   rv = RandomValue;
+       register long   lo;
+       register long   hi;
+
+       hi = rv / 127773;
+       lo = rv % 127773;
+       rv = 16807 * lo - 2836 * hi;
+       if (rv <= 0) rv += 2147483647;
+       return RandomValue = rv;
+}
+#endif /* DEBUG || INDUCE_IO_ERRROR || !NO_WANT_RANDOM */
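
The constants here are the classic Park-Miller "minimal standard" parameters:
the recurrence is x = 16807 * x mod (2^31 - 1), and 127773 and 2836 are
floor(m/a) and m mod a from Schrage's decomposition, which evaluates the
product without overflowing 32-bit arithmetic.  A standalone version of the
same recurrence:

#include <stdint.h>
#include <stdio.h>

/* one step of the Park-Miller generator via Schrage's decomposition */
static int32_t minstd_next(int32_t x)
{
        int32_t hi = x / 127773;                /* 127773 = floor(m / a) */
        int32_t lo = x % 127773;
        int32_t r  = 16807 * lo - 2836 * hi;    /* 2836 = m mod a */

        return r > 0 ? r : r + 2147483647;
}

int main(void)
{
        int32_t x = 1;                          /* same seed as RandomValue */
        int i;

        for (i = 0; i < 5; i++) {
                x = minstd_next(x);
                printf("%d\n", x);
        }
        return 0;
}
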
index aff558664c329747f053c6fae94988a2fa144c09..e3bf58112e7ec3492cbe4be9fd546b39a59cbbff 100644 (file)
@@ -31,24 +31,23 @@ extern void icmn_err(int, char *, va_list)
        __attribute__ ((format (printf, 2, 0)));
 extern void cmn_err(int, char *, ...)
        __attribute__ ((format (printf, 2, 3)));
+extern void assfail(char *expr, char *f, int l);
 
-#ifndef STATIC
-# define STATIC static
-#endif
+#define prdev(fmt,targ,args...) \
+       printk("Device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
 
-#ifdef DEBUG
-# define ASSERT(EX)    ((EX) ? ((void)0) : assfail(#EX, __FILE__, __LINE__))
-#else
-# define ASSERT(x)     ((void)0)
-#endif
+#define ASSERT_ALWAYS(expr)    \
+       (unlikely((expr) != 0) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
 
-extern void assfail(char *, char *, int);
-#ifdef DEBUG
+#ifndef DEBUG
+# define ASSERT(expr)  ((void)0)
+#else
+# define ASSERT(expr)  ASSERT_ALWAYS(expr)
 extern unsigned long random(void);
-extern int get_thread_id(void);
 #endif
 
-#define ASSERT_ALWAYS(EX)  ((EX)?((void)0):assfail(#EX, __FILE__, __LINE__))
-#define        debug_stop_all_cpus(param)      /* param is "cpumask_t *" */
+#ifndef STATIC
+# define STATIC static
+#endif
 
 #endif  /* __XFS_SUPPORT_DEBUG_H__ */
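
After this rework the two macros split cleanly: ASSERT_ALWAYS() is evaluated
in every build, while ASSERT() compiles away unless DEBUG is defined.  A
minimal userspace sketch of the same two-tier assertion pattern, with abort()
standing in for assfail()/BUG():

#include <stdio.h>
#include <stdlib.h>

static void my_assfail(const char *expr, const char *file, int line)
{
        fprintf(stderr, "Assertion failed: %s, file: %s, line: %d\n",
                expr, file, line);
        abort();
}

#define MY_ASSERT_ALWAYS(expr) \
        ((expr) ? (void)0 : my_assfail(#expr, __FILE__, __LINE__))

#ifdef DEBUG
# define MY_ASSERT(expr)        MY_ASSERT_ALWAYS(expr)
#else
# define MY_ASSERT(expr)        ((void)0)
#endif

int main(void)
{
        int nbytes = 512;

        MY_ASSERT(nbytes % 512 == 0);           /* checked only with -DDEBUG */
        MY_ASSERT_ALWAYS(nbytes > 0);           /* checked in every build */
        printf("ok\n");
        return 0;
}
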
index 69ec4f540c3ac737f0759848ba8bf3fd372e1ea7..a3d565a67734e0df236b42fd933d24f1a90393e9 100644 (file)
@@ -27,6 +27,16 @@ uuid_init(void)
        mutex_init(&uuid_monitor);
 }
 
+
+/* IRIX interpretation of an uuid_t */
+typedef struct {
+       __be32  uu_timelow;
+       __be16  uu_timemid;
+       __be16  uu_timehi;
+       __be16  uu_clockseq;
+       __be16  uu_node[3];
+} xfs_uu_t;
+
 /*
  * uuid_getnodeuniq - obtain the node unique fields of a UUID.
  *
@@ -36,16 +46,11 @@ uuid_init(void)
 void
 uuid_getnodeuniq(uuid_t *uuid, int fsid [2])
 {
-       char    *uu = (char *)uuid;
-
-       /* on IRIX, this function assumes big-endian fields within
-        * the uuid, so we use INT_GET to get the same result on
-        * little-endian systems
-        */
+       xfs_uu_t *uup = (xfs_uu_t *)uuid;
 
-       fsid[0] = (INT_GET(*(u_int16_t*)(uu+8), ARCH_CONVERT) << 16) +
-                  INT_GET(*(u_int16_t*)(uu+4), ARCH_CONVERT);
-       fsid[1] =  INT_GET(*(u_int32_t*)(uu  ), ARCH_CONVERT);
+       fsid[0] = (be16_to_cpu(uup->uu_clockseq) << 16) |
+                  be16_to_cpu(uup->uu_timemid);
+       fsid[1] = be16_to_cpu(uup->uu_timelow);
 }
 
 void
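
The rewrite replaces raw pointer casts plus INT_GET() with a big-endian struct
overlay whose fields are converted one by one.  A userspace analogue of the
same technique, with get_be16() as a portable stand-in for be16_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* big-endian on-disk layout, a userspace analogue of xfs_uu_t above */
struct be_uuid {
        uint32_t timelow;
        uint16_t timemid;
        uint16_t timehi;
        uint16_t clockseq;
        uint16_t node[3];
};

static uint16_t get_be16(uint16_t v)            /* stand-in for be16_to_cpu() */
{
        const unsigned char *p = (const unsigned char *)&v;

        return (uint16_t)((p[0] << 8) | p[1]);
}

int main(void)
{
        /* raw big-endian bytes of an example UUID */
        const unsigned char raw[16] = {
                0x12, 0x34, 0x56, 0x78,  0x9a, 0xbc,  0xde, 0xf0,
                0x11, 0x22,  0x33, 0x44, 0x55, 0x66, 0x77, 0x88
        };
        struct be_uuid uu;
        int fsid0;

        memcpy(&uu, raw, sizeof(uu));
        fsid0 = (get_be16(uu.clockseq) << 16) | get_be16(uu.timemid);
        printf("fsid[0] = %#x\n", fsid0);       /* prints 0x11229abc */
        return 0;
}
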
index 68e5051d8e249cbc89a8d3aec50df12548e248a1..c4836890b7268bce37384b4ed35c261c1ec8805e 100644 (file)
 #undef XFS_NATIVE_HOST
 #endif
 
+#ifdef XFS_NATIVE_HOST
+#define cpu_to_be16(val)       ((__be16)(val))
+#define cpu_to_be32(val)       ((__be32)(val))
+#define cpu_to_be64(val)       ((__be64)(val))
+#define be16_to_cpu(val)       ((__uint16_t)(val))
+#define be32_to_cpu(val)       ((__uint32_t)(val))
+#define be64_to_cpu(val)       ((__uint64_t)(val))
+#else
+#define cpu_to_be16(val)       (__swab16((__uint16_t)(val)))
+#define cpu_to_be32(val)       (__swab32((__uint32_t)(val)))
+#define cpu_to_be64(val)       (__swab64((__uint64_t)(val)))
+#define be16_to_cpu(val)       (__swab16((__be16)(val)))
+#define be32_to_cpu(val)       (__swab32((__be32)(val)))
+#define be64_to_cpu(val)       (__swab64((__be64)(val)))
+#endif
+
 #endif /* __KERNEL__ */
 
 /* do we need conversion? */
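
On a little-endian host the cpu_to_be*() macros above reduce to byte swaps,
and on a big-endian host to plain casts; either way the result stores its most
significant byte first in memory.  A userspace sketch of that invariant, with
my_cpu_to_be32() as a portable stand-in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t my_cpu_to_be32(uint32_t v)      /* stand-in for cpu_to_be32() */
{
        const unsigned char b[4] = {
                (unsigned char)(v >> 24), (unsigned char)(v >> 16),
                (unsigned char)(v >> 8),  (unsigned char)v
        };
        uint32_t out;

        memcpy(&out, b, sizeof(out));
        return out;
}

int main(void)
{
        uint32_t disk = my_cpu_to_be32(0x01020304);
        unsigned char b[4];

        memcpy(b, &disk, sizeof(b));
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
        /* prints "01 02 03 04" on both little- and big-endian hosts */
        return 0;
}
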
@@ -186,7 +202,7 @@ static inline void be64_add(__be64 *a, __s64 b)
  */ 
 
 #define XFS_GET_DIR_INO4(di) \
-       (((u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+       (((__u32)(di).i[0] << 24) | ((di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 
 #define XFS_PUT_DIR_INO4(from, di) \
 do { \
@@ -197,9 +213,9 @@ do { \
 } while (0)
 
 #define XFS_DI_HI(di) \
-       (((u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
+       (((__u32)(di).i[1] << 16) | ((di).i[2] << 8) | ((di).i[3]))
 #define XFS_DI_LO(di) \
-       (((u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
+       (((__u32)(di).i[4] << 24) | ((di).i[5] << 16) | ((di).i[6] << 8) | ((di).i[7]))
 
 #define XFS_GET_DIR_INO8(di)        \
        (((xfs_ino_t)XFS_DI_LO(di) & 0xffffffffULL) | \
index 1c7421840c1802441b5345ec766727a0f874b9eb..fe91eac4e2a7b1919228ec9f063a2946e37ec908 100644 (file)
@@ -128,7 +128,7 @@ xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes)
                return (offset >= minforkoff) ? minforkoff : 0;
        }
 
-       if (unlikely(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
                if (bytes <= XFS_IFORK_ASIZE(dp))
                        return mp->m_attroffset >> 3;
                return 0;
@@ -157,7 +157,7 @@ xfs_sbversion_add_attr2(xfs_mount_t *mp, xfs_trans_t *tp)
 {
        unsigned long s;
 
-       if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+       if ((mp->m_flags & XFS_MOUNT_ATTR2) &&
            !(XFS_SB_VERSION_HASATTR2(&mp->m_sb))) {
                s = XFS_SB_LOCK(mp);
                if (!XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
@@ -311,7 +311,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
         */
        totsize -= size;
        if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
-           !(mp->m_flags & XFS_MOUNT_COMPAT_ATTR)) {
+           (mp->m_flags & XFS_MOUNT_ATTR2)) {
                /*
                 * Last attribute now removed, revert to original
                 * inode format making all literal area available
@@ -330,7 +330,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
                dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
                ASSERT(dp->i_d.di_forkoff);
                ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
-                       (mp->m_flags & XFS_MOUNT_COMPAT_ATTR));
+                       !(mp->m_flags & XFS_MOUNT_ATTR2));
                dp->i_afp->if_ext_max =
                        XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
                dp->i_df.if_ext_max =
@@ -739,7 +739,7 @@ xfs_attr_shortform_allfit(xfs_dabuf_t *bp, xfs_inode_t *dp)
                                + name_loc->namelen
                                + INT_GET(name_loc->valuelen, ARCH_CONVERT);
        }
-       if (!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR) &&
+       if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
            (bytes == sizeof(struct xfs_attr_sf_hdr)))
                return(-1);
        return(xfs_attr_shortform_bytesfit(dp, bytes));
@@ -778,7 +778,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
                goto out;
 
        if (forkoff == -1) {
-               ASSERT(!(dp->i_mount->m_flags & XFS_MOUNT_COMPAT_ATTR));
+               ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
 
                /*
                 * Last attribute was removed, revert to original
index f6143ff251a019e90ec01673bcf69d8a835d3c1a..541e34109bb9834158d0204ef4763f74211553df 100644 (file)
@@ -63,7 +63,7 @@ struct xfs_trans;
  * the leaf_entry.  The namespaces are independent only because we also look
  * at the namespace bit when we are looking for a matching attribute name.
  *
- * We also store a "incomplete" bit in the leaf_entry.  It shows that an
+ * We also store an "incomplete" bit in the leaf_entry.  It shows that an
  * attribute is in the middle of being created and should not be shown to
  * the user if we crash during the time that the bit is set.  We clear the
  * bit when we have finished setting up the attribute.  We do this because
@@ -72,42 +72,48 @@ struct xfs_trans;
  */
 #define XFS_ATTR_LEAF_MAPSIZE  3       /* how many freespace slots */
 
+typedef struct xfs_attr_leaf_map {     /* RLE map of free bytes */
+       __uint16_t      base;           /* base of free region */
+       __uint16_t      size;           /* length of free region */
+} xfs_attr_leaf_map_t;
+
+typedef struct xfs_attr_leaf_hdr {     /* constant-structure header block */
+       xfs_da_blkinfo_t info;          /* block type, links, etc. */
+       __uint16_t      count;          /* count of active leaf_entry's */
+       __uint16_t      usedbytes;      /* num bytes of names/values stored */
+       __uint16_t      firstused;      /* first used byte in name area */
+       __uint8_t       holes;          /* != 0 if blk needs compaction */
+       __uint8_t       pad1;
+       xfs_attr_leaf_map_t freemap[XFS_ATTR_LEAF_MAPSIZE];
+                                       /* N largest free regions */
+} xfs_attr_leaf_hdr_t;
+
+typedef struct xfs_attr_leaf_entry {   /* sorted on key, not name */
+       xfs_dahash_t    hashval;        /* hash value of name */
+       __uint16_t      nameidx;        /* index into buffer of name/value */
+       __uint8_t       flags;          /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
+       __uint8_t       pad2;           /* unused pad byte */
+} xfs_attr_leaf_entry_t;
+
+typedef struct xfs_attr_leaf_name_local {
+       __uint16_t      valuelen;       /* number of bytes in value */
+       __uint8_t       namelen;        /* length of name bytes */
+       __uint8_t       nameval[1];     /* name/value bytes */
+} xfs_attr_leaf_name_local_t;
+
+typedef struct xfs_attr_leaf_name_remote {
+       xfs_dablk_t     valueblk;       /* block number of value bytes */
+       __uint32_t      valuelen;       /* number of bytes in value */
+       __uint8_t       namelen;        /* length of name bytes */
+       __uint8_t       name[1];        /* name bytes */
+} xfs_attr_leaf_name_remote_t;
+
 typedef struct xfs_attr_leafblock {
-       struct xfs_attr_leaf_hdr {      /* constant-structure header block */
-               xfs_da_blkinfo_t info;  /* block type, links, etc. */
-               __uint16_t count;       /* count of active leaf_entry's */
-               __uint16_t usedbytes;   /* num bytes of names/values stored */
-               __uint16_t firstused;   /* first used byte in name area */
-               __uint8_t  holes;       /* != 0 if blk needs compaction */
-               __uint8_t  pad1;
-               struct xfs_attr_leaf_map {        /* RLE map of free bytes */
-                       __uint16_t base;          /* base of free region */
-                       __uint16_t size;          /* length of free region */
-               } freemap[XFS_ATTR_LEAF_MAPSIZE]; /* N largest free regions */
-       } hdr;
-       struct xfs_attr_leaf_entry {    /* sorted on key, not name */
-               xfs_dahash_t hashval;   /* hash value of name */
-               __uint16_t nameidx;     /* index into buffer of name/value */
-               __uint8_t flags;        /* LOCAL/ROOT/SECURE/INCOMPLETE flag */
-               __uint8_t pad2;         /* unused pad byte */
-       } entries[1];                   /* variable sized array */
-       struct xfs_attr_leaf_name_local {
-               __uint16_t valuelen;    /* number of bytes in value */
-               __uint8_t namelen;      /* length of name bytes */
-               __uint8_t nameval[1];   /* name/value bytes */
-       } namelist;                     /* grows from bottom of buf */
-       struct xfs_attr_leaf_name_remote {
-               xfs_dablk_t valueblk;   /* block number of value bytes */
-               __uint32_t valuelen;    /* number of bytes in value */
-               __uint8_t namelen;      /* length of name bytes */
-               __uint8_t name[1];      /* name bytes */
-       } valuelist;                    /* grows from bottom of buf */
+       xfs_attr_leaf_hdr_t     hdr;    /* constant-structure header block */
+       xfs_attr_leaf_entry_t   entries[1];     /* sorted on key, not name */
+       xfs_attr_leaf_name_local_t namelist;    /* grows from bottom of buf */
+       xfs_attr_leaf_name_remote_t valuelist;  /* grows from bottom of buf */
 } xfs_attr_leafblock_t;
-typedef struct xfs_attr_leaf_hdr xfs_attr_leaf_hdr_t;
-typedef struct xfs_attr_leaf_map xfs_attr_leaf_map_t;
-typedef struct xfs_attr_leaf_entry xfs_attr_leaf_entry_t;
-typedef struct xfs_attr_leaf_name_local xfs_attr_leaf_name_local_t;
-typedef struct xfs_attr_leaf_name_remote xfs_attr_leaf_name_remote_t;
 
 /*
  * Flags used in the leaf_entry[i].flags field.
@@ -150,7 +156,8 @@ xfs_attr_leaf_name_local(xfs_attr_leafblock_t *leafp, int idx)
                (leafp))[INT_GET((leafp)->entries[idx].nameidx, ARCH_CONVERT)];
 }
 
-#define XFS_ATTR_LEAF_NAME(leafp,idx)          xfs_attr_leaf_name(leafp,idx)
+#define XFS_ATTR_LEAF_NAME(leafp,idx)          \
+       xfs_attr_leaf_name(leafp,idx)
 static inline char *xfs_attr_leaf_name(xfs_attr_leafblock_t *leafp, int idx)
 {
        return (&((char *)
index e415a4698e9c3e30836f5c2578f40f2f921592c0..70625e577c70b7421ad18a7e180f02962c679952 100644 (file)
@@ -2146,13 +2146,176 @@ xfs_bmap_add_extent_hole_real(
        return 0; /* keep gcc quiet */
 }
 
+/*
+ * Adjust the size of the new extent based on di_extsize and rt extsize.
+ */
+STATIC int
+xfs_bmap_extsize_align(
+       xfs_mount_t     *mp,
+       xfs_bmbt_irec_t *gotp,          /* next extent pointer */
+       xfs_bmbt_irec_t *prevp,         /* previous extent pointer */
+       xfs_extlen_t    extsz,          /* align to this extent size */
+       int             rt,             /* is this a realtime inode? */
+       int             eof,            /* is extent at end-of-file? */
+       int             delay,          /* creating delalloc extent? */
+       int             convert,        /* overwriting unwritten extent? */
+       xfs_fileoff_t   *offp,          /* in/out: aligned offset */
+       xfs_extlen_t    *lenp)          /* in/out: aligned length */
+{
+       xfs_fileoff_t   orig_off;       /* original offset */
+       xfs_extlen_t    orig_alen;      /* original length */
+       xfs_fileoff_t   orig_end;       /* original off+len */
+       xfs_fileoff_t   nexto;          /* next file offset */
+       xfs_fileoff_t   prevo;          /* previous file offset */
+       xfs_fileoff_t   align_off;      /* temp for offset */
+       xfs_extlen_t    align_alen;     /* temp for length */
+       xfs_extlen_t    temp;           /* temp for calculations */
+
+       if (convert)
+               return 0;
+
+       orig_off = align_off = *offp;
+       orig_alen = align_alen = *lenp;
+       orig_end = orig_off + orig_alen;
+
+       /*
+        * If this request overlaps an existing extent, then don't
+        * attempt to perform any additional alignment.
+        */
+       if (!delay && !eof &&
+           (orig_off >= gotp->br_startoff) &&
+           (orig_end <= gotp->br_startoff + gotp->br_blockcount)) {
+               return 0;
+       }
+
+       /*
+        * If the file offset is unaligned vs. the extent size
+        * we need to align it.  This will be possible unless
+        * the file was previously written with a kernel that didn't
+        * perform this alignment, or if a truncate shot us in the
+        * foot.
+        */
+       temp = do_mod(orig_off, extsz);
+       if (temp) {
+               align_alen += temp;
+               align_off -= temp;
+       }
+       /*
+        * Same adjustment for the end of the requested area.
+        */
+       if ((temp = (align_alen % extsz))) {
+               align_alen += extsz - temp;
+       }
+       /*
+        * If the previous block overlaps with this proposed allocation
+        * then move the start forward without adjusting the length.
+        */
+       if (prevp->br_startoff != NULLFILEOFF) {
+               if (prevp->br_startblock == HOLESTARTBLOCK)
+                       prevo = prevp->br_startoff;
+               else
+                       prevo = prevp->br_startoff + prevp->br_blockcount;
+       } else
+               prevo = 0;
+       if (align_off != orig_off && align_off < prevo)
+               align_off = prevo;
+       /*
+        * If the next block overlaps with this proposed allocation
+        * then move the start back without adjusting the length,
+        * but not before offset 0.
+        * This may of course make the start overlap the previous block,
+        * and if we hit the offset 0 limit then the next block
+        * can still overlap too.
+        */
+       if (!eof && gotp->br_startoff != NULLFILEOFF) {
+               if ((delay && gotp->br_startblock == HOLESTARTBLOCK) ||
+                   (!delay && gotp->br_startblock == DELAYSTARTBLOCK))
+                       nexto = gotp->br_startoff + gotp->br_blockcount;
+               else
+                       nexto = gotp->br_startoff;
+       } else
+               nexto = NULLFILEOFF;
+       if (!eof &&
+           align_off + align_alen != orig_end &&
+           align_off + align_alen > nexto)
+               align_off = nexto > align_alen ? nexto - align_alen : 0;
+       /*
+        * If we're now overlapping the next or previous extent that
+        * means we can't fit an extsz piece in this hole.  Just move
+        * the start forward to the first valid spot and set
+        * the length so we hit the end.
+        */
+       if (align_off != orig_off && align_off < prevo)
+               align_off = prevo;
+       if (align_off + align_alen != orig_end &&
+           align_off + align_alen > nexto &&
+           nexto != NULLFILEOFF) {
+               ASSERT(nexto > prevo);
+               align_alen = nexto - align_off;
+       }
+
+       /*
+        * If realtime, and the result isn't a multiple of the realtime
+        * extent size we need to remove blocks until it is.
+        */
+       if (rt && (temp = (align_alen % mp->m_sb.sb_rextsize))) {
+               /*
+                * We're not covering the original request, or
+                * we won't be able to once we fix the length.
+                */
+               if (orig_off < align_off ||
+                   orig_end > align_off + align_alen ||
+                   align_alen - temp < orig_alen)
+                       return XFS_ERROR(EINVAL);
+               /*
+                * Try to fix it by moving the start up.
+                */
+               if (align_off + temp <= orig_off) {
+                       align_alen -= temp;
+                       align_off += temp;
+               }
+               /*
+                * Try to fix it by moving the end in.
+                */
+               else if (align_off + align_alen - temp >= orig_end)
+                       align_alen -= temp;
+               /*
+                * Set the start to the minimum then trim the length.
+                */
+               else {
+                       align_alen -= orig_off - align_off;
+                       align_off = orig_off;
+                       align_alen -= align_alen % mp->m_sb.sb_rextsize;
+               }
+               /*
+                * Result doesn't cover the request, fail it.
+                */
+               if (orig_off < align_off || orig_end > align_off + align_alen)
+                       return XFS_ERROR(EINVAL);
+       } else {
+               ASSERT(orig_off >= align_off);
+               ASSERT(orig_end <= align_off + align_alen);
+       }
+
+#ifdef DEBUG
+       if (!eof && gotp->br_startoff != NULLFILEOFF)
+               ASSERT(align_off + align_alen <= gotp->br_startoff);
+       if (prevp->br_startoff != NULLFILEOFF)
+               ASSERT(align_off >= prevp->br_startoff + prevp->br_blockcount);
+#endif
+
+       *lenp = align_alen;
+       *offp = align_off;
+       return 0;
+}
+
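
Before the neighbour-overlap and realtime adjustments, the helper is plain
round-down/round-up arithmetic: pull the start back to an extent-size boundary
and pad the length so the end lands on one too.  A standalone sketch of just
that first step (sizes in filesystem blocks, no neighbour handling):

#include <stdint.h>
#include <stdio.h>

/* round [*off, *off + *len) out to multiples of extsz */
static void extsize_align(uint64_t extsz, uint64_t *off, uint64_t *len)
{
        uint64_t head = *off % extsz;           /* cf. do_mod(orig_off, extsz) */
        uint64_t tail;

        *off -= head;                           /* align the start down */
        *len += head;
        tail = *len % extsz;
        if (tail)
                *len += extsz - tail;           /* align the end up */
}

int main(void)
{
        uint64_t off = 13, len = 5, extsz = 8;  /* example request, in blocks */

        extsize_align(extsz, &off, &len);
        /* request [13,18) becomes [8,24): off=8, len=16 */
        printf("aligned to off=%llu len=%llu\n",
               (unsigned long long)off, (unsigned long long)len);
        return 0;
}
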
 #define XFS_ALLOC_GAP_UNITS    4
 
 /*
  * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  * It figures out where to ask the underlying allocator to put the new extent.
  */
-STATIC int                             /* error */
+STATIC int
 xfs_bmap_alloc(
        xfs_bmalloca_t  *ap)            /* bmap alloc argument struct */
 {
@@ -2163,10 +2326,10 @@ xfs_bmap_alloc(
        xfs_mount_t     *mp;            /* mount point structure */
        int             nullfb;         /* true if ap->firstblock isn't set */
        int             rt;             /* true if inode is realtime */
-#ifdef __KERNEL__
-       xfs_extlen_t    prod=0;         /* product factor for allocators */
-       xfs_extlen_t    ralen=0;        /* realtime allocation length */
-#endif
+       xfs_extlen_t    prod = 0;       /* product factor for allocators */
+       xfs_extlen_t    ralen = 0;      /* realtime allocation length */
+       xfs_extlen_t    align;          /* minimum allocation alignment */
+       xfs_rtblock_t   rtx;
 
 #define        ISVALID(x,y)    \
        (rt ? \
@@ -2182,125 +2345,25 @@ xfs_bmap_alloc(
        nullfb = ap->firstblock == NULLFSBLOCK;
        rt = XFS_IS_REALTIME_INODE(ap->ip) && ap->userdata;
        fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, ap->firstblock);
-#ifdef __KERNEL__
        if (rt) {
-               xfs_extlen_t    extsz;          /* file extent size for rt */
-               xfs_fileoff_t   nexto;          /* next file offset */
-               xfs_extlen_t    orig_alen;      /* original ap->alen */
-               xfs_fileoff_t   orig_end;       /* original off+len */
-               xfs_fileoff_t   orig_off;       /* original ap->off */
-               xfs_extlen_t    mod_off;        /* modulus calculations */
-               xfs_fileoff_t   prevo;          /* previous file offset */
-               xfs_rtblock_t   rtx;            /* realtime extent number */
-               xfs_extlen_t    temp;           /* temp for rt calculations */
-
-               /*
-                * Set prod to match the realtime extent size.
-                */
-               if (!(extsz = ap->ip->i_d.di_extsize))
-                       extsz = mp->m_sb.sb_rextsize;
-               prod = extsz / mp->m_sb.sb_rextsize;
-               orig_off = ap->off;
-               orig_alen = ap->alen;
-               orig_end = orig_off + orig_alen;
-               /*
-                * If the file offset is unaligned vs. the extent size
-                * we need to align it.  This will be possible unless
-                * the file was previously written with a kernel that didn't
-                * perform this alignment.
-                */
-               mod_off = do_mod(orig_off, extsz);
-               if (mod_off) {
-                       ap->alen += mod_off;
-                       ap->off -= mod_off;
-               }
-               /*
-                * Same adjustment for the end of the requested area.
-                */
-               if ((temp = (ap->alen % extsz)))
-                       ap->alen += extsz - temp;
-               /*
-                * If the previous block overlaps with this proposed allocation
-                * then move the start forward without adjusting the length.
-                */
-               prevo =
-                       ap->prevp->br_startoff == NULLFILEOFF ?
-                               0 :
-                               (ap->prevp->br_startoff +
-                                ap->prevp->br_blockcount);
-               if (ap->off != orig_off && ap->off < prevo)
-                       ap->off = prevo;
-               /*
-                * If the next block overlaps with this proposed allocation
-                * then move the start back without adjusting the length,
-                * but not before offset 0.
-                * This may of course make the start overlap previous block,
-                * and if we hit the offset 0 limit then the next block
-                * can still overlap too.
-                */
-               nexto = (ap->eof || ap->gotp->br_startoff == NULLFILEOFF) ?
-                       NULLFILEOFF : ap->gotp->br_startoff;
-               if (!ap->eof &&
-                   ap->off + ap->alen != orig_end &&
-                   ap->off + ap->alen > nexto)
-                       ap->off = nexto > ap->alen ? nexto - ap->alen : 0;
-               /*
-                * If we're now overlapping the next or previous extent that
-                * means we can't fit an extsz piece in this hole.  Just move
-                * the start forward to the first valid spot and set
-                * the length so we hit the end.
-                */
-               if ((ap->off != orig_off && ap->off < prevo) ||
-                   (ap->off + ap->alen != orig_end &&
-                    ap->off + ap->alen > nexto)) {
-                       ap->off = prevo;
-                       ap->alen = nexto - prevo;
-               }
-               /*
-                * If the result isn't a multiple of rtextents we need to
-                * remove blocks until it is.
-                */
-               if ((temp = (ap->alen % mp->m_sb.sb_rextsize))) {
-                       /*
-                        * We're not covering the original request, or
-                        * we won't be able to once we fix the length.
-                        */
-                       if (orig_off < ap->off ||
-                           orig_end > ap->off + ap->alen ||
-                           ap->alen - temp < orig_alen)
-                               return XFS_ERROR(EINVAL);
-                       /*
-                        * Try to fix it by moving the start up.
-                        */
-                       if (ap->off + temp <= orig_off) {
-                               ap->alen -= temp;
-                               ap->off += temp;
-                       }
-                       /*
-                        * Try to fix it by moving the end in.
-                        */
-                       else if (ap->off + ap->alen - temp >= orig_end)
-                               ap->alen -= temp;
-                       /*
-                        * Set the start to the minimum then trim the length.
-                        */
-                       else {
-                               ap->alen -= orig_off - ap->off;
-                               ap->off = orig_off;
-                               ap->alen -= ap->alen % mp->m_sb.sb_rextsize;
-                       }
-                       /*
-                        * Result doesn't cover the request, fail it.
-                        */
-                       if (orig_off < ap->off || orig_end > ap->off + ap->alen)
-                               return XFS_ERROR(EINVAL);
-               }
+               align = ap->ip->i_d.di_extsize ?
+                       ap->ip->i_d.di_extsize : mp->m_sb.sb_rextsize;
+               /* Set prod to match the extent size */
+               prod = align / mp->m_sb.sb_rextsize;
+
+               error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+                                               align, rt, ap->eof, 0,
+                                               ap->conv, &ap->off, &ap->alen);
+               if (error)
+                       return error;
+               ASSERT(ap->alen);
                ASSERT(ap->alen % mp->m_sb.sb_rextsize == 0);
+
                /*
                 * If the offset & length are not perfectly aligned
                 * then kill prod, it will just get us in trouble.
                 */
-               if (do_mod(ap->off, extsz) || ap->alen % extsz)
+               if (do_mod(ap->off, align) || ap->alen % align)
                        prod = 1;
                /*
                 * Set ralen to be the actual requested length in rtextents.
@@ -2326,15 +2389,24 @@ xfs_bmap_alloc(
                        ap->rval = rtx * mp->m_sb.sb_rextsize;
                } else
                        ap->rval = 0;
+       } else {
+               align = (ap->userdata && ap->ip->i_d.di_extsize &&
+                       (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)) ?
+                       ap->ip->i_d.di_extsize : 0;
+               if (unlikely(align)) {
+                       error = xfs_bmap_extsize_align(mp, ap->gotp, ap->prevp,
+                                                       align, rt,
+                                                       ap->eof, 0, ap->conv,
+                                                       &ap->off, &ap->alen);
+                       ASSERT(!error);
+                       ASSERT(ap->alen);
+               }
+               if (nullfb)
+                       ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
+               else
+                       ap->rval = ap->firstblock;
        }
-#else
-       if (rt)
-               ap->rval = 0;
-#endif /* __KERNEL__ */
-       else if (nullfb)
-               ap->rval = XFS_INO_TO_FSB(mp, ap->ip->i_ino);
-       else
-               ap->rval = ap->firstblock;
+
        /*
         * If allocating at eof, and there's a previous real block,
         * try to use its last block as our starting point.
@@ -2598,11 +2670,12 @@ xfs_bmap_alloc(
                        args.total = ap->total;
                        args.minlen = ap->minlen;
                }
-               if (ap->ip->i_d.di_extsize) {
+               if (unlikely(ap->userdata && ap->ip->i_d.di_extsize &&
+                           (ap->ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE))) {
                        args.prod = ap->ip->i_d.di_extsize;
                        if ((args.mod = (xfs_extlen_t)do_mod(ap->off, args.prod)))
                                args.mod = (xfs_extlen_t)(args.prod - args.mod);
-               } else if (mp->m_sb.sb_blocksize >= NBPP) {
+               } else if (unlikely(mp->m_sb.sb_blocksize >= NBPP)) {
                        args.prod = 1;
                        args.mod = 0;
                } else {
@@ -3580,14 +3653,16 @@ xfs_bmap_search_extents(
 
        ep = xfs_bmap_do_search_extents(base, lastx, nextents, bno, eofp,
                                          lastxp, gotp, prevp);
-       rt = ip->i_d.di_flags & XFS_DIFLAG_REALTIME;
-       if(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM)) {
+       rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
+       if (unlikely(!rt && !gotp->br_startblock && (*lastxp != NULLEXTNUM))) {
                 cmn_err(CE_PANIC,"Access to block zero: fs: <%s> inode: %lld "
                        "start_block : %llx start_off : %llx blkcnt : %llx "
                        "extent-state : %x \n",
-                       (ip->i_mount)->m_fsname,(long long)ip->i_ino,
-                       gotp->br_startblock, gotp->br_startoff,
-                       gotp->br_blockcount,gotp->br_state);
+                       (ip->i_mount)->m_fsname, (long long)ip->i_ino,
+                       (unsigned long long)gotp->br_startblock,
+                       (unsigned long long)gotp->br_startoff,
+                       (unsigned long long)gotp->br_blockcount,
+                       gotp->br_state);
         }
         return ep;
 }
@@ -3875,7 +3950,7 @@ xfs_bmap_add_attrfork(
                ip->i_d.di_forkoff = xfs_attr_shortform_bytesfit(ip, size);
                if (!ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = mp->m_attroffset >> 3;
-               else if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
+               else if (mp->m_flags & XFS_MOUNT_ATTR2)
                        version = 2;
                break;
        default:
@@ -4023,13 +4098,13 @@ xfs_bmap_compute_maxlevels(
         */
        if (whichfork == XFS_DATA_FORK) {
                maxleafents = MAXEXTNUM;
-               sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
-                       mp->m_attroffset : XFS_BMDR_SPACE_CALC(MINDBTPTRS);
+               sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+                       XFS_BMDR_SPACE_CALC(MINDBTPTRS) : mp->m_attroffset;
        } else {
                maxleafents = MAXAEXTNUM;
-               sz = (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) ?
-                       mp->m_sb.sb_inodesize - mp->m_attroffset :
-                       XFS_BMDR_SPACE_CALC(MINABTPTRS);
+               sz = (mp->m_flags & XFS_MOUNT_ATTR2) ?
+                       XFS_BMDR_SPACE_CALC(MINABTPTRS) :
+                       mp->m_sb.sb_inodesize - mp->m_attroffset;
        }
        maxrootrecs = (int)XFS_BTREE_BLOCK_MAXRECS(sz, xfs_bmdr, 0);
        minleafrecs = mp->m_bmap_dmnr[0];
@@ -4418,8 +4493,8 @@ xfs_bmap_read_extents(
                num_recs = be16_to_cpu(block->bb_numrecs);
                if (unlikely(i + num_recs > room)) {
                        ASSERT(i + num_recs <= room);
-                       xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                               "corrupt dinode %Lu, (btree extents).  Unmount and run xfs_repair.",
+                       xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                               "corrupt dinode %Lu, (btree extents).",
                                (unsigned long long) ip->i_ino);
                        XFS_ERROR_REPORT("xfs_bmap_read_extents(1)",
                                         XFS_ERRLEVEL_LOW,
@@ -4590,6 +4665,7 @@ xfs_bmapi(
        char            contig;         /* allocation must be one extent */
        char            delay;          /* this request is for delayed alloc */
        char            exact;          /* don't do all of wasdelayed extent */
+       char            convert;        /* unwritten extent I/O completion */
        xfs_bmbt_rec_t  *ep;            /* extent list entry pointer */
        int             error;          /* error return */
        xfs_bmbt_irec_t got;            /* current extent list record */
@@ -4643,7 +4719,7 @@ xfs_bmapi(
        }
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
-       rt = XFS_IS_REALTIME_INODE(ip);
+       rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(ifp->if_ext_max ==
               XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
@@ -4654,6 +4730,7 @@ xfs_bmapi(
        delay = (flags & XFS_BMAPI_DELAY) != 0;
        trim = (flags & XFS_BMAPI_ENTIRE) == 0;
        userdata = (flags & XFS_BMAPI_METADATA) == 0;
+       convert = (flags & XFS_BMAPI_CONVERT) != 0;
        exact = (flags & XFS_BMAPI_EXACT) != 0;
        rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0;
        contig = (flags & XFS_BMAPI_CONTIG) != 0;
@@ -4748,15 +4825,25 @@ xfs_bmapi(
                        }
                        minlen = contig ? alen : 1;
                        if (delay) {
-                               xfs_extlen_t    extsz = 0;
+                               xfs_extlen_t    extsz;
 
                                /* Figure out the extent size, adjust alen */
                                if (rt) {
                                        if (!(extsz = ip->i_d.di_extsize))
                                                extsz = mp->m_sb.sb_rextsize;
-                                       alen = roundup(alen, extsz);
-                                       extsz = alen / mp->m_sb.sb_rextsize;
+                               } else {
+                                       extsz = ip->i_d.di_extsize;
                                }
+                               if (extsz) {
+                                       error = xfs_bmap_extsize_align(mp,
+                                                       &got, &prev, extsz,
+                                                       rt, eof, delay, convert,
+                                                       &aoff, &alen);
+                                       ASSERT(!error);
+                               }
+
+                               if (rt)
+                                       extsz = alen / mp->m_sb.sb_rextsize;
 
                                /*
                                 * Make a transaction-less quota reservation for
@@ -4785,32 +4872,33 @@ xfs_bmapi(
                                        xfs_bmap_worst_indlen(ip, alen);
                                ASSERT(indlen > 0);
 
-                               if (rt)
+                               if (rt) {
                                        error = xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FREXTENTS,
                                                        -(extsz), rsvd);
-                               else
+                               } else {
                                        error = xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FDBLOCKS,
                                                        -(alen), rsvd);
+                               }
                                if (!error) {
                                        error = xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FDBLOCKS,
                                                        -(indlen), rsvd);
-                                       if (error && rt) {
-                                               xfs_mod_incore_sb(ip->i_mount,
+                                       if (error && rt)
+                                               xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FREXTENTS,
                                                        extsz, rsvd);
-                                       } else if (error) {
-                                               xfs_mod_incore_sb(ip->i_mount,
+                                       else if (error)
+                                               xfs_mod_incore_sb(mp,
                                                        XFS_SBS_FDBLOCKS,
                                                        alen, rsvd);
-                                       }
                                }
 
                                if (error) {
-                                       if (XFS_IS_QUOTA_ON(ip->i_mount))
+                                       if (XFS_IS_QUOTA_ON(mp))
                                                /* unreserve the blocks now */
+                                               (void)
                                                XFS_TRANS_UNRESERVE_QUOTA_NBLKS(
                                                        mp, NULL, ip,
                                                        (long)alen, 0, rt ?
@@ -4849,6 +4937,7 @@ xfs_bmapi(
                                bma.firstblock = *firstblock;
                                bma.alen = alen;
                                bma.off = aoff;
+                               bma.conv = convert;
                                bma.wasdel = wasdelay;
                                bma.minlen = minlen;
                                bma.low = flist->xbf_low;
@@ -5270,8 +5359,7 @@ xfs_bunmapi(
                return 0;
        }
        XFS_STATS_INC(xs_blk_unmap);
-       isrt = (whichfork == XFS_DATA_FORK) &&
-              (ip->i_d.di_flags & XFS_DIFLAG_REALTIME);
+       isrt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(ip);
        start = bno;
        bno = start + len - 1;
        ep = xfs_bmap_search_extents(ip, bno, whichfork, &eof, &lastx, &got,
@@ -5443,7 +5531,7 @@ xfs_bunmapi(
                }
                if (wasdel) {
                        ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
-                       /* Update realtim/data freespace, unreserve quota */
+                       /* Update realtime/data freespace, unreserve quota */
                        if (isrt) {
                                xfs_filblks_t rtexts;
 
@@ -5451,14 +5539,14 @@ xfs_bunmapi(
                                do_div(rtexts, mp->m_sb.sb_rextsize);
                                xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS,
                                                (int)rtexts, rsvd);
-                               XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-                                       -((long)del.br_blockcount), 0,
+                               (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+                                       NULL, ip, -((long)del.br_blockcount), 0,
                                        XFS_QMOPT_RES_RTBLKS);
                        } else {
                                xfs_mod_incore_sb(mp, XFS_SBS_FDBLOCKS,
                                                (int)del.br_blockcount, rsvd);
-                               XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, NULL, ip,
-                                       -((long)del.br_blockcount), 0,
+                               (void)XFS_TRANS_RESERVE_QUOTA_NBLKS(mp,
+                                       NULL, ip, -((long)del.br_blockcount), 0,
                                        XFS_QMOPT_RES_REGBLKS);
                        }
                        ip->i_delayed_blks -= del.br_blockcount;
@@ -5652,7 +5740,9 @@ xfs_getbmap(
                   ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
                return XFS_ERROR(EINVAL);
        if (whichfork == XFS_DATA_FORK) {
-               if (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC) {
+               if ((ip->i_d.di_extsize && (ip->i_d.di_flags &
+                               (XFS_DIFLAG_REALTIME|XFS_DIFLAG_EXTSIZE))) ||
+                   ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
                        prealloced = 1;
                        fixlen = XFS_MAXIOFFSET(mp);
                } else {
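
[Editorial note, not part of the patch] The delayed-allocation hunk above, and the xfs_iomap.c hunks later in this diff, repeat one small decision: pick the extent size hint for an inode, falling back to the filesystem realtime extent size when the inode is realtime and carries no hint of its own. A minimal standalone sketch of that selection follows; the struct and function names are hypothetical stand-ins for the kernel's xfs_inode/xfs_mount fields, and only the fallback rule itself is taken from the patch.

#include <stdio.h>

/* Hypothetical stand-ins for the in-kernel superblock/inode fields. */
struct sb  { unsigned rextsize; };            /* realtime extent size (blocks) */
struct ino { int rt; unsigned di_extsize; };  /* realtime flag, per-inode hint  */

/* Mirror of the selection made in the xfs_bmapi() delay path above:
 * realtime inodes always end up with a non-zero extent size (falling back
 * to the realtime extent size), while data inodes use the hint as-is,
 * which may legitimately be zero (no alignment requested). */
static unsigned extsize_hint(const struct ino *ip, const struct sb *sbp)
{
        if (ip->rt)
                return ip->di_extsize ? ip->di_extsize : sbp->rextsize;
        return ip->di_extsize;
}

int main(void)
{
        struct sb  sbp = { .rextsize = 16 };
        struct ino rt_nohint = { .rt = 1, .di_extsize = 0 };
        struct ino data_hint = { .rt = 0, .di_extsize = 8 };

        printf("%u %u\n", extsize_hint(&rt_nohint, &sbp),   /* 16 */
                          extsize_hint(&data_hint, &sbp));  /* 8  */
        return 0;
}
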
index 2e0717a01309ee5b75ab8ba521aceb31900cd77e..12cc63dfc2c472edfd0831893cf58be5cb2c4107 100644 (file)
@@ -62,6 +62,10 @@ typedef      struct xfs_bmap_free
 #define        XFS_BMAPI_IGSTATE       0x200   /* Ignore state - */
                                        /* combine contig. space */
 #define        XFS_BMAPI_CONTIG        0x400   /* must allocate only one extent */
+/*     XFS_BMAPI_DIRECT_IO     0x800   */
+#define XFS_BMAPI_CONVERT      0x1000  /* unwritten extent conversion - */
+                                       /* need write cache flushing and no */
+                                       /* additional allocation alignments */
 
 #define        XFS_BMAPI_AFLAG(w)      xfs_bmapi_aflag(w)
 static inline int xfs_bmapi_aflag(int w)
@@ -101,7 +105,8 @@ typedef struct xfs_bmalloca {
        char                    wasdel; /* replacing a delayed allocation */
        char                    userdata;/* set if is user data */
        char                    low;    /* low on space, using seq'l ags */
-       char                    aeof;   /* allocated space at eof */
+       char                    aeof;   /* allocated space at eof */
+       char                    conv;   /* overwriting unwritten extents */
 } xfs_bmalloca_t;
 
 #ifdef __KERNEL__
index 328a528b926d0ed028953b5cfb6f102a293efba1..f57cc9ac875ec6b6daecf472ed1cf580fe3ac72d 100644 (file)
@@ -57,7 +57,7 @@ struct xfs_mount_args {
 /*
  * XFS mount option flags -- args->flags1
  */
-#define        XFSMNT_COMPAT_ATTR      0x00000001      /* do not use ATTR2 format */
+#define        XFSMNT_ATTR2            0x00000001      /* allow ATTR2 EA format */
 #define        XFSMNT_WSYNC            0x00000002      /* safe mode nfs mount
                                                 * compatible */
 #define        XFSMNT_INO64            0x00000004      /* move inode numbers up
index 070259a4254c41a6fc370f9c43e42e253fb0e496..c6191d00ad27fb721e11c2639e02a3c199653279 100644 (file)
@@ -60,8 +60,6 @@ xfs_swapext(
        xfs_bstat_t     *sbp;
        struct file     *fp = NULL, *tfp = NULL;
        vnode_t         *vp, *tvp;
-       bhv_desc_t      *bdp, *tbdp;
-       vn_bhv_head_t   *bhp, *tbhp;
        static uint     lock_flags = XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL;
        int             ilf_fields, tilf_fields;
        int             error = 0;
@@ -90,13 +88,10 @@ xfs_swapext(
                goto error0;
        }
 
-       bhp = VN_BHV_HEAD(vp);
-       bdp = vn_bhv_lookup(bhp, &xfs_vnodeops);
-       if (bdp == NULL) {
+       ip = xfs_vtoi(vp);
+       if (ip == NULL) {
                error = XFS_ERROR(EBADF);
                goto error0;
-       } else {
-               ip = XFS_BHVTOI(bdp);
        }
 
        if (((tfp = fget((int)sxp->sx_fdtmp)) == NULL) ||
@@ -105,13 +100,10 @@ xfs_swapext(
                goto error0;
        }
 
-       tbhp = VN_BHV_HEAD(tvp);
-       tbdp = vn_bhv_lookup(tbhp, &xfs_vnodeops);
-       if (tbdp == NULL) {
+       tip = xfs_vtoi(tvp);
+       if (tip == NULL) {
                error = XFS_ERROR(EBADF);
                goto error0;
-       } else {
-               tip = XFS_BHVTOI(tbdp);
        }
 
        if (ip->i_mount != tip->i_mount) {
index c5a0e537ff1ab93c8184f0d0ef55dcdf7845dc1a..79d0d9e1fbabc8d4f38061c1191dd978fc447791 100644 (file)
@@ -199,10 +199,16 @@ typedef enum xfs_dinode_fmt
 
 #define XFS_DFORK_DSIZE(dip,mp) \
        XFS_CFORK_DSIZE_DISK(&(dip)->di_core, mp)
+#define XFS_DFORK_DSIZE_HOST(dip,mp) \
+       XFS_CFORK_DSIZE(&(dip)->di_core, mp)
 #define XFS_DFORK_ASIZE(dip,mp) \
        XFS_CFORK_ASIZE_DISK(&(dip)->di_core, mp)
+#define XFS_DFORK_ASIZE_HOST(dip,mp) \
+       XFS_CFORK_ASIZE(&(dip)->di_core, mp)
 #define        XFS_DFORK_SIZE(dip,mp,w) \
        XFS_CFORK_SIZE_DISK(&(dip)->di_core, mp, w)
+#define        XFS_DFORK_SIZE_HOST(dip,mp,w) \
+       XFS_CFORK_SIZE(&(dip)->di_core, mp, w)
 
 #define        XFS_DFORK_Q(dip)                    XFS_CFORK_Q_DISK(&(dip)->di_core)
 #define        XFS_DFORK_BOFF(dip)                 XFS_CFORK_BOFF_DISK(&(dip)->di_core)
@@ -216,6 +222,7 @@ typedef enum xfs_dinode_fmt
 #define        XFS_CFORK_FMT_SET(dcp,w,n) \
        ((w) == XFS_DATA_FORK ? \
                ((dcp)->di_format = (n)) : ((dcp)->di_aformat = (n)))
+#define        XFS_DFORK_FORMAT(dip,w) XFS_CFORK_FORMAT(&(dip)->di_core, w)
 
 #define        XFS_CFORK_NEXTENTS_DISK(dcp,w) \
        ((w) == XFS_DATA_FORK ? \
@@ -223,13 +230,13 @@ typedef enum xfs_dinode_fmt
                INT_GET((dcp)->di_anextents, ARCH_CONVERT))
 #define XFS_CFORK_NEXTENTS(dcp,w) \
        ((w) == XFS_DATA_FORK ? (dcp)->di_nextents : (dcp)->di_anextents)
+#define        XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
+#define        XFS_DFORK_NEXTENTS_HOST(dip,w) XFS_CFORK_NEXTENTS(&(dip)->di_core, w)
 
 #define        XFS_CFORK_NEXT_SET(dcp,w,n) \
        ((w) == XFS_DATA_FORK ? \
                ((dcp)->di_nextents = (n)) : ((dcp)->di_anextents = (n)))
 
-#define        XFS_DFORK_NEXTENTS(dip,w) XFS_CFORK_NEXTENTS_DISK(&(dip)->di_core, w)
-
 #define        XFS_BUF_TO_DINODE(bp)   ((xfs_dinode_t *)XFS_BUF_PTR(bp))
 
 /*
@@ -246,8 +253,10 @@ typedef enum xfs_dinode_fmt
 #define XFS_DIFLAG_NOATIME_BIT   6     /* do not update atime */
 #define XFS_DIFLAG_NODUMP_BIT    7     /* do not dump */
 #define XFS_DIFLAG_RTINHERIT_BIT 8     /* create with realtime bit set */
-#define XFS_DIFLAG_PROJINHERIT_BIT  9  /* create with parents projid */
-#define XFS_DIFLAG_NOSYMLINKS_BIT  10  /* disallow symlink creation */
+#define XFS_DIFLAG_PROJINHERIT_BIT   9 /* create with parents projid */
+#define XFS_DIFLAG_NOSYMLINKS_BIT   10 /* disallow symlink creation */
+#define XFS_DIFLAG_EXTSIZE_BIT      11 /* inode extent size allocator hint */
+#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
 #define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
 #define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
 #define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -259,11 +268,14 @@ typedef enum xfs_dinode_fmt
 #define XFS_DIFLAG_RTINHERIT     (1 << XFS_DIFLAG_RTINHERIT_BIT)
 #define XFS_DIFLAG_PROJINHERIT   (1 << XFS_DIFLAG_PROJINHERIT_BIT)
 #define XFS_DIFLAG_NOSYMLINKS    (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
+#define XFS_DIFLAG_EXTSIZE       (1 << XFS_DIFLAG_EXTSIZE_BIT)
+#define XFS_DIFLAG_EXTSZINHERIT  (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
 
 #define XFS_DIFLAG_ANY \
        (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
         XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
         XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
-        XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS)
+        XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
+        XFS_DIFLAG_EXTSZINHERIT)
 
 #endif /* __XFS_DINODE_H__ */
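
[Editorial note, not part of the patch] The new EXTSIZE/EXTSZINHERIT inode flags above are defined as bit numbers and shifted into masks; they line up with the user-visible XFS_XFLAG_* values added in the xfs_fs.h hunk and translated in the _xfs_dic2xflags() hunk later in this diff. A small sketch of that translation, assuming only the constants shown in the patch; the helper name and the printf harness are illustrative.

#include <stdio.h>

/* Bit numbers and masks copied from the hunk above. */
#define XFS_DIFLAG_EXTSIZE_BIT      11
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12
#define XFS_DIFLAG_EXTSIZE       (1 << XFS_DIFLAG_EXTSIZE_BIT)
#define XFS_DIFLAG_EXTSZINHERIT  (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)

/* User-visible flag values from the xfs_fs.h hunk further down. */
#define XFS_XFLAG_EXTSIZE      0x00000800
#define XFS_XFLAG_EXTSZINHERIT 0x00001000

/* Sketch of the new cases added to _xfs_dic2xflags(): on-disk di_flags
 * bits are mapped one by one onto the ioctl-visible fsxattr flags. */
static unsigned int dic2xflags_extsize(unsigned short di_flags)
{
        unsigned int flags = 0;

        if (di_flags & XFS_DIFLAG_EXTSIZE)
                flags |= XFS_XFLAG_EXTSIZE;
        if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
                flags |= XFS_XFLAG_EXTSZINHERIT;
        return flags;
}

int main(void)
{
        printf("0x%x\n", dic2xflags_extsize(XFS_DIFLAG_EXTSIZE));  /* 0x800 */
        return 0;
}
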
index 3dd30391f55119b3ba7dd4cfe85b6e29cbc9b323..bb87d2a700a955dae24bce8b7c10ce9ce19c5e9e 100644 (file)
@@ -176,7 +176,7 @@ xfs_dir_mount(xfs_mount_t *mp)
        uint shortcount, leafcount, count;
 
        mp->m_dirversion = 1;
-       if (mp->m_flags & XFS_MOUNT_COMPAT_ATTR) {
+       if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
                shortcount = (mp->m_attroffset -
                                (uint)sizeof(xfs_dir_sf_hdr_t)) /
                                 (uint)sizeof(xfs_dir_sf_entry_t);
index 488defe86ba6f6af4d5e1494243a72e6d5c0a8d0..8cc8afb9f6c0396ab6dd7f76eef1f8ce562a8be2 100644 (file)
@@ -135,6 +135,8 @@ void        xfs_dir_startup(void);  /* called exactly once */
        ((mp)->m_dirops.xd_shortform_to_single(args))
 
 #define        XFS_DIR_IS_V1(mp)       ((mp)->m_dirversion == 1)
+#define        XFS_DIR_IS_V2(mp)       ((mp)->m_dirversion == 2)
 extern xfs_dirops_t xfsv1_dirops;
+extern xfs_dirops_t xfsv2_dirops;
 
 #endif /* __XFS_DIR_H__ */
index 7e24ffeda9e1f7ae92d0882d940b1fd6e11825af..3158f5dc431f3f17fbacc1f4a12ae1e52b4038d2 100644 (file)
@@ -72,9 +72,6 @@ typedef struct xfs_dir2_put_args {
        struct uio      *uio;           /* uio control structure */
 } xfs_dir2_put_args_t;
 
-#define        XFS_DIR_IS_V2(mp)       ((mp)->m_dirversion == 2)
-extern xfs_dirops_t    xfsv2_dirops;
-
 /*
  * Other interfaces used by the rest of the dir v2 code.
  */
index ab6b09eef9ab840079fba4005e9faec83861a658..eb8cd9a4667f2b9975128cc0c5b9a0761e6d0c83 100644 (file)
@@ -67,34 +67,38 @@ struct xfs_trans;
  */
 #define XFS_DIR_LEAF_MAPSIZE   3       /* how many freespace slots */
 
+typedef struct xfs_dir_leaf_map {      /* RLE map of free bytes */
+       __uint16_t      base;           /* base of free region */
+       __uint16_t      size;           /* run length of free region */
+} xfs_dir_leaf_map_t;
+
+typedef struct xfs_dir_leaf_hdr {      /* constant-structure header block */
+       xfs_da_blkinfo_t info;          /* block type, links, etc. */
+       __uint16_t      count;          /* count of active leaf_entry's */
+       __uint16_t      namebytes;      /* num bytes of name strings stored */
+       __uint16_t      firstused;      /* first used byte in name area */
+       __uint8_t       holes;          /* != 0 if blk needs compaction */
+       __uint8_t       pad1;
+       xfs_dir_leaf_map_t freemap[XFS_DIR_LEAF_MAPSIZE];
+} xfs_dir_leaf_hdr_t;
+
+typedef struct xfs_dir_leaf_entry {    /* sorted on key, not name */
+       xfs_dahash_t    hashval;        /* hash value of name */
+       __uint16_t      nameidx;        /* index into buffer of name */
+       __uint8_t       namelen;        /* length of name string */
+       __uint8_t       pad2;
+} xfs_dir_leaf_entry_t;
+
+typedef struct xfs_dir_leaf_name {
+       xfs_dir_ino_t   inumber;        /* inode number for this key */
+       __uint8_t       name[1];        /* name string itself */
+} xfs_dir_leaf_name_t;
+
 typedef struct xfs_dir_leafblock {
-       struct xfs_dir_leaf_hdr {       /* constant-structure header block */
-               xfs_da_blkinfo_t info;  /* block type, links, etc. */
-               __uint16_t count;       /* count of active leaf_entry's */
-               __uint16_t namebytes;   /* num bytes of name strings stored */
-               __uint16_t firstused;   /* first used byte in name area */
-               __uint8_t  holes;       /* != 0 if blk needs compaction */
-               __uint8_t  pad1;
-               struct xfs_dir_leaf_map {/* RLE map of free bytes */
-                       __uint16_t base; /* base of free region */
-                       __uint16_t size; /* run length of free region */
-               } freemap[XFS_DIR_LEAF_MAPSIZE]; /* N largest free regions */
-       } hdr;
-       struct xfs_dir_leaf_entry {     /* sorted on key, not name */
-               xfs_dahash_t hashval;   /* hash value of name */
-               __uint16_t nameidx;     /* index into buffer of name */
-               __uint8_t namelen;      /* length of name string */
-               __uint8_t pad2;
-       } entries[1];                   /* var sized array */
-       struct xfs_dir_leaf_name {
-               xfs_dir_ino_t inumber;  /* inode number for this key */
-               __uint8_t name[1];      /* name string itself */
-       } namelist[1];                  /* grows from bottom of buf */
+       xfs_dir_leaf_hdr_t      hdr;    /* constant-structure header block */
+       xfs_dir_leaf_entry_t    entries[1];     /* var sized array */
+       xfs_dir_leaf_name_t     namelist[1];    /* grows from bottom of buf */
 } xfs_dir_leafblock_t;
-typedef struct xfs_dir_leaf_hdr xfs_dir_leaf_hdr_t;
-typedef struct xfs_dir_leaf_map xfs_dir_leaf_map_t;
-typedef struct xfs_dir_leaf_entry xfs_dir_leaf_entry_t;
-typedef struct xfs_dir_leaf_name xfs_dir_leaf_name_t;
 
 /*
  * Length of name for which a 512-byte block filesystem
@@ -126,11 +130,10 @@ typedef union {
 #define        XFS_PUT_COOKIE(c,mp,bno,entry,hash)     \
        ((c).s.be = XFS_DA_MAKE_BNOENTRY(mp, bno, entry), (c).s.h = (hash))
 
-typedef struct xfs_dir_put_args
-{
+typedef struct xfs_dir_put_args {
        xfs_dircook_t   cook;           /* cookie of (next) entry */
        xfs_intino_t    ino;            /* inode number */
-       struct xfs_dirent       *dbp;           /* buffer pointer */
+       struct xfs_dirent *dbp;         /* buffer pointer */
        char            *name;          /* directory entry name */
        int             namelen;        /* length of name */
        int             done;           /* output: set if value was stored */
@@ -138,7 +141,8 @@ typedef struct xfs_dir_put_args
        struct uio      *uio;           /* uio control structure */
 } xfs_dir_put_args_t;
 
-#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len)       xfs_dir_leaf_entsize_byname(len)
+#define XFS_DIR_LEAF_ENTSIZE_BYNAME(len)       \
+       xfs_dir_leaf_entsize_byname(len)
 static inline int xfs_dir_leaf_entsize_byname(int len)
 {
        return (uint)sizeof(xfs_dir_leaf_name_t)-1 + len;
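
[Editorial note, not part of the patch] The leaf entry size helper above charges each directory entry the size of xfs_dir_leaf_name_t minus the one name byte already counted by name[1]. A standalone sketch of the same arithmetic, assuming an 8-byte unaligned inode number so sizeof() carries no padding; the type and helper names are stand-ins.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the on-disk xfs_dir_leaf_name_t: an unaligned 8-byte inode
 * number followed by the name bytes, which overlay name[1]. The byte-array
 * inumber is an assumption made so the struct has no padding. */
typedef struct dir_leaf_name {
        uint8_t inumber[8];     /* xfs_dir_ino_t */
        uint8_t name[1];        /* name string itself */
} dir_leaf_name_t;

/* Same arithmetic as xfs_dir_leaf_entsize_byname() above: the struct already
 * accounts for one name byte, so subtract it back out before adding len. */
static int dir_leaf_entsize_byname(int len)
{
        return (int)sizeof(dir_leaf_name_t) - 1 + len;
}

int main(void)
{
        /* A 12-character name costs 8 (inumber) + 12 (name) = 20 bytes. */
        printf("%d\n", dir_leaf_entsize_byname(12));
        return 0;
}
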
index d7b6b5d16704f29c547ca7152c5c715e823c3d08..2a21c5024017374960c40efcfd02a4488d9735b6 100644 (file)
@@ -54,7 +54,6 @@ xfs_error_trap(int e)
                if (e != xfs_etrap[i])
                        continue;
                cmn_err(CE_NOTE, "xfs_error_trap: error %d", e);
-               debug_stop_all_cpus((void *)-1LL);
                BUG();
                break;
        }
index 06d8a8426c16333bccc76ed628b0031ef91c0733..26b8e709a569059f694b0bd9ac6c14dda141af56 100644 (file)
@@ -18,9 +18,6 @@
 #ifndef        __XFS_ERROR_H__
 #define        __XFS_ERROR_H__
 
-#define prdev(fmt,targ,args...) \
-       printk("XFS: device %s - " fmt "\n", XFS_BUFTARG_NAME(targ), ## args)
-
 #define XFS_ERECOVER   1       /* Failure to recover log */
 #define XFS_ELOGSTAT   2       /* Failure to stat log in user space */
 #define XFS_ENOLOGSPACE        3       /* Reservation too large */
@@ -182,8 +179,11 @@ extern int xfs_errortag_clearall_umount(int64_t fsid, char *fsname, int loud);
 struct xfs_mount;
 /* PRINTFLIKE4 */
 extern void xfs_cmn_err(int panic_tag, int level, struct xfs_mount *mp,
-                           char *fmt, ...);
+                       char *fmt, ...);
 /* PRINTFLIKE3 */
 extern void xfs_fs_cmn_err(int level, struct xfs_mount *mp, char *fmt, ...);
 
+#define xfs_fs_repair_cmn_err(level, mp, fmt, args...) \
+       xfs_fs_cmn_err(level, mp, fmt "  Unmount and run xfs_repair.", ## args)
+
 #endif /* __XFS_ERROR_H__ */
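
[Editorial note, not part of the patch] xfs_fs_repair_cmn_err() above works by gluing the fixed "Unmount and run xfs_repair." suffix onto the caller's format string through string-literal concatenation, with the GNU ", ## args" extension dropping the trailing comma when no arguments are passed; the xfs_inode.c hunks later in this diff switch their corruption messages over to it. A self-contained analogue, with fprintf standing in for xfs_fs_cmn_err() and a made-up macro name:

#include <stdio.h>

/* Standalone analogue of the wrapper added above: "fmt" and the suffix are
 * adjacent string literals, so the preprocessor output is one format string;
 * ", ## args" (a GNU C extension, as used in the kernel) swallows the comma
 * when the caller supplies no variadic arguments. */
#define repair_err(fmt, args...) \
        fprintf(stderr, "XFS: " fmt "  Unmount and run xfs_repair.\n", ## args)

int main(void)
{
        repair_err("corrupt dinode %llu, forkoff = 0x%x.", 128ULL, 0x38);
        repair_err("corrupt inode (btree).");   /* no varargs also compiles */
        return 0;
}
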
index ba096f80f48d5883217d90abc3943550bf64c018..14010f1fa82ffe9c2f85f84290bac1236c21fd2e 100644 (file)
@@ -3,15 +3,15 @@
  * All Rights Reserved.
  *
  * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
  *
  * This program is distributed in the hope that it would be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
+ * GNU Lesser General Public License for more details.
  *
- * You should have received a copy of the GNU General Public License
+ * You should have received a copy of the GNU Lesser General Public License
  * along with this program; if not, write the Free Software Foundation,
  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
@@ -65,6 +65,8 @@ struct fsxattr {
 #define XFS_XFLAG_RTINHERIT    0x00000100      /* create with rt bit set */
 #define XFS_XFLAG_PROJINHERIT  0x00000200      /* create with parents projid */
 #define XFS_XFLAG_NOSYMLINKS   0x00000400      /* disallow symlink creation */
+#define XFS_XFLAG_EXTSIZE      0x00000800      /* extent size allocator hint */
+#define XFS_XFLAG_EXTSZINHERIT 0x00001000      /* inherit inode extent size */
 #define XFS_XFLAG_HASATTR      0x80000000      /* no DIFLAG for this   */
 
 /*
index d1236d6f40455c96d188463c188dee42502447db..163031c1e3943d6af4fcad836aecb48550ce29e2 100644 (file)
@@ -540,6 +540,32 @@ xfs_reserve_blocks(
        return(0);
 }
 
+void
+xfs_fs_log_dummy(xfs_mount_t *mp)
+{
+       xfs_trans_t *tp;
+       xfs_inode_t *ip;
+
+
+       tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1);
+       atomic_inc(&mp->m_active_trans);
+       if (xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0)) {
+               xfs_trans_cancel(tp, 0);
+               return;
+       }
+
+       ip = mp->m_rootip;
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_ihold(tp, ip);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+       xfs_trans_set_sync(tp);
+       xfs_trans_commit(tp, 0, NULL);
+
+       xfs_iunlock(ip, XFS_ILOCK_EXCL);
+}
+
 int
 xfs_fs_goingdown(
        xfs_mount_t     *mp,
index f32713f14f9a21c1b752e2e8eb889dea72411f8e..300d0c9d61addd2b5a8cde71ffe7f784ab7a334f 100644 (file)
@@ -25,5 +25,6 @@ extern int xfs_fs_counts(xfs_mount_t *mp, xfs_fsop_counts_t *cnt);
 extern int xfs_reserve_blocks(xfs_mount_t *mp, __uint64_t *inval,
                                xfs_fsop_resblks_t *outval);
 extern int xfs_fs_goingdown(xfs_mount_t *mp, __uint32_t inflags);
+extern void xfs_fs_log_dummy(xfs_mount_t *mp);
 
 #endif /* __XFS_FSOPS_H__ */
index fc19eedbd11b0addaea5ed62386f2f6447988cd4..8e380a1fb79b691f03efa0e0aa126ce9c0e78727 100644 (file)
@@ -493,7 +493,6 @@ xfs_iget(
 
 retry:
        if ((inode = iget_locked(XFS_MTOVFS(mp)->vfs_super, ino))) {
-               bhv_desc_t      *bdp;
                xfs_inode_t     *ip;
 
                vp = LINVFS_GET_VP(inode);
@@ -517,14 +516,12 @@ retry:
                         * to wait for the inode to go away.
                         */
                        if (is_bad_inode(inode) ||
-                           ((bdp = vn_bhv_lookup(VN_BHV_HEAD(vp),
-                                                 &xfs_vnodeops)) == NULL)) {
+                           ((ip = xfs_vtoi(vp)) == NULL)) {
                                iput(inode);
                                delay(1);
                                goto retry;
                        }
 
-                       ip = XFS_BHVTOI(bdp);
                        if (lock_flags != 0)
                                xfs_ilock(ip, lock_flags);
                        XFS_STATS_INC(xs_ig_found);
index df0d4572d70a8a7b18fdfbbd62d4bfb2caccfc32..1d7f5a7e063eb3d34266919d538c3b1d53df41d7 100644 (file)
@@ -404,9 +404,8 @@ xfs_iformat(
            INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
                INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
            INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
-               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                       "corrupt dinode %Lu, extent total = %d, nblocks = %Lu."
-                       "  Unmount and run xfs_repair.",
+               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                       "corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
                        (unsigned long long)ip->i_ino,
                        (int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
                            + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
@@ -418,9 +417,8 @@ xfs_iformat(
        }
 
        if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
-               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                       "corrupt dinode %Lu, forkoff = 0x%x."
-                       "  Unmount and run xfs_repair.",
+               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                       "corrupt dinode %Lu, forkoff = 0x%x.",
                        (unsigned long long)ip->i_ino,
                        (int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
                XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
@@ -451,8 +449,9 @@ xfs_iformat(
                         * no local regular files yet
                         */
                        if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
-                               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                                       "corrupt inode (local format for regular file) %Lu.  Unmount and run xfs_repair.",
+                               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                                       "corrupt inode %Lu "
+                                       "(local format for regular file).",
                                        (unsigned long long) ip->i_ino);
                                XFS_CORRUPTION_ERROR("xfs_iformat(4)",
                                                     XFS_ERRLEVEL_LOW,
@@ -462,8 +461,9 @@ xfs_iformat(
 
                        di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
                        if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
-                               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                                       "corrupt inode %Lu (bad size %Ld for local inode).  Unmount and run xfs_repair.",
+                               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                                       "corrupt inode %Lu "
+                                       "(bad size %Ld for local inode).",
                                        (unsigned long long) ip->i_ino,
                                        (long long) di_size);
                                XFS_CORRUPTION_ERROR("xfs_iformat(5)",
@@ -551,8 +551,9 @@ xfs_iformat_local(
         * kmem_alloc() or memcpy() below.
         */
        if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
-               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                       "corrupt inode %Lu (bad size %d for local fork, size = %d).  Unmount and run xfs_repair.",
+               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                       "corrupt inode %Lu "
+                       "(bad size %d for local fork, size = %d).",
                        (unsigned long long) ip->i_ino, size,
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
                XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
@@ -610,8 +611,8 @@ xfs_iformat_extents(
         * kmem_alloc() or memcpy() below.
         */
        if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
-               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                       "corrupt inode %Lu ((a)extents = %d).  Unmount and run xfs_repair.",
+               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                       "corrupt inode %Lu ((a)extents = %d).",
                        (unsigned long long) ip->i_ino, nex);
                XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
                                     ip->i_mount, dip);
@@ -692,8 +693,8 @@ xfs_iformat_btree(
            || XFS_BMDR_SPACE_CALC(nrecs) >
                        XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
            || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
-               xfs_fs_cmn_err(CE_WARN, ip->i_mount,
-                       "corrupt inode %Lu (btree).  Unmount and run xfs_repair.",
+               xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+                       "corrupt inode %Lu (btree).",
                        (unsigned long long) ip->i_ino);
                XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
                                 ip->i_mount);
@@ -809,6 +810,10 @@ _xfs_dic2xflags(
                        flags |= XFS_XFLAG_PROJINHERIT;
                if (di_flags & XFS_DIFLAG_NOSYMLINKS)
                        flags |= XFS_XFLAG_NOSYMLINKS;
+               if (di_flags & XFS_DIFLAG_EXTSIZE)
+                       flags |= XFS_XFLAG_EXTSIZE;
+               if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
+                       flags |= XFS_XFLAG_EXTSZINHERIT;
        }
 
        return flags;
@@ -1192,11 +1197,19 @@ xfs_ialloc(
                        if ((mode & S_IFMT) == S_IFDIR) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
-                       } else {
+                               if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
+                                       di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+                                       ip->i_d.di_extsize = pip->i_d.di_extsize;
+                               }
+                       } else if ((mode & S_IFMT) == S_IFREG) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                        ip->i_iocore.io_flags |= XFS_IOCORE_RT;
                                }
+                               if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
+                                       di_flags |= XFS_DIFLAG_EXTSIZE;
+                                       ip->i_d.di_extsize = pip->i_d.di_extsize;
+                               }
                        }
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                            xfs_inherit_noatime)
@@ -1262,7 +1275,7 @@ xfs_isize_check(
        if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
                return;
 
-       if ( ip->i_d.di_flags & XFS_DIFLAG_REALTIME )
+       if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
                return;
 
        nimaps = 2;
@@ -1765,22 +1778,19 @@ xfs_igrow_start(
        xfs_fsize_t     new_size,
        cred_t          *credp)
 {
-       xfs_fsize_t     isize;
        int             error;
 
        ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
        ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
        ASSERT(new_size > ip->i_d.di_size);
 
-       error = 0;
-       isize = ip->i_d.di_size;
        /*
         * Zero any pages that may have been created by
         * xfs_write_file() beyond the end of the file
         * and any blocks between the old and new file sizes.
         */
-       error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size, isize,
-                               new_size);
+       error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
+                            ip->i_d.di_size, new_size);
        return error;
 }
 
@@ -3355,6 +3365,11 @@ xfs_iflush_int(
        ip->i_update_core = 0;
        SYNCHRONIZE();
 
+       /*
+        * Make sure to get the latest atime from the Linux inode.
+        */
+       xfs_synchronize_atime(ip);
+
        if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
                               mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
                xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
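
[Editorial note, not part of the patch] The xfs_ialloc() hunk above adds extent size inheritance: a new directory under a parent marked EXTSZINHERIT keeps the inherit flag, a new regular file receives the concrete EXTSIZE flag, and both copy the parent's di_extsize. A reduced sketch of just that rule; the struct, field subset and helper name are hypothetical, and the other inherited flags (RTINHERIT, NOATIME, ...) handled in the same hunk are omitted.

#include <stdio.h>

#define DIFLAG_EXTSIZE       (1 << 11)   /* values from the xfs_dinode.h hunk */
#define DIFLAG_EXTSZINHERIT  (1 << 12)

struct inode_hint { unsigned short di_flags; unsigned di_extsize; };

/* Directories pass the *inherit* flag on unchanged, regular files receive
 * the concrete EXTSIZE hint; both copy the parent's di_extsize. */
static void inherit_extsize(const struct inode_hint *parent, int is_dir,
                            struct inode_hint *child)
{
        if (!(parent->di_flags & DIFLAG_EXTSZINHERIT))
                return;
        child->di_flags |= is_dir ? DIFLAG_EXTSZINHERIT : DIFLAG_EXTSIZE;
        child->di_extsize = parent->di_extsize;
}

int main(void)
{
        struct inode_hint dir = { DIFLAG_EXTSZINHERIT, 16 };
        struct inode_hint subdir = { 0, 0 }, file = { 0, 0 };

        inherit_extsize(&dir, 1, &subdir);   /* subdir: EXTSZINHERIT, 16 */
        inherit_extsize(&dir, 0, &file);     /* file:   EXTSIZE,      16 */
        printf("0x%x/%u 0x%x/%u\n", subdir.di_flags, subdir.di_extsize,
                                    file.di_flags, file.di_extsize);
        return 0;
}
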
index 124d30e6143b7ddfe986d405e6eafdb2b4c19036..1cfbcf18ce86375a2a1ee6bacafd45cefab44361 100644 (file)
@@ -436,6 +436,10 @@ void               xfs_ichgtime(xfs_inode_t *, int);
 xfs_fsize_t    xfs_file_last_byte(xfs_inode_t *);
 void           xfs_lock_inodes(xfs_inode_t **, int, int, uint);
 
+xfs_inode_t    *xfs_vtoi(struct vnode *vp);
+
+void           xfs_synchronize_atime(xfs_inode_t *);
+
 #define xfs_ipincount(ip)      ((unsigned int) atomic_read(&ip->i_pincount))
 
 #ifdef DEBUG
index 7f3363c621e1d1bddca28b7c9e3cc7fe4150fe45..36aa1fcb90a599e58e934ab647f7c9f2b4f981ef 100644 (file)
@@ -271,6 +271,11 @@ xfs_inode_item_format(
        if (ip->i_update_size)
                ip->i_update_size = 0;
 
+       /*
+        * Make sure to get the latest atime from the Linux inode.
+        */
+       xfs_synchronize_atime(ip);
+
        vecp->i_addr = (xfs_caddr_t)&ip->i_d;
        vecp->i_len  = sizeof(xfs_dinode_core_t);
        XLOG_VEC_SET_TYPE(vecp, XLOG_REG_TYPE_ICORE);
@@ -603,7 +608,7 @@ xfs_inode_item_trylock(
                if (iip->ili_pushbuf_flag == 0) {
                        iip->ili_pushbuf_flag = 1;
 #ifdef DEBUG
-                       iip->ili_push_owner = get_thread_id();
+                       iip->ili_push_owner = current_pid();
 #endif
                        /*
                         * Inode is left locked in shared mode.
@@ -782,7 +787,7 @@ xfs_inode_item_pushbuf(
         * trying to duplicate our effort.
         */
        ASSERT(iip->ili_pushbuf_flag != 0);
-       ASSERT(iip->ili_push_owner == get_thread_id());
+       ASSERT(iip->ili_push_owner == current_pid());
 
        /*
         * If flushlock isn't locked anymore, chances are that the
index ca7afc83a8931dabdd00d8ed4a45d802afa695ba..788917f355c4a71ebc4c9b68c5d28aec72614e51 100644 (file)
@@ -262,7 +262,7 @@ phase2:
        case BMAPI_WRITE:
                /* If we found an extent, return it */
                if (nimaps &&
-                   (imap.br_startblock != HOLESTARTBLOCK) && 
+                   (imap.br_startblock != HOLESTARTBLOCK) &&
                    (imap.br_startblock != DELAYSTARTBLOCK)) {
                        xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, io,
                                        offset, count, iomapp, &imap, flags);
@@ -316,6 +316,58 @@ out:
        return XFS_ERROR(error);
 }
 
+STATIC int
+xfs_iomap_eof_align_last_fsb(
+       xfs_mount_t     *mp,
+       xfs_iocore_t    *io,
+       xfs_fsize_t     isize,
+       xfs_extlen_t    extsize,
+       xfs_fileoff_t   *last_fsb)
+{
+       xfs_fileoff_t   new_last_fsb = 0;
+       xfs_extlen_t    align;
+       int             eof, error;
+
+       if (io->io_flags & XFS_IOCORE_RT)
+               ;
+       /*
+        * If mounted with the "-o swalloc" option, roundup the allocation
+        * request to a stripe width boundary if the file size is >=
+        * stripe width and we are allocating past the allocation eof.
+        */
+       else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
+               (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)))
+               new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
+       /*
+        * Roundup the allocation request to a stripe unit (m_dalign) boundary
+        * if the file size is >= stripe unit size, and we are allocating past
+        * the allocation eof.
+        */
+       else if (mp->m_dalign && (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)))
+               new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
+
+       /*
+        * Always round up the allocation request to an extent boundary
+        * (when file on a real-time subvolume or has di_extsize hint).
+        */
+       if (extsize) {
+               if (new_last_fsb)
+                       align = roundup_64(new_last_fsb, extsize);
+               else
+                       align = extsize;
+               new_last_fsb = roundup_64(*last_fsb, align);
+       }
+
+       if (new_last_fsb) {
+               error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
+               if (error)
+                       return error;
+               if (eof)
+                       *last_fsb = new_last_fsb;
+       }
+       return 0;
+}
+
 STATIC int
 xfs_flush_space(
        xfs_inode_t     *ip,
@@ -362,19 +414,20 @@ xfs_iomap_write_direct(
        xfs_iocore_t    *io = &ip->i_iocore;
        xfs_fileoff_t   offset_fsb;
        xfs_fileoff_t   last_fsb;
-       xfs_filblks_t   count_fsb;
+       xfs_filblks_t   count_fsb, resaligned;
        xfs_fsblock_t   firstfsb;
+       xfs_extlen_t    extsz, temp;
+       xfs_fsize_t     isize;
        int             nimaps;
-       int             error;
        int             bmapi_flag;
        int             quota_flag;
        int             rt;
        xfs_trans_t     *tp;
        xfs_bmbt_irec_t imap;
        xfs_bmap_free_t free_list;
-       xfs_filblks_t   qblocks, resblks;
+       uint            qblocks, resblks, resrtextents;
        int             committed;
-       int             resrtextents;
+       int             error;
 
        /*
         * Make sure that the dquots are there. This doesn't hold
@@ -384,37 +437,52 @@ xfs_iomap_write_direct(
        if (error)
                return XFS_ERROR(error);
 
-       offset_fsb = XFS_B_TO_FSBT(mp, offset);
-       last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-       count_fsb = last_fsb - offset_fsb;
-       if (found && (ret_imap->br_startblock == HOLESTARTBLOCK)) {
-               xfs_fileoff_t   map_last_fsb;
-
-               map_last_fsb = ret_imap->br_blockcount + ret_imap->br_startoff;
-               if (map_last_fsb < last_fsb) {
-                       last_fsb = map_last_fsb;
-                       count_fsb = last_fsb - offset_fsb;
-               }
-               ASSERT(count_fsb > 0);
+       rt = XFS_IS_REALTIME_INODE(ip);
+       if (unlikely(rt)) {
+               if (!(extsz = ip->i_d.di_extsize))
+                       extsz = mp->m_sb.sb_rextsize;
+       } else {
+               extsz = ip->i_d.di_extsize;
        }
 
-       /*
-        * Determine if reserving space on the data or realtime partition.
-        */
-       if ((rt = XFS_IS_REALTIME_INODE(ip))) {
-               xfs_extlen_t    extsz;
+       isize = ip->i_d.di_size;
+       if (io->io_new_size > isize)
+               isize = io->io_new_size;
 
-               if (!(extsz = ip->i_d.di_extsize))
-                       extsz = mp->m_sb.sb_rextsize;
-               resrtextents = qblocks = (count_fsb + extsz - 1);
-               do_div(resrtextents, mp->m_sb.sb_rextsize);
-               resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
-               quota_flag = XFS_QMOPT_RES_RTBLKS;
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
+       last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
+       if ((offset + count) > isize) {
+               error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
+                                                       &last_fsb);
+               if (error)
+                       goto error_out;
        } else {
-               resrtextents = 0;
-               resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, count_fsb);
-               quota_flag = XFS_QMOPT_RES_REGBLKS;
+               if (found && (ret_imap->br_startblock == HOLESTARTBLOCK))
+                       last_fsb = MIN(last_fsb, (xfs_fileoff_t)
+                                       ret_imap->br_blockcount +
+                                       ret_imap->br_startoff);
        }
+       count_fsb = last_fsb - offset_fsb;
+       ASSERT(count_fsb > 0);
+
+       resaligned = count_fsb;
+       if (unlikely(extsz)) {
+               if ((temp = do_mod(offset_fsb, extsz)))
+                       resaligned += temp;
+               if ((temp = do_mod(resaligned, extsz)))
+                       resaligned += extsz - temp;
+       }
+
+       if (unlikely(rt)) {
+               resrtextents = qblocks = resaligned;
+               resrtextents /= mp->m_sb.sb_rextsize;
+               resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+               quota_flag = XFS_QMOPT_RES_RTBLKS;
+       } else {
+               resrtextents = 0;
+               resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
+               quota_flag = XFS_QMOPT_RES_REGBLKS;
+       }
 
        /*
         * Allocate and setup the transaction
@@ -425,7 +493,6 @@ xfs_iomap_write_direct(
                        XFS_WRITE_LOG_RES(mp), resrtextents,
                        XFS_TRANS_PERM_LOG_RES,
                        XFS_WRITE_LOG_COUNT);
-
        /*
         * Check for running out of space, note: need lock to return
         */
@@ -435,20 +502,20 @@ xfs_iomap_write_direct(
        if (error)
                goto error_out;
 
-       if (XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag)) {
-               error = (EDQUOT);
+       error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
+                                             qblocks, 0, quota_flag);
+       if (error)
                goto error1;
-       }
 
-       bmapi_flag = XFS_BMAPI_WRITE;
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_ihold(tp, ip);
 
-       if (!(flags & BMAPI_MMAP) && (offset < ip->i_d.di_size || rt))
+       bmapi_flag = XFS_BMAPI_WRITE;
+       if ((flags & BMAPI_DIRECT) && (offset < ip->i_d.di_size || extsz))
                bmapi_flag |= XFS_BMAPI_PREALLOC;
 
        /*
-        * Issue the bmapi() call to allocate the blocks
+        * Issue the xfs_bmapi() call to allocate the blocks
         */
        XFS_BMAP_INIT(&free_list, &firstfsb);
        nimaps = 1;
@@ -483,8 +550,10 @@ xfs_iomap_write_direct(
                         "extent-state : %x \n",
                         (ip->i_mount)->m_fsname,
                         (long long)ip->i_ino,
-                        ret_imap->br_startblock, ret_imap->br_startoff,
-                        ret_imap->br_blockcount,ret_imap->br_state);
+                        (unsigned long long)ret_imap->br_startblock,
+                       (unsigned long long)ret_imap->br_startoff,
+                        (unsigned long long)ret_imap->br_blockcount,
+                       ret_imap->br_state);
         }
        return 0;
 
@@ -500,6 +569,63 @@ error_out:
        return XFS_ERROR(error);
 }
 
+/*
+ * If the caller is doing a write at the end of the file,
+ * then extend the allocation out to the file system's write
+ * iosize.  We clean up any extra space left over when the
+ * file is closed in xfs_inactive().
+ *
+ * For sync writes, we are flushing delayed allocate space to
+ * try to make additional space available for allocation near
+ * the filesystem full boundary - preallocation hurts in that
+ * situation, of course.
+ */
+STATIC int
+xfs_iomap_eof_want_preallocate(
+       xfs_mount_t     *mp,
+       xfs_iocore_t    *io,
+       xfs_fsize_t     isize,
+       xfs_off_t       offset,
+       size_t          count,
+       int             ioflag,
+       xfs_bmbt_irec_t *imap,
+       int             nimaps,
+       int             *prealloc)
+{
+       xfs_fileoff_t   start_fsb;
+       xfs_filblks_t   count_fsb;
+       xfs_fsblock_t   firstblock;
+       int             n, error, imaps;
+
+       *prealloc = 0;
+       if ((ioflag & BMAPI_SYNC) || (offset + count) <= isize)
+               return 0;
+
+       /*
+        * If there are any real blocks past eof, then don't
+        * do any speculative allocation.
+        */
+       start_fsb = XFS_B_TO_FSBT(mp, ((xfs_ufsize_t)(offset + count - 1)));
+       count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
+       while (count_fsb > 0) {
+               imaps = nimaps;
+               firstblock = NULLFSBLOCK;
+               error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
+                                 0, &firstblock, 0, imap, &imaps, NULL);
+               if (error)
+                       return error;
+               for (n = 0; n < imaps; n++) {
+                       if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
+                           (imap[n].br_startblock != DELAYSTARTBLOCK))
+                               return 0;
+                       start_fsb += imap[n].br_blockcount;
+                       count_fsb -= imap[n].br_blockcount;
+               }
+       }
+       *prealloc = 1;
+       return 0;
+}
+
 int
 xfs_iomap_write_delay(
        xfs_inode_t     *ip,
@@ -513,13 +639,15 @@ xfs_iomap_write_delay(
        xfs_iocore_t    *io = &ip->i_iocore;
        xfs_fileoff_t   offset_fsb;
        xfs_fileoff_t   last_fsb;
-       xfs_fsize_t     isize;
+       xfs_off_t       aligned_offset;
+       xfs_fileoff_t   ioalign;
        xfs_fsblock_t   firstblock;
+       xfs_extlen_t    extsz;
+       xfs_fsize_t     isize;
        int             nimaps;
-       int             error;
        xfs_bmbt_irec_t imap[XFS_WRITE_IMAPS];
-       int             aeof;
-       int             fsynced = 0;
+       int             prealloc, fsynced = 0;
+       int             error;
 
        ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
 
@@ -527,152 +655,57 @@ xfs_iomap_write_delay(
         * Make sure that the dquots are there. This doesn't hold
         * the ilock across a disk read.
         */
-
        error = XFS_QM_DQATTACH(mp, ip, XFS_QMOPT_ILOCKED);
        if (error)
                return XFS_ERROR(error);
 
+       if (XFS_IS_REALTIME_INODE(ip)) {
+               if (!(extsz = ip->i_d.di_extsize))
+                       extsz = mp->m_sb.sb_rextsize;
+       } else {
+               extsz = ip->i_d.di_extsize;
+       }
+
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
 retry:
        isize = ip->i_d.di_size;
-       if (io->io_new_size > isize) {
+       if (io->io_new_size > isize)
                isize = io->io_new_size;
-       }
 
-       aeof = 0;
-       offset_fsb = XFS_B_TO_FSBT(mp, offset);
-       last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-       /*
-        * If the caller is doing a write at the end of the file,
-        * then extend the allocation (and the buffer used for the write)
-        * out to the file system's write iosize.  We clean up any extra
-        * space left over when the file is closed in xfs_inactive().
-        *
-        * For sync writes, we are flushing delayed allocate space to
-        * try to make additional space available for allocation near
-        * the filesystem full boundary - preallocation hurts in that
-        * situation, of course.
-        */
-       if (!(ioflag & BMAPI_SYNC) && ((offset + count) > ip->i_d.di_size)) {
-               xfs_off_t       aligned_offset;
-               xfs_filblks_t   count_fsb;
-               unsigned int    iosize;
-               xfs_fileoff_t   ioalign;
-               int             n;
-               xfs_fileoff_t   start_fsb;
+       error = xfs_iomap_eof_want_preallocate(mp, io, isize, offset, count,
+                               ioflag, imap, XFS_WRITE_IMAPS, &prealloc);
+       if (error)
+               return error;
 
-               /*
-                * If there are any real blocks past eof, then don't
-                * do any speculative allocation.
-                */
-               start_fsb = XFS_B_TO_FSBT(mp,
-                                       ((xfs_ufsize_t)(offset + count - 1)));
-               count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
-               while (count_fsb > 0) {
-                       nimaps = XFS_WRITE_IMAPS;
-                       error = XFS_BMAPI(mp, NULL, io, start_fsb, count_fsb,
-                                       0, &firstblock, 0, imap, &nimaps, NULL);
-                       if (error) {
-                               return error;
-                       }
-                       for (n = 0; n < nimaps; n++) {
-                               if ( !(io->io_flags & XFS_IOCORE_RT)  && 
-                                       !imap[n].br_startblock) {
-                                       cmn_err(CE_PANIC,"Access to block "
-                                               "zero:  fs <%s> inode: %lld "
-                                               "start_block : %llx start_off "
-                                               ": %llx blkcnt : %llx "
-                                               "extent-state : %x \n",
-                                               (ip->i_mount)->m_fsname,
-                                               (long long)ip->i_ino,
-                                               imap[n].br_startblock,
-                                               imap[n].br_startoff,
-                                               imap[n].br_blockcount,
-                                               imap[n].br_state);
-                               }
-                               if ((imap[n].br_startblock != HOLESTARTBLOCK) &&
-                                   (imap[n].br_startblock != DELAYSTARTBLOCK)) {
-                                       goto write_map;
-                               }
-                               start_fsb += imap[n].br_blockcount;
-                               count_fsb -= imap[n].br_blockcount;
-                       }
-               }
-               iosize = mp->m_writeio_blocks;
+       if (prealloc) {
                aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
                ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
-               last_fsb = ioalign + iosize;
-               aeof = 1;
+               last_fsb = ioalign + mp->m_writeio_blocks;
+       } else {
+               last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
        }
-write_map:
-       nimaps = XFS_WRITE_IMAPS;
-       firstblock = NULLFSBLOCK;
 
-       /*
-        * If mounted with the "-o swalloc" option, roundup the allocation
-        * request to a stripe width boundary if the file size is >=
-        * stripe width and we are allocating past the allocation eof.
-        */
-       if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_swidth 
-           && (mp->m_flags & XFS_MOUNT_SWALLOC)
-           && (isize >= XFS_FSB_TO_B(mp, mp->m_swidth)) && aeof) {
-               int eof;
-               xfs_fileoff_t new_last_fsb;
-
-               new_last_fsb = roundup_64(last_fsb, mp->m_swidth);
-               error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
-               if (error) {
-                       return error;
-               }
-               if (eof) {
-                       last_fsb = new_last_fsb;
-               }
-       /*
-        * Roundup the allocation request to a stripe unit (m_dalign) boundary
-        * if the file size is >= stripe unit size, and we are allocating past
-        * the allocation eof.
-        */
-       } else if (!(io->io_flags & XFS_IOCORE_RT) && mp->m_dalign &&
-                  (isize >= XFS_FSB_TO_B(mp, mp->m_dalign)) && aeof) {
-               int eof;
-               xfs_fileoff_t new_last_fsb;
-               new_last_fsb = roundup_64(last_fsb, mp->m_dalign);
-               error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
-               if (error) {
-                       return error;
-               }
-               if (eof) {
-                       last_fsb = new_last_fsb;
-               }
-       /*
-        * Round up the allocation request to a real-time extent boundary
-        * if the file is on the real-time subvolume.
-        */
-       } else if (io->io_flags & XFS_IOCORE_RT && aeof) {
-               int eof;
-               xfs_fileoff_t new_last_fsb;
-
-               new_last_fsb = roundup_64(last_fsb, mp->m_sb.sb_rextsize);
-               error = XFS_BMAP_EOF(mp, io, new_last_fsb, XFS_DATA_FORK, &eof);
-               if (error) {
+       if (prealloc || extsz) {
+               error = xfs_iomap_eof_align_last_fsb(mp, io, isize, extsz,
+                                                       &last_fsb);
+               if (error)
                        return error;
-               }
-               if (eof)
-                       last_fsb = new_last_fsb;
        }
+
+       nimaps = XFS_WRITE_IMAPS;
+       firstblock = NULLFSBLOCK;
        error = xfs_bmapi(NULL, ip, offset_fsb,
                          (xfs_filblks_t)(last_fsb - offset_fsb),
                          XFS_BMAPI_DELAY | XFS_BMAPI_WRITE |
                          XFS_BMAPI_ENTIRE, &firstblock, 1, imap,
                          &nimaps, NULL);
-       /*
-        * This can be EDQUOT, if nimaps == 0
-        */
-       if (error && (error != ENOSPC)) {
+       if (error && (error != ENOSPC))
                return XFS_ERROR(error);
-       }
+
        /*
         * If bmapi returned us nothing, and if we didn't get back EDQUOT,
-        * then we must have run out of space.
+        * then we must have run out of space - flush delalloc, and retry..
         */
        if (nimaps == 0) {
                xfs_iomap_enter_trace(XFS_IOMAP_WRITE_NOSPACE,
@@ -684,17 +717,21 @@ write_map:
                goto retry;
        }
 
-       *ret_imap = imap[0];
-       *nmaps = 1;
-       if ( !(io->io_flags & XFS_IOCORE_RT)  && !ret_imap->br_startblock) {
+       if (!(io->io_flags & XFS_IOCORE_RT)  && !ret_imap->br_startblock) {
                cmn_err(CE_PANIC,"Access to block zero:  fs <%s> inode: %lld "
                         "start_block : %llx start_off : %llx blkcnt : %llx "
                         "extent-state : %x \n",
                         (ip->i_mount)->m_fsname,
                         (long long)ip->i_ino,
-                        ret_imap->br_startblock, ret_imap->br_startoff,
-                        ret_imap->br_blockcount,ret_imap->br_state);
+                        (unsigned long long)ret_imap->br_startblock,
+                       (unsigned long long)ret_imap->br_startoff,
+                        (unsigned long long)ret_imap->br_blockcount,
+                       ret_imap->br_state);
        }
+
+       *ret_imap = imap[0];
+       *nmaps = 1;
+
        return 0;
 }
 
@@ -820,17 +857,21 @@ xfs_iomap_write_allocate(
                 */
 
                for (i = 0; i < nimaps; i++) {
-                       if ( !(io->io_flags & XFS_IOCORE_RT)  && 
-                               !imap[i].br_startblock) {
+                       if (!(io->io_flags & XFS_IOCORE_RT)  &&
+                           !imap[i].br_startblock) {
                                cmn_err(CE_PANIC,"Access to block zero:  "
                                        "fs <%s> inode: %lld "
-                                       "start_block : %llx start_off : %llx " 
+                                       "start_block : %llx start_off : %llx "
                                        "blkcnt : %llx extent-state : %x \n",
                                        (ip->i_mount)->m_fsname,
                                        (long long)ip->i_ino,
-                                       imap[i].br_startblock,
-                                       imap[i].br_startoff,
-                                       imap[i].br_blockcount,imap[i].br_state);
+                                       (unsigned long long)
+                                               imap[i].br_startblock,
+                                       (unsigned long long)
+                                               imap[i].br_startoff,
+                                       (unsigned long long)
+                                               imap[i].br_blockcount,
+                                       imap[i].br_state);
                         }
                        if ((offset_fsb >= imap[i].br_startoff) &&
                            (offset_fsb < (imap[i].br_startoff +
@@ -867,17 +908,17 @@ xfs_iomap_write_unwritten(
 {
        xfs_mount_t     *mp = ip->i_mount;
        xfs_iocore_t    *io = &ip->i_iocore;
-       xfs_trans_t     *tp;
        xfs_fileoff_t   offset_fsb;
        xfs_filblks_t   count_fsb;
        xfs_filblks_t   numblks_fsb;
-       xfs_bmbt_irec_t imap;
+       xfs_fsblock_t   firstfsb;
+       int             nimaps;
+       xfs_trans_t     *tp;
+       xfs_bmbt_irec_t imap;
+       xfs_bmap_free_t free_list;
+       uint            resblks;
        int             committed;
        int             error;
-       int             nres;
-       int             nimaps;
-       xfs_fsblock_t   firstfsb;
-       xfs_bmap_free_t free_list;
 
        xfs_iomap_enter_trace(XFS_IOMAP_UNWRITTEN,
                                &ip->i_iocore, offset, count);
@@ -886,9 +927,9 @@ xfs_iomap_write_unwritten(
        count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
        count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);
 
-       do {
-               nres = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
 
+       do {
                /*
                 * set up a transaction to convert the range of extents
                 * from unwritten to real. Do allocations in a loop until
@@ -896,7 +937,7 @@ xfs_iomap_write_unwritten(
                 */
 
                tp = xfs_trans_alloc(mp, XFS_TRANS_STRAT_WRITE);
-               error = xfs_trans_reserve(tp, nres,
+               error = xfs_trans_reserve(tp, resblks,
                                XFS_WRITE_LOG_RES(mp), 0,
                                XFS_TRANS_PERM_LOG_RES,
                                XFS_WRITE_LOG_COUNT);
@@ -915,7 +956,7 @@ xfs_iomap_write_unwritten(
                XFS_BMAP_INIT(&free_list, &firstfsb);
                nimaps = 1;
                error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
-                                 XFS_BMAPI_WRITE, &firstfsb,
+                                 XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
                                  1, &imap, &nimaps, &free_list);
                if (error)
                        goto error_on_bmapi_transaction;
@@ -929,15 +970,17 @@ xfs_iomap_write_unwritten(
                xfs_iunlock(ip, XFS_ILOCK_EXCL);
                if (error)
                        goto error0;
-               
+
                if ( !(io->io_flags & XFS_IOCORE_RT)  && !imap.br_startblock) {
                        cmn_err(CE_PANIC,"Access to block zero:  fs <%s> "
                                "inode: %lld start_block : %llx start_off : "
                                "%llx blkcnt : %llx extent-state : %x \n",
                                (ip->i_mount)->m_fsname,
                                (long long)ip->i_ino,
-                               imap.br_startblock,imap.br_startoff,
-                               imap.br_blockcount,imap.br_state);
+                               (unsigned long long)imap.br_startblock,
+                               (unsigned long long)imap.br_startoff,
+                               (unsigned long long)imap.br_blockcount,
+                               imap.br_state);
                }
 
                if ((numblks_fsb = imap.br_blockcount) == 0) {
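
The casts added in the hunks above exist so the cmn_err() arguments always match the %llx conversions: the 64-bit on-disk types behind br_startblock, br_startoff and br_blockcount are not guaranteed to be plain unsigned long long on every architecture. A minimal user-space sketch of the same pattern follows; fake_fsblock_t and the sample value are assumptions for illustration, not XFS types.

/*
 * Stand-alone illustration of the cast-before-%llx pattern used above.
 * fake_fsblock_t is an assumption standing in for the XFS on-disk type.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t fake_fsblock_t;        /* stand-in for xfs_fsblock_t */

int main(void)
{
        fake_fsblock_t startblock = 0x1234abcdULL;

        /* Cast explicitly so the vararg always matches the %llx spec. */
        printf("start_block : %llx\n", (unsigned long long)startblock);
        return 0;
}
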
index f63646ead8168eea1c6c939115ac4f032bb3016b..c59450e1be40a5af3758857267018eabb7b5f586 100644 (file)
@@ -56,6 +56,7 @@ xfs_bulkstat_one_iget(
 {
        xfs_dinode_core_t *dic;         /* dinode core info pointer */
        xfs_inode_t     *ip;            /* incore inode pointer */
+       vnode_t         *vp;
        int             error;
 
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, bno);
@@ -72,6 +73,7 @@ xfs_bulkstat_one_iget(
                goto out_iput;
        }
 
+       vp = XFS_ITOV(ip);
        dic = &ip->i_d;
 
        /* xfs_iget returns the following without needing
@@ -84,8 +86,7 @@ xfs_bulkstat_one_iget(
        buf->bs_uid = dic->di_uid;
        buf->bs_gid = dic->di_gid;
        buf->bs_size = dic->di_size;
-       buf->bs_atime.tv_sec = dic->di_atime.t_sec;
-       buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
+       vn_atime_to_bstime(vp, &buf->bs_atime);
        buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
        buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
        buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
index 29af51275ca90a09d99c2146f8704c0e0eca630c..3d9a36e7736312f6e125c5d1b085ef17cc9d5d58 100644 (file)
@@ -178,6 +178,83 @@ xlog_trace_iclog(xlog_in_core_t *iclog, uint state)
 #define        xlog_trace_iclog(iclog,state)
 #endif /* XFS_LOG_TRACE */
 
+
+static void
+xlog_ins_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+       if (*qp) {
+               tic->t_next         = (*qp);
+               tic->t_prev         = (*qp)->t_prev;
+               (*qp)->t_prev->t_next = tic;
+               (*qp)->t_prev       = tic;
+       } else {
+               tic->t_prev = tic->t_next = tic;
+               *qp = tic;
+       }
+
+       tic->t_flags |= XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_del_ticketq(struct xlog_ticket **qp, struct xlog_ticket *tic)
+{
+       if (tic == tic->t_next) {
+               *qp = NULL;
+       } else {
+               *qp = tic->t_next;
+               tic->t_next->t_prev = tic->t_prev;
+               tic->t_prev->t_next = tic->t_next;
+       }
+
+       tic->t_next = tic->t_prev = NULL;
+       tic->t_flags &= ~XLOG_TIC_IN_Q;
+}
+
+static void
+xlog_grant_sub_space(struct log *log, int bytes)
+{
+       log->l_grant_write_bytes -= bytes;
+       if (log->l_grant_write_bytes < 0) {
+               log->l_grant_write_bytes += log->l_logsize;
+               log->l_grant_write_cycle--;
+       }
+
+       log->l_grant_reserve_bytes -= bytes;
+       if ((log)->l_grant_reserve_bytes < 0) {
+               log->l_grant_reserve_bytes += log->l_logsize;
+               log->l_grant_reserve_cycle--;
+       }
+
+}
+
+static void
+xlog_grant_add_space_write(struct log *log, int bytes)
+{
+       log->l_grant_write_bytes += bytes;
+       if (log->l_grant_write_bytes > log->l_logsize) {
+               log->l_grant_write_bytes -= log->l_logsize;
+               log->l_grant_write_cycle++;
+       }
+}
+
+static void
+xlog_grant_add_space_reserve(struct log *log, int bytes)
+{
+       log->l_grant_reserve_bytes += bytes;
+       if (log->l_grant_reserve_bytes > log->l_logsize) {
+               log->l_grant_reserve_bytes -= log->l_logsize;
+               log->l_grant_reserve_cycle++;
+       }
+}
+
+static inline void
+xlog_grant_add_space(struct log *log, int bytes)
+{
+       xlog_grant_add_space_write(log, bytes);
+       xlog_grant_add_space_reserve(log, bytes);
+}
+
+
 /*
  * NOTES:
  *
@@ -428,7 +505,7 @@ xfs_log_mount(xfs_mount_t   *mp,
                if (readonly)
                        vfsp->vfs_flag &= ~VFS_RDONLY;
 
-               error = xlog_recover(mp->m_log, readonly);
+               error = xlog_recover(mp->m_log);
 
                if (readonly)
                        vfsp->vfs_flag |= VFS_RDONLY;
@@ -1320,8 +1397,7 @@ xlog_sync(xlog_t          *log,
 
        /* move grant heads by roundoff in sync */
        s = GRANT_LOCK(log);
-       XLOG_GRANT_ADD_SPACE(log, roundoff, 'w');
-       XLOG_GRANT_ADD_SPACE(log, roundoff, 'r');
+       xlog_grant_add_space(log, roundoff);
        GRANT_UNLOCK(log, s);
 
        /* put cycle number in every block */
@@ -1515,7 +1591,6 @@ xlog_state_finish_copy(xlog_t             *log,
  * print out info relating to regions written which consume
  * the reservation
  */
-#if defined(XFS_LOG_RES_DEBUG)
 STATIC void
 xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
 {
@@ -1605,11 +1680,11 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
                        ticket->t_res_arr_sum, ticket->t_res_o_flow,
                        ticket->t_res_num_ophdrs, ophdr_spc,
                        ticket->t_res_arr_sum + 
-                         ticket->t_res_o_flow + ophdr_spc,
+                       ticket->t_res_o_flow + ophdr_spc,
                        ticket->t_res_num);
 
        for (i = 0; i < ticket->t_res_num; i++) {
-               uint r_type = ticket->t_res_arr[i].r_type; 
+               uint r_type = ticket->t_res_arr[i].r_type; 
                cmn_err(CE_WARN,
                            "region[%u]: %s - %u bytes\n",
                            i, 
@@ -1618,9 +1693,6 @@ xlog_print_tic_res(xfs_mount_t *mp, xlog_ticket_t *ticket)
                            ticket->t_res_arr[i].r_len);
        }
 }
-#else
-#define xlog_print_tic_res(mp, ticket)
-#endif
 
 /*
  * Write some region out to in-core log
@@ -2389,7 +2461,7 @@ xlog_grant_log_space(xlog_t          *log,
 
        /* something is already sleeping; insert new transaction at end */
        if (log->l_reserve_headq) {
-               XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+               xlog_ins_ticketq(&log->l_reserve_headq, tic);
                xlog_trace_loggrant(log, tic,
                                    "xlog_grant_log_space: sleep 1");
                /*
@@ -2422,7 +2494,7 @@ redo:
                                     log->l_grant_reserve_bytes);
        if (free_bytes < need_bytes) {
                if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-                       XLOG_INS_TICKETQ(log->l_reserve_headq, tic);
+                       xlog_ins_ticketq(&log->l_reserve_headq, tic);
                xlog_trace_loggrant(log, tic,
                                    "xlog_grant_log_space: sleep 2");
                XFS_STATS_INC(xs_sleep_logspace);
@@ -2439,11 +2511,10 @@ redo:
                s = GRANT_LOCK(log);
                goto redo;
        } else if (tic->t_flags & XLOG_TIC_IN_Q)
-               XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+               xlog_del_ticketq(&log->l_reserve_headq, tic);
 
        /* we've got enough space */
-       XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w');
-       XLOG_GRANT_ADD_SPACE(log, need_bytes, 'r');
+       xlog_grant_add_space(log, need_bytes);
 #ifdef DEBUG
        tail_lsn = log->l_tail_lsn;
        /*
@@ -2464,7 +2535,7 @@ redo:
 
  error_return:
        if (tic->t_flags & XLOG_TIC_IN_Q)
-               XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+               xlog_del_ticketq(&log->l_reserve_headq, tic);
        xlog_trace_loggrant(log, tic, "xlog_grant_log_space: err_ret");
        /*
         * If we are failing, make sure the ticket doesn't have any
@@ -2533,7 +2604,7 @@ xlog_regrant_write_log_space(xlog_t          *log,
 
                if (ntic != log->l_write_headq) {
                        if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-                               XLOG_INS_TICKETQ(log->l_write_headq, tic);
+                               xlog_ins_ticketq(&log->l_write_headq, tic);
 
                        xlog_trace_loggrant(log, tic,
                                    "xlog_regrant_write_log_space: sleep 1");
@@ -2565,7 +2636,7 @@ redo:
                                     log->l_grant_write_bytes);
        if (free_bytes < need_bytes) {
                if ((tic->t_flags & XLOG_TIC_IN_Q) == 0)
-                       XLOG_INS_TICKETQ(log->l_write_headq, tic);
+                       xlog_ins_ticketq(&log->l_write_headq, tic);
                XFS_STATS_INC(xs_sleep_logspace);
                sv_wait(&tic->t_sema, PINOD|PLTWAIT, &log->l_grant_lock, s);
 
@@ -2581,9 +2652,10 @@ redo:
                s = GRANT_LOCK(log);
                goto redo;
        } else if (tic->t_flags & XLOG_TIC_IN_Q)
-               XLOG_DEL_TICKETQ(log->l_write_headq, tic);
+               xlog_del_ticketq(&log->l_write_headq, tic);
 
-       XLOG_GRANT_ADD_SPACE(log, need_bytes, 'w'); /* we've got enough space */
+       /* we've got enough space */
+       xlog_grant_add_space_write(log, need_bytes);
 #ifdef DEBUG
        tail_lsn = log->l_tail_lsn;
        if (CYCLE_LSN(tail_lsn) != log->l_grant_write_cycle) {
@@ -2600,7 +2672,7 @@ redo:
 
  error_return:
        if (tic->t_flags & XLOG_TIC_IN_Q)
-               XLOG_DEL_TICKETQ(log->l_reserve_headq, tic);
+               xlog_del_ticketq(&log->l_reserve_headq, tic);
        xlog_trace_loggrant(log, tic, "xlog_regrant_write_log_space: err_ret");
        /*
         * If we are failing, make sure the ticket doesn't have any
@@ -2633,8 +2705,7 @@ xlog_regrant_reserve_log_space(xlog_t          *log,
                ticket->t_cnt--;
 
        s = GRANT_LOCK(log);
-       XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
-       XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+       xlog_grant_sub_space(log, ticket->t_curr_res);
        ticket->t_curr_res = ticket->t_unit_res;
        XLOG_TIC_RESET_RES(ticket);
        xlog_trace_loggrant(log, ticket,
@@ -2647,7 +2718,7 @@ xlog_regrant_reserve_log_space(xlog_t          *log,
                return;
        }
 
-       XLOG_GRANT_ADD_SPACE(log, ticket->t_unit_res, 'r');
+       xlog_grant_add_space_reserve(log, ticket->t_unit_res);
        xlog_trace_loggrant(log, ticket,
                            "xlog_regrant_reserve_log_space: exit");
        xlog_verify_grant_head(log, 0);
@@ -2683,8 +2754,7 @@ xlog_ungrant_log_space(xlog_t          *log,
        s = GRANT_LOCK(log);
        xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: enter");
 
-       XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'w');
-       XLOG_GRANT_SUB_SPACE(log, ticket->t_curr_res, 'r');
+       xlog_grant_sub_space(log, ticket->t_curr_res);
 
        xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: sub current");
 
@@ -2693,8 +2763,7 @@ xlog_ungrant_log_space(xlog_t          *log,
         */
        if (ticket->t_cnt > 0) {
                ASSERT(ticket->t_flags & XLOG_TIC_PERM_RESERV);
-               XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'w');
-               XLOG_GRANT_SUB_SPACE(log, ticket->t_unit_res*ticket->t_cnt,'r');
+               xlog_grant_sub_space(log, ticket->t_unit_res*ticket->t_cnt);
        }
 
        xlog_trace_loggrant(log, ticket, "xlog_ungrant_log_space: exit");
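
The new xlog_ins_ticketq()/xlog_del_ticketq() helpers above replace the old XLOG_INS_TICKETQ/XLOG_DEL_TICKETQ macros and keep waiting tickets on a circular doubly linked list in which *qp is the head and (*qp)->t_prev doubles as the tail. A stand-alone sketch of that list discipline; struct node and its field names are illustrative assumptions.

/*
 * Minimal model of the ticket queue: a circular doubly linked list where
 * *qp is the head and (*qp)->prev is the tail.
 */
#include <assert.h>
#include <stddef.h>

struct node {
        struct node *next;
        struct node *prev;
        int id;
};

static void ins_queue(struct node **qp, struct node *n)
{
        if (*qp) {
                n->next = *qp;                  /* link in front of the head... */
                n->prev = (*qp)->prev;          /* ...which is the tail position */
                (*qp)->prev->next = n;
                (*qp)->prev = n;
        } else {
                n->prev = n->next = n;          /* first element points at itself */
                *qp = n;
        }
}

static void del_queue(struct node **qp, struct node *n)
{
        if (n == n->next) {
                *qp = NULL;                     /* queue is now empty */
        } else {
                *qp = n->next;                  /* reposition the head */
                n->next->prev = n->prev;
                n->prev->next = n->next;
        }
        n->next = n->prev = NULL;
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };
        struct node *q = NULL;

        ins_queue(&q, &a);
        ins_queue(&q, &b);
        assert(q == &a && q->prev == &b);       /* b queued behind a */
        del_queue(&q, &a);
        assert(q == &b && b.next == &b);        /* b is the only element left */
        del_queue(&q, &b);
        assert(q == NULL);
        return 0;
}

Passing the queue head by address is what lets the helper empty or repoint it, which is why the call sites above change from XLOG_INS_TICKETQ(log->l_reserve_headq, tic) to xlog_ins_ticketq(&log->l_reserve_headq, tic).
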
index f40d4391fcfcc0f89cdd2a5f658906f88b5fbca4..4b2ac88dbb8320504e1f3149623fa46dc9e5c0c5 100644 (file)
@@ -96,7 +96,6 @@ static inline xfs_lsn_t       _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
 
 
 /* Region types for iovec's i_type */
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_REG_TYPE_BFORMAT          1
 #define XLOG_REG_TYPE_BCHUNK           2
 #define XLOG_REG_TYPE_EFI_FORMAT       3
@@ -117,21 +116,13 @@ static inline xfs_lsn_t   _lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
 #define XLOG_REG_TYPE_COMMIT           18
 #define XLOG_REG_TYPE_TRANSHDR         19
 #define XLOG_REG_TYPE_MAX              19
-#endif
 
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_VEC_SET_TYPE(vecp, t) ((vecp)->i_type = (t))
-#else
-#define XLOG_VEC_SET_TYPE(vecp, t)
-#endif
-
 
 typedef struct xfs_log_iovec {
        xfs_caddr_t             i_addr;         /* beginning address of region */
        int             i_len;          /* length in bytes of region */
-#if defined(XFS_LOG_RES_DEBUG)
-       uint            i_type;         /* type of region */
-#endif
+       uint            i_type;         /* type of region */
 } xfs_log_iovec_t;
 
 typedef void* xfs_log_ticket_t;
index 4518b188ade69d5e1ff44f850f2a2a5e0e9fd768..34bcbf50789c1e614e474427341cdcd6889e7efa 100644 (file)
@@ -253,7 +253,6 @@ typedef __uint32_t xlog_tid_t;
 
 
 /* Ticket reservation region accounting */ 
-#if defined(XFS_LOG_RES_DEBUG)
 #define XLOG_TIC_LEN_MAX       15
 #define XLOG_TIC_RESET_RES(t) ((t)->t_res_num = \
                                (t)->t_res_arr_sum = (t)->t_res_num_ophdrs = 0)
@@ -278,15 +277,9 @@ typedef __uint32_t xlog_tid_t;
  * we don't care about.
  */
 typedef struct xlog_res {
-       uint    r_len;
-       uint    r_type;
+       uint    r_len;  /* region length                :4 */
+       uint    r_type; /* region's transaction type    :4 */
 } xlog_res_t;
-#else
-#define XLOG_TIC_RESET_RES(t)
-#define XLOG_TIC_ADD_OPHDR(t)
-#define XLOG_TIC_ADD_REGION(t, len, type)
-#endif
-
 
 typedef struct xlog_ticket {
        sv_t               t_sema;       /* sleep on this semaphore      : 20 */
@@ -301,14 +294,12 @@ typedef struct xlog_ticket {
        char               t_flags;      /* properties of reservation    : 1  */
        uint               t_trans_type; /* transaction type             : 4  */
 
-#if defined (XFS_LOG_RES_DEBUG)
         /* reservation array fields */
        uint               t_res_num;                    /* num in array : 4 */
-       xlog_res_t         t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : X */ 
        uint               t_res_num_ophdrs;             /* num op hdrs  : 4 */
        uint               t_res_arr_sum;                /* array sum    : 4 */
        uint               t_res_o_flow;                 /* sum overflow : 4 */
-#endif
+       xlog_res_t         t_res_arr[XLOG_TIC_LEN_MAX];  /* array of res : 8 * 15 */ 
 } xlog_ticket_t;
 
 #endif
@@ -494,71 +485,13 @@ typedef struct log {
 
 #define XLOG_FORCED_SHUTDOWN(log)      ((log)->l_flags & XLOG_IO_ERROR)
 
-#define XLOG_GRANT_SUB_SPACE(log,bytes,type)                           \
-    {                                                                  \
-       if (type == 'w') {                                              \
-               (log)->l_grant_write_bytes -= (bytes);                  \
-               if ((log)->l_grant_write_bytes < 0) {                   \
-                       (log)->l_grant_write_bytes += (log)->l_logsize; \
-                       (log)->l_grant_write_cycle--;                   \
-               }                                                       \
-       } else {                                                        \
-               (log)->l_grant_reserve_bytes -= (bytes);                \
-               if ((log)->l_grant_reserve_bytes < 0) {                 \
-                       (log)->l_grant_reserve_bytes += (log)->l_logsize;\
-                       (log)->l_grant_reserve_cycle--;                 \
-               }                                                       \
-        }                                                              \
-    }
-#define XLOG_GRANT_ADD_SPACE(log,bytes,type)                           \
-    {                                                                  \
-       if (type == 'w') {                                              \
-               (log)->l_grant_write_bytes += (bytes);                  \
-               if ((log)->l_grant_write_bytes > (log)->l_logsize) {    \
-                       (log)->l_grant_write_bytes -= (log)->l_logsize; \
-                       (log)->l_grant_write_cycle++;                   \
-               }                                                       \
-       } else {                                                        \
-               (log)->l_grant_reserve_bytes += (bytes);                \
-               if ((log)->l_grant_reserve_bytes > (log)->l_logsize) {  \
-                       (log)->l_grant_reserve_bytes -= (log)->l_logsize;\
-                       (log)->l_grant_reserve_cycle++;                 \
-               }                                                       \
-        }                                                              \
-    }
-#define XLOG_INS_TICKETQ(q, tic)                       \
-    {                                                  \
-       if (q) {                                        \
-               (tic)->t_next       = (q);              \
-               (tic)->t_prev       = (q)->t_prev;      \
-               (q)->t_prev->t_next = (tic);            \
-               (q)->t_prev         = (tic);            \
-       } else {                                        \
-               (tic)->t_prev = (tic)->t_next = (tic);  \
-               (q) = (tic);                            \
-       }                                               \
-       (tic)->t_flags |= XLOG_TIC_IN_Q;                \
-    }
-#define XLOG_DEL_TICKETQ(q, tic)                       \
-    {                                                  \
-       if ((tic) == (tic)->t_next) {                   \
-               (q) = NULL;                             \
-       } else {                                        \
-               (q) = (tic)->t_next;                    \
-               (tic)->t_next->t_prev = (tic)->t_prev;  \
-               (tic)->t_prev->t_next = (tic)->t_next;  \
-       }                                               \
-       (tic)->t_next = (tic)->t_prev = NULL;           \
-       (tic)->t_flags &= ~XLOG_TIC_IN_Q;               \
-    }
 
 /* common routines */
 extern xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
 extern int      xlog_find_tail(xlog_t  *log,
                                xfs_daddr_t *head_blk,
-                               xfs_daddr_t *tail_blk,
-                               int readonly);
-extern int      xlog_recover(xlog_t *log, int readonly);
+                               xfs_daddr_t *tail_blk);
+extern int      xlog_recover(xlog_t *log);
 extern int      xlog_recover_finish(xlog_t *log, int mfsi_flags);
 extern void     xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
 extern void     xlog_recover_process_iunlinks(xlog_t *log);
index 8ab7df768063deb4824ee52e7e4d55ff0ceb113f..7d46cbd6a07ad448588c4fcc50c8695cc9008a66 100644 (file)
@@ -783,8 +783,7 @@ int
 xlog_find_tail(
        xlog_t                  *log,
        xfs_daddr_t             *head_blk,
-       xfs_daddr_t             *tail_blk,
-       int                     readonly)
+       xfs_daddr_t             *tail_blk)
 {
        xlog_rec_header_t       *rhead;
        xlog_op_header_t        *op_head;
@@ -2563,10 +2562,12 @@ xlog_recover_do_quotaoff_trans(
 
        /*
         * The logitem format's flag tells us if this was user quotaoff,
-        * group quotaoff or both.
+        * group/project quotaoff or both.
         */
        if (qoff_f->qf_flags & XFS_UQUOTA_ACCT)
                log->l_quotaoffs_flag |= XFS_DQ_USER;
+       if (qoff_f->qf_flags & XFS_PQUOTA_ACCT)
+               log->l_quotaoffs_flag |= XFS_DQ_PROJ;
        if (qoff_f->qf_flags & XFS_GQUOTA_ACCT)
                log->l_quotaoffs_flag |= XFS_DQ_GROUP;
 
@@ -3890,14 +3891,13 @@ xlog_do_recover(
  */
 int
 xlog_recover(
-       xlog_t          *log,
-       int             readonly)
+       xlog_t          *log)
 {
        xfs_daddr_t     head_blk, tail_blk;
        int             error;
 
        /* find the tail of the log */
-       if ((error = xlog_find_tail(log, &head_blk, &tail_blk, readonly)))
+       if ((error = xlog_find_tail(log, &head_blk, &tail_blk)))
                return error;
 
        if (tail_blk != head_blk) {
index 303af86739bf724956d8f15cc55652b9acefebdf..6088e14f84e3418d573eaf9147c7c82224335382 100644 (file)
@@ -51,7 +51,7 @@ STATIC int    xfs_uuid_mount(xfs_mount_t *);
 STATIC void    xfs_uuid_unmount(xfs_mount_t *mp);
 STATIC void    xfs_unmountfs_wait(xfs_mount_t *);
 
-static struct {
+static const struct {
     short offset;
     short type;     /* 0 = integer
                * 1 = binary / string (no translation)
@@ -1077,8 +1077,7 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
 
        xfs_iflush_all(mp);
 
-       XFS_QM_DQPURGEALL(mp,
-               XFS_QMOPT_UQUOTA | XFS_QMOPT_GQUOTA | XFS_QMOPT_UMOUNTING);
+       XFS_QM_DQPURGEALL(mp, XFS_QMOPT_QUOTALL | XFS_QMOPT_UMOUNTING);
 
        /*
         * Flush out the log synchronously so that we know for sure
index 3432fd5a3986745ebee217f9ba84a0797853318f..cd3cf9613a0069be1569cb70242048927f78b021 100644 (file)
@@ -308,7 +308,6 @@ typedef struct xfs_mount {
        xfs_buftarg_t           *m_ddev_targp;  /* saves taking the address */
        xfs_buftarg_t           *m_logdev_targp;/* ptr to log device */
        xfs_buftarg_t           *m_rtdev_targp; /* ptr to rt device */
-#define m_dev          m_ddev_targp->pbr_dev
        __uint8_t               m_dircook_elog; /* log d-cookie entry bits */
        __uint8_t               m_blkbit_log;   /* blocklog + NBBY */
        __uint8_t               m_blkbb_log;    /* blocklog - BBSHIFT */
@@ -393,7 +392,7 @@ typedef struct xfs_mount {
                                                   user */
 #define XFS_MOUNT_NOALIGN      (1ULL << 7)     /* turn off stripe alignment
                                                   allocations */
-#define XFS_MOUNT_COMPAT_ATTR  (1ULL << 8)     /* do not use attr2 format */
+#define XFS_MOUNT_ATTR2                (1ULL << 8)     /* allow use of attr2 format */
                             /* (1ULL << 9)     -- currently unused */
 #define XFS_MOUNT_NORECOVERY   (1ULL << 10)    /* no recovery - dirty fs */
 #define XFS_MOUNT_SHARED       (1ULL << 11)    /* shared mount */
index 4d4e8f4e768e09640b48dc3f843b82414448b5a1..81a05cfd77d2b5f60fa2429366ae07af19ca4843 100644 (file)
@@ -243,7 +243,6 @@ xfs_rename(
        xfs_inode_t     *inodes[4];
        int             target_ip_dropped = 0;  /* dropped target_ip link? */
        vnode_t         *src_dir_vp;
-       bhv_desc_t      *target_dir_bdp;
        int             spaceres;
        int             target_link_zero = 0;
        int             num_inodes;
@@ -260,14 +259,12 @@ xfs_rename(
         * Find the XFS behavior descriptor for the target directory
         * vnode since it was not handed to us.
         */
-       target_dir_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(target_dir_vp),
-                                               &xfs_vnodeops);
-       if (target_dir_bdp == NULL) {
+       target_dp = xfs_vtoi(target_dir_vp);
+       if (target_dp == NULL) {
                return XFS_ERROR(EXDEV);
        }
 
        src_dp = XFS_BHVTOI(src_dir_bdp);
-       target_dp = XFS_BHVTOI(target_dir_bdp);
        mp = src_dp->i_mount;
 
        if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
index c4b20872f07ddea19bf44f8b46c42b72c7980926..a59c102cf214b08e1bd47493439159ad59dd4b6b 100644 (file)
@@ -238,6 +238,7 @@ xfs_bioerror_relse(
        }
        return (EIO);
 }
+
 /*
  * Prints out an ALERT message about I/O error.
  */
@@ -252,11 +253,9 @@ xfs_ioerror_alert(
  "I/O error in filesystem (\"%s\") meta-data dev %s block 0x%llx"
  "       (\"%s\") error %d buf count %zd",
                (!mp || !mp->m_fsname) ? "(fs name not set)" : mp->m_fsname,
-               XFS_BUFTARG_NAME(bp->pb_target),
-               (__uint64_t)blkno,
-               func,
-               XFS_BUF_GETERROR(bp),
-               XFS_BUF_COUNT(bp));
+               XFS_BUFTARG_NAME(XFS_BUF_TARGET(bp)),
+               (__uint64_t)blkno, func,
+               XFS_BUF_GETERROR(bp), XFS_BUF_COUNT(bp));
 }
 
 /*
index 4a17d335f897684abe59bba281b1742dd1288571..bf168a91ddb83ffd4e91f7456fe83fd5bf937e8f 100644 (file)
@@ -68,18 +68,6 @@ struct xfs_mount;
        (XFS_SB_VERSION_NUMBITS | \
         XFS_SB_VERSION_OKREALFBITS | \
         XFS_SB_VERSION_OKSASHFBITS)
-#define XFS_SB_VERSION_MKFS(ia,dia,extflag,dirv2,na,sflag,morebits)    \
-       (((ia) || (dia) || (extflag) || (dirv2) || (na) || (sflag) || \
-         (morebits)) ? \
-               (XFS_SB_VERSION_4 | \
-                ((ia) ? XFS_SB_VERSION_ALIGNBIT : 0) | \
-                ((dia) ? XFS_SB_VERSION_DALIGNBIT : 0) | \
-                ((extflag) ? XFS_SB_VERSION_EXTFLGBIT : 0) | \
-                ((dirv2) ? XFS_SB_VERSION_DIRV2BIT : 0) | \
-                ((na) ? XFS_SB_VERSION_LOGV2BIT : 0) | \
-                ((sflag) ? XFS_SB_VERSION_SECTORBIT : 0) | \
-                ((morebits) ? XFS_SB_VERSION_MOREBITSBIT : 0)) : \
-               XFS_SB_VERSION_1)
 
 /*
  * There are two words to hold XFS "feature" bits: the original
@@ -105,11 +93,6 @@ struct xfs_mount;
        (XFS_SB_VERSION2_OKREALFBITS |  \
         XFS_SB_VERSION2_OKSASHFBITS )
 
-/*
- * mkfs macro to set up sb_features2 word
- */
-#define        XFS_SB_VERSION2_MKFS(resvd1, sbcntr)    0
-
 typedef struct xfs_sb
 {
        __uint32_t      sb_magicnum;    /* magic number == XFS_SB_MAGIC */
index 279e043d73239ace1ac9951ed6accaf71ba4abb8..d3d714e6b32a6ff5fc2b800229806ed385aa3a05 100644 (file)
@@ -1014,6 +1014,7 @@ xfs_trans_cancel(
        xfs_log_item_t          *lip;
        int                     i;
 #endif
+       xfs_mount_t             *mp = tp->t_mountp;
 
        /*
         * See if the caller is being too lazy to figure out if
@@ -1026,9 +1027,10 @@ xfs_trans_cancel(
         * filesystem.  This happens in paths where we detect
         * corruption and decide to give up.
         */
-       if ((tp->t_flags & XFS_TRANS_DIRTY) &&
-           !XFS_FORCED_SHUTDOWN(tp->t_mountp))
-               xfs_force_shutdown(tp->t_mountp, XFS_CORRUPT_INCORE);
+       if ((tp->t_flags & XFS_TRANS_DIRTY) && !XFS_FORCED_SHUTDOWN(mp)) {
+               XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
+               xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
+       }
 #ifdef DEBUG
        if (!(flags & XFS_TRANS_ABORT)) {
                licp = &(tp->t_items);
@@ -1040,7 +1042,7 @@ xfs_trans_cancel(
                                }
 
                                lip = lidp->lid_item;
-                               if (!XFS_FORCED_SHUTDOWN(tp->t_mountp))
+                               if (!XFS_FORCED_SHUTDOWN(mp))
                                        ASSERT(!(lip->li_type == XFS_LI_EFD));
                        }
                        licp = licp->lic_next;
@@ -1048,7 +1050,7 @@ xfs_trans_cancel(
        }
 #endif
        xfs_trans_unreserve_and_mod_sb(tp);
-       XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(tp->t_mountp, tp);
+       XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);
 
        if (tp->t_ticket) {
                if (flags & XFS_TRANS_RELEASE_LOG_RES) {
@@ -1057,7 +1059,7 @@ xfs_trans_cancel(
                } else {
                        log_flags = 0;
                }
-               xfs_log_done(tp->t_mountp, tp->t_ticket, NULL, log_flags);
+               xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
        }
 
        /* mark this thread as no longer being in a transaction */
index a889963fdd142297b7a8f9879e04eb79a6403168..d77901c07f6339e5ece08a21f8ef736e3d68337d 100644 (file)
@@ -973,7 +973,6 @@ void                xfs_trans_bhold(xfs_trans_t *, struct xfs_buf *);
 void           xfs_trans_bhold_release(xfs_trans_t *, struct xfs_buf *);
 void           xfs_trans_binval(xfs_trans_t *, struct xfs_buf *);
 void           xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
-void           xfs_trans_inode_buf(xfs_trans_t *, struct xfs_buf *);
 void           xfs_trans_stale_inode_buf(xfs_trans_t *, struct xfs_buf *);
 void           xfs_trans_dquot_buf(xfs_trans_t *, struct xfs_buf *, uint);
 void           xfs_trans_inode_alloc_buf(xfs_trans_t *, struct xfs_buf *);
index fefe1d60377fc825eecbf688ede1e2e6a4fce4ed..34654ec6ae106e45a2d508556cb09e70b18a40dd 100644 (file)
@@ -55,16 +55,13 @@ xfs_get_dir_entry(
        xfs_inode_t     **ipp)
 {
        vnode_t         *vp;
-       bhv_desc_t      *bdp;
 
        vp = VNAME_TO_VNODE(dentry);
-       bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(vp), &xfs_vnodeops);
-       if (!bdp) {
-               *ipp = NULL;
+
+       *ipp = xfs_vtoi(vp);
+       if (!*ipp)
                return XFS_ERROR(ENOENT);
-       }
        VN_HOLD(vp);
-       *ipp = XFS_BHVTOI(bdp);
        return 0;
 }
 
index 7bdbd991ab1cdd63300cc1eb7897b469d1a11abf..b6ad370fab3d839d7ff792ca9c03a33bf17a8b3f 100644 (file)
@@ -53,6 +53,7 @@
 #include "xfs_acl.h"
 #include "xfs_attr.h"
 #include "xfs_clnt.h"
+#include "xfs_fsops.h"
 
 STATIC int xfs_sync(bhv_desc_t *, int, cred_t *);
 
@@ -290,8 +291,8 @@ xfs_start_flags(
                mp->m_flags |= XFS_MOUNT_IDELETE;
        if (ap->flags & XFSMNT_DIRSYNC)
                mp->m_flags |= XFS_MOUNT_DIRSYNC;
-       if (ap->flags & XFSMNT_COMPAT_ATTR)
-               mp->m_flags |= XFS_MOUNT_COMPAT_ATTR;
+       if (ap->flags & XFSMNT_ATTR2)
+               mp->m_flags |= XFS_MOUNT_ATTR2;
 
        if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
                mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
@@ -312,6 +313,8 @@ xfs_start_flags(
                mp->m_flags |= XFS_MOUNT_NOUUID;
        if (ap->flags & XFSMNT_BARRIER)
                mp->m_flags |= XFS_MOUNT_BARRIER;
+       else
+               mp->m_flags &= ~XFS_MOUNT_BARRIER;
 
        return 0;
 }
@@ -330,10 +333,11 @@ xfs_finish_flags(
 
        /* Fail a mount where the logbuf is smaller then the log stripe */
        if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
-               if ((ap->logbufsize == -1) &&
+               if ((ap->logbufsize <= 0) &&
                    (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
                        mp->m_logbsize = mp->m_sb.sb_logsunit;
-               } else if (ap->logbufsize < mp->m_sb.sb_logsunit) {
+               } else if (ap->logbufsize > 0 &&
+                          ap->logbufsize < mp->m_sb.sb_logsunit) {
                        cmn_err(CE_WARN,
        "XFS: logbuf size must be greater than or equal to log stripe size");
                        return XFS_ERROR(EINVAL);
@@ -347,6 +351,10 @@ xfs_finish_flags(
                }
        }
 
+       if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
+               mp->m_flags |= XFS_MOUNT_ATTR2;
+       }
+
        /*
         * prohibit r/w mounts of read-only filesystems
         */
@@ -382,10 +390,6 @@ xfs_finish_flags(
                        return XFS_ERROR(EINVAL);
        }
 
-       if (XFS_SB_VERSION_HASATTR2(&mp->m_sb)) {
-               mp->m_flags &= ~XFS_MOUNT_COMPAT_ATTR;
-       }
-
        return 0;
 }
 
@@ -504,13 +508,13 @@ xfs_mount(
        if (error)
                goto error2;
 
+       if ((mp->m_flags & XFS_MOUNT_BARRIER) && !(vfsp->vfs_flag & VFS_RDONLY))
+               xfs_mountfs_check_barriers(mp);
+
        error = XFS_IOINIT(vfsp, args, flags);
        if (error)
                goto error2;
 
-       if ((args->flags & XFSMNT_BARRIER) &&
-           !(XFS_MTOVFS(mp)->vfs_flag & VFS_RDONLY))
-               xfs_mountfs_check_barriers(mp);
        return 0;
 
 error2:
@@ -655,6 +659,11 @@ xfs_mntupdate(
        else
                mp->m_flags &= ~XFS_MOUNT_NOATIME;
 
+       if (args->flags & XFSMNT_BARRIER)
+               mp->m_flags |= XFS_MOUNT_BARRIER;
+       else
+               mp->m_flags &= ~XFS_MOUNT_BARRIER;
+
        if ((vfsp->vfs_flag & VFS_RDONLY) &&
            !(*flags & MS_RDONLY)) {
                vfsp->vfs_flag &= ~VFS_RDONLY;
@@ -1634,6 +1643,7 @@ xfs_vget(
 #define MNTOPT_NORECOVERY   "norecovery"   /* don't run XFS recovery */
 #define MNTOPT_BARRIER "barrier"       /* use writer barriers for log write and
                                         * unwritten extent conversion */
+#define MNTOPT_NOBARRIER "nobarrier"   /* .. disable */
 #define MNTOPT_OSYNCISOSYNC "osyncisosync" /* o_sync is REALLY o_sync */
 #define MNTOPT_64BITINODE   "inode64"  /* inodes can be allocated anywhere */
 #define MNTOPT_IKEEP   "ikeep"         /* do not free empty inode clusters */
@@ -1680,7 +1690,6 @@ xfs_parseargs(
        int                     iosize;
 
        args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
-       args->flags |= XFSMNT_COMPAT_ATTR;
 
 #if 0  /* XXX: off by default, until some remaining issues ironed out */
        args->flags |= XFSMNT_IDELETE; /* default to on */
@@ -1806,6 +1815,8 @@ xfs_parseargs(
                        args->flags |= XFSMNT_NOUUID;
                } else if (!strcmp(this_char, MNTOPT_BARRIER)) {
                        args->flags |= XFSMNT_BARRIER;
+               } else if (!strcmp(this_char, MNTOPT_NOBARRIER)) {
+                       args->flags &= ~XFSMNT_BARRIER;
                } else if (!strcmp(this_char, MNTOPT_IKEEP)) {
                        args->flags &= ~XFSMNT_IDELETE;
                } else if (!strcmp(this_char, MNTOPT_NOIKEEP)) {
@@ -1815,9 +1826,9 @@ xfs_parseargs(
                } else if (!strcmp(this_char, MNTOPT_NOLARGEIO)) {
                        args->flags2 |= XFSMNT2_COMPAT_IOSIZE;
                } else if (!strcmp(this_char, MNTOPT_ATTR2)) {
-                       args->flags &= ~XFSMNT_COMPAT_ATTR;
+                       args->flags |= XFSMNT_ATTR2;
                } else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
-                       args->flags |= XFSMNT_COMPAT_ATTR;
+                       args->flags &= ~XFSMNT_ATTR2;
                } else if (!strcmp(this_char, "osyncisdsync")) {
                        /* no-op, this is now the default */
 printk("XFS: osyncisdsync is now the default, option is deprecated.\n");
@@ -1892,7 +1903,6 @@ xfs_showargs(
                { XFS_MOUNT_NOUUID,             "," MNTOPT_NOUUID },
                { XFS_MOUNT_NORECOVERY,         "," MNTOPT_NORECOVERY },
                { XFS_MOUNT_OSYNCISOSYNC,       "," MNTOPT_OSYNCISOSYNC },
-               { XFS_MOUNT_BARRIER,            "," MNTOPT_BARRIER },
                { XFS_MOUNT_IDELETE,            "," MNTOPT_NOIKEEP },
                { 0, NULL }
        };
@@ -1914,33 +1924,28 @@ xfs_showargs(
 
        if (mp->m_logbufs > 0)
                seq_printf(m, "," MNTOPT_LOGBUFS "=%d", mp->m_logbufs);
-
        if (mp->m_logbsize > 0)
                seq_printf(m, "," MNTOPT_LOGBSIZE "=%dk", mp->m_logbsize >> 10);
 
        if (mp->m_logname)
                seq_printf(m, "," MNTOPT_LOGDEV "=%s", mp->m_logname);
-
        if (mp->m_rtname)
                seq_printf(m, "," MNTOPT_RTDEV "=%s", mp->m_rtname);
 
        if (mp->m_dalign > 0)
                seq_printf(m, "," MNTOPT_SUNIT "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_dalign));
-
        if (mp->m_swidth > 0)
                seq_printf(m, "," MNTOPT_SWIDTH "=%d",
                                (int)XFS_FSB_TO_BB(mp, mp->m_swidth));
 
-       if (!(mp->m_flags & XFS_MOUNT_COMPAT_ATTR))
-               seq_printf(m, "," MNTOPT_ATTR2);
-
        if (!(mp->m_flags & XFS_MOUNT_COMPAT_IOSIZE))
                seq_printf(m, "," MNTOPT_LARGEIO);
+       if (mp->m_flags & XFS_MOUNT_BARRIER)
+               seq_printf(m, "," MNTOPT_BARRIER);
 
        if (!(vfsp->vfs_flag & VFS_32BITINODES))
                seq_printf(m, "," MNTOPT_64BITINODE);
-
        if (vfsp->vfs_flag & VFS_GRPID)
                seq_printf(m, "," MNTOPT_GRPID);
 
@@ -1959,6 +1964,7 @@ xfs_freeze(
        /* Push the superblock and write an unmount record */
        xfs_log_unmount_write(mp);
        xfs_unmountfs_writesb(mp);
+       xfs_fs_log_dummy(mp);
 }
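
The mount-option hunks above pair each switch with an explicit off form ("barrier"/"nobarrier", "attr2"/"noattr2") and store the result as a positive flag bit rather than the old inverted XFSMNT_COMPAT_ATTR compat bit, and xfs_showargs() reports the barrier option only when it is enabled. A rough user-space sketch of that parsing style; the flag values, the options handled and the assumption that barriers start enabled are illustrative only.

/*
 * Sketch of paired on/off mount options setting or clearing positive flag
 * bits.  Flag values and the default are assumptions for the sketch.
 */
#include <stdio.h>
#include <string.h>

#define OPT_BARRIER     (1u << 0)
#define OPT_ATTR2       (1u << 1)

static unsigned int parse_opt(const char *opt, unsigned int flags)
{
        if (!strcmp(opt, "barrier"))
                flags |= OPT_BARRIER;
        else if (!strcmp(opt, "nobarrier"))
                flags &= ~OPT_BARRIER;
        else if (!strcmp(opt, "attr2"))
                flags |= OPT_ATTR2;
        else if (!strcmp(opt, "noattr2"))
                flags &= ~OPT_ATTR2;
        return flags;
}

int main(void)
{
        unsigned int flags = OPT_BARRIER;       /* assume barriers default on */

        flags = parse_opt("nobarrier", flags);
        flags = parse_opt("attr2", flags);

        /* Only report what is actually enabled. */
        printf("flags:%s%s\n",
               (flags & OPT_BARRIER) ? " barrier" : "",
               (flags & OPT_ATTR2) ? " attr2" : "");
        return 0;
}
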
 
 
index e92cacde02f5966b8e289806ea829dbea1008d91..8076cc981e11f60cf011df7870066e1d5cd1bd3b 100644 (file)
@@ -185,8 +185,7 @@ xfs_getattr(
                break;
        }
 
-       vap->va_atime.tv_sec = ip->i_d.di_atime.t_sec;
-       vap->va_atime.tv_nsec = ip->i_d.di_atime.t_nsec;
+       vn_atime_to_timespec(vp, &vap->va_atime);
        vap->va_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
        vap->va_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
        vap->va_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
@@ -543,24 +542,6 @@ xfs_setattr(
                        goto error_return;
                }
 
-               /*
-                * Can't set extent size unless the file is marked, or
-                * about to be marked as a realtime file.
-                *
-                * This check will be removed when fixed size extents
-                * with buffered data writes is implemented.
-                *
-                */
-               if ((mask & XFS_AT_EXTSIZE)                     &&
-                   ((ip->i_d.di_extsize << mp->m_sb.sb_blocklog) !=
-                    vap->va_extsize) &&
-                   (!((ip->i_d.di_flags & XFS_DIFLAG_REALTIME) ||
-                      ((mask & XFS_AT_XFLAGS) &&
-                       (vap->va_xflags & XFS_XFLAG_REALTIME))))) {
-                       code = XFS_ERROR(EINVAL);
-                       goto error_return;
-               }
-
                /*
                 * Can't change realtime flag if any extents are allocated.
                 */
@@ -823,13 +804,17 @@ xfs_setattr(
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
                                if (vap->va_xflags & XFS_XFLAG_NOSYMLINKS)
                                        di_flags |= XFS_DIFLAG_NOSYMLINKS;
-                       } else {
+                               if (vap->va_xflags & XFS_XFLAG_EXTSZINHERIT)
+                                       di_flags |= XFS_DIFLAG_EXTSZINHERIT;
+                       } else if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
                                if (vap->va_xflags & XFS_XFLAG_REALTIME) {
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                        ip->i_iocore.io_flags |= XFS_IOCORE_RT;
                                } else {
                                        ip->i_iocore.io_flags &= ~XFS_IOCORE_RT;
                                }
+                               if (vap->va_xflags & XFS_XFLAG_EXTSIZE)
+                                       di_flags |= XFS_DIFLAG_EXTSIZE;
                        }
                        ip->i_d.di_flags = di_flags;
                }
@@ -999,10 +984,6 @@ xfs_readlink(
                goto error_return;
        }
 
-       if (!(ioflags & IO_INVIS)) {
-               xfs_ichgtime(ip, XFS_ICHGTIME_ACC);
-       }
-
        /*
         * See if the symlink is stored inline.
         */
@@ -1234,7 +1215,8 @@ xfs_inactive_free_eofblocks(
        xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
        if (!error && (nimaps != 0) &&
-           (imap.br_startblock != HOLESTARTBLOCK)) {
+           (imap.br_startblock != HOLESTARTBLOCK ||
+            ip->i_delayed_blks)) {
                /*
                 * Attach the dquots to the inode up front.
                 */
@@ -1569,9 +1551,11 @@ xfs_release(
 
        if (ip->i_d.di_nlink != 0) {
                if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-                    ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
+                    ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+                      ip->i_delayed_blks > 0)) &&
                     (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
-                   (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)))) {
+                   (!(ip->i_d.di_flags &
+                               (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
                        if ((error = xfs_inactive_free_eofblocks(mp, ip)))
                                return (error);
                        /* Update linux inode block count after free above */
@@ -1628,7 +1612,8 @@ xfs_inactive(
         * only one with a reference to the inode.
         */
        truncate = ((ip->i_d.di_nlink == 0) &&
-           ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0)) &&
+            ((ip->i_d.di_size != 0) || (ip->i_d.di_nextents > 0) ||
+             (ip->i_delayed_blks > 0)) &&
            ((ip->i_d.di_mode & S_IFMT) == S_IFREG));
 
        mp = ip->i_mount;
@@ -1646,10 +1631,12 @@ xfs_inactive(
 
        if (ip->i_d.di_nlink != 0) {
                if ((((ip->i_d.di_mode & S_IFMT) == S_IFREG) &&
-                    ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0)) &&
-                    (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
-                   (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)) ||
-                    (ip->i_delayed_blks != 0))) {
+                     ((ip->i_d.di_size > 0) || (VN_CACHED(vp) > 0 ||
+                       ip->i_delayed_blks > 0)) &&
+                     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
+                    (!(ip->i_d.di_flags &
+                               (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
+                     (ip->i_delayed_blks != 0)))) {
                        if ((error = xfs_inactive_free_eofblocks(mp, ip)))
                                return (VN_INACTIVE_CACHE);
                        /* Update linux inode block count after free above */
@@ -2593,7 +2580,6 @@ xfs_link(
        int                     cancel_flags;
        int                     committed;
        vnode_t                 *target_dir_vp;
-       bhv_desc_t              *src_bdp;
        int                     resblks;
        char                    *target_name = VNAME(dentry);
        int                     target_namelen;
@@ -2606,8 +2592,7 @@ xfs_link(
        if (VN_ISDIR(src_vp))
                return XFS_ERROR(EPERM);
 
-       src_bdp = vn_bhv_lookup_unlocked(VN_BHV_HEAD(src_vp), &xfs_vnodeops);
-       sip = XFS_BHVTOI(src_bdp);
+       sip = xfs_vtoi(src_vp);
        tdp = XFS_BHVTOI(target_dir_bdp);
        mp = tdp->i_mount;
        if (XFS_FORCED_SHUTDOWN(mp))
@@ -3240,7 +3225,6 @@ xfs_readdir(
        xfs_trans_t     *tp = NULL;
        int             error = 0;
        uint            lock_mode;
-       xfs_off_t       start_offset;
 
        vn_trace_entry(BHV_TO_VNODE(dir_bdp), __FUNCTION__,
                                               (inst_t *)__return_address);
@@ -3251,11 +3235,7 @@ xfs_readdir(
        }
 
        lock_mode = xfs_ilock_map_shared(dp);
-       start_offset = uiop->uio_offset;
        error = XFS_DIR_GETDENTS(dp->i_mount, tp, dp, uiop, eofp);
-       if (start_offset != uiop->uio_offset) {
-               xfs_ichgtime(dp, XFS_ICHGTIME_ACC);
-       }
        xfs_iunlock_map_shared(dp, lock_mode);
        return error;
 }
@@ -3832,7 +3812,12 @@ xfs_reclaim(
        vn_iowait(vp);
 
        ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || ip->i_delayed_blks == 0);
-       ASSERT(VN_CACHED(vp) == 0);
+
+       /*
+        * Make sure the atime in the XFS inode is correct before freeing the
+        * Linux inode.
+        */
+       xfs_synchronize_atime(ip);
 
        /* If we have nothing to flush with this inode then complete the
         * teardown now, otherwise break the link between the xfs inode
@@ -4002,42 +3987,36 @@ xfs_alloc_file_space(
        int                     alloc_type,
        int                     attr_flags)
 {
+       xfs_mount_t             *mp = ip->i_mount;
+       xfs_off_t               count;
        xfs_filblks_t           allocated_fsb;
        xfs_filblks_t           allocatesize_fsb;
-       int                     committed;
-       xfs_off_t               count;
-       xfs_filblks_t           datablocks;
-       int                     error;
+       xfs_extlen_t            extsz, temp;
+       xfs_fileoff_t           startoffset_fsb;
        xfs_fsblock_t           firstfsb;
-       xfs_bmap_free_t         free_list;
-       xfs_bmbt_irec_t         *imapp;
-       xfs_bmbt_irec_t         imaps[1];
-       xfs_mount_t             *mp;
-       int                     numrtextents;
-       int                     reccount;
-       uint                    resblks;
+       int                     nimaps;
+       int                     bmapi_flag;
+       int                     quota_flag;
        int                     rt;
-       int                     rtextsize;
-       xfs_fileoff_t           startoffset_fsb;
        xfs_trans_t             *tp;
-       int                     xfs_bmapi_flags;
+       xfs_bmbt_irec_t         imaps[1], *imapp;
+       xfs_bmap_free_t         free_list;
+       uint                    qblocks, resblks, resrtextents;
+       int                     committed;
+       int                     error;
 
        vn_trace_entry(XFS_ITOV(ip), __FUNCTION__, (inst_t *)__return_address);
-       mp = ip->i_mount;
 
        if (XFS_FORCED_SHUTDOWN(mp))
                return XFS_ERROR(EIO);
 
-       /*
-        * determine if this is a realtime file
-        */
-       if ((rt = XFS_IS_REALTIME_INODE(ip)) != 0) {
-               if (ip->i_d.di_extsize)
-                       rtextsize = ip->i_d.di_extsize;
-               else
-                       rtextsize = mp->m_sb.sb_rextsize;
-       } else
-               rtextsize = 0;
+       rt = XFS_IS_REALTIME_INODE(ip);
+       if (unlikely(rt)) {
+               if (!(extsz = ip->i_d.di_extsize))
+                       extsz = mp->m_sb.sb_rextsize;
+       } else {
+               extsz = ip->i_d.di_extsize;
+       }
 
        if ((error = XFS_QM_DQATTACH(mp, ip, 0)))
                return error;
@@ -4048,8 +4027,8 @@ xfs_alloc_file_space(
        count = len;
        error = 0;
        imapp = &imaps[0];
-       reccount = 1;
-       xfs_bmapi_flags = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
+       nimaps = 1;
+       bmapi_flag = XFS_BMAPI_WRITE | (alloc_type ? XFS_BMAPI_PREALLOC : 0);
        startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
        allocatesize_fsb = XFS_B_TO_FSB(mp, count);
 
@@ -4070,43 +4049,51 @@ xfs_alloc_file_space(
        }
 
        /*
-        * allocate file space until done or until there is an error
+        * Allocate file space until done or until there is an error
         */
 retry:
        while (allocatesize_fsb && !error) {
+               xfs_fileoff_t   s, e;
+
                /*
-                * determine if reserving space on
-                * the data or realtime partition.
+                * Determine space reservations for data/realtime.
                 */
-               if (rt) {
-                       xfs_fileoff_t s, e;
-
+               if (unlikely(extsz)) {
                        s = startoffset_fsb;
-                       do_div(s, rtextsize);
-                       s *= rtextsize;
-                       e = roundup_64(startoffset_fsb + allocatesize_fsb,
-                               rtextsize);
-                       numrtextents = (int)(e - s) / mp->m_sb.sb_rextsize;
-                       datablocks = 0;
+                       do_div(s, extsz);
+                       s *= extsz;
+                       e = startoffset_fsb + allocatesize_fsb;
+                       if ((temp = do_mod(startoffset_fsb, extsz)))
+                               e += temp;
+                       if ((temp = do_mod(e, extsz)))
+                               e += extsz - temp;
+               } else {
+                       s = 0;
+                       e = allocatesize_fsb;
+               }
+
+               if (unlikely(rt)) {
+                       resrtextents = qblocks = (uint)(e - s);
+                       resrtextents /= mp->m_sb.sb_rextsize;
+                       resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
+                       quota_flag = XFS_QMOPT_RES_RTBLKS;
                } else {
-                       datablocks = allocatesize_fsb;
-                       numrtextents = 0;
+                       resrtextents = 0;
+                       resblks = qblocks = \
+                               XFS_DIOSTRAT_SPACE_RES(mp, (uint)(e - s));
+                       quota_flag = XFS_QMOPT_RES_REGBLKS;
                }
 
                /*
-                * allocate and setup the transaction
+                * Allocate and setup the transaction.
                 */
                tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
-               resblks = XFS_DIOSTRAT_SPACE_RES(mp, datablocks);
-               error = xfs_trans_reserve(tp,
-                                         resblks,
-                                         XFS_WRITE_LOG_RES(mp),
-                                         numrtextents,
+               error = xfs_trans_reserve(tp, resblks,
+                                         XFS_WRITE_LOG_RES(mp), resrtextents,
                                          XFS_TRANS_PERM_LOG_RES,
                                          XFS_WRITE_LOG_COUNT);
-
                /*
-                * check for running out of space
+                * Check for running out of space
                 */
                if (error) {
                        /*
@@ -4117,8 +4104,8 @@ retry:
                        break;
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
-               error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
-                               ip->i_udquot, ip->i_gdquot, resblks, 0, 0);
+               error = XFS_TRANS_RESERVE_QUOTA_NBLKS(mp, tp, ip,
+                                                     qblocks, 0, quota_flag);
                if (error)
                        goto error1;
 
@@ -4126,19 +4113,19 @@ retry:
                xfs_trans_ihold(tp, ip);
 
                /*
-                * issue the bmapi() call to allocate the blocks
+                * Issue the xfs_bmapi() call to allocate the blocks
                 */
                XFS_BMAP_INIT(&free_list, &firstfsb);
                error = xfs_bmapi(tp, ip, startoffset_fsb,
-                                 allocatesize_fsb, xfs_bmapi_flags,
-                                 &firstfsb, 0, imapp, &reccount,
+                                 allocatesize_fsb, bmapi_flag,
+                                 &firstfsb, 0, imapp, &nimaps,
                                  &free_list);
                if (error) {
                        goto error0;
                }
 
                /*
-                * complete the transaction
+                * Complete the transaction
                 */
                error = xfs_bmap_finish(&tp, &free_list, firstfsb, &committed);
                if (error) {
@@ -4153,7 +4140,7 @@ retry:
 
                allocated_fsb = imapp->br_blockcount;
 
-               if (reccount == 0) {
+               if (nimaps == 0) {
                        error = XFS_ERROR(ENOSPC);
                        break;
                }
@@ -4176,9 +4163,11 @@ dmapi_enospc_check:
 
        return error;
 
- error0:
+error0:        /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
        xfs_bmap_cancel(&free_list);
- error1:
+       XFS_TRANS_UNRESERVE_QUOTA_NBLKS(mp, tp, ip, qblocks, 0, quota_flag);
+
+error1:        /* Just cancel transaction */
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        goto dmapi_enospc_check;
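
The reworked xfs_alloc_file_space() above aligns the request to the extent size hint: the start offset is rounded down to an extsz boundary, and the end is widened by the start's misalignment and then rounded up, so the reservation is computed over whole extsz-sized chunks. A stand-alone sketch of that arithmetic, with plain 64-bit division standing in for do_div()/do_mod(); the sample numbers are arbitrary.

/*
 * Stand-alone version of the extent-size alignment arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t startoffset_fsb = 10;  /* first block of the request */
        uint64_t allocatesize_fsb = 5;  /* requested length in blocks */
        uint64_t extsz = 4;             /* extent size hint in blocks */
        uint64_t s, e, temp;

        s = (startoffset_fsb / extsz) * extsz;          /* round start down */

        e = startoffset_fsb + allocatesize_fsb;
        if ((temp = startoffset_fsb % extsz))
                e += temp;              /* carry the start's misalignment */
        if ((temp = e % extsz))
                e += extsz - temp;      /* round end up to an extsz multiple */

        /* With these inputs: s = 8, e = 20, so e - s = 12 blocks. */
        printf("s=%llu e=%llu blocks=%llu\n",
               (unsigned long long)s, (unsigned long long)e,
               (unsigned long long)(e - s));
        return 0;
}
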
@@ -4423,8 +4412,8 @@ xfs_free_file_space(
                }
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                error = XFS_TRANS_RESERVE_QUOTA(mp, tp,
-                               ip->i_udquot, ip->i_gdquot, resblks, 0, rt ?
-                               XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS);
+                               ip->i_udquot, ip->i_gdquot, resblks, 0,
+                               XFS_QMOPT_RES_REGBLKS);
                if (error)
                        goto error1;
 
index a714d0cdc204dff18b84390763114ac99fc04f09..6f92482cc96c6ebbd360c98899b4f4fc465828aa 100644 (file)
@@ -156,7 +156,7 @@ ev5_switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm,
        /* Always update the PCB ASN.  Another thread may have allocated
           a new mm->context (via flush_tlb_mm) without the ASN serial
           number wrapping.  We have no way to detect when this is needed.  */
-       next->thread_info->pcb.asn = mmc & HARDWARE_ASN_MASK;
+       task_thread_info(next)->pcb.asn = mmc & HARDWARE_ASN_MASK;
 }
 
 __EXTERN_INLINE void
@@ -235,7 +235,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
                if (cpu_online(i))
                        mm->context[i] = 0;
        if (tsk != current)
-               tsk->thread_info->pcb.ptbr
+               task_thread_info(tsk)->pcb.ptbr
                  = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
        return 0;
 }
@@ -249,7 +249,7 @@ destroy_context(struct mm_struct *mm)
 static inline void
 enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
-       tsk->thread_info->pcb.ptbr
+       task_thread_info(tsk)->pcb.ptbr
          = ((unsigned long)mm->pgd - IDENT_ADDR) >> PAGE_SHIFT;
 }
 
index bb1a7a3abb8b55ac2c80aa10d608299eac295ad0..425b7b6d28cbba8e99ec8c1bbd9af578b77752df 100644 (file)
@@ -52,19 +52,10 @@ extern long kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
 unsigned long get_wchan(struct task_struct *p);
 
-/* See arch/alpha/kernel/ptrace.c for details.  */
-#define PT_REG(reg) \
-  (PAGE_SIZE*2 - sizeof(struct pt_regs) + offsetof(struct pt_regs, reg))
-
-#define SW_REG(reg) \
- (PAGE_SIZE*2 - sizeof(struct pt_regs) - sizeof(struct switch_stack) \
-  + offsetof(struct switch_stack, reg))
-
-#define KSTK_EIP(tsk) \
-  (*(unsigned long *)(PT_REG(pc) + (unsigned long) ((tsk)->thread_info)))
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
 
 #define KSTK_ESP(tsk) \
-  ((tsk) == current ? rdusp() : (tsk)->thread_info->pcb.usp)
+  ((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax()    barrier()
 
index 072375c135b4a008799173b655837fb39c3d2b91..9933b8b3612ee2a6173d22ba5bd441d07c80eab5 100644 (file)
@@ -75,10 +75,10 @@ struct switch_stack {
 #define profile_pc(regs) instruction_pointer(regs)
 extern void show_regs(struct pt_regs *);
 
-#define alpha_task_regs(task) \
-  ((struct pt_regs *) ((long) (task)->thread_info + 2*PAGE_SIZE) - 1)
+#define task_pt_regs(task) \
+  ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1)
 
-#define force_successful_syscall_return() (alpha_task_regs(current)->r0 = 0)
+#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0)
 
 #endif
 
index 050e86d12891ef704762adba37cabe4a5cb19fe4..cc9c7e8cced5c2de6f88f3557de2bd6c4c3de385 100644 (file)
@@ -131,15 +131,25 @@ struct el_common_EV6_mcheck {
 extern void halt(void) __attribute__((noreturn));
 #define __halt() __asm__ __volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt))
 
-#define switch_to(P,N,L)                                               \
-  do {                                                                 \
-    (L) = alpha_switch_to(virt_to_phys(&(N)->thread_info->pcb), (P));  \
-    check_mmu_context();                                               \
+#define switch_to(P,N,L)                                                \
+  do {                                                                  \
+    (L) = alpha_switch_to(virt_to_phys(&task_thread_info(N)->pcb), (P)); \
+    check_mmu_context();                                                \
   } while (0)
 
 struct task_struct;
 extern struct task_struct *alpha_switch_to(unsigned long, struct task_struct*);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define imb() \
 __asm__ __volatile__ ("call_pal %0 #imb" : : "i" (PAL_imb) : "memory")
 
index d51491ed00b81de8ae49426d5cc07e6c6fa97f5a..69ffd93f8e223a3aafd4eb0cc2400c799a8be317 100644 (file)
@@ -54,8 +54,6 @@ register struct thread_info *__current_thread_info __asm__("$8");
 #define alloc_thread_info(tsk) \
   ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h b/include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
new file mode 100644 (file)
index 0000000..ce1150d
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200_pdc.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * Peripheral Data Controller (PDC) registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_PDC_H
+#define AT91RM9200_PDC_H
+
+#define AT91_PDC_RPR           0x100   /* Receive Pointer Register */
+#define AT91_PDC_RCR           0x104   /* Receive Counter Register */
+#define AT91_PDC_TPR           0x108   /* Transmit Pointer Register */
+#define AT91_PDC_TCR           0x10c   /* Transmit Counter Register */
+#define AT91_PDC_RNPR          0x110   /* Receive Next Pointer Register */
+#define AT91_PDC_RNCR          0x114   /* Receive Next Counter Register */
+#define AT91_PDC_TNPR          0x118   /* Transmit Next Pointer Register */
+#define AT91_PDC_TNCR          0x11c   /* Transmit Next Counter Register */
+
+#define AT91_PDC_PTCR          0x120   /* Transfer Control Register */
+#define                AT91_PDC_RXTEN          (1 << 0)        /* Receiver Transfer Enable */
+#define                AT91_PDC_RXTDIS         (1 << 1)        /* Receiver Transfer Disable */
+#define                AT91_PDC_TXTEN          (1 << 8)        /* Transmitter Transfer Enable */
+#define                AT91_PDC_TXTDIS         (1 << 9)        /* Transmitter Transfer Disable */
+
+#define AT91_PDC_PTSR          0x124   /* Transfer Status Register */
+
+#endif
diff --git a/include/asm-arm/arch-at91rm9200/at91rm9200_usart.h b/include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
new file mode 100644 (file)
index 0000000..79f851e
--- /dev/null
@@ -0,0 +1,123 @@
+/*
+ * include/asm-arm/arch-at91rm9200/at91rm9200_usart.h
+ *
+ * Copyright (C) 2005 Ivan Kokshaysky
+ * Copyright (C) SAN People
+ *
+ * USART registers.
+ * Based on AT91RM9200 datasheet revision E.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef AT91RM9200_USART_H
+#define AT91RM9200_USART_H
+
+#define AT91_US_CR             0x00                    /* Control Register */
+#define                AT91_US_RSTRX           (1 <<  2)               /* Reset Receiver */
+#define                AT91_US_RSTTX           (1 <<  3)               /* Reset Transmitter */
+#define                AT91_US_RXEN            (1 <<  4)               /* Receiver Enable */
+#define                AT91_US_RXDIS           (1 <<  5)               /* Receiver Disable */
+#define                AT91_US_TXEN            (1 <<  6)               /* Transmitter Enable */
+#define                AT91_US_TXDIS           (1 <<  7)               /* Transmitter Disable */
+#define                AT91_US_RSTSTA          (1 <<  8)               /* Reset Status Bits */
+#define                AT91_US_STTBRK          (1 <<  9)               /* Start Break */
+#define                AT91_US_STPBRK          (1 << 10)               /* Stop Break */
+#define                AT91_US_STTTO           (1 << 11)               /* Start Time-out */
+#define                AT91_US_SENDA           (1 << 12)               /* Send Address */
+#define                AT91_US_RSTIT           (1 << 13)               /* Reset Iterations */
+#define                AT91_US_RSTNACK         (1 << 14)               /* Reset Non Acknowledge */
+#define                AT91_US_RETTO           (1 << 15)               /* Rearm Time-out */
+#define                AT91_US_DTREN           (1 << 16)               /* Data Terminal Ready Enable */
+#define                AT91_US_DTRDIS          (1 << 17)               /* Data Terminal Ready Disable */
+#define                AT91_US_RTSEN           (1 << 18)               /* Request To Send Enable */
+#define                AT91_US_RTSDIS          (1 << 19)               /* Request To Send Disable */
+
+#define AT91_US_MR             0x04                    /* Mode Register */
+#define                AT91_US_USMODE          (0xf <<  0)             /* Mode of the USART */
+#define                        AT91_US_USMODE_NORMAL           0
+#define                        AT91_US_USMODE_RS485            1
+#define                        AT91_US_USMODE_HWHS             2
+#define                        AT91_US_USMODE_MODEM            3
+#define                        AT91_US_USMODE_ISO7816_T0       4
+#define                        AT91_US_USMODE_ISO7816_T1       6
+#define                        AT91_US_USMODE_IRDA             8
+#define                AT91_US_USCLKS          (3   <<  4)             /* Clock Selection */
+#define                AT91_US_CHRL            (3   <<  6)             /* Character Length */
+#define                        AT91_US_CHRL_5                  (0 <<  6)
+#define                        AT91_US_CHRL_6                  (1 <<  6)
+#define                        AT91_US_CHRL_7                  (2 <<  6)
+#define                        AT91_US_CHRL_8                  (3 <<  6)
+#define                AT91_US_SYNC            (1 <<  8)               /* Synchronous Mode Select */
+#define                AT91_US_PAR             (7 <<  9)               /* Parity Type */
+#define                        AT91_US_PAR_EVEN                (0 <<  9)
+#define                        AT91_US_PAR_ODD                 (1 <<  9)
+#define                        AT91_US_PAR_SPACE               (2 <<  9)
+#define                        AT91_US_PAR_MARK                (3 <<  9)
+#define                        AT91_US_PAR_NONE                (4 <<  9)
+#define                        AT91_US_PAR_MULTI_DROP          (6 <<  9)
+#define                AT91_US_NBSTOP          (3 << 12)               /* Number of Stop Bits */
+#define                        AT91_US_NBSTOP_1                (0 << 12)
+#define                        AT91_US_NBSTOP_1_5              (1 << 12)
+#define                        AT91_US_NBSTOP_2                (2 << 12)
+#define                AT91_US_CHMODE          (3 << 14)               /* Channel Mode */
+#define                        AT91_US_CHMODE_NORMAL           (0 << 14)
+#define                        AT91_US_CHMODE_ECHO             (1 << 14)
+#define                        AT91_US_CHMODE_LOC_LOOP         (2 << 14)
+#define                        AT91_US_CHMODE_REM_LOOP         (3 << 14)
+#define                AT91_US_MSBF            (1 << 16)               /* Bit Order */
+#define                AT91_US_MODE9           (1 << 17)               /* 9-bit Character Length */
+#define                AT91_US_CLKO            (1 << 18)               /* Clock Output Select */
+#define                AT91_US_OVER            (1 << 19)               /* Oversampling Mode */
+#define                AT91_US_INACK           (1 << 20)               /* Inhibit Non Acknowledge */
+#define                AT91_US_DSNACK          (1 << 21)               /* Disable Successive NACK */
+#define                AT91_US_MAX_ITER        (7 << 24)               /* Max Iterations */
+#define                AT91_US_FILTER          (1 << 28)               /* Infrared Receive Line Filter */
+
+#define AT91_US_IER            0x08                    /* Interrupt Enable Register */
+#define                AT91_US_RXRDY           (1 <<  0)               /* Receiver Ready */
+#define                AT91_US_TXRDY           (1 <<  1)               /* Transmitter Ready */
+#define                AT91_US_RXBRK           (1 <<  2)               /* Break Received / End of Break */
+#define                AT91_US_ENDRX           (1 <<  3)               /* End of Receiver Transfer */
+#define                AT91_US_ENDTX           (1 <<  4)               /* End of Transmitter Transfer */
+#define                AT91_US_OVRE            (1 <<  5)               /* Overrun Error */
+#define                AT91_US_FRAME           (1 <<  6)               /* Framing Error */
+#define                AT91_US_PARE            (1 <<  7)               /* Parity Error */
+#define                AT91_US_TIMEOUT         (1 <<  8)               /* Receiver Time-out */
+#define                AT91_US_TXEMPTY         (1 <<  9)               /* Transmitter Empty */
+#define                AT91_US_ITERATION       (1 << 10)               /* Max number of Repetitions Reached */
+#define                AT91_US_TXBUFE          (1 << 11)               /* Transmission Buffer Empty */
+#define                AT91_US_RXBUFF          (1 << 12)               /* Reception Buffer Full */
+#define                AT91_US_NACK            (1 << 13)               /* Non Acknowledge */
+#define                AT91_US_RIIC            (1 << 16)               /* Ring Indicator Input Change */
+#define                AT91_US_DSRIC           (1 << 17)               /* Data Set Ready Input Change */
+#define                AT91_US_DCDIC           (1 << 18)               /* Data Carrier Detect Input Change */
+#define                AT91_US_CTSIC           (1 << 19)               /* Clear to Send Input Change */
+#define                AT91_US_RI              (1 << 20)               /* RI */
+#define                AT91_US_DSR             (1 << 21)               /* DSR */
+#define                AT91_US_DCD             (1 << 22)               /* DCD */
+#define                AT91_US_CTS             (1 << 23)               /* CTS */
+
+#define AT91_US_IDR            0x0c                    /* Interrupt Disable Register */
+#define AT91_US_IMR            0x10                    /* Interrupt Mask Register */
+#define AT91_US_CSR            0x14                    /* Channel Status Register */
+#define AT91_US_RHR            0x18                    /* Receiver Holding Register */
+#define AT91_US_THR            0x1c                    /* Transmitter Holding Register */
+
+#define AT91_US_BRGR           0x20                    /* Baud Rate Generator Register */
+#define                AT91_US_CD              (0xffff << 0)           /* Clock Divider */
+
+#define AT91_US_RTOR           0x24                    /* Receiver Time-out Register */
+#define                AT91_US_TO              (0xffff << 0)           /* Time-out Value */
+
+#define AT91_US_TTGR           0x28                    /* Transmitter Timeguard Register */
+#define                AT91_US_TG              (0xff << 0)             /* Timeguard Value */
+
+#define AT91_US_FIDI           0x40                    /* FI DI Ratio Register */
+#define AT91_US_NER            0x44                    /* Number of Errors Register */
+#define AT91_US_IF             0x4c                    /* IrDA Filter Register */
+
+#endif
index 741f5bc5d016ea5888eb95290d909fc2c027a32f..17eaf8bdf0925a7b0146df2d6e35f8ac08cf3484 100644 (file)
@@ -22,7 +22,16 @@ static inline __attribute_const__ __u32 ___arch__swab32(__u32 x)
 {
        __u32 t;
 
-       t = x ^ ((x << 16) | (x >> 16));        /* eor r1,r0,r0,ror #16  */
+       if (__builtin_constant_p(x)) {
+               t = x ^ ((x << 16) | (x >> 16)); /* eor r1,r0,r0,ror #16 */
+       } else {
+               /*
+                * The compiler needs a bit of a hint here to always do the
+                * right thing and not screw it up to different degrees
+                * depending on the gcc version.
+                */
+               asm ("eor\t%0, %1, %1, ror #16" : "=r" (t) : "r" (x));
+       }
        x = (x << 24) | (x >> 8);               /* mov r0,r0,ror #8      */
        t &= ~0x00FF0000;                       /* bic r1,r1,#0x00FF0000 */
        x ^= (t >> 8);                          /* eor r0,r0,r1,lsr #8   */
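/*
 * Illustrative sketch only (not part of this patch): the same four-step
 * byte-swap sequence the hunk above annotates with ARM instructions,
 * written as portable C so the intermediate values can be checked on any
 * host.  0x12345678 swaps to 0x78563412.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t swab32_steps(uint32_t x)
{
        uint32_t t;

        t  = x ^ ((x << 16) | (x >> 16));   /* eor r1,r0,r0,ror #16  */
        x  = (x << 24) | (x >> 8);          /* mov r0,r0,ror #8      */
        t &= ~0x00FF0000u;                  /* bic r1,r1,#0x00FF0000 */
        x ^= (t >> 8);                      /* eor r0,r0,r1,lsr #8   */
        return x;
}

int main(void)
{
        printf("%08x -> %08x\n", 0x12345678u, swab32_steps(0x12345678u));
        return 0;
}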
diff --git a/include/asm-arm/mach/serial_at91rm9200.h b/include/asm-arm/mach/serial_at91rm9200.h
new file mode 100644 (file)
index 0000000..98f4b0c
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ *  linux/include/asm-arm/mach/serial_at91rm9200.h
+ *
+ *  Based on serial_sa1100.h  by Nicolas Pitre
+ *
+ *  Copyright (C) 2002 ATMEL Rousset
+ *
+ *  Low level machine dependent UART functions.
+ */
+#include <linux/config.h>
+
+struct uart_port;
+
+/*
+ * This is a temporary structure for registering these
+ * functions; it is intended to be discarded after boot.
+ */
+struct at91rm9200_port_fns {
+       void    (*set_mctrl)(struct uart_port *, u_int);
+       u_int   (*get_mctrl)(struct uart_port *);
+       void    (*enable_ms)(struct uart_port *);
+       void    (*pm)(struct uart_port *, u_int, u_int);
+       int     (*set_wake)(struct uart_port *, u_int);
+       int     (*open)(struct uart_port *);
+       void    (*close)(struct uart_port *);
+};
+
+#if defined(CONFIG_SERIAL_AT91)
+void at91_register_uart_fns(struct at91rm9200_port_fns *fns);
+void at91_register_uart(int idx, int port);
+#else
+#define at91_register_uart_fns(fns) do { } while (0)
+#define at91_register_uart(idx,port) do { } while (0)
+#endif
+
+
index 3d7f08bd9030f84e7828c99db876d216032066d6..b4e1146ab682cf97fce022c83c8be81b1f02fc7e 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
 #include <asm/arch/memory.h>
+#include <asm/sizes.h>
 
 #ifndef TASK_SIZE
 /*
 #define PAGE_OFFSET            UL(0xc0000000)
 #endif
 
+/*
+ * Size of DMA-consistent memory region.  Must be multiple of 2M,
+ * between 2MB and 14MB inclusive.
+ */
+#ifndef CONSISTENT_DMA_SIZE
+#define CONSISTENT_DMA_SIZE SZ_2M
+#endif
+
 /*
  * Physical vs virtual RAM address space conversion.  These are
  * private definitions which should NOT be used outside memory.h
index 7d4118e090542222ec87020f9840e21f121b43ec..31290694648b095d2a4595c91a9b2825bb72bf6b 100644 (file)
@@ -85,9 +85,11 @@ unsigned long get_wchan(struct task_struct *p);
  */
 extern int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
 
-#define KSTK_REGS(tsk) (((struct pt_regs *)(THREAD_START_SP + (unsigned long)(tsk)->thread_info)) - 1)
-#define KSTK_EIP(tsk)  KSTK_REGS(tsk)->ARM_pc
-#define KSTK_ESP(tsk)  KSTK_REGS(tsk)->ARM_sp
+#define task_pt_regs(p) \
+       ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
+
+#define KSTK_EIP(tsk)  task_pt_regs(tsk)->ARM_pc
+#define KSTK_ESP(tsk)  task_pt_regs(tsk)->ARM_sp
 
 /*
  * Prefetching support - only ARMv5.
index 5621d61ebc07958e9e7a671def64f410fe26dcb1..eb2de8c10515d118aa8616e27adb2f6accd149ad 100644 (file)
@@ -168,9 +168,19 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
-       last = __switch_to(prev,prev->thread_info,next->thread_info);   \
+       last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));        \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * CPU interrupt mask handling.
  */
index 7c98557b717ff048003e0d07c6a5e820c9ccf4e6..33a33cbb63295640fc5664aa7ba69255eecbdd6f 100644 (file)
@@ -96,13 +96,10 @@ static inline struct thread_info *current_thread_info(void)
 extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *);
 
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
-
 #define thread_saved_pc(tsk)   \
-       ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+       ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
 #define thread_saved_fp(tsk)   \
-       ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+       ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
 extern void iwmmxt_task_disable(struct thread_info *);
 extern void iwmmxt_task_copy(struct thread_info *, void *);
index f23fac1938f31d035872e39fd46688b69064824e..ca4ccfc4b578cc44184667aaac94b5b0d9764022 100644 (file)
@@ -111,9 +111,19 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 
 #define switch_to(prev,next,last)                                      \
 do {                                                                   \
-       last = __switch_to(prev,prev->thread_info,next->thread_info);   \
+       last = __switch_to(prev,task_thread_info(prev),task_thread_info(next)); \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * Save the current interrupt enable state & disable IRQs
  */
index aff3e5699c64cfd79f1ea1f3624e2d094a947877..a65e58a0a767986a5756bfa353e3d9057ba6a08d 100644 (file)
@@ -82,18 +82,15 @@ static inline struct thread_info *current_thread_info(void)
 
 /* FIXME - PAGE_SIZE < 32K */
 #define THREAD_SIZE            (8*32768) // FIXME - this needs attention (see kernel/fork.c, which gets a nice div by zero if this is lower than 8*32768)
-#define __get_user_regs(x) (((struct pt_regs *)((unsigned long)(x) + THREAD_SIZE - 8)) - 1)
+#define task_pt_regs(task) ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE - 8) - 1)
 
 extern struct thread_info *alloc_thread_info(struct task_struct *task);
 extern void free_thread_info(struct thread_info *);
 
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
-
 #define thread_saved_pc(tsk)   \
-       ((unsigned long)(pc_pointer((tsk)->thread_info->cpu_context.pc)))
+       ((unsigned long)(pc_pointer(task_thread_info(tsk)->cpu_context.pc)))
 #define thread_saved_fp(tsk)   \
-       ((unsigned long)((tsk)->thread_info->cpu_context.fp))
+       ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
 
 #else /* !__ASSEMBLY__ */
 
index e23df8dc96e8c7d80633e0165e6757ffd2467583..cc692c7a0660cb79b5644ba324068d825e5a94b7 100644 (file)
@@ -40,7 +40,7 @@ struct thread_struct {
 #define KSTK_EIP(tsk)  \
 ({                     \
        unsigned long eip = 0;   \
-       unsigned long regs = (unsigned long)user_regs(tsk); \
+       unsigned long regs = (unsigned long)task_pt_regs(tsk); \
        if (regs > PAGE_SIZE && \
                virt_addr_valid(regs)) \
        eip = ((struct pt_regs *)regs)->irp; \
index 8c939bf27987f36a6c89cb3acfe6db6d874f2419..32bf2e538ced1913b7618e2b13360a09ee65bb9a 100644 (file)
@@ -36,7 +36,7 @@ struct thread_struct {
 #define KSTK_EIP(tsk)          \
 ({                             \
        unsigned long eip = 0;  \
-       unsigned long regs = (unsigned long)user_regs(tsk); \
+       unsigned long regs = (unsigned long)task_pt_regs(tsk); \
        if (regs > PAGE_SIZE && virt_addr_valid(regs))      \
                eip = ((struct pt_regs *)regs)->erp;        \
        eip; \
index dce41009eeb086a17811c7e1095709672ddedac4..961e2bceadbc645915fd1379ed939cf008db19ff 100644 (file)
@@ -45,7 +45,8 @@ struct task_struct;
  * Ditto, but for the currently running task
  */
 
-#define current_regs() user_regs(current->thread_info)
+#define task_pt_regs(task) user_regs(task_thread_info(task))
+#define current_regs() task_pt_regs(current)
 
 static inline void prepare_to_copy(struct task_struct *tsk)
 {
index cef0140fc10425b99fbe9eba492c05ac9a86a9ac..7ad853c3f74e871afcef861d84685c5360b82dce 100644 (file)
@@ -69,8 +69,6 @@ struct thread_info {
 /* thread information allocation */
 #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* !__ASSEMBLY__ */
 
index 60f6b2aee76d7dc94666b4fc6e5bd24ef6a0e6e7..a5576e02dd1d08a8bb4d4970e641751b17e05a87 100644 (file)
@@ -110,8 +110,6 @@ register struct thread_info *__current_thread_info asm("gr15");
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
index bfcc755c3bb1f647057648d0a5e935534a336265..45f09dc9caff4afe5692b0a05ae38b7a144acc1d 100644 (file)
@@ -69,8 +69,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
                                __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti)   free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
 #endif /* __ASSEMBLY__ */
 
 /*
index 6747006743f94f49abf8339cdced9f988ce2e359..152d0baa576a675b39bfa408a6b69f670e8ad118 100644 (file)
@@ -49,19 +49,19 @@ static inline void __save_init_fpu( struct task_struct *tsk )
                X86_FEATURE_FXSR,
                "m" (tsk->thread.i387.fxsave)
                :"memory");
-       tsk->thread_info->status &= ~TS_USEDFPU;
+       task_thread_info(tsk)->status &= ~TS_USEDFPU;
 }
 
 #define __unlazy_fpu( tsk ) do { \
-       if ((tsk)->thread_info->status & TS_USEDFPU) \
+       if (task_thread_info(tsk)->status & TS_USEDFPU) \
                save_init_fpu( tsk ); \
 } while (0)
 
 #define __clear_fpu( tsk )                                     \
 do {                                                           \
-       if ((tsk)->thread_info->status & TS_USEDFPU) {          \
+       if (task_thread_info(tsk)->status & TS_USEDFPU) {               \
                asm volatile("fnclex ; fwait");                         \
-               (tsk)->thread_info->status &= ~TS_USEDFPU;      \
+               task_thread_info(tsk)->status &= ~TS_USEDFPU;   \
                stts();                                         \
        }                                                       \
 } while (0)
index 13ecf66b098cd6fe165546fe8581422485ca1d45..feca5d961e2b0c26a20c89f2b67c1cac230866fc 100644 (file)
@@ -561,10 +561,20 @@ unsigned long get_wchan(struct task_struct *p);
        (unsigned long)(&__ptr[THREAD_SIZE_LONGS]);                     \
 })
 
+/*
+ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
+ * This is necessary to guarantee that the entire "struct pt_regs"
+ * is accessible even if the CPU hasn't stored the SS/ESP registers
+ * on the stack (the interrupt gate does not save these registers
+ * when switching to the same privilege ring).
+ * Therefore beware: accessing the xss/esp fields of the
+ * "struct pt_regs" is possible, but they may contain
+ * completely wrong values.
+ */
 #define task_pt_regs(task)                                             \
 ({                                                                     \
        struct pt_regs *__regs__;                                       \
-       __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info);     \
+       __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
        __regs__ - 1;                                                   \
 })
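/*
 * Illustrative sketch only (not part of this patch): mimics the i386
 * task_pt_regs() arithmetic above, including the -8 reservation described
 * in the comment, with a fake stack and a placeholder pt_regs layout.
 */
#include <stdio.h>

struct fake_pt_regs { unsigned long regs[17]; };   /* stand-in layout */

int main(void)
{
        static unsigned char stack[8192];          /* kernel stack stand-in */
        unsigned long top = (unsigned long)(stack + sizeof(stack));
        /* reserve 8 bytes at the very top, then place pt_regs just below */
        struct fake_pt_regs *regs = (struct fake_pt_regs *)(top - 8) - 1;

        printf("stack top %#lx, pt_regs at %#lx, %lu reserved bytes above it\n",
               top, (unsigned long)regs,
               top - ((unsigned long)regs + sizeof(*regs)));
        return 0;
}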
 
index 9c0593b7a94e9220c238c9a9b895b694d940ad7f..36a92ed6a9d0d7a916c60f33313bec9d4fb0c494 100644 (file)
@@ -548,6 +548,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+       wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
index 8fbf791651bf84cfaf8e2bbb3396e76b13c87517..2493e77e8c3052da1da84626d187cc5a312f922f 100644 (file)
@@ -111,8 +111,6 @@ register unsigned long current_stack_pointer asm("esp") __attribute_used__;
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
index 0ec27c9e8e45ba696be1a06a81df49d27d222729..d7e19eb344b7ab4f83c6611ec3afb675edf86d13 100644 (file)
@@ -72,7 +72,6 @@ static inline int node_to_first_cpu(int node)
        .max_interval           = 32,                   \
        .busy_factor            = 32,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000000),         \
        .cache_nice_tries       = 1,                    \
        .busy_idx               = 3,                    \
        .idle_idx               = 1,                    \
index aaf11f4e916997643d60c7d45fb4718ac5092363..c0b19106665ca1d45bc0e8e3c20bd76190a981e1 100644 (file)
@@ -192,7 +192,7 @@ compat_ptr (compat_uptr_t uptr)
 static __inline__ void __user *
 compat_alloc_user_space (long len)
 {
-       struct pt_regs *regs = ia64_task_regs(current);
+       struct pt_regs *regs = task_pt_regs(current);
        return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len);
 }
 
index a74b68104559a556e335e80b7262c46594086a89..8c0fc227f0fb6934c4f3f7573456d5d21150c6e0 100644 (file)
@@ -68,10 +68,14 @@ struct prev_kprobe {
        unsigned long status;
 };
 
+#define        MAX_PARAM_RSE_SIZE      (0x60+0x60/0x3f)
 /* per-cpu kprobe control block */
 struct kprobe_ctlblk {
        unsigned long kprobe_status;
        struct pt_regs jprobe_saved_regs;
+       unsigned long jprobes_saved_stacked_regs[MAX_PARAM_RSE_SIZE];
+       unsigned long *bsp;
+       unsigned long cfm;
        struct prev_kprobe prev_kprobe;
 };
 
@@ -118,5 +122,7 @@ extern int kprobe_exceptions_notify(struct notifier_block *self,
 static inline void jprobe_return(void)
 {
 }
+extern void invalidate_stacked_regs(void);
+extern void flush_register_stack(void);
 
 #endif                         /* _ASM_KPROBES_H */
index 94e07e7273955b1174537b9f0093612c057c1ee0..8c648bf72bbdb796a6321713a42c19fb18b62d71 100644 (file)
@@ -352,7 +352,7 @@ extern unsigned long get_wchan (struct task_struct *p);
 /* Return instruction pointer of blocked task TSK.  */
 #define KSTK_EIP(tsk)                                  \
   ({                                                   \
-       struct pt_regs *_regs = ia64_task_regs(tsk);    \
+       struct pt_regs *_regs = task_pt_regs(tsk);      \
        _regs->cr_iip + ia64_psr(_regs)->ri;            \
   })
 
index 2c703d6e0c8630aa234039d0ebdca4d9ce04dc09..9471cdc3f4c0989a3a8d34a37035f06918e96232 100644 (file)
@@ -248,7 +248,7 @@ struct switch_stack {
 })
 
   /* given a pointer to a task_struct, return the user's pt_regs */
-# define ia64_task_regs(t)             (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
+# define task_pt_regs(t)               (((struct pt_regs *) ((char *) (t) + IA64_STK_OFFSET)) - 1)
 # define ia64_psr(regs)                        ((struct ia64_psr *) &(regs)->cr_ipsr)
 # define user_mode(regs)               (((struct ia64_psr *) &(regs)->cr_ipsr)->cpl != 0)
 # define user_stack(task,regs) ((long) regs - (long) task == IA64_STK_OFFSET - sizeof(*regs))
@@ -271,7 +271,7 @@ struct switch_stack {
    *
    * On ia64, we can clear the user's pt_regs->r8 to force a successful syscall.
    */
-# define force_successful_syscall_return()     (ia64_task_regs(current)->r8 = 0)
+# define force_successful_syscall_return()     (task_pt_regs(current)->r8 = 0)
 
   struct task_struct;                  /* forward decl */
   struct unw_frame_info;               /* forward decl */
index 2a8b0d92a5d62f139ec98cb0e125883c7e39e122..8b9e10e7cdba89bc9546ab1c6d06a41a35475487 100644 (file)
@@ -75,7 +75,8 @@
 #define  SN_SAL_IOIF_GET_HUBDEV_INFO              0x02000055
 #define  SN_SAL_IOIF_GET_PCIBUS_INFO              0x02000056
 #define  SN_SAL_IOIF_GET_PCIDEV_INFO              0x02000057
-#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058
+#define  SN_SAL_IOIF_GET_WIDGET_DMAFLUSH_LIST     0x02000058   // deprecated
+#define  SN_SAL_IOIF_GET_DEVICE_DMAFLUSH_LIST     0x0200005a
 
 #define SN_SAL_HUB_ERROR_INTERRUPT                0x02000060
 #define SN_SAL_BTE_RECOVER                        0x02000061
@@ -1100,7 +1101,7 @@ ia64_sn_bte_recovery(nasid_t nasid)
        struct ia64_sal_retval rv;
 
        rv.status = 0;
-       SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, 0, 0, 0, 0, 0, 0, 0);
+       SAL_CALL_NOLOCK(rv, SN_SAL_BTE_RECOVER, (u64)nasid, 0, 0, 0, 0, 0, 0);
        if (rv.status == SALRET_NOT_IMPLEMENTED)
                return 0;
        return (int) rv.status;
index 49faf8f26430806f6fa19fc39a6d8b65f914fe04..203945ae034e1c70e5a8aee672a6b40e22bbfdc1 100644 (file)
@@ -227,7 +227,9 @@ enum xpc_retval {
 
        xpcOpenCloseError,      /* 50: channel open/close protocol error */
 
-       xpcUnknownReason        /* 51: unknown reason -- must be last in list */
+       xpcDisconnected,        /* 51: channel disconnected (closed) */
+
+       xpcUnknownReason        /* 52: unknown reason -- must be last in list */
 };
 
 
diff --git a/include/asm-ia64/sn/xpc.h b/include/asm-ia64/sn/xpc.h
new file mode 100644 (file)
index 0000000..87e9cd5
--- /dev/null
@@ -0,0 +1,1274 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2004-2006 Silicon Graphics, Inc.  All Rights Reserved.
+ */
+
+
+/*
+ * Cross Partition Communication (XPC) structures and macros.
+ */
+
+#ifndef _ASM_IA64_SN_XPC_H
+#define _ASM_IA64_SN_XPC_H
+
+
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/device.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/sn/bte.h>
+#include <asm/sn/clksupport.h>
+#include <asm/sn/addrs.h>
+#include <asm/sn/mspec.h>
+#include <asm/sn/shub_mmr.h>
+#include <asm/sn/xp.h>
+
+
+/*
+ * XPC Version numbers consist of a major and minor number. XPC can always
+ * talk to versions with same major #, and never talk to versions with a
+ * different major #.
+ */
+#define _XPC_VERSION(_maj, _min)       (((_maj) << 4) | ((_min) & 0xf))
+#define XPC_VERSION_MAJOR(_v)          ((_v) >> 4)
+#define XPC_VERSION_MINOR(_v)          ((_v) & 0xf)
+
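/*
 * Illustrative sketch only (not part of this patch): the version packing
 * done by the _XPC_VERSION() macros above, checked on the host.  For
 * example, _XPC_VERSION(3, 1) packs to 0x31.
 */
#include <stdio.h>

#define _XPC_VERSION(_maj, _min)  (((_maj) << 4) | ((_min) & 0xf))
#define XPC_VERSION_MAJOR(_v)     ((_v) >> 4)
#define XPC_VERSION_MINOR(_v)     ((_v) & 0xf)

int main(void)
{
        int v = _XPC_VERSION(3, 1);

        printf("packed %#x -> major %d, minor %d\n",
               (unsigned)v, XPC_VERSION_MAJOR(v), XPC_VERSION_MINOR(v));
        return 0;
}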
+
+/*
+ * The next macros define word or bit representations for given
+ * C-brick nasid in either the SAL provided bit array representing
+ * nasids in the partition/machine or the AMO_t array used for
+ * inter-partition initiation communications.
+ *
+ * For SN2 machines, C-Bricks are always even numbered NASIDs.  As
+ * such, some space will be saved by insisting that nasid information
+ * passed from SAL always be packed for C-Bricks and the
+ * cross-partition interrupts use the same packing scheme.
+ */
+#define XPC_NASID_W_INDEX(_n)  (((_n) / 64) / 2)
+#define XPC_NASID_B_INDEX(_n)  (((_n) / 2) & (64 - 1))
+#define XPC_NASID_IN_ARRAY(_n, _p) ((_p)[XPC_NASID_W_INDEX(_n)] & \
+                                   (1UL << XPC_NASID_B_INDEX(_n)))
+#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)
+
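/*
 * Illustrative sketch only (not part of this patch): the even-numbered
 * (C-brick) nasid packing performed by the XPC_NASID_* macros above,
 * checked on the host for nasid 6 (word 0, bit 3, and back again).
 */
#include <stdio.h>

#define XPC_NASID_W_INDEX(_n)      (((_n) / 64) / 2)
#define XPC_NASID_B_INDEX(_n)      (((_n) / 2) & (64 - 1))
#define XPC_NASID_FROM_W_B(_w, _b) (((_w) * 64 + (_b)) * 2)

int main(void)
{
        int nasid = 6;
        int w = XPC_NASID_W_INDEX(nasid);
        int b = XPC_NASID_B_INDEX(nasid);

        printf("nasid %d -> word %d, bit %d, back to %d\n",
               nasid, w, b, XPC_NASID_FROM_W_B(w, b));
        return 0;
}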
+#define XPC_HB_DEFAULT_INTERVAL                5       /* incr HB every x secs */
+#define XPC_HB_CHECK_DEFAULT_INTERVAL  20      /* check HB every x secs */
+
+/* define the process name of HB checker and the CPU it is pinned to */
+#define XPC_HB_CHECK_THREAD_NAME       "xpc_hb"
+#define XPC_HB_CHECK_CPU               0
+
+/* define the process name of the discovery thread */
+#define XPC_DISCOVERY_THREAD_NAME      "xpc_discovery"
+
+
+/*
+ * the reserved page
+ *
+ *   SAL reserves one page of memory per partition for XPC. Though a full page
+ *   in length (16384 bytes), its starting address is not page aligned, but it
+ *   is cacheline aligned. The reserved page consists of the following:
+ *
+ *   reserved page header
+ *
+ *     The first cacheline of the reserved page contains the header
+ *     (struct xpc_rsvd_page). Before SAL initialization has completed,
+ *     SAL has set up the following fields of the reserved page header:
+ *     SAL_signature, SAL_version, partid, and nasids_size. The other
+ *     fields are set up by XPC. (xpc_rsvd_page points to the local
+ *     partition's reserved page.)
+ *
+ *   part_nasids mask
+ *   mach_nasids mask
+ *
+ *     SAL also sets up two bitmaps (or masks), one that reflects the actual
+ *     nasids in this partition (part_nasids), and the other that reflects
+ *     the actual nasids in the entire machine (mach_nasids). We're only
+ *     interested in the even numbered nasids (which contain the processors
+ *     and/or memory), so we only need half as many bits to represent the
+ *     nasids. The part_nasids mask is located starting at the first cacheline
+ *     following the reserved page header. The mach_nasids mask follows right
+ *     after the part_nasids mask. The size in bytes of each mask is reflected
+ *     by the reserved page header field 'nasids_size'. (Local partition's
+ *     mask pointers are xpc_part_nasids and xpc_mach_nasids.)
+ *
+ *   vars
+ *   vars part
+ *
+ *     Immediately following the mach_nasids mask are the XPC variables
+ *     required by other partitions. First are those that are generic to all
+ *     partitions (vars), followed on the next available cacheline by those
+ *     which are partition specific (vars part). These are setup by XPC.
+ *     (Local partition's vars pointers are xpc_vars and xpc_vars_part.)
+ *
+ * Note: Until vars_pa is set, the partition XPC code has not been initialized.
+ */
+struct xpc_rsvd_page {
+       u64 SAL_signature;      /* SAL: unique signature */
+       u64 SAL_version;        /* SAL: version */
+       u8 partid;              /* SAL: partition ID */
+       u8 version;
+       u8 pad1[6];             /* align to next u64 in cacheline */
+       volatile u64 vars_pa;
+       struct timespec stamp;  /* time when reserved page was setup by XPC */
+       u64 pad2[9];            /* align to last u64 in cacheline */
+       u64 nasids_size;        /* SAL: size of each nasid mask in bytes */
+};
+
+#define XPC_RP_VERSION _XPC_VERSION(1,1) /* version 1.1 of the reserved page */
+
+#define XPC_SUPPORTS_RP_STAMP(_version) \
+                       (_version >= _XPC_VERSION(1,1))
+
+/*
+ * compare stamps - the return value is:
+ *
+ *     < 0,    if stamp1 < stamp2
+ *     = 0,    if stamp1 == stamp2
+ *     > 0,    if stamp1 > stamp2
+ */
+static inline int
+xpc_compare_stamps(struct timespec *stamp1, struct timespec *stamp2)
+{
+       int ret;
+
+
+       if ((ret = stamp1->tv_sec - stamp2->tv_sec) == 0) {
+               ret = stamp1->tv_nsec - stamp2->tv_nsec;
+       }
+       return ret;
+}
+
+
+/*
+ * Define the structures by which XPC variables can be exported to other
+ * partitions. (There are two: struct xpc_vars and struct xpc_vars_part)
+ */
+
+/*
+ * The following structure describes the partition generic variables
+ * needed by other partitions in order to properly initialize.
+ *
+ * struct xpc_vars version number also applies to struct xpc_vars_part.
+ * Changes to either structure and/or related functionality should be
+ * reflected by incrementing either the major or minor version numbers
+ * of struct xpc_vars.
+ */
+struct xpc_vars {
+       u8 version;
+       u64 heartbeat;
+       u64 heartbeating_to_mask;
+       u64 heartbeat_offline;  /* if 0, heartbeat should be changing */
+       int act_nasid;
+       int act_phys_cpuid;
+       u64 vars_part_pa;
+       u64 amos_page_pa;       /* paddr of page of AMOs from MSPEC driver */
+       AMO_t *amos_page;       /* vaddr of page of AMOs from MSPEC driver */
+};
+
+#define XPC_V_VERSION _XPC_VERSION(3,1) /* version 3.1 of the cross vars */
+
+#define XPC_SUPPORTS_DISENGAGE_REQUEST(_version) \
+                       (_version >= _XPC_VERSION(3,1))
+
+
+static inline int
+xpc_hb_allowed(partid_t partid, struct xpc_vars *vars)
+{
+       return ((vars->heartbeating_to_mask & (1UL << partid)) != 0);
+}
+
+static inline void
+xpc_allow_hb(partid_t partid, struct xpc_vars *vars)
+{
+       u64 old_mask, new_mask;
+
+       do {
+               old_mask = vars->heartbeating_to_mask;
+               new_mask = (old_mask | (1UL << partid));
+       } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+                                                       old_mask);
+}
+
+static inline void
+xpc_disallow_hb(partid_t partid, struct xpc_vars *vars)
+{
+       u64 old_mask, new_mask;
+
+       do {
+               old_mask = vars->heartbeating_to_mask;
+               new_mask = (old_mask & ~(1UL << partid));
+       } while (cmpxchg(&vars->heartbeating_to_mask, old_mask, new_mask) !=
+                                                       old_mask);
+}
+
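/*
 * Illustrative sketch only (not part of this patch): the cmpxchg retry
 * loop xpc_allow_hb() uses to set a partition's bit in
 * heartbeating_to_mask, reproduced on the host with GCC's
 * __sync_val_compare_and_swap() builtin standing in for cmpxchg().
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t mask;

static void allow_hb(int partid)
{
        uint64_t old_mask, new_mask;

        do {
                old_mask = mask;
                new_mask = old_mask | ((uint64_t)1 << partid);
        } while (__sync_val_compare_and_swap(&mask, old_mask, new_mask)
                 != old_mask);
}

int main(void)
{
        allow_hb(3);
        allow_hb(7);
        printf("heartbeating_to_mask = %#llx\n", (unsigned long long)mask);
        return 0;
}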
+
+/*
+ * The AMOs page consists of a number of AMO variables which are divided into
+ * four groups. The first two groups are used to identify an IRQ's sender.
+ * These two groups consist of 64 and 128 AMO variables respectively. The last
+ * two groups, consisting of just one AMO variable each, are used to identify
+ * the remote partitions that are currently engaged (from the viewpoint of
+ * the XPC running on the remote partition).
+ */
+#define XPC_NOTIFY_IRQ_AMOS       0
+#define XPC_ACTIVATE_IRQ_AMOS     (XPC_NOTIFY_IRQ_AMOS + XP_MAX_PARTITIONS)
+#define XPC_ENGAGED_PARTITIONS_AMO (XPC_ACTIVATE_IRQ_AMOS + XP_NASID_MASK_WORDS)
+#define XPC_DISENGAGE_REQUEST_AMO  (XPC_ENGAGED_PARTITIONS_AMO + 1)
+
+
+/*
+ * The following structure describes the per partition specific variables.
+ *
+ * An array of these structures, one per partition, will be defined. As a
+ * partition becomes active XPC will copy the array entry corresponding to
+ * itself from that partition. It is desirable that the size of this
+ * structure evenly divide into a cacheline, such that none of the entries
+ * in this array crosses a cacheline boundary. As it is now, each entry
+ * occupies half a cacheline.
+ */
+struct xpc_vars_part {
+       volatile u64 magic;
+
+       u64 openclose_args_pa;  /* physical address of open and close args */
+       u64 GPs_pa;             /* physical address of Get/Put values */
+
+       u64 IPI_amo_pa;         /* physical address of IPI AMO_t structure */
+       int IPI_nasid;          /* nasid of where to send IPIs */
+       int IPI_phys_cpuid;     /* physical CPU ID of where to send IPIs */
+
+       u8 nchannels;           /* #of defined channels supported */
+
+       u8 reserved[23];        /* pad to a full 64 bytes */
+};
+
+/*
+ * The vars_part MAGIC numbers play a part in the first contact protocol.
+ *
+ * MAGIC1 indicates that the per partition specific variables for a remote
+ * partition have been initialized by this partition.
+ *
+ * MAGIC2 indicates that this partition has pulled the remote partition's
+ * per partition variables that pertain to this partition.
+ */
+#define XPC_VP_MAGIC1  0x0053524156435058L  /* 'XPCVARS\0'L (little endian) */
+#define XPC_VP_MAGIC2  0x0073726176435058L  /* 'XPCvars\0'L (little endian) */
+
+
+/* the reserved page sizes and offsets */
+
+#define XPC_RP_HEADER_SIZE     L1_CACHE_ALIGN(sizeof(struct xpc_rsvd_page))
+#define XPC_RP_VARS_SIZE       L1_CACHE_ALIGN(sizeof(struct xpc_vars))
+
+#define XPC_RP_PART_NASIDS(_rp) (u64 *) ((u8 *) _rp + XPC_RP_HEADER_SIZE)
+#define XPC_RP_MACH_NASIDS(_rp) (XPC_RP_PART_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS(_rp)       ((struct xpc_vars *) XPC_RP_MACH_NASIDS(_rp) + xp_nasid_mask_words)
+#define XPC_RP_VARS_PART(_rp)  (struct xpc_vars_part *) ((u8 *) XPC_RP_VARS(_rp) + XPC_RP_VARS_SIZE)
+
+
+/*
+ * Functions registered by add_timer() or called by kernel_thread() only
+ * allow for a single 64-bit argument. The following macros can be used to
+ * pack and unpack two (32-bit, 16-bit or 8-bit) arguments into or out from
+ * the passed argument.
+ */
+#define XPC_PACK_ARGS(_arg1, _arg2) \
+                       ((((u64) _arg1) & 0xffffffff) | \
+                       ((((u64) _arg2) & 0xffffffff) << 32))
+
+#define XPC_UNPACK_ARG1(_args) (((u64) _args) & 0xffffffff)
+#define XPC_UNPACK_ARG2(_args) ((((u64) _args) >> 32) & 0xffffffff)
+
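/*
 * Illustrative sketch only (not part of this patch): a round trip through
 * the XPC_PACK_ARGS()/XPC_UNPACK_ARG*() macros above, checked on the host
 * with uint64_t standing in for the kernel's u64.
 */
#include <stdio.h>
#include <stdint.h>

#define XPC_PACK_ARGS(_arg1, _arg2) \
                ((((uint64_t) (_arg1)) & 0xffffffff) | \
                ((((uint64_t) (_arg2)) & 0xffffffff) << 32))
#define XPC_UNPACK_ARG1(_args)  (((uint64_t) (_args)) & 0xffffffff)
#define XPC_UNPACK_ARG2(_args)  ((((uint64_t) (_args)) >> 32) & 0xffffffff)

int main(void)
{
        uint64_t args = XPC_PACK_ARGS(0x1234, 0x5678);

        printf("arg1 %#llx, arg2 %#llx\n",
               (unsigned long long) XPC_UNPACK_ARG1(args),
               (unsigned long long) XPC_UNPACK_ARG2(args));
        return 0;
}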
+
+
+/*
+ * Define a Get/Put value pair (pointers) used with a message queue.
+ */
+struct xpc_gp {
+       volatile s64 get;       /* Get value */
+       volatile s64 put;       /* Put value */
+};
+
+#define XPC_GP_SIZE \
+               L1_CACHE_ALIGN(sizeof(struct xpc_gp) * XPC_NCHANNELS)
+
+
+
+/*
+ * Define a structure that contains arguments associated with opening and
+ * closing a channel.
+ */
+struct xpc_openclose_args {
+       u16 reason;             /* reason why channel is closing */
+       u16 msg_size;           /* sizeof each message entry */
+       u16 remote_nentries;    /* #of message entries in remote msg queue */
+       u16 local_nentries;     /* #of message entries in local msg queue */
+       u64 local_msgqueue_pa;  /* physical address of local message queue */
+};
+
+#define XPC_OPENCLOSE_ARGS_SIZE \
+             L1_CACHE_ALIGN(sizeof(struct xpc_openclose_args) * XPC_NCHANNELS)
+
+
+
+/* struct xpc_msg flags */
+
+#define        XPC_M_DONE              0x01    /* msg has been received/consumed */
+#define        XPC_M_READY             0x02    /* msg is ready to be sent */
+#define        XPC_M_INTERRUPT         0x04    /* send interrupt when msg consumed */
+
+
+#define XPC_MSG_ADDRESS(_payload) \
+               ((struct xpc_msg *)((u8 *)(_payload) - XPC_MSG_PAYLOAD_OFFSET))
+
+
+
+/*
+ * Defines notify entry.
+ *
+ * This is used to notify a message's sender that their message was received
+ * and consumed by the intended recipient.
+ */
+struct xpc_notify {
+       struct semaphore sema;          /* notify semaphore */
+       volatile u8 type;                       /* type of notification */
+
+       /* the following two fields are only used if type == XPC_N_CALL */
+       xpc_notify_func func;           /* user's notify function */
+       void *key;                      /* pointer to user's key */
+};
+
+/* struct xpc_notify type of notification */
+
+#define        XPC_N_CALL              0x01    /* notify function provided by user */
+
+
+
+/*
+ * Define the structure that manages all the stuff required by a channel. In
+ * particular, they are used to manage the messages sent across the channel.
+ *
+ * This structure is private to a partition, and is NOT shared across the
+ * partition boundary.
+ *
+ * There is an array of these structures for each remote partition. It is
+ * allocated at the time a partition becomes active. The array contains one
+ * of these structures for each potential channel connection to that partition.
+ *
+ * Each of these structures manages two message queues (circular buffers).
+ * They are allocated at the time a channel connection is made. One of
+ * these message queues (local_msgqueue) holds the locally created messages
+ * that are destined for the remote partition. The other of these message
+ * queues (remote_msgqueue) is a locally cached copy of the remote partition's
+ * own local_msgqueue.
+ *
+ * The following is a description of the Get/Put pointers used to manage these
+ * two message queues. Consider the local_msgqueue to be on one partition
+ * and the remote_msgqueue to be its cached copy on another partition. A
+ * description of what each of the lettered areas contains is included.
+ *
+ *
+ *                     local_msgqueue      remote_msgqueue
+ *
+ *                        |/////////|      |/////////|
+ *    w_remote_GP.get --> +---------+      |/////////|
+ *                        |    F    |      |/////////|
+ *     remote_GP.get  --> +---------+      +---------+ <-- local_GP->get
+ *                        |         |      |         |
+ *                        |         |      |    E    |
+ *                        |         |      |         |
+ *                        |         |      +---------+ <-- w_local_GP.get
+ *                        |    B    |      |/////////|
+ *                        |         |      |////D////|
+ *                        |         |      |/////////|
+ *                        |         |      +---------+ <-- w_remote_GP.put
+ *                        |         |      |////C////|
+ *      local_GP->put --> +---------+      +---------+ <-- remote_GP.put
+ *                        |         |      |/////////|
+ *                        |    A    |      |/////////|
+ *                        |         |      |/////////|
+ *     w_local_GP.put --> +---------+      |/////////|
+ *                        |/////////|      |/////////|
+ *
+ *
+ *         ( remote_GP.[get|put] are cached copies of the remote
+ *           partition's local_GP->[get|put], and thus their values can
+ *           lag behind their counterparts on the remote partition. )
+ *
+ *
+ *  A - Messages that have been allocated, but have not yet been sent to the
+ *     remote partition.
+ *
+ *  B - Messages that have been sent, but have not yet been acknowledged by the
+ *      remote partition as having been received.
+ *
+ *  C - Area that needs to be prepared for the copying of sent messages, by
+ *     the clearing of the message flags of any previously received messages.
+ *
+ *  D - Area into which sent messages are to be copied from the remote
+ *     partition's local_msgqueue and then delivered to their intended
+ *     recipients. [ To allow for a multi-message copy, another pointer
+ *     (next_msg_to_pull) has been added to keep track of the next message
+ *     number needing to be copied (pulled). It chases after w_remote_GP.put.
+ *     Any messages lying between w_local_GP.get and next_msg_to_pull have
+ *     been copied and are ready to be delivered. ]
+ *
+ *  E - Messages that have been copied and delivered, but have not yet been
+ *     acknowledged by the recipient as having been received.
+ *
+ *  F - Messages that have been acknowledged, but XPC has not yet notified the
+ *     sender that the message was received by its intended recipient.
+ *     This is also an area that needs to be prepared for the allocating of
+ *     new messages, by the clearing of the message flags of the acknowledged
+ *     messages.
+ */
+struct xpc_channel {
+       partid_t partid;                /* ID of remote partition connected */
+       spinlock_t lock;                /* lock for updating this structure */
+       u32 flags;                      /* general flags */
+
+       enum xpc_retval reason;         /* reason why channel is disconnect'g */
+       int reason_line;                /* line# disconnect initiated from */
+
+       u16 number;                     /* channel # */
+
+       u16 msg_size;                   /* sizeof each msg entry */
+       u16 local_nentries;             /* #of msg entries in local msg queue */
+       u16 remote_nentries;            /* #of msg entries in remote msg queue*/
+
+       void *local_msgqueue_base;      /* base address of kmalloc'd space */
+       struct xpc_msg *local_msgqueue; /* local message queue */
+       void *remote_msgqueue_base;     /* base address of kmalloc'd space */
+       struct xpc_msg *remote_msgqueue;/* cached copy of remote partition's */
+                                       /* local message queue */
+       u64 remote_msgqueue_pa;         /* phys addr of remote partition's */
+                                       /* local message queue */
+
+       atomic_t references;            /* #of external references to queues */
+
+       atomic_t n_on_msg_allocate_wq;   /* #on msg allocation wait queue */
+       wait_queue_head_t msg_allocate_wq; /* msg allocation wait queue */
+
+       u8 delayed_IPI_flags;           /* IPI flags received, but delayed */
+                                       /* action until channel disconnected */
+
+       /* queue of msg senders who want to be notified when msg received */
+
+       atomic_t n_to_notify;           /* #of msg senders to notify */
+       struct xpc_notify *notify_queue;/* notify queue for messages sent */
+
+       xpc_channel_func func;          /* user's channel function */
+       void *key;                      /* pointer to user's key */
+
+       struct semaphore msg_to_pull_sema; /* next msg to pull serialization */
+       struct semaphore wdisconnect_sema; /* wait for channel disconnect */
+
+       struct xpc_openclose_args *local_openclose_args; /* args passed on */
+                                       /* opening or closing of channel */
+
+       /* various flavors of local and remote Get/Put values */
+
+       struct xpc_gp *local_GP;        /* local Get/Put values */
+       struct xpc_gp remote_GP;        /* remote Get/Put values */
+       struct xpc_gp w_local_GP;       /* working local Get/Put values */
+       struct xpc_gp w_remote_GP;      /* working remote Get/Put values */
+       s64 next_msg_to_pull;           /* Put value of next msg to pull */
+
+       /* kthread management related fields */
+
+// >>> rethink having kthreads_assigned_limit and kthreads_idle_limit; perhaps
+// >>> allow the assigned limit be unbounded and let the idle limit be dynamic
+// >>> dependent on activity over the last interval of time
+       atomic_t kthreads_assigned;     /* #of kthreads assigned to channel */
+       u32 kthreads_assigned_limit;    /* limit on #of kthreads assigned */
+       atomic_t kthreads_idle;         /* #of kthreads idle waiting for work */
+       u32 kthreads_idle_limit;        /* limit on #of kthreads idle */
+       atomic_t kthreads_active;       /* #of kthreads actively working */
+       // >>> following field is temporary
+       u32 kthreads_created;           /* total #of kthreads created */
+
+       wait_queue_head_t idle_wq;      /* idle kthread wait queue */
+
+} ____cacheline_aligned;
+
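/*
 * Illustrative sketch only (not part of this patch): the Get/Put
 * bookkeeping sketched in the message-queue diagram earlier in this
 * header, using plain 64-bit counters on the host.  The counters only
 * ever grow; a ring slot is the counter value modulo the number of
 * message entries, and put - get gives the messages still outstanding.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int64_t put = 42, get = 37;   /* stand-ins for local_GP->put / ->get */
        int nentries = 64;            /* local_nentries stand-in */

        printf("%lld messages outstanding, next slot to fill is %lld\n",
               (long long)(put - get), (long long)(put % nentries));
        return 0;
}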
+
+/* struct xpc_channel flags */
+
+#define        XPC_C_WASCONNECTED      0x00000001 /* channel was connected */
+
+#define        XPC_C_ROPENREPLY        0x00000002 /* remote open channel reply */
+#define        XPC_C_OPENREPLY         0x00000004 /* local open channel reply */
+#define        XPC_C_ROPENREQUEST      0x00000008 /* remote open channel request */
+#define        XPC_C_OPENREQUEST       0x00000010 /* local open channel request */
+
+#define        XPC_C_SETUP             0x00000020 /* channel's msgqueues are alloc'd */
+#define        XPC_C_CONNECTCALLOUT    0x00000040 /* channel connected callout made */
+#define        XPC_C_CONNECTED         0x00000080 /* local channel is connected */
+#define        XPC_C_CONNECTING        0x00000100 /* channel is being connected */
+
+#define        XPC_C_RCLOSEREPLY       0x00000200 /* remote close channel reply */
+#define        XPC_C_CLOSEREPLY        0x00000400 /* local close channel reply */
+#define        XPC_C_RCLOSEREQUEST     0x00000800 /* remote close channel request */
+#define        XPC_C_CLOSEREQUEST      0x00001000 /* local close channel request */
+
+#define        XPC_C_DISCONNECTED      0x00002000 /* channel is disconnected */
+#define        XPC_C_DISCONNECTING     0x00004000 /* channel is being disconnected */
+#define        XPC_C_DISCONNECTCALLOUT 0x00008000 /* chan disconnected callout made */
+#define        XPC_C_WDISCONNECT       0x00010000 /* waiting for channel disconnect */
+
+
+
+/*
+ * Manages channels on a partition basis. There is one of these structures
+ * for each partition (a partition will never utilize the structure that
+ * represents itself).
+ */
+struct xpc_partition {
+
+       /* XPC HB infrastructure */
+
+       u8 remote_rp_version;           /* version# of partition's rsvd pg */
+       struct timespec remote_rp_stamp;/* time when rsvd pg was initialized */
+       u64 remote_rp_pa;               /* phys addr of partition's rsvd pg */
+       u64 remote_vars_pa;             /* phys addr of partition's vars */
+       u64 remote_vars_part_pa;        /* phys addr of partition's vars part */
+       u64 last_heartbeat;             /* HB at last read */
+       u64 remote_amos_page_pa;        /* phys addr of partition's amos page */
+       int remote_act_nasid;           /* active part's act/deact nasid */
+       int remote_act_phys_cpuid;      /* active part's act/deact phys cpuid */
+       u32 act_IRQ_rcvd;               /* IRQs since activation */
+       spinlock_t act_lock;            /* protect updating of act_state */
+       u8 act_state;                   /* from XPC HB viewpoint */
+       u8 remote_vars_version;         /* version# of partition's vars */
+       enum xpc_retval reason;         /* reason partition is deactivating */
+       int reason_line;                /* line# deactivation initiated from */
+       int reactivate_nasid;           /* nasid in partition to reactivate */
+
+       unsigned long disengage_request_timeout; /* timeout in jiffies */
+       struct timer_list disengage_request_timer;
+
+
+       /* XPC infrastructure referencing and teardown control */
+
+       volatile u8 setup_state;        /* infrastructure setup state */
+       wait_queue_head_t teardown_wq;  /* kthread waiting to teardown infra */
+       atomic_t references;            /* #of references to infrastructure */
+
+
+       /*
+        * NONE OF THE PRECEDING FIELDS OF THIS STRUCTURE WILL BE CLEARED WHEN
+        * XPC SETS UP THE NECESSARY INFRASTRUCTURE TO SUPPORT CROSS PARTITION
+        * COMMUNICATION. ALL OF THE FOLLOWING FIELDS WILL BE CLEARED. (THE
+        * 'nchannels' FIELD MUST BE THE FIRST OF THE FIELDS TO BE CLEARED.)
+        */
+
+
+       u8 nchannels;              /* #of defined channels supported */
+       atomic_t nchannels_active; /* #of channels that are not DISCONNECTED */
+       atomic_t nchannels_engaged;/* #of channels engaged with remote part */
+       struct xpc_channel *channels;/* array of channel structures */
+
+       void *local_GPs_base;     /* base address of kmalloc'd space */
+       struct xpc_gp *local_GPs; /* local Get/Put values */
+       void *remote_GPs_base;    /* base address of kmalloc'd space */
+       struct xpc_gp *remote_GPs;/* copy of remote partition's local Get/Put */
+                                 /* values */
+       u64 remote_GPs_pa;        /* phys address of remote partition's local */
+                                 /* Get/Put values */
+
+
+       /* fields used to pass args when opening or closing a channel */
+
+       void *local_openclose_args_base;  /* base address of kmalloc'd space */
+       struct xpc_openclose_args *local_openclose_args;  /* local's args */
+       void *remote_openclose_args_base; /* base address of kmalloc'd space */
+       struct xpc_openclose_args *remote_openclose_args; /* copy of remote's */
+                                         /* args */
+       u64 remote_openclose_args_pa;     /* phys addr of remote's args */
+
+
+       /* IPI sending, receiving and handling related fields */
+
+       int remote_IPI_nasid;       /* nasid of where to send IPIs */
+       int remote_IPI_phys_cpuid;  /* phys CPU ID of where to send IPIs */
+       AMO_t *remote_IPI_amo_va;   /* address of remote IPI AMO_t structure */
+
+       AMO_t *local_IPI_amo_va;    /* address of IPI AMO_t structure */
+       u64 local_IPI_amo;          /* IPI amo flags yet to be handled */
+       char IPI_owner[8];          /* IPI owner's name */
+       struct timer_list dropped_IPI_timer; /* dropped IPI timer */
+
+       spinlock_t IPI_lock;        /* IPI handler lock */
+
+
+       /* channel manager related fields */
+
+       atomic_t channel_mgr_requests;  /* #of requests to activate chan mgr */
+       wait_queue_head_t channel_mgr_wq; /* channel mgr's wait queue */
+
+} ____cacheline_aligned;
+
+
+/* struct xpc_partition act_state values (for XPC HB) */
+
+#define        XPC_P_INACTIVE          0x00    /* partition is not active */
+#define XPC_P_ACTIVATION_REQ   0x01    /* created thread to activate */
+#define XPC_P_ACTIVATING       0x02    /* activation thread started */
+#define XPC_P_ACTIVE           0x03    /* xpc_partition_up() was called */
+#define XPC_P_DEACTIVATING     0x04    /* partition deactivation initiated */
+
+
+#define XPC_DEACTIVATE_PARTITION(_p, _reason) \
+                       xpc_deactivate_partition(__LINE__, (_p), (_reason))
+
+
+/* struct xpc_partition setup_state values */
+
+#define XPC_P_UNSET            0x00    /* infrastructure was never setup */
+#define XPC_P_SETUP            0x01    /* infrastructure is setup */
+#define XPC_P_WTEARDOWN                0x02    /* waiting to tear down infrastructure */
+#define XPC_P_TORNDOWN         0x03    /* infrastructure is torn down */
+
+
+
+/*
+ * Number of jiffies for struct xpc_partition's dropped_IPI_timer to wait
+ * before checking for dropped IPIs. These occur whenever an IPI's AMO write
+ * doesn't complete until after the IPI was received.
+ */
+#define XPC_P_DROPPED_IPI_WAIT (0.25 * HZ)
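
XPC_P_DROPPED_IPI_WAIT is expressed in jiffies (a quarter of a second's worth
of HZ). As a hedged illustration only, not necessarily how xpc_main.c arms it,
a timeout of this form would typically be used to re-arm the partition's
dropped_IPI_timer:

        /* illustrative sketch: re-check for dropped IPIs in roughly 0.25s */
        static void example_arm_dropped_IPI_timer(struct xpc_partition *part)
        {
                part->dropped_IPI_timer.expires = jiffies +
                                                XPC_P_DROPPED_IPI_WAIT;
                add_timer(&part->dropped_IPI_timer);
        }
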
+
+
+/* number of seconds to wait for other partitions to disengage */
+#define XPC_DISENGAGE_REQUEST_DEFAULT_TIMELIMIT        90
+
+/* interval in seconds to print 'waiting disengagement' messages */
+#define XPC_DISENGAGE_PRINTMSG_INTERVAL                10
+
+
+#define XPC_PARTID(_p) ((partid_t) ((_p) - &xpc_partitions[0]))
+
+
+
+/* found in xp_main.c */
+extern struct xpc_registration xpc_registrations[];
+
+
+/* found in xpc_main.c */
+extern struct device *xpc_part;
+extern struct device *xpc_chan;
+extern int xpc_disengage_request_timelimit;
+extern int xpc_disengage_request_timedout;
+extern irqreturn_t xpc_notify_IRQ_handler(int, void *, struct pt_regs *);
+extern void xpc_dropped_IPI_check(struct xpc_partition *);
+extern void xpc_activate_partition(struct xpc_partition *);
+extern void xpc_activate_kthreads(struct xpc_channel *, int);
+extern void xpc_create_kthreads(struct xpc_channel *, int);
+extern void xpc_disconnect_wait(int);
+
+
+/* found in xpc_partition.c */
+extern int xpc_exiting;
+extern struct xpc_vars *xpc_vars;
+extern struct xpc_rsvd_page *xpc_rsvd_page;
+extern struct xpc_vars_part *xpc_vars_part;
+extern struct xpc_partition xpc_partitions[XP_MAX_PARTITIONS + 1];
+extern char xpc_remote_copy_buffer[];
+extern struct xpc_rsvd_page *xpc_rsvd_page_init(void);
+extern void xpc_allow_IPI_ops(void);
+extern void xpc_restrict_IPI_ops(void);
+extern int xpc_identify_act_IRQ_sender(void);
+extern int xpc_partition_disengaged(struct xpc_partition *);
+extern enum xpc_retval xpc_mark_partition_active(struct xpc_partition *);
+extern void xpc_mark_partition_inactive(struct xpc_partition *);
+extern void xpc_discovery(void);
+extern void xpc_check_remote_hb(void);
+extern void xpc_deactivate_partition(const int, struct xpc_partition *,
+                                               enum xpc_retval);
+extern enum xpc_retval xpc_initiate_partid_to_nasids(partid_t, void *);
+
+
+/* found in xpc_channel.c */
+extern void xpc_initiate_connect(int);
+extern void xpc_initiate_disconnect(int);
+extern enum xpc_retval xpc_initiate_allocate(partid_t, int, u32, void **);
+extern enum xpc_retval xpc_initiate_send(partid_t, int, void *);
+extern enum xpc_retval xpc_initiate_send_notify(partid_t, int, void *,
+                                               xpc_notify_func, void *);
+extern void xpc_initiate_received(partid_t, int, void *);
+extern enum xpc_retval xpc_setup_infrastructure(struct xpc_partition *);
+extern enum xpc_retval xpc_pull_remote_vars_part(struct xpc_partition *);
+extern void xpc_process_channel_activity(struct xpc_partition *);
+extern void xpc_connected_callout(struct xpc_channel *);
+extern void xpc_deliver_msg(struct xpc_channel *);
+extern void xpc_disconnect_channel(const int, struct xpc_channel *,
+                                       enum xpc_retval, unsigned long *);
+extern void xpc_disconnect_callout(struct xpc_channel *, enum xpc_retval);
+extern void xpc_partition_going_down(struct xpc_partition *, enum xpc_retval);
+extern void xpc_teardown_infrastructure(struct xpc_partition *);
+
+
+
+static inline void
+xpc_wakeup_channel_mgr(struct xpc_partition *part)
+{
+       if (atomic_inc_return(&part->channel_mgr_requests) == 1) {
+               wake_up(&part->channel_mgr_wq);
+       }
+}
+
+
+
+/*
+ * These next two inlines are used to keep us from tearing down a channel's
+ * msg queues while a thread may be referencing them.
+ */
+static inline void
+xpc_msgqueue_ref(struct xpc_channel *ch)
+{
+       atomic_inc(&ch->references);
+}
+
+static inline void
+xpc_msgqueue_deref(struct xpc_channel *ch)
+{
+       s32 refs = atomic_dec_return(&ch->references);
+
+       DBUG_ON(refs < 0);
+       if (refs == 0) {
+               xpc_wakeup_channel_mgr(&xpc_partitions[ch->partid]);
+       }
+}
+
+
+
+#define XPC_DISCONNECT_CHANNEL(_ch, _reason, _irqflgs) \
+               xpc_disconnect_channel(__LINE__, _ch, _reason, _irqflgs)
+
+
+/*
+ * These two inlines are used to keep us from tearing down a partition's
+ * setup infrastructure while a thread may be referencing it.
+ */
+static inline void
+xpc_part_deref(struct xpc_partition *part)
+{
+       s32 refs = atomic_dec_return(&part->references);
+
+
+       DBUG_ON(refs < 0);
+       if (refs == 0 && part->setup_state == XPC_P_WTEARDOWN) {
+               wake_up(&part->teardown_wq);
+       }
+}
+
+static inline int
+xpc_part_ref(struct xpc_partition *part)
+{
+       int setup;
+
+
+       atomic_inc(&part->references);
+       setup = (part->setup_state == XPC_P_SETUP);
+       if (!setup) {
+               xpc_part_deref(part);
+       }
+       return setup;
+}
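
Taken together, xpc_part_ref() and xpc_part_deref() bracket any use of a
partition's message-passing infrastructure: a caller that cannot obtain a
reference must not touch the fields that are cleared at setup/teardown time.
A rough, hypothetical sketch of the intended pattern (the function name is
invented for illustration):

        /* illustrative only: guard access to part's infrastructure */
        static void example_use_partition(struct xpc_partition *part)
        {
                if (!xpc_part_ref(part))
                        return; /* not set up, or being torn down */

                /* safe to use part->channels, part->local_GPs, etc. here */

                xpc_part_deref(part);   /* may wake a waiter in XPC_P_WTEARDOWN */
        }
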
+
+
+
+/*
+ * The following macro is to be used for the setting of the reason and
+ * reason_line fields in both the struct xpc_channel and struct xpc_partition
+ * structures.
+ */
+#define XPC_SET_REASON(_p, _reason, _line) \
+       { \
+               (_p)->reason = _reason; \
+               (_p)->reason_line = _line; \
+       }
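
XPC_SET_REASON() records both a reason code and the source line at which a
disconnect or deactivation was initiated; the XPC_DEACTIVATE_PARTITION() and
XPC_DISCONNECT_CHANNEL() wrappers above capture __LINE__ for the same purpose.
A hedged example of a direct caller, assuming xpcUnregistering is one of the
enum xpc_retval codes defined in xp.h:

        /* illustrative only: remember why and where a channel was shut down */
        static void example_mark_unregistering(struct xpc_channel *ch)
        {
                XPC_SET_REASON(ch, xpcUnregistering, __LINE__);
        }
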
+
+
+
+/*
+ * This next set of inlines is used to keep track of when a partition is
+ * potentially engaged in accessing memory belonging to another partition.
+ */
+
+static inline void
+xpc_mark_partition_engaged(struct xpc_partition *part)
+{
+       unsigned long irq_flags;
+       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+       local_irq_save(irq_flags);
+
+       /* set bit corresponding to our partid in remote partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+                                               (1UL << sn_partition_id));
+       /*
+        * We must always use the nofault function regardless of whether we
+        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+        * didn't, we'd never know that the other partition is down and would
+        * keep sending IPIs and AMOs to it until the heartbeat times out.
+        */
+       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                               variable), xp_nofault_PIOR_target));
+
+       local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_mark_partition_disengaged(struct xpc_partition *part)
+{
+       unsigned long irq_flags;
+       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+                               (XPC_ENGAGED_PARTITIONS_AMO * sizeof(AMO_t)));
+
+
+       local_irq_save(irq_flags);
+
+       /* clear bit corresponding to our partid in remote partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+                                               ~(1UL << sn_partition_id));
+       /*
+        * We must always use the nofault function regardless of whether we
+        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+        * didn't, we'd never know that the other partition is down and would
+        * keep sending IPIs and AMOs to it until the heartbeat times out.
+        */
+       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                               variable), xp_nofault_PIOR_target));
+
+       local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_request_partition_disengage(struct xpc_partition *part)
+{
+       unsigned long irq_flags;
+       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+       local_irq_save(irq_flags);
+
+       /* set bit corresponding to our partid in remote partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR,
+                                               (1UL << sn_partition_id));
+       /*
+        * We must always use the nofault function regardless of whether we
+        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+        * didn't, we'd never know that the other partition is down and would
+        * keep sending IPIs and AMOs to it until the heartbeat times out.
+        */
+       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                               variable), xp_nofault_PIOR_target));
+
+       local_irq_restore(irq_flags);
+}
+
+static inline void
+xpc_cancel_partition_disengage_request(struct xpc_partition *part)
+{
+       unsigned long irq_flags;
+       AMO_t *amo = (AMO_t *) __va(part->remote_amos_page_pa +
+                               (XPC_DISENGAGE_REQUEST_AMO * sizeof(AMO_t)));
+
+
+       local_irq_save(irq_flags);
+
+       /* clear bit corresponding to our partid in remote partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+                                               ~(1UL << sn_partition_id));
+       /*
+        * We must always use the nofault function regardless of whether we
+        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+        * didn't, we'd never know that the other partition is down and would
+        * keep sending IPIs and AMOs to it until the heartbeat times out.
+        */
+       (void) xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->
+                               variable), xp_nofault_PIOR_target));
+
+       local_irq_restore(irq_flags);
+}
+
+static inline u64
+xpc_partition_engaged(u64 partid_mask)
+{
+       AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+       /* return our partition's AMO variable ANDed with partid_mask */
+       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+                                                               partid_mask);
+}
+
+static inline u64
+xpc_partition_disengage_requested(u64 partid_mask)
+{
+       AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+       /* return our partition's AMO variable ANDed with partid_mask */
+       return (FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_LOAD) &
+                                                               partid_mask);
+}
+
+static inline void
+xpc_clear_partition_engaged(u64 partid_mask)
+{
+       AMO_t *amo = xpc_vars->amos_page + XPC_ENGAGED_PARTITIONS_AMO;
+
+
+       /* clear bit(s) based on partid_mask in our partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+                                                               ~partid_mask);
+}
+
+static inline void
+xpc_clear_partition_disengage_request(u64 partid_mask)
+{
+       AMO_t *amo = xpc_vars->amos_page + XPC_DISENGAGE_REQUEST_AMO;
+
+
+       /* clear bit(s) based on partid_mask in our partition's AMO */
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_AND,
+                                                               ~partid_mask);
+}
+
+
+
+/*
+ * The following set of macros and inlines is used for the sending and
+ * receiving of IPIs (also known as IRQs). There are two flavors of IPIs,
+ * one that is associated with partition activity (SGI_XPC_ACTIVATE) and
+ * the other that is associated with channel activity (SGI_XPC_NOTIFY).
+ */
+
+static inline u64
+xpc_IPI_receive(AMO_t *amo)
+{
+       return FETCHOP_LOAD_OP(TO_AMO((u64) &amo->variable), FETCHOP_CLEAR);
+}
+
+
+static inline enum xpc_retval
+xpc_IPI_send(AMO_t *amo, u64 flag, int nasid, int phys_cpuid, int vector)
+{
+       int ret = 0;
+       unsigned long irq_flags;
+
+
+       local_irq_save(irq_flags);
+
+       FETCHOP_STORE_OP(TO_AMO((u64) &amo->variable), FETCHOP_OR, flag);
+       sn_send_IPI_phys(nasid, phys_cpuid, vector, 0);
+
+       /*
+        * We must always use the nofault function regardless of whether we
+        * are on a Shub 1.1 system or a Shub 1.2 slice 0xc processor. If we
+        * didn't, we'd never know that the other partition is down and would
+        * keep sending IPIs and AMOs to it until the heartbeat times out.
+        */
+       ret = xp_nofault_PIOR((u64 *) GLOBAL_MMR_ADDR(NASID_GET(&amo->variable),
+                               xp_nofault_PIOR_target));
+
+       local_irq_restore(irq_flags);
+
+       return ((ret == 0) ? xpcSuccess : xpcPioReadError);
+}
+
+
+/*
+ * IPIs associated with SGI_XPC_ACTIVATE IRQ.
+ */
+
+/*
+ * Flag the appropriate AMO variable and send an IPI to the specified node.
+ */
+static inline void
+xpc_activate_IRQ_send(u64 amos_page_pa, int from_nasid, int to_nasid,
+                       int to_phys_cpuid)
+{
+       int w_index = XPC_NASID_W_INDEX(from_nasid);
+       int b_index = XPC_NASID_B_INDEX(from_nasid);
+       AMO_t *amos = (AMO_t *) __va(amos_page_pa +
+                               (XPC_ACTIVATE_IRQ_AMOS * sizeof(AMO_t)));
+
+
+       (void) xpc_IPI_send(&amos[w_index], (1UL << b_index), to_nasid,
+                               to_phys_cpuid, SGI_XPC_ACTIVATE);
+}
+
+static inline void
+xpc_IPI_send_activate(struct xpc_vars *vars)
+{
+       xpc_activate_IRQ_send(vars->amos_page_pa, cnodeid_to_nasid(0),
+                               vars->act_nasid, vars->act_phys_cpuid);
+}
+
+static inline void
+xpc_IPI_send_activated(struct xpc_partition *part)
+{
+       xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
+                       part->remote_act_nasid, part->remote_act_phys_cpuid);
+}
+
+static inline void
+xpc_IPI_send_reactivate(struct xpc_partition *part)
+{
+       xpc_activate_IRQ_send(xpc_vars->amos_page_pa, part->reactivate_nasid,
+                               xpc_vars->act_nasid, xpc_vars->act_phys_cpuid);
+}
+
+static inline void
+xpc_IPI_send_disengage(struct xpc_partition *part)
+{
+       xpc_activate_IRQ_send(part->remote_amos_page_pa, cnodeid_to_nasid(0),
+                       part->remote_act_nasid, part->remote_act_phys_cpuid);
+}
+
+
+/*
+ * IPIs associated with SGI_XPC_NOTIFY IRQ.
+ */
+
+/*
+ * Send an IPI to the remote partition that is associated with the
+ * specified channel.
+ */
+#define XPC_NOTIFY_IRQ_SEND(_ch, _ipi_f, _irq_f) \
+               xpc_notify_IRQ_send(_ch, _ipi_f, #_ipi_f, _irq_f)
+
+static inline void
+xpc_notify_IRQ_send(struct xpc_channel *ch, u8 ipi_flag, char *ipi_flag_string,
+                       unsigned long *irq_flags)
+{
+       struct xpc_partition *part = &xpc_partitions[ch->partid];
+       enum xpc_retval ret;
+
+
+       if (likely(part->act_state != XPC_P_DEACTIVATING)) {
+               ret = xpc_IPI_send(part->remote_IPI_amo_va,
+                                       (u64) ipi_flag << (ch->number * 8),
+                                       part->remote_IPI_nasid,
+                                       part->remote_IPI_phys_cpuid,
+                                       SGI_XPC_NOTIFY);
+               dev_dbg(xpc_chan, "%s sent to partid=%d, channel=%d, ret=%d\n",
+                       ipi_flag_string, ch->partid, ch->number, ret);
+               if (unlikely(ret != xpcSuccess)) {
+                       if (irq_flags != NULL) {
+                               spin_unlock_irqrestore(&ch->lock, *irq_flags);
+                       }
+                       XPC_DEACTIVATE_PARTITION(part, ret);
+                       if (irq_flags != NULL) {
+                               spin_lock_irqsave(&ch->lock, *irq_flags);
+                       }
+               }
+       }
+}
+
+
+/*
+ * Make it look like the remote partition, which is associated with the
+ * specified channel, sent us an IPI. This faked IPI will be handled
+ * by xpc_dropped_IPI_check().
+ */
+#define XPC_NOTIFY_IRQ_SEND_LOCAL(_ch, _ipi_f) \
+               xpc_notify_IRQ_send_local(_ch, _ipi_f, #_ipi_f)
+
+static inline void
+xpc_notify_IRQ_send_local(struct xpc_channel *ch, u8 ipi_flag,
+                               char *ipi_flag_string)
+{
+       struct xpc_partition *part = &xpc_partitions[ch->partid];
+
+
+       FETCHOP_STORE_OP(TO_AMO((u64) &part->local_IPI_amo_va->variable),
+                       FETCHOP_OR, ((u64) ipi_flag << (ch->number * 8)));
+       dev_dbg(xpc_chan, "%s sent local from partid=%d, channel=%d\n",
+               ipi_flag_string, ch->partid, ch->number);
+}
+
+
+/*
+ * The sending and receiving of IPIs includes the setting of an AMO variable
+ * to indicate the reason the IPI was sent. The 64-bit variable is divided
+ * up into eight bytes, ordered from right to left. Byte zero pertains to
+ * channel 0, byte one to channel 1, and so on. Each byte is described by
+ * the following IPI flags.
+ */
+
+#define        XPC_IPI_CLOSEREQUEST    0x01
+#define        XPC_IPI_CLOSEREPLY      0x02
+#define        XPC_IPI_OPENREQUEST     0x04
+#define        XPC_IPI_OPENREPLY       0x08
+#define        XPC_IPI_MSGREQUEST      0x10
+
+
+/* given an AMO variable and a channel#, get its associated IPI flags */
+#define XPC_GET_IPI_FLAGS(_amo, _c)    ((u8) (((_amo) >> ((_c) * 8)) & 0xff))
+#define XPC_SET_IPI_FLAGS(_amo, _c, _f)        (_amo) |= ((u64) (_f) << ((_c) * 8))
+
+#define        XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(_amo) ((_amo) & 0x0f0f0f0f0f0f0f0f)
+#define XPC_ANY_MSG_IPI_FLAGS_SET(_amo)       ((_amo) & 0x1010101010101010)
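
The GET/SET macros above simply shift by (channel# * 8), matching the byte
layout described in the preceding comment. A small hypothetical illustration
of the packing (not part of this change):

        static inline void example_pack_IPI_flags(void)
        {
                u64 amo_flags = 0;

                /* flag an OPENREQUEST on channel 3, a MSGREQUEST on channel 0 */
                XPC_SET_IPI_FLAGS(amo_flags, 3, XPC_IPI_OPENREQUEST);
                XPC_SET_IPI_FLAGS(amo_flags, 0, XPC_IPI_MSGREQUEST);

                /* now XPC_GET_IPI_FLAGS(amo_flags, 3) == 0x04 and        */
                /* XPC_GET_IPI_FLAGS(amo_flags, 0) == 0x10; both          */
                /* XPC_ANY_OPENCLOSE_IPI_FLAGS_SET(amo_flags) and         */
                /* XPC_ANY_MSG_IPI_FLAGS_SET(amo_flags) are non-zero      */
        }
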
+
+
+static inline void
+xpc_IPI_send_closerequest(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+       struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+       args->reason = ch->reason;
+
+       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREQUEST, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_closereply(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_CLOSEREPLY, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_openrequest(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+       struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+       args->msg_size = ch->msg_size;
+       args->local_nentries = ch->local_nentries;
+
+       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREQUEST, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_openreply(struct xpc_channel *ch, unsigned long *irq_flags)
+{
+       struct xpc_openclose_args *args = ch->local_openclose_args;
+
+
+       args->remote_nentries = ch->remote_nentries;
+       args->local_nentries = ch->local_nentries;
+       args->local_msgqueue_pa = __pa(ch->local_msgqueue);
+
+       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_OPENREPLY, irq_flags);
+}
+
+static inline void
+xpc_IPI_send_msgrequest(struct xpc_channel *ch)
+{
+       XPC_NOTIFY_IRQ_SEND(ch, XPC_IPI_MSGREQUEST, NULL);
+}
+
+static inline void
+xpc_IPI_send_local_msgrequest(struct xpc_channel *ch)
+{
+       XPC_NOTIFY_IRQ_SEND_LOCAL(ch, XPC_IPI_MSGREQUEST);
+}
+
+
+/*
+ * Memory for XPC's AMO variables is allocated by the MSPEC driver. These
+ * pages are located in the lowest granule. The lowest granule uses 4k pages
+ * for cached references and an alternate TLB handler to never provide a
+ * cacheable mapping for the entire region. This prevents speculative reads
+ * of cached copies of our lines from being issued, which would otherwise
+ * cause the SHUB to generate a PI FSB Protocol error. For XPC, we need 64
+ * AMO variables (based on XP_MAX_PARTITIONS) for message notification and an
+ * additional 128 AMO variables (based on XP_NASID_MASK_WORDS) for partition
+ * activation and 2 AMO variables for partition deactivation.
+ */
+static inline AMO_t *
+xpc_IPI_init(int index)
+{
+       AMO_t *amo = xpc_vars->amos_page + index;
+
+
+       (void) xpc_IPI_receive(amo);    /* clear AMO variable */
+       return amo;
+}
+
+
+
+static inline enum xpc_retval
+xpc_map_bte_errors(bte_result_t error)
+{
+       switch (error) {
+       case BTE_SUCCESS:       return xpcSuccess;
+       case BTEFAIL_DIR:       return xpcBteDirectoryError;
+       case BTEFAIL_POISON:    return xpcBtePoisonError;
+       case BTEFAIL_WERR:      return xpcBteWriteError;
+       case BTEFAIL_ACCESS:    return xpcBteAccessError;
+       case BTEFAIL_PWERR:     return xpcBtePWriteError;
+       case BTEFAIL_PRERR:     return xpcBtePReadError;
+       case BTEFAIL_TOUT:      return xpcBteTimeOutError;
+       case BTEFAIL_XTERR:     return xpcBteXtalkError;
+       case BTEFAIL_NOTAVAIL:  return xpcBteNotAvailable;
+       default:                return xpcBteUnmappedError;
+       }
+}
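
xpc_map_bte_errors() folds the SN2 block transfer engine's result codes into
XPC's own return values, so a failed remote copy can be reported uniformly.
A hedged sketch of such a caller; the bte_copy() mode flags shown are an
assumption about typical usage, not something this change specifies:

        /* illustrative only: copy len bytes from a remote physical address */
        static enum xpc_retval
        example_pull_remote_data(u64 remote_pa, void *local_buf, u64 len)
        {
                bte_result_t bres;

                bres = bte_copy(remote_pa, __pa(local_buf), len,
                                        (BTE_NORMAL | BTE_WACQUIRE), NULL);
                return xpc_map_bte_errors(bres); /* BTE_SUCCESS maps to xpcSuccess */
        }
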
+
+
+
+static inline void *
+xpc_kmalloc_cacheline_aligned(size_t size, gfp_t flags, void **base)
+{
+       /* see if kmalloc will give us cacheline aligned memory by default */
+       *base = kmalloc(size, flags);
+       if (*base == NULL) {
+               return NULL;
+       }
+       if ((u64) *base == L1_CACHE_ALIGN((u64) *base)) {
+               return *base;
+       }
+       kfree(*base);
+
+       /* nope, we'll have to do it ourselves */
+       *base = kmalloc(size + L1_CACHE_BYTES, flags);
+       if (*base == NULL) {
+               return NULL;
+       }
+       return (void *) L1_CACHE_ALIGN((u64) *base);
+}
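
xpc_kmalloc_cacheline_aligned() hands back the aligned pointer for use but
reports the raw kmalloc() pointer through *base, and it is *base that must
later be passed to kfree(). A hedged usage sketch; XPC_GP_SIZE and
xpcNoMemory are assumed to come from elsewhere in xp.h/xpc.h and appear here
only for illustration:

        /* illustrative only */
        static enum xpc_retval example_alloc_GPs(struct xpc_partition *part)
        {
                part->local_GPs = xpc_kmalloc_cacheline_aligned(XPC_GP_SIZE,
                                        GFP_KERNEL, &part->local_GPs_base);
                if (part->local_GPs == NULL)
                        return xpcNoMemory;

                /* teardown later frees the raw pointer: */
                /* kfree(part->local_GPs_base); part->local_GPs = NULL; */
                return xpcSuccess;
        }
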
+
+
+/*
+ * Check to see if there is any channel activity to/from the specified
+ * partition.
+ */
+static inline void
+xpc_check_for_channel_activity(struct xpc_partition *part)
+{
+       u64 IPI_amo;
+       unsigned long irq_flags;
+
+
+       IPI_amo = xpc_IPI_receive(part->local_IPI_amo_va);
+       if (IPI_amo == 0) {
+               return;
+       }
+
+       spin_lock_irqsave(&part->IPI_lock, irq_flags);
+       part->local_IPI_amo |= IPI_amo;
+       spin_unlock_irqrestore(&part->IPI_lock, irq_flags);
+
+       dev_dbg(xpc_chan, "received IPI from partid=%d, IPI_amo=0x%lx\n",
+               XPC_PARTID(part), IPI_amo);
+
+       xpc_wakeup_channel_mgr(part);
+}
+
+
+#endif /* _ASM_IA64_SN_XPC_H */
+
index 635235fa1e32606c5955653bed40d0a77cb4899c..80c5a234e2599c90006de76e27b47f49b6197ade 100644 (file)
@@ -219,14 +219,14 @@ extern void ia64_load_extra (struct task_struct *task);
 
 #define IA64_HAS_EXTRA_STATE(t)                                                        \
        ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID)       \
-        || IS_IA32_PROCESS(ia64_task_regs(t)) || PERFMON_IS_SYSWIDE())
+        || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE())
 
 #define __switch_to(prev,next,last) do {                                                        \
        if (IA64_HAS_EXTRA_STATE(prev))                                                          \
                ia64_save_extra(prev);                                                           \
        if (IA64_HAS_EXTRA_STATE(next))                                                          \
                ia64_load_extra(next);                                                           \
-       ia64_psr(ia64_task_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                    \
+       ia64_psr(task_pt_regs(next))->dfh = !ia64_is_local_fpu_owner(next);                      \
        (last) = ia64_switch_to((next));                                                         \
 } while (0)
 
@@ -238,8 +238,8 @@ extern void ia64_load_extra (struct task_struct *task);
  * the latest fph state from another CPU.  In other words: eager save, lazy restore.
  */
 # define switch_to(prev,next,last) do {                                                \
-       if (ia64_psr(ia64_task_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {                             \
-               ia64_psr(ia64_task_regs(prev))->mfh = 0;                        \
+       if (ia64_psr(task_pt_regs(prev))->mfh && ia64_is_local_fpu_owner(prev)) {                               \
+               ia64_psr(task_pt_regs(prev))->mfh = 0;                  \
                (prev)->thread.flags |= IA64_THREAD_FPH_VALID;                  \
                __ia64_save_fpu((prev)->thread.fph);                            \
        }                                                                       \
@@ -279,6 +279,7 @@ extern void ia64_load_extra (struct task_struct *task);
 #define ia64_platform_is(x) (strcmp(x, platform_name) == 0)
 
 void cpu_idle_wait(void);
+void sched_cacheflush(void);
 
 #define arch_align_stack(x) (x)
 
index 171b2207bde4e70a9936a19a56899eca95962a6d..1d6518fe1f024ddcaf7a563904af38149dc10e8b 100644 (file)
@@ -57,11 +57,20 @@ struct thread_info {
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
 #define alloc_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define task_thread_info(tsk)  ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
 #else
 #define current_thread_info()  ((struct thread_info *) 0)
 #define alloc_thread_info(tsk) ((struct thread_info *) 0)
+#define task_thread_info(tsk)  ((struct thread_info *) 0)
 #endif
 #define free_thread_info(ti)   /* nothing */
+#define task_stack_page(tsk)   ((void *)(tsk))
+
+#define __HAVE_THREAD_FUNCTIONS
+#define setup_thread_stack(p, org) \
+       *task_thread_info(p) = *task_thread_info(org); \
+       task_thread_info(p)->task = (p);
+#define end_of_stack(p) (unsigned long *)((void *)(p) + IA64_RBS_OFFSET)
 
 #define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 #define alloc_task_struct()    ((task_t *)__get_free_pages(GFP_KERNEL, KERNEL_STACK_SIZE_ORDER))
@@ -84,6 +93,7 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG     16      /* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_MEMDIE             17
 #define TIF_MCA_INIT           18      /* this task is processing MCA or INIT */
+#define TIF_DB_DISABLED                19      /* debug trap disabled for fsyscall */
 
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
@@ -91,9 +101,10 @@ struct thread_info {
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
-#define _TIF_SIGDELAYED        (1 << TIF_SIGDELAYED)
+#define _TIF_SIGDELAYED                (1 << TIF_SIGDELAYED)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_MCA_INIT          (1 << TIF_MCA_INIT)
+#define _TIF_DB_DISABLED       (1 << TIF_DB_DISABLED)
 
 /* "work to do on user-return" bits */
 #define TIF_ALLWORK_MASK       (_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SIGDELAYED)
index f7c330467e7e92247993d6b1fe3b1b3c7ed950a6..d8aae4da3978945adae91064c3ebbe8d0b29f01b 100644 (file)
@@ -55,7 +55,6 @@ void build_cpu_to_node_map(void);
        .max_interval           = 4,                    \
        .busy_factor            = 64,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000000),         \
        .per_cpu_gain           = 100,                  \
        .cache_nice_tries       = 2,                    \
        .busy_idx               = 2,                    \
@@ -81,7 +80,6 @@ void build_cpu_to_node_map(void);
        .max_interval           = 8*(min(num_online_cpus(), 32)), \
        .busy_factor            = 64,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000000),         \
        .cache_nice_tries       = 2,                    \
        .busy_idx               = 3,                    \
        .idle_idx               = 2,                    \
index 55cd7ecfde4370c6f34c1e88bef4df48e18b6643..0d058b2d844e468ac6d6ac448a553dfa1d752528 100644 (file)
@@ -163,6 +163,9 @@ extern void show_regs(struct pt_regs *);
 
 extern void withdraw_debug_trap(struct pt_regs *regs);
 
+#define task_pt_regs(task) \
+        ((struct pt_regs *)(task_stack_page(task) + THREAD_SIZE) - 1)
+
 #endif /* __KERNEL */
 
 #endif /* _ASM_M32R_PTRACE_H */
index dcf619a0a0b03285fd55976f2b0a1fc3ec6c7e49..06c12a037cba557aab39a27eb302adbdf97d67f7 100644 (file)
        last = __last; \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /* Interrupt Control */
 #if !defined(CONFIG_CHIP_M32102) && !defined(CONFIG_CHIP_M32104)
 #define local_irq_enable() \
index 0f589363f619cd6f26e08d668e0de604dd7cd6eb..22aff3222d22a3790b22791fcacd1cdc94baf72e 100644 (file)
@@ -110,8 +110,6 @@ static inline struct thread_info *current_thread_info(void)
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #define TI_FLAG_FAULT_CODE_SHIFT       28
 
index 3ae5d8d55ba991536f450aebb2e35c6664ee60b7..a16fe4e5a28a4159edc4c0c08ffad39313c74fad 100644 (file)
@@ -274,7 +274,7 @@ struct CIA {
 #define ZTWO_VADDR(x) (((unsigned long)(x))+zTwoBase)
 
 #define CUSTOM_PHYSADDR     (0xdff000)
-#define custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
+#define amiga_custom ((*(volatile struct CUSTOM *)(zTwoBase+CUSTOM_PHYSADDR)))
 
 #define CIAA_PHYSADDR    (0xbfe001)
 #define CIAB_PHYSADDR    (0xbfd000)
@@ -294,12 +294,12 @@ static inline void amifb_video_off(void)
 {
        if (amiga_chipset == CS_ECS || amiga_chipset == CS_AGA) {
                /* program Denise/Lisa for a higher maximum play rate */
-               custom.htotal = 113;        /* 31 kHz */
-               custom.vtotal = 223;        /* 70 Hz */
-               custom.beamcon0 = 0x4390;   /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
+               amiga_custom.htotal = 113;        /* 31 kHz */
+               amiga_custom.vtotal = 223;        /* 70 Hz */
+               amiga_custom.beamcon0 = 0x4390;   /* HARDDIS, VAR{BEAM,VSY,HSY,CSY}EN */
                /* suspend the monitor */
-               custom.hsstrt = custom.hsstop = 116;
-               custom.vsstrt = custom.vsstop = 226;
+               amiga_custom.hsstrt = amiga_custom.hsstop = 116;
+               amiga_custom.vsstrt = amiga_custom.vsstop = 226;
                amiga_audio_min_period = 57;
        }
 }
index 2aff4cfbf7b3837013f4f3b94a177f52a41f2028..aa968d014bb6b9502a55160682a0b101a34f9f29 100644 (file)
 extern void amiga_do_irq(int irq, struct pt_regs *fp);
 extern void amiga_do_irq_list(int irq, struct pt_regs *fp);
 
-extern unsigned short amiga_intena_vals[];
-
 /* CIA interrupt control register bits */
 
 #define CIA_ICR_TA     0x01
index 78860c20db01b29260b000868a84ee6bdc771b30..17280ef719f561844a67fe4ffe233f108cdd7867 100644 (file)
@@ -25,7 +25,7 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
  * better 64-bit) boundary
  */
 
-extern unsigned int csum_partial_copy_from_user(const unsigned char *src,
+extern unsigned int csum_partial_copy_from_user(const unsigned char __user *src,
                                                unsigned char *dst,
                                                int len, int sum,
                                                int *csum_err);
index ab3dd33e23a116e81916a93cce09960981301940..2d8c0c9f794b3dfc314ba4d357ec63900473daac 100644 (file)
@@ -13,7 +13,7 @@
 /* Used for uploading DSP binary code */
 struct dsp56k_upload {
        int len;
-       char *bin;
+       char __user *bin;
 };
 
 /* For the DSP host flags */
index c6e708dd9f624fd14dcb298c4055442008b98e2e..63a05ed95c17f1fd833385364c1fab00c1bf9bab 100644 (file)
@@ -46,7 +46,7 @@ asmlinkage irqreturn_t floppy_hardint(int irq, void *dev_id,
 
 static int virtual_dma_count=0;
 static int virtual_dma_residue=0;
-static char *virtual_dma_addr=0;
+static char *virtual_dma_addr=NULL;
 static int virtual_dma_mode=0;
 static int doing_pdma=0;
 
index 728318bf7f0e996f25e9764ceb366ef2cd54c101..5e1c5826c83d179e3d4f3228a884f7673b476e21 100644 (file)
@@ -14,13 +14,4 @@ typedef struct {
 
 #define HARDIRQ_BITS   8
 
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #endif
index 6bb8b0d8f99d10f5229cd0467ed229657a8aa113..dcfaa352d34c072fcee570059d5d29d62da4768b 100644 (file)
@@ -24,6 +24,7 @@
 #ifdef __KERNEL__
 
 #include <linux/config.h>
+#include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
 
@@ -120,68 +121,68 @@ extern int isa_sex;
  * be compiled in so the case statement will be optimised away
  */
 
-static inline u8 *isa_itb(unsigned long addr)
+static inline u8 __iomem *isa_itb(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u8 *)Q40_ISA_IO_B(addr);
+    case Q40_ISA: return (u8 __iomem *)Q40_ISA_IO_B(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u8 *)GG2_ISA_IO_B(addr);
+    case GG2_ISA: return (u8 __iomem *)GG2_ISA_IO_B(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u8 *)AG_ISA_IO_B(addr);
+    case AG_ISA: return (u8 __iomem *)AG_ISA_IO_B(addr);
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u16 *isa_itw(unsigned long addr)
+static inline u16 __iomem *isa_itw(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u16 *)Q40_ISA_IO_W(addr);
+    case Q40_ISA: return (u16 __iomem *)Q40_ISA_IO_W(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u16 *)GG2_ISA_IO_W(addr);
+    case GG2_ISA: return (u16 __iomem *)GG2_ISA_IO_W(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u16 *)AG_ISA_IO_W(addr);
+    case AG_ISA: return (u16 __iomem *)AG_ISA_IO_W(addr);
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u8 *isa_mtb(unsigned long addr)
+static inline u8 __iomem *isa_mtb(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u8 *)Q40_ISA_MEM_B(addr);
+    case Q40_ISA: return (u8 __iomem *)Q40_ISA_MEM_B(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u8 *)GG2_ISA_MEM_B(addr);
+    case GG2_ISA: return (u8 __iomem *)GG2_ISA_MEM_B(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u8 *)addr;
+    case AG_ISA: return (u8 __iomem *)addr;
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
-static inline u16 *isa_mtw(unsigned long addr)
+static inline u16 __iomem *isa_mtw(unsigned long addr)
 {
   switch(ISA_TYPE)
     {
 #ifdef CONFIG_Q40
-    case Q40_ISA: return (u16 *)Q40_ISA_MEM_W(addr);
+    case Q40_ISA: return (u16 __iomem *)Q40_ISA_MEM_W(addr);
 #endif
 #ifdef CONFIG_GG2
-    case GG2_ISA: return (u16 *)GG2_ISA_MEM_W(addr);
+    case GG2_ISA: return (u16 __iomem *)GG2_ISA_MEM_W(addr);
 #endif
 #ifdef CONFIG_AMIGA_PCMCIA
-    case AG_ISA: return (u16 *)addr;
+    case AG_ISA: return (u16 __iomem *)addr;
 #endif
-    default: return 0; /* avoid warnings, just in case */
+    default: return NULL; /* avoid warnings, just in case */
     }
 }
 
@@ -326,20 +327,20 @@ static inline void isa_delay(void)
 
 #define mmiowb()
 
-static inline void *ioremap(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap(unsigned long physaddr, unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void *ioremap_writethrough(unsigned long physaddr,
+static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
                                         unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-static inline void *ioremap_fullcache(unsigned long physaddr,
+static inline void __iomem *ioremap_fullcache(unsigned long physaddr,
                                      unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
index 127ad190cf2d6e895dd8c2d4990c750a1b6b94bd..325c86f8512d08b50e9dd9e3ed049c1bd975b93a 100644 (file)
 #define NR_IRQS (24+SYS_IRQS)
 #endif
 
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
 /*
  * Interrupt source definitions
  * General interrupt sources are the level 1-7.
index a0dd5c47002c9ef43705e80b09344318382e4db3..7d3fee342369e7c169bd7af227ffe2118da36044 100644 (file)
@@ -34,7 +34,6 @@ extern void (*mach_power_off)( void );
 extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
 extern void (*mach_beep) (unsigned int, unsigned int);
index 041f0a87b25daae492af9f50976bff1948a7a850..5439bcaa57c6476f39b24b61d66f52611e6873db 100644 (file)
@@ -19,9 +19,9 @@
 #define IOMAP_NOCACHE_NONSER           2
 #define IOMAP_WRITETHROUGH             3
 
-extern void iounmap(void *addr);
+extern void iounmap(void __iomem *addr);
 
-extern void *__ioremap(unsigned long physaddr, unsigned long size,
+extern void __iomem *__ioremap(unsigned long physaddr, unsigned long size,
                       int cacheflag);
 extern void __iounmap(void *addr, unsigned long size);
 
@@ -30,21 +30,21 @@ extern void __iounmap(void *addr, unsigned long size);
  * two accesses to memory, which may be undesirable for some devices.
  */
 #define in_8(addr) \
-    ({ u8 __v = (*(volatile u8 *) (addr)); __v; })
+    ({ u8 __v = (*(__force volatile u8 *) (addr)); __v; })
 #define in_be16(addr) \
-    ({ u16 __v = (*(volatile u16 *) (addr)); __v; })
+    ({ u16 __v = (*(__force volatile u16 *) (addr)); __v; })
 #define in_be32(addr) \
-    ({ u32 __v = (*(volatile u32 *) (addr)); __v; })
+    ({ u32 __v = (*(__force volatile u32 *) (addr)); __v; })
 #define in_le16(addr) \
-    ({ u16 __v = le16_to_cpu(*(volatile u16 *) (addr)); __v; })
+    ({ u16 __v = le16_to_cpu(*(__force volatile u16 *) (addr)); __v; })
 #define in_le32(addr) \
-    ({ u32 __v = le32_to_cpu(*(volatile u32 *) (addr)); __v; })
+    ({ u32 __v = le32_to_cpu(*(__force volatile u32 *) (addr)); __v; })
 
-#define out_8(addr,b) (void)((*(volatile u8 *) (addr)) = (b))
-#define out_be16(addr,w) (void)((*(volatile u16 *) (addr)) = (w))
-#define out_be32(addr,l) (void)((*(volatile u32 *) (addr)) = (l))
-#define out_le16(addr,w) (void)((*(volatile u16 *) (addr)) = cpu_to_le16(w))
-#define out_le32(addr,l) (void)((*(volatile u32 *) (addr)) = cpu_to_le32(l))
+#define out_8(addr,b) (void)((*(__force volatile u8 *) (addr)) = (b))
+#define out_be16(addr,w) (void)((*(__force volatile u16 *) (addr)) = (w))
+#define out_be32(addr,l) (void)((*(__force volatile u32 *) (addr)) = (l))
+#define out_le16(addr,w) (void)((*(__force volatile u16 *) (addr)) = cpu_to_le16(w))
+#define out_le32(addr,l) (void)((*(__force volatile u32 *) (addr)) = cpu_to_le32(l))
 
 #define raw_inb in_8
 #define raw_inw in_be16
@@ -54,7 +54,7 @@ extern void __iounmap(void *addr, unsigned long size);
 #define raw_outw(val,port) out_be16((port),(val))
 #define raw_outl(val,port) out_be32((port),(val))
 
-static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
+static inline void raw_insb(volatile u8 __iomem *port, u8 *buf, unsigned int len)
 {
        unsigned int i;
 
@@ -62,7 +62,7 @@ static inline void raw_insb(volatile u8 *port, u8 *buf, unsigned int len)
                *buf++ = in_8(port);
 }
 
-static inline void raw_outsb(volatile u8 *port, const u8 *buf,
+static inline void raw_outsb(volatile u8 __iomem *port, const u8 *buf,
                             unsigned int len)
 {
        unsigned int i;
@@ -71,7 +71,7 @@ static inline void raw_outsb(volatile u8 *port, const u8 *buf,
                out_8(port, *buf++);
 }
 
-static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
+static inline void raw_insw(volatile u16 __iomem *port, u16 *buf, unsigned int nr)
 {
        unsigned int tmp;
 
@@ -110,7 +110,7 @@ static inline void raw_insw(volatile u16 *port, u16 *buf, unsigned int nr)
        }
 }
 
-static inline void raw_outsw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw(volatile u16 __iomem *port, const u16 *buf,
                             unsigned int nr)
 {
        unsigned int tmp;
@@ -150,7 +150,7 @@ static inline void raw_outsw(volatile u16 *port, const u16 *buf,
        }
 }
 
-static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
+static inline void raw_insl(volatile u32 __iomem *port, u32 *buf, unsigned int nr)
 {
        unsigned int tmp;
 
@@ -189,7 +189,7 @@ static inline void raw_insl(volatile u32 *port, u32 *buf, unsigned int nr)
        }
 }
 
-static inline void raw_outsl(volatile u32 *port, const u32 *buf,
+static inline void raw_outsl(volatile u32 __iomem *port, const u32 *buf,
                             unsigned int nr)
 {
        unsigned int tmp;
@@ -230,7 +230,7 @@ static inline void raw_outsl(volatile u32 *port, const u32 *buf,
 }
 
 
-static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
+static inline void raw_insw_swapw(volatile u16 __iomem *port, u16 *buf,
                                  unsigned int nr)
 {
     if ((nr) % 8)
@@ -283,7 +283,7 @@ static inline void raw_insw_swapw(volatile u16 *port, u16 *buf,
                : "d0", "a0", "a1", "d6");
 }
 
-static inline void raw_outsw_swapw(volatile u16 *port, const u16 *buf,
+static inline void raw_outsw_swapw(volatile u16 __iomem *port, const u16 *buf,
                                   unsigned int nr)
 {
     if ((nr) % 8)
index a0cdf908237244d598f2a63104822ea5bba3bb51..b7b7ea20caaba7097438391491de756cad26bbc4 100644 (file)
@@ -144,7 +144,7 @@ struct sigaction {
 #endif /* __KERNEL__ */
 
 typedef struct sigaltstack {
-       void *ss_sp;
+       void __user *ss_sp;
        int ss_flags;
        size_t ss_size;
 } stack_t;
index e974bb072047b2fa9e0072f59da5ba729fb3aed5..5156a28a18d82a722fb9b92879d6c5c413cfeb90 100644 (file)
@@ -211,7 +211,7 @@ static inline unsigned long pte_to_pgoff(pte_t pte)
        return pte.pte & SUN3_PAGE_PGNUM_MASK;
 }
 
-static inline pte_t pgoff_to_pte(inline unsigned off)
+static inline pte_t pgoff_to_pte(unsigned off)
 {
        pte_t pte = { off + SUN3_PAGE_ACCESSED };
        return pte;
index fd838eb14213298e72087e2db892064449599705..bd038fccb64b8c7f6dbb85b545cff2375000074d 100644 (file)
@@ -31,7 +31,6 @@ int sun3_request_irq(unsigned int irq,
                    );
 extern void sun3_init_IRQ (void);
 extern irqreturn_t (*sun3_default_handler[]) (int, void *, struct pt_regs *);
-extern irqreturn_t (*sun3_inthandler[]) (int, void *, struct pt_regs *);
 extern void sun3_free_irq (unsigned int irq, void *dev_id);
 extern void sun3_enable_interrupts (void);
 extern void sun3_disable_interrupts (void);
index fda1eccf10aa0a27f56797487d5ec20cd1e3206a..98a9f79dab29b0c6288cae4fd10e2a9db0355c87 100644 (file)
@@ -208,7 +208,7 @@ static int sun3xflop_request_irq(void)
 
        if(!once) {
                once = 1;
-               error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", 0);
+               error = request_irq(FLOPPY_IRQ, sun3xflop_hardint, SA_INTERRUPT, "floppy", NULL);
                return ((error == 0) ? 0 : -1);
        } else return 0;
 }
@@ -238,7 +238,7 @@ static int sun3xflop_init(void)
        *sun3x_fdc.fcr_r = 0;
 
        /* Success... */
-       floppy_set_flags(0, 1, FD_BROKEN_DCL); // I don't know how to detect this.
+       floppy_set_flags(NULL, 1, FD_BROKEN_DCL); // I don't know how to detect this.
        allowed_drive_mask = 0x01;
        return (int) SUN3X_FDC;
 }
index 9532ca3c45cbea08906689234ad6a3e0f6224515..c4d622a57dfbf5d3bfb9913bfa257bc8c0ab58da 100644 (file)
@@ -37,6 +37,7 @@ struct thread_info {
 #define init_stack             (init_thread_union.stack)
 
 #define task_thread_info(tsk)  (&(tsk)->thread.info)
+#define task_stack_page(tsk)   ((void *)(tsk)->thread_info)
 #define current_thread_info()  task_thread_info(current)
 
 #define __HAVE_THREAD_FUNCTIONS
index f5cedf19cf682c51349cd4b0856162ad0d6a1bf0..2ffd87b0a7694894547e5672655339c3062af058 100644 (file)
@@ -42,6 +42,7 @@ struct exception_table_entry
 ({                                                     \
     int __pu_err;                                      \
     typeof(*(ptr)) __pu_val = (x);                     \
+    __chk_user_ptr(ptr);                               \
     switch (sizeof (*(ptr))) {                         \
     case 1:                                            \
        __put_user_asm(__pu_err, __pu_val, ptr, b);     \
@@ -91,6 +92,7 @@ __asm__ __volatile__                                  \
 ({                                                             \
     int __gu_err;                                              \
     typeof(*(ptr)) __gu_val;                                   \
+    __chk_user_ptr(ptr);                                       \
     switch (sizeof(*(ptr))) {                                  \
     case 1:                                                    \
        __get_user_asm(__gu_err, __gu_val, ptr, b, "=d");       \
@@ -105,7 +107,7 @@ __asm__ __volatile__                                        \
         __gu_err = __constant_copy_from_user(&__gu_val, ptr, 8);  \
         break;                                                  \
     default:                                                   \
-       __gu_val = 0;                                           \
+       __gu_val = (typeof(*(ptr)))0;                           \
        __gu_err = __get_user_bad();                            \
        break;                                                  \
     }                                                          \
@@ -134,7 +136,7 @@ __asm__ __volatile__                                \
      : "m"(*(ptr)), "i" (-EFAULT), "0"(0))
 
 static inline unsigned long
-__generic_copy_from_user(void *to, const void *from, unsigned long n)
+__generic_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
     unsigned long tmp;
     __asm__ __volatile__
@@ -189,7 +191,7 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n)
 }
 
 static inline unsigned long
-__generic_copy_to_user(void *to, const void *from, unsigned long n)
+__generic_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
     unsigned long tmp;
     __asm__ __volatile__
@@ -264,7 +266,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n)
         : "d0", "memory")
 
 static inline unsigned long
-__constant_copy_from_user(void *to, const void *from, unsigned long n)
+__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
     switch (n) {
     case 0:
@@ -520,7 +522,7 @@ __constant_copy_from_user(void *to, const void *from, unsigned long n)
 #define __copy_from_user_inatomic __copy_from_user
 
 static inline unsigned long
-__constant_copy_to_user(void *to, const void *from, unsigned long n)
+__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
     switch (n) {
     case 0:
@@ -766,7 +768,7 @@ __constant_copy_to_user(void *to, const void *from, unsigned long n)
  */
 
 static inline long
-strncpy_from_user(char *dst, const char *src, long count)
+strncpy_from_user(char *dst, const char __user *src, long count)
 {
     long res;
     if (count == 0) return count;
@@ -799,11 +801,11 @@ strncpy_from_user(char *dst, const char *src, long count)
  *
  * Return 0 on exception, a value greater than N if too long
  */
-static inline long strnlen_user(const char *src, long n)
+static inline long strnlen_user(const char __user *src, long n)
 {
        long res;
 
-       res = -(long)src;
+       res = -(unsigned long)src;
        __asm__ __volatile__
                ("1:\n"
                 "   tstl %2\n"
@@ -842,7 +844,7 @@ static inline long strnlen_user(const char *src, long n)
  */
 
 static inline unsigned long
-clear_user(void *to, unsigned long n)
+clear_user(void __user *to, unsigned long n)
 {
     __asm__ __volatile__
        ("   tstl %1\n"
index cf816588bedb9300fcd968aa5b1bf83c6ffbd4ec..5ce97c22b582e316aac672db4887ee8486aa239d 100644 (file)
 #define z_memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
 #define z_memcpy_toio(a,b,c)   memcpy((void *)(a),(b),(c))
 
-static inline void *z_remap_nocache_ser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_ser(unsigned long physaddr,
                                        unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-static inline void *z_remap_nocache_nonser(unsigned long physaddr,
+static inline void __iomem *z_remap_nocache_nonser(unsigned long physaddr,
                                           unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_NONSER);
 }
 
-static inline void *z_remap_writethrough(unsigned long physaddr,
+static inline void __iomem *z_remap_writethrough(unsigned long physaddr,
                                         unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
-static inline void *z_remap_fullcache(unsigned long physaddr,
+static inline void __iomem *z_remap_fullcache(unsigned long physaddr,
                                      unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_FULL_CACHING);
index 5a9f9c297f79f1c122c6c83b41f11e967017dbf6..27c90afd3339cfc29054635bcf301dfe180e259e 100644 (file)
@@ -38,7 +38,6 @@ extern void (*mach_power_off)( void );
 extern unsigned long (*mach_hd_init) (unsigned long, unsigned long);
 extern void (*mach_hd_setup)(char *, int *);
 extern long mach_max_dma_address;
-extern void (*mach_floppy_setup)(char *, int *);
 extern void (*mach_floppy_eject)(void);
 extern void (*mach_heartbeat) (int);
 extern void (*mach_l2_flush) (int);
index 7b9a3fa3af5d1bd45809958c8b809eb474ea6bbb..b8f009edf2b2cf5a2c88c756a7e3151f68a8e6e9 100644 (file)
@@ -75,8 +75,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
                                __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER))
 #define free_thread_info(ti)   free_pages((unsigned long) (ti), THREAD_SIZE_ORDER)
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
 #endif /* __ASSEMBLY__ */
 
 #define        PREEMPT_ACTIVE  0x4000000
index 82141c711c338ad5a64c9cef3009b0e99205e208..59d26b52ba321af8775bfee6edcb9de3191f1252 100644 (file)
@@ -27,7 +27,6 @@ extern unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
        .max_interval           = 32,                   \
        .busy_factor            = 32,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000),            \
        .cache_nice_tries       = 1,                    \
        .per_cpu_gain           = 100,                  \
        .flags                  = SD_LOAD_BALANCE       \
index de53055a62aea78884d5de09f52a4fa5829bc8cd..39d2bd50fecede26d52090657e08b5fe71e29f39 100644 (file)
@@ -200,11 +200,11 @@ extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long
 
 unsigned long get_wchan(struct task_struct *p);
 
-#define __PT_REG(reg) ((long)&((struct pt_regs *)0)->reg - sizeof(struct pt_regs))
-#define __KSTK_TOS(tsk) ((unsigned long)(tsk->thread_info) + THREAD_SIZE - 32)
-#define KSTK_EIP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_epc)))
-#define KSTK_ESP(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(regs[29])))
-#define KSTK_STATUS(tsk) (*(unsigned long *)(__KSTK_TOS(tsk) + __PT_REG(cp0_status)))
+#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + THREAD_SIZE - 32)
+#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->cp0_epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[29])
+#define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status)
 
 #define cpu_relax()    barrier()
 
index 330c4e497af38d248d3ff5e6a8ff584040ffd6f1..e8e5d414337797bf9e20f0db7006ada9f7b6e533 100644 (file)
@@ -159,11 +159,21 @@ struct task_struct;
 do {                                                                   \
        if (cpu_has_dsp)                                                \
                __save_dsp(prev);                                       \
-       (last) = resume(prev, next, next->thread_info);                 \
+       (last) = resume(prev, next, task_thread_info(next));            \
        if (cpu_has_dsp)                                                \
                __restore_dsp(current);                                 \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
        __u32 retval;
index e6c24472e03fa64ffd95837137b0528e12083df5..1612b3fe10801471ee0f4f1c6d19fed91c1c3934 100644 (file)
@@ -97,8 +97,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
 #endif
 
 #define free_thread_info(info) kfree(info)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* !__ASSEMBLY__ */
 
index f3928d3a80cb01b2afd25569a59fd37e178a4576..a5a973c0c07f55f272d4fccd7c6587f516afff84 100644 (file)
@@ -49,6 +49,15 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
        (last) = _switch_to(prev, next);                        \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
 
 
 /* interrupt control */
index 57bbb76cb6c1db66000b11f48bddc4309f991b18..ac32f140b83aa1135259d63d615eb4baf4eafdac 100644 (file)
@@ -43,9 +43,6 @@ struct thread_info {
 #define alloc_thread_info(tsk) ((struct thread_info *) \
                        __get_free_pages(GFP_KERNEL, THREAD_ORDER))
 #define free_thread_info(ti)    free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti)     get_task_struct((ti)->task)
-#define put_thread_info(ti)     put_task_struct((ti)->task)
-
 
 /* how to get the thread information struct from C */
 #define current_thread_info()  ((struct thread_info *)mfctl(30))
index 248f9aec959c69a93f43a6bc15ceb94e3bd90bc0..147a38dcc766ed20129a497080bbcb42cd1026b2 100644 (file)
@@ -36,7 +36,7 @@ static __inline__ int atomic_add_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_add_return\n\
        add     %0,%1,%0\n"
        PPC405_ERR77(0,%2)
@@ -72,7 +72,7 @@ static __inline__ int atomic_sub_return(int a, atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2         # atomic_sub_return\n\
        subf    %0,%1,%0\n"
        PPC405_ERR77(0,%2)
@@ -106,7 +106,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_inc_return\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%1)
@@ -150,7 +150,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_return\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
@@ -176,19 +176,19 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                                                      \
-({                                                                                                                      \
-          int c, old;                                                                                   \
-          c = atomic_read(v);                                                                   \
-          for (;;) {                                                                                     \
-                          if (unlikely(c == (u)))                                               \
-                                          break;                                                                 \
-                          old = atomic_cmpxchg((v), c, c + (a));                 \
-                          if (likely(old == c))                                                   \
-                                          break;                                                                 \
-                          c = old;                                                                             \
-          }                                                                                                       \
-          c != (u);                                                                                       \
+#define atomic_add_unless(v, a, u)                     \
+({                                                     \
+       int c, old;                                     \
+       c = atomic_read(v);                             \
+       for (;;) {                                      \
+               if (unlikely(c == (u)))                 \
+                       break;                          \
+               old = atomic_cmpxchg((v), c, c + (a));  \
+               if (likely(old == c))                   \
+                       break;                          \
+               c = old;                                \
+       }                                               \
+       c != (u);                                       \
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
@@ -204,7 +204,7 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
        int t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%1         # atomic_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n"
@@ -253,7 +253,7 @@ static __inline__ long atomic64_add_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2         # atomic64_add_return\n\
        add     %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
@@ -287,7 +287,7 @@ static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2         # atomic64_sub_return\n\
        subf    %0,%1,%0\n\
        stdcx.  %0,0,%2 \n\
@@ -319,7 +319,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_inc_return\n\
        addic   %0,%0,1\n\
        stdcx.  %0,0,%1 \n\
@@ -361,7 +361,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_dec_return\n\
        addic   %0,%0,-1\n\
        stdcx.  %0,0,%1\n\
@@ -386,7 +386,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
        long t;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%1         # atomic64_dec_if_positive\n\
        addic.  %0,%0,-1\n\
        blt-    2f\n\
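
For reference, the atomic_add_unless() macro reformatted above is a cmpxchg retry loop: read the counter, give up if it already holds the forbidden value, otherwise try to install c + a and retry if another CPU raced in. A minimal user-space sketch of the same logic in C11 atomics (an illustration only, not the kernel code):

    #include <stdatomic.h>
    #include <stdio.h>

    /* Add 'a' to '*v' unless it currently equals 'u'; returns non-zero
     * when the add happened, mirroring the macro's final 'c != (u)'. */
    static int add_unless(atomic_int *v, int a, int u)
    {
            int c = atomic_load(v);

            for (;;) {
                    if (c == u)
                            return 0;
                    /* on failure, 'c' is refreshed with the current value */
                    if (atomic_compare_exchange_weak(v, &c, c + a))
                            return 1;
            }
    }

    int main(void)
    {
            atomic_int v = 1, z = 0;
            int r;

            r = add_unless(&v, 1, 0);
            printf("%d %d\n", r, atomic_load(&v));  /* 1 2 */
            r = add_unless(&z, 1, 0);
            printf("%d %d\n", r, atomic_load(&z));  /* 0 0 */
            return 0;
    }
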
index 1996eaa8aeae9abe0644277c87e384809d378957..bf6941a810b85badf664f56343a59b50cbdec289 100644 (file)
@@ -112,7 +112,7 @@ static __inline__ int test_and_set_bit(unsigned long nr,
        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:"   PPC_LLARX "%0,0,%3              # test_and_set_bit\n"
        "or     %1,%0,%2 \n"
        PPC405_ERR77(0,%3)
@@ -134,7 +134,7 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:"   PPC_LLARX "%0,0,%3              # test_and_clear_bit\n"
        "andc   %1,%0,%2 \n"
        PPC405_ERR77(0,%3)
@@ -156,7 +156,7 @@ static __inline__ int test_and_change_bit(unsigned long nr,
        unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:"   PPC_LLARX "%0,0,%3              # test_and_change_bit\n"
        "xor    %1,%0,%2 \n"
        PPC405_ERR77(0,%3)
index ef6ead34a773c358aa45dc6e802df198ff4d4898..64210549f56b2b52cca83cddd84c489daec3c4e4 100644 (file)
@@ -19,6 +19,7 @@
 #define PPC_FEATURE_POWER5             0x00040000
 #define PPC_FEATURE_POWER5_PLUS                0x00020000
 #define PPC_FEATURE_CELL               0x00010000
+#define PPC_FEATURE_BOOKE              0x00008000
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
@@ -31,11 +32,11 @@ struct cpu_spec;
 typedef        void (*cpu_setup_t)(unsigned long offset, struct cpu_spec* spec);
 
 enum powerpc_oprofile_type {
-       INVALID = 0,
-       RS64 = 1,
-       POWER4 = 2,
-       G4 = 3,
-       BOOKE = 4,
+       PPC_OPROFILE_INVALID = 0,
+       PPC_OPROFILE_RS64 = 1,
+       PPC_OPROFILE_POWER4 = 2,
+       PPC_OPROFILE_G4 = 3,
+       PPC_OPROFILE_BOOKE = 4,
 };
 
 struct cpu_spec {
@@ -64,6 +65,9 @@ struct cpu_spec {
 
        /* Processor specific oprofile operations */
        enum powerpc_oprofile_type oprofile_type;
+
+       /* Name of processor class, for the ELF AT_PLATFORM entry */
+       char            *platform;
 };
 
 extern struct cpu_spec         *cur_cpu_spec;
index 45f2af6f89c47a209ac17d3207901b6260c404ee..94d228f9c6ac0a799e2b431f6464b5e124c1258e 100644 (file)
@@ -221,20 +221,18 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
    instruction set this cpu supports.  This could be done in userspace,
    but it's not easy, and we've already done it here.  */
 # define ELF_HWCAP     (cur_cpu_spec->cpu_user_features)
-#ifdef __powerpc64__
-# define ELF_PLAT_INIT(_r, load_addr)  do {    \
-       _r->gpr[2] = load_addr;                 \
-} while (0)
-#endif /* __powerpc64__ */
 
 /* This yields a string that ld.so will use to load implementation
    specific libraries for optimization.  This is more specific in
-   intent than poking at uname or /proc/cpuinfo.
+   intent than poking at uname or /proc/cpuinfo.  */
 
-   For the moment, we have only optimizations for the Intel generations,
-   but that could change... */
+#define ELF_PLATFORM   (cur_cpu_spec->platform)
 
-#define ELF_PLATFORM   (NULL)
+#ifdef __powerpc64__
+# define ELF_PLAT_INIT(_r, load_addr)  do {    \
+       _r->gpr[2] = load_addr;                 \
+} while (0)
+#endif /* __powerpc64__ */
 
 #ifdef __KERNEL__
 
index f0319d50b129e034cebbbd2da463d0e8a1242723..39e85f320a76e74d010e8ce76bcec130b16245a7 100644 (file)
@@ -11,7 +11,7 @@
 
 #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
   __asm__ __volatile ( \
-       SYNC_ON_SMP \
+       LWSYNC_ON_SMP \
 "1:    lwarx   %0,0,%2\n" \
        insn \
        PPC405_ERR77(0, %2) \
index da7af5a720e098adf14ad1ccf9b0817e9d429fde..38ca9ad6110d6640b37a1b0e06bd8ed50599abbc 100644 (file)
@@ -6,7 +6,10 @@
 
 #define H_Success      0
 #define H_Busy         1       /* Hardware busy -- retry later */
+#define H_Closed       2       /* Resource closed */
 #define H_Constrained  4       /* Resource request constrained to max allowed */
+#define H_InProgress   14      /* Kind of like busy */
+#define H_Continue     18      /* Returned from H_Join on success */
 #define H_LongBusyStartRange   9900  /* Start of long busy range */
 #define H_LongBusyOrder1msec   9900  /* Long busy, hint that 1msec is a good time to retry */
 #define H_LongBusyOrder10msec  9901  /* Long busy, hint that 10msec is a good time to retry */
 #define H_REGISTER_VTERM       0x154
 #define H_FREE_VTERM           0x158
 #define H_POLL_PENDING         0x1D8
+#define H_JOIN                 0x298
+#define H_ENABLE_CRQ           0x2B0
 
 #ifndef __ASSEMBLY__
 
index 8a8393e507747fd8e3c089df81cc539782fb63cd..d5677cbec200fc748ee9802f969760c7f4510707 100644 (file)
@@ -64,25 +64,6 @@ extern void iommu_free_table(struct device_node *dn);
 
 #endif /* CONFIG_PPC_MULTIPLATFORM */
 
-#ifdef CONFIG_PPC_PSERIES
-
-/* Creates table for an individual device node */
-extern void iommu_devnode_init_pSeries(struct device_node *dn);
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef CONFIG_PPC_ISERIES
-
-/* Creates table for an individual device node */
-extern void iommu_devnode_init_iSeries(struct device_node *dn);
-/* Get table parameters from HV */
-extern void iommu_table_getparms_iSeries(unsigned long busno,
-                                        unsigned char slotno,
-                                        unsigned char virtbus,
-                                        struct iommu_table* tbl);
-
-#endif /* CONFIG_PPC_ISERIES */
-
 /* Initializes an iommu_table based in values set in the passed-in
  * structure
  */
index e9f831c9a5e507111bfd87362410cbe7835c5d3c..162d653ad51fc430d692d4436707eeecb6149af7 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCall.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -15,8 +14,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-/*
+ *
  * This file contains the "hypervisor call" interface which is used to
  * drive the hypervisor from the OS.
  */
index 46763a30590acef0ed4c8cac0b29a1d0d5651749..4cec4762076dffa99ce78ddc86401f2721af0ede 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCallEvent.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -15,8 +14,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
- */
-/*
+ *
  * This file contains the "hypervisor call" interface which is used to
  * drive the hypervisor from the OS.
  */
@@ -33,32 +31,9 @@ typedef u8 HvLpEvent_Type;
 typedef u8 HvLpEvent_AckInd;
 typedef u8 HvLpEvent_AckType;
 
-struct HvCallEvent_PackedParms {
-       u8              xAckType:1;
-       u8              xAckInd:1;
-       u8              xRsvd:1;
-       u8              xTargetLp:5;
-       u8              xType;
-       u16             xSubtype;
-       HvLpInstanceId  xSourceInstId;
-       HvLpInstanceId  xTargetInstId;
-};
-
 typedef u8 HvLpDma_Direction;
 typedef u8 HvLpDma_AddressType;
 
-struct HvCallEvent_PackedDmaParms {
-       u8              xDirection:1;
-       u8              xLocalAddrType:1;
-       u8              xRemoteAddrType:1;
-       u8              xRsvd1:5;
-       HvLpIndex       xRemoteLp;
-       u8              xType;
-       u8              xRsvd2;
-       HvLpInstanceId  xLocalInstId;
-       HvLpInstanceId  xRemoteInstId;
-};
-
 typedef u64 HvLpEvent_Rc;
 typedef u64 HvLpDma_Rc;
 
@@ -92,11 +67,8 @@ static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
 static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
                char *eventStackAddr, u32 eventStackSize)
 {
-       u64 abs_addr;
-
-       abs_addr = virt_to_abs(eventStackAddr);
-       HvCall3(HvCallEventSetLpEventStack, queueIndex, abs_addr,
-                       eventStackSize);
+       HvCall3(HvCallEventSetLpEventStack, queueIndex,
+                       virt_to_abs(eventStackAddr), eventStackSize);
 }
 
 static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
@@ -108,14 +80,7 @@ static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
 
 static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
 {
-       u64 abs_addr;
-
-#ifdef DEBUG_SENDEVENT
-       printk("HvCallEvent_signalLpEvent: *event = %016lx\n ",
-                       (unsigned long)event);
-#endif
-       abs_addr = virt_to_abs(event);
-       return HvCall1(HvCallEventSignalLpEvent, abs_addr);
+       return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
 }
 
 static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
@@ -127,17 +92,21 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
 {
        /* Pack the misc bits into a single Dword to pass to PLIC */
        union {
-               struct HvCallEvent_PackedParms  parms;
+               struct {
+                       u8              ack_and_target;
+                       u8              type;
+                       u16             subtype;
+                       HvLpInstanceId  src_inst;
+                       HvLpInstanceId  target_inst;
+               } parms;
                u64             dword;
        } packed;
-       packed.parms.xAckType   = ackType;
-       packed.parms.xAckInd    = ackInd;
-       packed.parms.xRsvd      = 0;
-       packed.parms.xTargetLp  = targetLp;
-       packed.parms.xType      = type;
-       packed.parms.xSubtype   = subtype;
-       packed.parms.xSourceInstId      = sourceInstanceId;
-       packed.parms.xTargetInstId      = targetInstanceId;
+
+       packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
+       packed.parms.type = type;
+       packed.parms.subtype = subtype;
+       packed.parms.src_inst = sourceInstanceId;
+       packed.parms.target_inst = targetInstanceId;
 
        return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
                        correlationToken, eventData1, eventData2,
@@ -146,18 +115,12 @@ static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
 
 static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
 {
-       u64 abs_addr;
-
-       abs_addr = virt_to_abs(event);
-       return HvCall1(HvCallEventAckLpEvent, abs_addr);
+       return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
 }
 
 static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
 {
-       u64 abs_addr;
-
-       abs_addr = virt_to_abs(event);
-       return HvCall1(HvCallEventCancelLpEvent, abs_addr);
+       return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
 }
 
 static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
@@ -195,59 +158,34 @@ static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
 {
        /* Pack the misc bits into a single Dword to pass to PLIC */
        union {
-               struct HvCallEvent_PackedDmaParms       parms;
+               struct {
+                       u8              flags;
+                       HvLpIndex       remote;
+                       u8              type;
+                       u8              reserved;
+                       HvLpInstanceId  local_inst;
+                       HvLpInstanceId  remote_inst;
+               } parms;
                u64             dword;
        } packed;
 
-       packed.parms.xDirection         = direction;
-       packed.parms.xLocalAddrType     = localAddressType;
-       packed.parms.xRemoteAddrType    = remoteAddressType;
-       packed.parms.xRsvd1             = 0;
-       packed.parms.xRemoteLp          = remoteLp;
-       packed.parms.xType              = type;
-       packed.parms.xRsvd2             = 0;
-       packed.parms.xLocalInstId       = localInstanceId;
-       packed.parms.xRemoteInstId      = remoteInstanceId;
+       packed.parms.flags = (direction << 7) |
+               (localAddressType << 6) | (remoteAddressType << 5);
+       packed.parms.remote = remoteLp;
+       packed.parms.type = type;
+       packed.parms.reserved = 0;
+       packed.parms.local_inst = localInstanceId;
+       packed.parms.remote_inst = remoteInstanceId;
 
        return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
                        remoteBufList, transferLength);
 }
 
-static inline HvLpDma_Rc HvCallEvent_dmaSingle(HvLpEvent_Type type,
-               HvLpIndex remoteLp, HvLpDma_Direction direction,
-               HvLpInstanceId localInstanceId,
-               HvLpInstanceId remoteInstanceId,
-               HvLpDma_AddressType localAddressType,
-               HvLpDma_AddressType remoteAddressType,
-               u64 localAddrOrTce, u64 remoteAddrOrTce, u32 transferLength)
-{
-       /* Pack the misc bits into a single Dword to pass to PLIC */
-       union {
-               struct HvCallEvent_PackedDmaParms       parms;
-               u64             dword;
-       } packed;
-
-       packed.parms.xDirection         = direction;
-       packed.parms.xLocalAddrType     = localAddressType;
-       packed.parms.xRemoteAddrType    = remoteAddressType;
-       packed.parms.xRsvd1             = 0;
-       packed.parms.xRemoteLp          = remoteLp;
-       packed.parms.xType              = type;
-       packed.parms.xRsvd2             = 0;
-       packed.parms.xLocalInstId       = localInstanceId;
-       packed.parms.xRemoteInstId      = remoteInstanceId;
-
-       return (HvLpDma_Rc)HvCall4(HvCallEventDmaSingle, packed.dword,
-                       localAddrOrTce, remoteAddrOrTce, transferLength);
-}
-
 static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
                u32 length, HvLpDma_Direction dir)
 {
-       u64 abs_addr;
-
-       abs_addr = virt_to_abs(local);
-       return HvCall4(HvCallEventDmaToSp, abs_addr, remote, length, dir);
+       return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
+                       length, dir);
 }
 
 #endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
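
The unions above now pack the PLIC parameter bytes by hand rather than through big-endian bitfields. A small host-side C sketch, with field widths taken from the removed HvCallEvent_PackedParms and arbitrary sample values, shows where each field lands in the first byte:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t ackType = 1, ackInd = 1, targetLp = 5;  /* arbitrary sample values */

            /* old layout: xAckType:1 xAckInd:1 xRsvd:1 xTargetLp:5, MSB first */
            uint8_t ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;

            printf("ack_and_target = 0x%02x\n", ack_and_target);   /* 0xc5 */
            return 0;
    }
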
index dec7e9d9ab7886c982f4fe97df5c301b025ec2d4..f5d2109592504cf319209241373ad91008755da5 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvCallSc.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index bc00f036bca03d6f1abd9cb640964e6332da3b14..df8b20739719f87a80de2bf8a37a7fb3da25e4b5 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvLpConfig.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index 499ab1ad01854ae9deefaa3bb2b38ea17ec46ab1..4065a4de4935f8ec86e19079dab71538542f53cf 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvLpEvent.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * partitions through PLIC.
  */
 
-struct HvEventFlags {
-       u8      xValid:1;       /* Indicates a valid request    x00-x00 */
-       u8      xRsvd1:4;       /* Reserved                     ... */
-       u8      xAckType:1;     /* Immediate or deferred        ... */
-       u8      xAckInd:1;      /* Indicates if ACK required    ... */
-       u8      xFunction:1;    /* Interrupt or Acknowledge     ... */
-};
-
-
 struct HvLpEvent {
-       struct HvEventFlags xFlags;     /* Event flags                x00-x00 */
+       u8      flags;                  /* Event flags                x00-x00 */
        u8      xType;                  /* Type of message            x01-x01 */
        u16     xSubtype;               /* Subtype for event          x02-x03 */
        u8      xSourceLp;              /* Source LP                  x04-x04 */
@@ -126,6 +116,11 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
 #define HvLpEvent_AckType_ImmediateAck 0
 #define HvLpEvent_AckType_DeferredAck 1
 
+#define HV_LP_EVENT_INT                        0x01
+#define HV_LP_EVENT_DO_ACK             0x02
+#define HV_LP_EVENT_DEFERRED_ACK       0x04
+#define HV_LP_EVENT_VALID              0x80
+
 #define HvLpDma_Direction_LocalToRemote 0
 #define HvLpDma_Direction_RemoteToLocal 1
 
@@ -139,4 +134,29 @@ extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
 #define HvLpDma_Rc_InvalidAddress 4
 #define HvLpDma_Rc_InvalidLength 5
 
+static inline int hvlpevent_is_valid(struct HvLpEvent *h)
+{
+       return h->flags & HV_LP_EVENT_VALID;
+}
+
+static inline void hvlpevent_invalidate(struct HvLpEvent *h)
+{
+       h->flags &= ~ HV_LP_EVENT_VALID;
+}
+
+static inline int hvlpevent_is_int(struct HvLpEvent *h)
+{
+       return h->flags & HV_LP_EVENT_INT;
+}
+
+static inline int hvlpevent_is_ack(struct HvLpEvent *h)
+{
+       return !hvlpevent_is_int(h);
+}
+
+static inline int hvlpevent_need_ack(struct HvLpEvent *h)
+{
+       return h->flags & HV_LP_EVENT_DO_ACK;
+}
+
 #endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
index c38f7e3d01dceab2c0ba4a67646200a2b775e0ae..c3e6d2a1d1c3d4b6535914b779953d76b3fbf401 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * HvTypes.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index 56b2113ff0f5743fa8063067e9f818de6b5065e8..496aa852b6170f8738442ce7f9b699bef07c1883 100644 (file)
@@ -6,7 +6,7 @@
 #ifdef CONFIG_PPC_ISERIES
 #include <linux/types.h>
 /*
- * File iSeries_io.h created by Allan Trautman on Thu Dec 28 2000.
+ * Created by Allan Trautman on Thu Dec 28 2000.
  *
  * Remaps the io.h for the iSeries Io
  * Copyright (C) 2000  Allan H Trautman, IBM Corporation
@@ -32,6 +32,7 @@
  * End Change Activity
  */
 
+#ifdef CONFIG_PCI
 extern u8   iSeries_Read_Byte(const volatile void __iomem * IoAddress);
 extern u16  iSeries_Read_Word(const volatile void __iomem * IoAddress);
 extern u32  iSeries_Read_Long(const volatile void __iomem * IoAddress);
@@ -44,6 +45,17 @@ extern void iSeries_memcpy_toio(volatile void __iomem *dest, void *source,
                size_t n);
 extern void iSeries_memcpy_fromio(void *dest,
                const volatile void __iomem *source, size_t n);
+#else
+static inline u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
+{
+       return 0xff;
+}
+
+static inline void iSeries_Write_Byte(u8 IoData,
+               volatile void __iomem *IoAddress)
+{
+}
+#endif /* CONFIG_PCI */
 
 #endif /* CONFIG_PPC_ISERIES */
 #endif /* _ASM_POWERPC_ISERIES_ISERIES_IO_H */
index 66a17a230c529f1d795f7c5b64a5406dfa88bbaa..304a609ae21a979b7fa807fa97bdf615c38a9b9c 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItExtVpdPanel.h
  * Copyright (C) 2002  Dave Boutcher IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index c3ef1de45d82e751488ab966f9267ceafd6f26d1..4fdcf052927fff690057ecb82460f5bb9c575160 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItLpNaca.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -37,17 +36,13 @@ struct ItLpNaca {
        u8      xLpIndex;               // LP Index                     x0B-x0B
        u16     xMaxLpQueues;           // Number of allocated queues   x0C-x0D
        u16     xLpQueueOffset;         // Offset to start of LP queues x0E-x0F
-       u8      xPirEnvironMode:8;      // Piranha or hardware          x10-x10
-       u8      xPirConsoleMode:8;      // Piranha console indicator    x11-x11
-       u8      xPirDasdMode:8;         // Piranha dasd indicator       x12-x12
+       u8      xPirEnvironMode;        // Piranha or hardware          x10-x10
+       u8      xPirConsoleMode;        // Piranha console indicator    x11-x11
+       u8      xPirDasdMode;           // Piranha dasd indicator       x12-x12
        u8      xRsvd1_0[5];            // Reserved for Piranha related x13-x17
-       u8      xLparInstalled:1;       // Is LPAR installed on system  x18-x1F
-       u8      xSysPartitioned:1;      // Is the system partitioned    ...
-       u8      xHwSyncedTBs:1;         // Hardware synced TBs          ...
-       u8      xIntProcUtilHmt:1;      // Utilize HMT for interrupts   ...
-       u8      xRsvd1_1:4;             // Reserved                     ...
-       u8      xSpVpdFormat:8;         // VPD areas are in CSP format  ...
-       u8      xIntProcRatio:8;        // Ratio of int procs to procs  ...
+       u8      flags;                  // flags, see below             x18-x1F
+       u8      xSpVpdFormat;           // VPD areas are in CSP format  ...
+       u8      xIntProcRatio;          // Ratio of int procs to procs  ...
        u8      xRsvd1_2[5];            // Reserved                     ...
        u16     xRsvd1_3;               // Reserved                     x20-x21
        u16     xPlicVrmIndex;          // VRM index of PLIC            x22-x23
@@ -77,4 +72,9 @@ struct ItLpNaca {
 
 extern struct ItLpNaca         itLpNaca;
 
+#define ITLPNACA_LPAR          0x80    /* Is LPAR installed on the system */
+#define ITLPNACA_PARTITIONED   0x40    /* Is the system partitioned */
+#define ITLPNACA_HWSYNCEDTBS   0x20    /* Hardware synced TBs */
+#define ITLPNACA_HMTINT                0x10    /* Utilize HMT for interrupts */
+
 #endif /* _ASM_POWERPC_ISERIES_IT_LP_NACA_H */
index a60d03afbf95be5b79f61f4d7b0607ae382afcf8..b7c6fc12cce22e0cb90da14178f9071693a82bc6 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItLpQueue.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index 81824e1bb7675ddf70f797d47d0946ff721560bb..5403b756f654da7f0448264420bad2332a4d7ca7 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * ItLpRegSave.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
@@ -83,4 +82,4 @@ struct ItLpRegSave {
 
 extern struct ItLpRegSave iseries_reg_save[];
 
-#endif /* _ITLPREGSAVE_H */
+#endif /* _ASM_POWERPC_ISERIES_IT_LP_REG_SAVE_H */
index 84fc321615bff77949bf7ff6faf268c7fb08b6f1..2ec384d66abb90578e897df9b0fd42f8cc5c058e 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * LparMap.h
  * Copyright (C) 2001  Mike Corrigan IBM Corporation
  *
  * This program is free software; you can redistribute it and/or modify
index e7bd57a03fb17a20c38b7cb92debb5cec5eb126c..857e5202fc7847262ffa22c3f397181b68a20bc6 100644 (file)
@@ -1,5 +1,4 @@
 /*
- * mf.h
  * Copyright (C) 2001  Troy D. Armstrong IBM Corporation
  * Copyright (C) 2004  Stephen Rothwell IBM Corporation
  *
index 7e3a469420ddbd7e7592749bd7d7f54f4c3909d6..72a97d37aac35d52a5a7a7b43acdd6dc6bb47534 100644 (file)
@@ -1,5 +1,4 @@
 /* -*- linux-c -*-
- *  drivers/char/vio.h
  *
  *  iSeries Virtual I/O Message Path header
  *
index ff82ea7c48292bcfcf194f06d1a7c88e5459b370..cd9f11f1ef14b7ac8f44d2868f5a35b6c7548eca 100644 (file)
@@ -29,7 +29,9 @@
 //----------------------------------------------------------------------------
 #include <asm/types.h>
 
-struct lppaca {
+/* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
+ * alignment is sufficient to prevent this */
+struct __attribute__((__aligned__(0x400))) lppaca {
 //=============================================================================
 // CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
 // NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,5 +131,7 @@ struct lppaca {
        u8      pmc_save_area[256];     // PMC interrupt Area           x00-xFF
 };
 
+extern struct lppaca lppaca[];
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_LPPACA_H */
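
Moving the 1 KB alignment attribute onto struct lppaca itself preserves the guarantee the comment describes: the structure is well under 1 KB (the paca comment removed elsewhere in this merge put it at 640 bytes), so a 1 KB-aligned copy can never straddle a 4 KB page. A quick host-side check of that arithmetic, using a dummy structure in place of the real lppaca:

    #include <assert.h>
    #include <stdio.h>

    /* stand-in for struct lppaca: under 1 KB of data, 1 KB aligned */
    struct __attribute__((__aligned__(0x400))) dummy {
            char data[640];
    };

    static struct dummy d[8];

    int main(void)
    {
            for (int i = 0; i < 8; i++) {
                    unsigned long start = (unsigned long)&d[i];
                    unsigned long end   = start + sizeof(struct dummy) - 1;

                    /* first and last byte must sit in the same 4 KB page */
                    assert((start >> 12) == (end >> 12));
            }
            printf("no dummy lppaca crosses a page boundary\n");
            return 0;
    }
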
index a64b4d425dab3c4b6041422464da182ed3713009..c9add8f1ad94b1e30d6430de06ad7d1fba5a63b0 100644 (file)
@@ -23,6 +23,7 @@
 
 register struct paca_struct *local_paca asm("r13");
 #define get_paca()     local_paca
+#define get_lppaca()   (get_paca()->lppaca_ptr)
 
 struct task_struct;
 
@@ -95,19 +96,6 @@ struct paca_struct {
        u64 saved_r1;                   /* r1 save for RTAS calls */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
        u8 proc_enabled;                /* irq soft-enable flag */
-
-       /*
-        * iSeries structure which the hypervisor knows about -
-        * this structure should not cross a page boundary.
-        * The vpa_init/register_vpa call is now known to fail if the
-        * lppaca structure crosses a page boundary.
-        * The lppaca is also used on POWER5 pSeries boxes.
-        * The lppaca is 640 bytes long, and cannot readily change
-        * since the hypervisor knows its layout, so a 1kB
-        * alignment will suffice to ensure that it doesn't
-        * cross a page boundary.
-        */
-       struct lppaca lppaca __attribute__((__aligned__(0x400)));
 };
 
 extern struct paca_struct paca[];
index b0d816fe2e27ef3bf0069fa34547782bd19557ec..38de92d41a148a2cdfd1083af2bbe35f41dba3d0 100644 (file)
@@ -142,8 +142,6 @@ void pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus);
 
 extern int pcibios_remove_root_bus(struct pci_controller *phb);
 
-extern void phbs_remap_io(void);
-
 static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
 {
        struct device_node *busdn = bus->sysdata;
index 0dc798d46ea462bc33759f1637f708928c61bbc7..ab8688d39024763ca01cd626035330015a06fab8 100644 (file)
@@ -156,52 +156,56 @@ n:
 #endif
 
 /* 
- * LOADADDR( rn, name )
- *   loads the address of 'name' into 'rn'
+ * LOAD_REG_IMMEDIATE(rn, expr)
+ *   Loads the value of the constant expression 'expr' into register 'rn'
+ *   using immediate instructions only.  Use this when it's important not
+ *   to reference other data (i.e. on ppc64 when the TOC pointer is not
+ *   valid).
  *
- * LOADBASE( rn, name )
- *   loads the address (possibly without the low 16 bits) of 'name' into 'rn'
- *   suitable for base+disp addressing
+ * LOAD_REG_ADDR(rn, name)
+ *   Loads the address of label 'name' into register 'rn'.  Use this when
+ *   you don't particularly need immediate instructions only, but you need
+ *   the whole address in one register (e.g. it's a structure address and
+ *   you want to access various offsets within it).  On ppc32 this is
+ *   identical to LOAD_REG_IMMEDIATE.
+ *
+ * LOAD_REG_ADDRBASE(rn, name)
+ * ADDROFF(name)
+ *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
+ *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
+ *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
+ *   in size, so is suitable for use directly as an offset in load and store
+ *   instructions.  Use this when loading/storing a single word or less as:
+ *      LOAD_REG_ADDRBASE(rX, name)
+ *      ld     rY,ADDROFF(name)(rX)
  */
 #ifdef __powerpc64__
-#define LOADADDR(rn,name) \
-       lis     rn,name##@highest;      \
-       ori     rn,rn,name##@higher;    \
-       rldicr  rn,rn,32,31;            \
-       oris    rn,rn,name##@h;         \
-       ori     rn,rn,name##@l
-
-#define LOADBASE(rn,name)              \
-       ld      rn,name@got(r2)
-
-#define OFF(name)      0
-
-#define SET_REG_TO_CONST(reg, value)                   \
-       lis     reg,(((value)>>48)&0xFFFF);             \
-       ori     reg,reg,(((value)>>32)&0xFFFF);         \
-       rldicr  reg,reg,32,31;                          \
-       oris    reg,reg,(((value)>>16)&0xFFFF);         \
-       ori     reg,reg,((value)&0xFFFF);
-
-#define SET_REG_TO_LABEL(reg, label)                   \
-       lis     reg,(label)@highest;                    \
-       ori     reg,reg,(label)@higher;                 \
-       rldicr  reg,reg,32,31;                          \
-       oris    reg,reg,(label)@h;                      \
-       ori     reg,reg,(label)@l;
+#define LOAD_REG_IMMEDIATE(reg,expr)           \
+       lis     (reg),(expr)@highest;           \
+       ori     (reg),(reg),(expr)@higher;      \
+       rldicr  (reg),(reg),32,31;              \
+       oris    (reg),(reg),(expr)@h;           \
+       ori     (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name)                        \
+       ld      (reg),name@got(r2)
+
+#define LOAD_REG_ADDRBASE(reg,name)    LOAD_REG_ADDR(reg,name)
+#define ADDROFF(name)                  0
 
 /* offsets for stack frame layout */
 #define LRSAVE 16
 
 #else /* 32-bit */
-#define LOADADDR(rn,name) \
-       lis     rn,name@ha;     \
-       addi    rn,rn,name@l
 
-#define LOADBASE(rn,name)      \
-       lis     rn,name@ha
+#define LOAD_REG_IMMEDIATE(reg,expr)           \
+       lis     (reg),(expr)@ha;                \
+       addi    (reg),(reg),(expr)@l;
+
+#define LOAD_REG_ADDR(reg,name)                LOAD_REG_IMMEDIATE(reg, name)
 
-#define OFF(name)      name@l
+#define LOAD_REG_ADDRBASE(reg, name)   lis     (reg),name@ha
+#define ADDROFF(name)                  name@l
 
 /* offsets for stack frame layout */
 #define LRSAVE 4
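
On ppc64, LOAD_REG_IMMEDIATE assembles a 64-bit constant from four 16-bit pieces: lis/ori build the upper half, rldicr shifts it into the top 32 bits, and oris/ori fill in the rest. A host-side C model of that sequence, with shifts and ORs standing in for the instructions (the sign extension done by lis is ignored here, since rldicr discards the upper bits anyway):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* models the lis/ori, rldicr, oris/ori sequence emitted on ppc64 */
    static uint64_t load_reg_immediate(uint64_t expr)
    {
            uint64_t reg;

            reg  = ((expr >> 48) & 0xffff) << 16;   /* lis    reg,expr@highest */
            reg |= (expr >> 32) & 0xffff;           /* ori    reg,reg,expr@higher */
            reg <<= 32;                             /* rldicr reg,reg,32,31 */
            reg |= ((expr >> 16) & 0xffff) << 16;   /* oris   reg,reg,expr@h */
            reg |= expr & 0xffff;                   /* ori    reg,reg,expr@l */
            return reg;
    }

    int main(void)
    {
            uint64_t x = 0x1234567890abcdefULL;

            assert(load_reg_immediate(x) == x);
            printf("0x%016llx\n", (unsigned long long)load_reg_immediate(x));
            return 0;
    }
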
index 329e9bf62260cd69c074430224b547855c70bd12..5b2bd4eefb01870e4a31647232331a1d10bbd962 100644 (file)
@@ -87,6 +87,7 @@ struct device_node {
        char    *full_name;
 
        struct  property *properties;
+       struct  property *deadprops; /* removed properties */
        struct  device_node *parent;
        struct  device_node *child;
        struct  device_node *sibling;
@@ -135,6 +136,9 @@ extern struct device_node *of_find_all_nodes(struct device_node *prev);
 extern struct device_node *of_get_parent(const struct device_node *node);
 extern struct device_node *of_get_next_child(const struct device_node *node,
                                             struct device_node *prev);
+extern struct property *of_find_property(struct device_node *np,
+                                        const char *name,
+                                        int *lenp);
 extern struct device_node *of_node_get(struct device_node *node);
 extern void of_node_put(struct device_node *node);
 
@@ -164,6 +168,10 @@ extern int prom_n_size_cells(struct device_node* np);
 extern int prom_n_intr_cells(struct device_node* np);
 extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
 extern int prom_add_property(struct device_node* np, struct property* prop);
+extern int prom_remove_property(struct device_node *np, struct property *prop);
+extern int prom_update_property(struct device_node *np,
+                               struct property *newprop,
+                               struct property *oldprop);
 
 #ifdef CONFIG_PPC32
 /*
index 754900901cd8fbeb05673bdb086a6f41a1b92655..895cb6d3a42a9cd1436670338f7a9fe3c0d53ae6 100644 (file)
@@ -46,7 +46,7 @@ static __inline__ unsigned long __spin_trylock(raw_spinlock_t *lock)
 
        token = LOCK_TOKEN;
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%2         # __spin_trylock\n\
+"1:    lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n\
        stwcx.          %1,0,%2\n\
@@ -80,7 +80,7 @@ static int __inline__ __raw_spin_trylock(raw_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (get_paca()->lppaca.shared_proc)
+#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
 extern void __spin_yield(raw_spinlock_t *lock);
 extern void __rw_yield(raw_rwlock_t *lock);
 #else /* SPLPAR || ISERIES */
@@ -124,8 +124,8 @@ static void __inline__ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long
 
 static __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-       __asm__ __volatile__(SYNC_ON_SMP"       # __raw_spin_unlock"
-                            : : :"memory");
+       __asm__ __volatile__("# __raw_spin_unlock\n\t"
+                               LWSYNC_ON_SMP: : :"memory");
        lock->slock = 0;
 }
 
@@ -167,7 +167,7 @@ static long __inline__ __read_trylock(raw_rwlock_t *rw)
        long tmp;
 
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%1         # read_trylock\n"
+"1:    lwarx           %0,0,%1\n"
        __DO_SIGN_EXTEND
 "      addic.          %0,%0,1\n\
        ble-            2f\n"
@@ -192,7 +192,7 @@ static __inline__ long __write_trylock(raw_rwlock_t *rw)
 
        token = WRLOCK_TOKEN;
        __asm__ __volatile__(
-"1:    lwarx           %0,0,%2 # write_trylock\n\
+"1:    lwarx           %0,0,%2\n\
        cmpwi           0,%0,0\n\
        bne-            2f\n"
        PPC405_ERR77(0,%1)
@@ -249,8 +249,9 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
        long tmp;
 
        __asm__ __volatile__(
-       "eieio                          # read_unlock\n\
-1:     lwarx           %0,0,%1\n\
+       "# read_unlock\n\t"
+       LWSYNC_ON_SMP
+"1:    lwarx           %0,0,%1\n\
        addic           %0,%0,-1\n"
        PPC405_ERR77(0,%1)
 "      stwcx.          %0,0,%1\n\
@@ -262,8 +263,8 @@ static void __inline__ __raw_read_unlock(raw_rwlock_t *rw)
 
 static __inline__ void __raw_write_unlock(raw_rwlock_t *rw)
 {
-       __asm__ __volatile__(SYNC_ON_SMP"       # write_unlock"
-                            : : :"memory");
+       __asm__ __volatile__("# write_unlock\n\t"
+                               LWSYNC_ON_SMP: : :"memory");
        rw->lock = 0;
 }
 
index 794870ab8fd3edfb802fa22cb2d513c94794de8b..c90d9d9aae720f6638e6896e0437442a1c65bb47 100644 (file)
@@ -2,6 +2,8 @@
 #define _ASM_POWERPC_SYNCH_H 
 #ifdef __KERNEL__
 
+#include <linux/stringify.h>
+
 #ifdef __powerpc64__
 #define __SUBARCH_HAS_LWSYNC
 #endif
 #    define LWSYNC     sync
 #endif
 
-
-/*
- * Arguably the bitops and *xchg operations don't imply any memory barrier
- * or SMP ordering, but in fact a lot of drivers expect them to imply
- * both, since they do on x86 cpus.
- */
 #ifdef CONFIG_SMP
-#define EIEIO_ON_SMP   "eieio\n"
 #define ISYNC_ON_SMP   "\n\tisync"
-#define SYNC_ON_SMP    __stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP  __stringify(LWSYNC) "\n"
 #else
-#define EIEIO_ON_SMP
 #define ISYNC_ON_SMP
-#define SYNC_ON_SMP
+#define LWSYNC_ON_SMP
 #endif
 
 static inline void eieio(void)
@@ -38,14 +32,5 @@ static inline void isync(void)
        __asm__ __volatile__ ("isync" : : : "memory");
 }
 
-#ifdef CONFIG_SMP
-#define eieio_on_smp() eieio()
-#define isync_on_smp() isync()
-#else
-#define eieio_on_smp() __asm__ __volatile__("": : :"memory")
-#define isync_on_smp() __asm__ __volatile__("": : :"memory")
-#endif
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */
-
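
LWSYNC_ON_SMP is built with __stringify(), hence the new include of linux/stringify.h: on SMP builds it expands to the text of whatever LWSYNC is defined to (plain "sync" in the fallback visible above), and to nothing on UP builds. A tiny host-side sketch of the two-level stringification idiom it relies on, with the macro definitions mirrored locally:

    #include <stdio.h>

    /* two-level expansion, as in linux/stringify.h: the inner macro is
     * needed so that LWSYNC is expanded before it is stringified */
    #define __stringify_1(x)        #x
    #define __stringify(x)          __stringify_1(x)

    #define LWSYNC          lwsync                  /* stand-in for the subarch choice */
    #define LWSYNC_ON_SMP   __stringify(LWSYNC) "\n"

    int main(void)
    {
            fputs(LWSYNC_ON_SMP, stdout);           /* prints "lwsync" */
            return 0;
    }
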
index 0c58e32a9570525ec4a2f036413d4cabfa69c10d..d9bf53653b10de8175fba20f16be1b9840a5f61a 100644 (file)
@@ -133,6 +133,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
@@ -175,6 +183,16 @@ struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 extern unsigned int rtas_data;
 extern int mem_init_done;      /* set on boot once kmalloc can be called */
 extern unsigned long memory_limit;
@@ -194,7 +212,7 @@ __xchg_u32(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stwcx.  %3,0,%2 \n\
@@ -214,7 +232,7 @@ __xchg_u64(volatile void *p, unsigned long val)
        unsigned long prev;
 
        __asm__ __volatile__(
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2 \n"
        PPC405_ERR77(0,%2)
 "      stdcx.  %3,0,%2 \n\
@@ -269,7 +287,7 @@ __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
        unsigned int prev;
 
        __asm__ __volatile__ (
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n"
@@ -293,7 +311,7 @@ __cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
        unsigned long prev;
 
        __asm__ __volatile__ (
-       EIEIO_ON_SMP
+       LWSYNC_ON_SMP
 "1:    ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
index ac1e80e6033ee13c22b4a8cc4ce1aa806e173582..7e09d7cda933c2919d16c51adcec0d30f24d39ad 100644 (file)
@@ -89,9 +89,6 @@ struct thread_info {
 
 #endif /* THREAD_SHIFT < PAGE_SHIFT */
 
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
-
 /* how to get the thread information struct from C */
 static inline struct thread_info *current_thread_info(void)
 {
index d9b86a17271ba95ed500061b3a83e7d98d0104f5..baddc9ab57adc1e561c6805be229449ffc64582b 100644 (file)
@@ -175,11 +175,10 @@ static inline void set_dec(int val)
        set_dec_cpu6(val);
 #else
 #ifdef CONFIG_PPC_ISERIES
-       struct paca_struct *lpaca = get_paca();
        int cur_dec;
 
-       if (lpaca->lppaca.shared_proc) {
-               lpaca->lppaca.virtual_decr = val;
+       if (get_lppaca()->shared_proc) {
+               get_lppaca()->virtual_decr = val;
                cur_dec = get_dec();
                if (cur_dec > val)
                        HvCall_setVirtualDecr();
index 9f3d4da261c478876f27352ce1d925de35427323..1e19cd00af25856445a44912051964b1fe456671 100644 (file)
@@ -39,7 +39,6 @@ static inline int node_to_first_cpu(int node)
        .max_interval           = 32,                   \
        .busy_factor            = 32,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000000),         \
        .cache_nice_tries       = 1,                    \
        .per_cpu_gain           = 100,                  \
        .busy_idx               = 3,                    \
index bd99cb53a19fb512b024e6a184f8f7b3359102ea..fb49c0c49ea1078fdea6df52b55dd09a71c302dd 100644 (file)
@@ -4,7 +4,6 @@
 #ifndef __PPC_SYSTEM_H
 #define __PPC_SYSTEM_H
 
-#include <linux/config.h>
 #include <linux/kernel.h>
 
 #include <asm/atomic.h>
@@ -39,7 +38,7 @@
 #ifdef CONFIG_SMP
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
-#define smp_wmb()      wmb()
+#define smp_wmb()      __asm__ __volatile__ ("eieio" : : : "memory")
 #define smp_read_barrier_depends()     read_barrier_depends()
 #else
 #define smp_mb()       barrier()
@@ -74,6 +73,7 @@ extern void chrp_nvram_init(void);
 extern void read_rtc_time(void);
 extern void pmac_find_display(void);
 extern void giveup_fpu(struct task_struct *);
+extern void disable_kernel_fp(void);
 extern void enable_kernel_fp(void);
 extern void flush_fp_to_thread(struct task_struct *);
 extern void enable_kernel_altivec(void);
@@ -86,6 +86,14 @@ extern int fix_alignment(struct pt_regs *);
 extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
 extern void cvt_df(double *from, float *to, struct thread_struct *thread);
 
+#ifndef CONFIG_SMP
+extern void discard_lazy_cpu_state(void);
+#else
+static inline void discard_lazy_cpu_state(void)
+{
+}
+#endif
+
 #ifdef CONFIG_ALTIVEC
 extern void flush_altivec_to_thread(struct task_struct *);
 #else
@@ -123,6 +131,16 @@ extern struct task_struct *__switch_to(struct task_struct *,
        struct task_struct *);
 #define switch_to(prev, next, last)    ((last) = __switch_to((prev), (next)))
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 struct thread_struct;
 extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);
index 372d51cccd5306a46494744898132a4929551c19..710646e64f7d6e4a062596d0796d0eb7b19c1a6a 100644 (file)
@@ -163,7 +163,7 @@ static inline int dump_regs(struct pt_regs *ptregs, elf_gregset_t *regs)
 
 static inline int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
 {
-       struct pt_regs *ptregs = __KSTK_PTREGS(tsk);
+       struct pt_regs *ptregs = task_pt_regs(tsk);
        memcpy(&regs->psw, &ptregs->psw, sizeof(regs->psw)+sizeof(regs->gprs));
        memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
        regs->orig_gpr2 = ptregs->orig_gpr2;
index 4ec652ebb3b1910bb80d80c4926080eb49216244..c5cbc4bd8414e12c571982cb5c5d0f85bec31406 100644 (file)
@@ -191,10 +191,10 @@ extern void show_registers(struct pt_regs *regs);
 extern void show_trace(struct task_struct *task, unsigned long *sp);
 
 unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
-        ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk)  (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk)  (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+        (task_stack_page(tsk) + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->gprs[15])
 
 /*
  * Give up the time slice of the virtual PU.
index 864cae7e1fd66382ae919b96ebe781f05fd237e5..c7c3a9ad593f49682a35f3c7cb0d2a28a952cf17 100644 (file)
@@ -104,6 +104,16 @@ static inline void restore_access_regs(unsigned int *acrs)
        prev = __switch_to(prev,next);                                       \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
 extern void account_user_vtime(struct task_struct *);
 extern void account_system_vtime(struct task_struct *);
index 6c18a3f24316ad69defc3f680edefe695702fca7..f3797a52c4ea5e06d5bca508c14706123ecd116e 100644 (file)
@@ -81,8 +81,6 @@ static inline struct thread_info *current_thread_info(void)
 #define alloc_thread_info(tsk) ((struct thread_info *) \
        __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti),THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif
 
index 0f75e16a74150ec04eabc711c702fc2d12436fc6..792fc35bd6245c8551909a6dbf57b385b761fea6 100644 (file)
@@ -91,6 +91,16 @@ struct pt_dspregs {
 #define instruction_pointer(regs) ((regs)->pc)
 extern void show_regs(struct pt_regs *);
 
+#ifdef CONFIG_SH_DSP
+#define task_pt_regs(task) \
+       ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+                - sizeof(struct pt_dspregs) - sizeof(unsigned long)) - 1)
+#else
+#define task_pt_regs(task) \
+       ((struct pt_regs *) (task_stack_page(task) + THREAD_SIZE \
+                - sizeof(unsigned long)) - 1)
+#endif
+
 static inline unsigned long profile_pc(struct pt_regs *regs)
 {
        unsigned long pc = instruction_pointer(regs);
index 28a3c2d8bcd7c132afb75f87b16d7a684678500d..bb0330499bdfef3987cc88d7dd454ead0df3a929 100644 (file)
        last = __last;                                                  \
 } while (0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 #define nop() __asm__ __volatile__ ("nop")
 
 
index 46080cefaff896d12e867f76976e7894b84b88fa..85f0c11b4319e111a6135e5e20c4a0c301e7cac1 100644 (file)
@@ -60,8 +60,6 @@ static inline struct thread_info *current_thread_info(void)
 #define THREAD_SIZE (2*PAGE_SIZE)
 #define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
index 10f024c6a2e3724b02982a162080eb0402979cdc..1f825cb163c3295756d13b4529b1147e4c77f2a7 100644 (file)
@@ -66,8 +66,6 @@ static inline struct thread_info *current_thread_info(void)
 
 #define alloc_thread_info(ti) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
index 1f6b71f9e1b637f9c773e26db22dbc26e690402d..58dd162927bbdd076ff1f6c11a80fadcdb68b324 100644 (file)
@@ -155,7 +155,7 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
        "here:\n"                                                                       \
         : "=&r" (last)                                                                 \
         : "r" (&(current_set[hard_smp_processor_id()])),       \
-         "r" ((next)->thread_info),                            \
+         "r" (task_thread_info(next)),                         \
          "i" (TI_KPSR),                                        \
          "i" (TI_KSP),                                         \
          "i" (TI_TASK)                                         \
@@ -165,6 +165,16 @@ extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
          "o0", "o1", "o2", "o3",                   "o7");      \
        } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 /*
  * Changing the IRQ level on the Sparc.
  */
index ff6ccb3d24c6121bb1c1da217ee3075f3da033a2..65f060b040abdabab9c9a1efa12001ba60ff804d 100644 (file)
@@ -92,9 +92,6 @@ BTFIXUPDEF_CALL(struct thread_info *, alloc_thread_info, void)
 BTFIXUPDEF_CALL(void, free_thread_info, struct thread_info *)
 #define free_thread_info(ti) BTFIXUP_CALL(free_thread_info)(ti)
 
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
-
 #endif /* __ASSEMBLY__ */
 
 /*
index 91458118277e4decbf19f2f7445563d7354a7cb7..69539a8ab833d4645eaab4ccd2536357af2540d9 100644 (file)
@@ -119,7 +119,7 @@ typedef struct {
 #endif
 
 #define ELF_CORE_COPY_TASK_REGS(__tsk, __elf_regs)     \
-       ({ ELF_CORE_COPY_REGS((*(__elf_regs)), (__tsk)->thread_info->kregs); 1; })
+       ({ ELF_CORE_COPY_REGS((*(__elf_regs)), task_pt_regs(__tsk)); 1; })
 
 /*
  * This is used to ensure we don't load something for the wrong architecture.
index 08ba72d7722c9c4be4fe7d96eb534ceb75d968c4..57ee7b3061897d9810593b53d0c66eabe3b414ce 100644 (file)
@@ -60,7 +60,7 @@ do { \
        register unsigned long pgd_cache asm("o4"); \
        paddr = __pa((__mm)->pgd); \
        pgd_cache = 0UL; \
-       if ((__tsk)->thread_info->flags & _TIF_32BIT) \
+       if (task_thread_info(__tsk)->flags & _TIF_32BIT) \
                pgd_cache = get_pgd_cache((__mm)->pgd); \
        __asm__ __volatile__("wrpr      %%g0, 0x494, %%pstate\n\t" \
                             "mov       %3, %%g4\n\t" \
index 3169f3e2237efb91769521f0b66a9b898d7df060..cd8d9b4c86587073821ab5144cec09803a5fcaa5 100644 (file)
@@ -186,8 +186,9 @@ extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 
 extern unsigned long get_wchan(struct task_struct *task);
 
-#define KSTK_EIP(tsk)  ((tsk)->thread_info->kregs->tpc)
-#define KSTK_ESP(tsk)  ((tsk)->thread_info->kregs->u_regs[UREG_FP])
+#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
+#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
+#define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
 #define cpu_relax()    barrier()
 
index 309f1466b6fa1125edd34f933aa56ab645dc76b0..af254e5818348095aa62c841ede600765a4a9065 100644 (file)
@@ -208,7 +208,7 @@ do {        if (test_thread_flag(TIF_PERFCTR)) {                            \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
-       : : "r" (__thread_flag_byte_ptr(next->thread_info)[TI_FLAG_BYTE_CURRENT_DS]));\
+       : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
        __asm__ __volatile__(                                           \
        "mov    %%g4, %%g7\n\t"                                         \
        "wrpr   %%g0, 0x95, %%pstate\n\t"                               \
@@ -238,7 +238,7 @@ do {        if (test_thread_flag(TIF_PERFCTR)) {                            \
        "b,a ret_from_syscall\n\t"                                      \
        "1:\n\t"                                                        \
        : "=&r" (last)                                                  \
-       : "0" (next->thread_info),                                      \
+       : "0" (task_thread_info(next)),                                 \
          "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
          "i" (TI_CWP), "i" (TI_TASK)                                   \
        : "cc",                                                         \
@@ -253,6 +253,16 @@ do {       if (test_thread_flag(TIF_PERFCTR)) {                            \
        }                                                               \
 } while(0)
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ *
+ * TODO: fill this in!
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
 {
        unsigned long tmp1, tmp2;
index 97267f059ef5317a4c629383992cc3a3642bb2be..705c71972c326ed5057e1683cc2e02a9e0ca0262 100644 (file)
@@ -56,9 +56,6 @@ static inline struct thread_info *current_thread_info(void)
        ((struct thread_info *) kmalloc(THREAD_SIZE, GFP_KERNEL))
 #define free_thread_info(ti) kfree(ti)
 
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
-
 #endif
 
 #define PREEMPT_ACTIVE         0x10000000
index 98f929427d3dcfde6e8a825ba0f7620f3743511d..2d31308935a00ab0263feab4b75ce8be482724be 100644 (file)
@@ -98,10 +98,10 @@ unsigned long get_wchan (struct task_struct *p);
 
 
 /* Return some info about the user process TASK.  */
-#define task_tos(task) ((unsigned long)(task)->thread_info + THREAD_SIZE)
-#define task_regs(task) ((struct pt_regs *)task_tos (task) - 1)
-#define task_sp(task)  (task_regs (task)->gpr[GPR_SP])
-#define task_pc(task)  (task_regs (task)->pc)
+#define task_tos(task) ((unsigned long)task_stack_page(task) + THREAD_SIZE)
+#define task_pt_regs(task) ((struct pt_regs *)task_tos (task) - 1)
+#define task_sp(task)  (task_pt_regs (task)->gpr[GPR_SP])
+#define task_pc(task)  (task_pt_regs (task)->pc)
 /* Grotty old names for some.  */
 #define KSTK_EIP(task) task_pc (task)
 #define KSTK_ESP(task) task_sp (task)
index e4cfad94a5530f528834692346aa905fdf20bdc4..82b8f2846207519f7d4b0deb7380e857f27dffbc 100644 (file)
@@ -58,8 +58,6 @@ struct thread_info {
 #define alloc_thread_info(tsk) ((struct thread_info *) \
                                __get_free_pages(GFP_KERNEL, 1))
 #define free_thread_info(ti)   free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti)    get_task_struct((ti)->task)
-#define put_thread_info(ti)    put_task_struct((ti)->task)
 
 #endif /* __ASSEMBLY__ */
 
index 3863a7da372bc2e5347a6fdb2270bc8192b71d06..b37ab8218ef03634e90d5dd8d59b2546e088481c 100644 (file)
@@ -198,7 +198,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
 
 static __inline__ void __user *compat_alloc_user_space(long len)
 {
-       struct pt_regs *regs = (void *)current->thread.rsp0 - sizeof(struct pt_regs); 
+       struct pt_regs *regs = task_pt_regs(current);
        return (void __user *)regs->rsp - len; 
 }
 
index 57f7e14338492df5c8c1b37f54658d6f61894afd..876eb9a2fe7868a7c7ce01ef694403b3150f4601 100644 (file)
@@ -30,7 +30,7 @@ extern int save_i387(struct _fpstate __user *buf);
  */
 
 #define unlazy_fpu(tsk) do { \
-       if ((tsk)->thread_info->status & TS_USEDFPU) \
+       if (task_thread_info(tsk)->status & TS_USEDFPU) \
                save_init_fpu(tsk); \
 } while (0)
 
@@ -46,9 +46,9 @@ static inline void tolerant_fwait(void)
 }
 
 #define clear_fpu(tsk) do { \
-       if ((tsk)->thread_info->status & TS_USEDFPU) {          \
+       if (task_thread_info(tsk)->status & TS_USEDFPU) {       \
                tolerant_fwait();                               \
-               (tsk)->thread_info->status &= ~TS_USEDFPU;      \
+               task_thread_info(tsk)->status &= ~TS_USEDFPU;   \
                stts();                                         \
        }                                                       \
 } while (0)
@@ -170,10 +170,10 @@ static inline void kernel_fpu_end(void)
        preempt_enable();
 }
 
-static inline void save_init_fpu( struct task_struct *tsk )
+static inline void save_init_fpu(struct task_struct *tsk)
 {
        __fxsave_clear(tsk);
-       tsk->thread_info->status &= ~TS_USEDFPU;
+       task_thread_info(tsk)->status &= ~TS_USEDFPU;
        stts();
 }
 
index 394dd729752d7420844c94c9ea54af2cc631469e..87a282b1043a65cb2188e6c0a17a080c22bd2df6 100644 (file)
@@ -321,8 +321,8 @@ extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 #define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
 
 extern unsigned long get_wchan(struct task_struct *p);
-#define KSTK_EIP(tsk) \
-       (((struct pt_regs *)(tsk->thread.rsp0 - sizeof(struct pt_regs)))->rip)
+#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
 #define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
 
 
index 38c1e8a69c9c03a91c290d3e004a0d55f85bead9..0eacbefb7dd04aede495c7832af37588df5279ef 100644 (file)
@@ -193,6 +193,15 @@ static inline void write_cr4(unsigned long val)
 #define wbinvd() \
        __asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+       wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
index eb7c5fda1870d758b5610ca14bf860d228749942..4ac0e0a36934834fb63d0bffa05d3bdbddf66a2b 100644 (file)
@@ -76,8 +76,6 @@ static inline struct thread_info *stack_thread_info(void)
 #define alloc_thread_info(tsk) \
        ((struct thread_info *) __get_free_pages(GFP_KERNEL,THREAD_ORDER))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
index 7d82bc56b9fae2f123326a936699819b4a5effa3..2fa7f27381b40b69d39562a5e6394b1a975ec803 100644 (file)
@@ -39,7 +39,6 @@ extern int __node_distance(int, int);
        .max_interval           = 32,                   \
        .busy_factor            = 32,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (10*1000000),         \
        .cache_nice_tries       = 2,                    \
        .busy_idx               = 3,                    \
        .idle_idx               = 2,                    \
index 9cab5e4298b943c168b267700eae848462b7f367..d1d72ad36f08e4506460b7e1aeaeb6b1b75d337b 100644 (file)
@@ -184,12 +184,12 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
 #define release_segments(mm)   do { } while(0)
 #define forget_segments()      do { } while (0)
 
-#define thread_saved_pc(tsk)   (xtensa_pt_regs(tsk)->pc)
+#define thread_saved_pc(tsk)   (task_pt_regs(tsk)->pc)
 
 extern unsigned long get_wchan(struct task_struct *p);
 
-#define KSTK_EIP(tsk)          (xtensa_pt_regs(tsk)->pc)
-#define KSTK_ESP(tsk)          (xtensa_pt_regs(tsk)->areg[1])
+#define KSTK_EIP(tsk)          (task_pt_regs(tsk)->pc)
+#define KSTK_ESP(tsk)          (task_pt_regs(tsk)->areg[1])
 
 #define cpu_relax()  do { } while (0)
 
index aa4fd7fb3ce716db5fdc02680056afd130308859..a5ac71a5205c268cd9cc51bbe4e82d31f6444137 100644 (file)
@@ -113,8 +113,8 @@ struct pt_regs {
 };
 
 #ifdef __KERNEL__
-# define xtensa_pt_regs(tsk) ((struct pt_regs*) \
-  (((long)(tsk)->thread_info + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4)) - 1)
+# define task_pt_regs(tsk) ((struct pt_regs*) \
+  (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
 # define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
 # define instruction_pointer(regs) ((regs)->pc)
 extern void show_regs(struct pt_regs *);
index af208d41fd821e842913a326f446315cba4bf57f..5ae34ab715972a812ef81a8e634e18d8ea50b776 100644 (file)
@@ -93,8 +93,6 @@ static inline struct thread_info *current_thread_info(void)
 /* thread information allocation */
 #define alloc_thread_info(tsk) ((struct thread_info *) __get_free_pages(GFP_KERNEL,1))
 #define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
-#define get_thread_info(ti) get_task_struct((ti)->task)
-#define put_thread_info(ti) put_task_struct((ti)->task)
 
 #else /* !__ASSEMBLY__ */
 
index 9a7b374c9fb41a50890b77305131a99ae1a27468..d2bc0d66e65d49ed3df74f2666b3d079a023eeca 100644 (file)
@@ -26,6 +26,6 @@
 
 #define AT_SECURE 23   /* secure mode boolean */
 
-#define AT_VECTOR_SIZE  42 /* Size of auxiliary table.  */
+#define AT_VECTOR_SIZE  44 /* Size of auxiliary table.  */
 
 #endif /* _LINUX_AUXVEC_H */
index 0cdee78e5ce16f790142ae146f30d30927fe133a..58df18d9cd3ebe4f218d1d13d119293879a77088 100644 (file)
@@ -49,6 +49,9 @@ struct bus_type {
        int             (*match)(struct device * dev, struct device_driver * drv);
        int             (*uevent)(struct device *dev, char **envp,
                                  int num_envp, char *buffer, int buffer_size);
+       int             (*probe)(struct device * dev);
+       int             (*remove)(struct device * dev);
+       void            (*shutdown)(struct device * dev);
        int             (*suspend)(struct device * dev, pm_message_t state);
        int             (*resume)(struct device * dev);
 };
index 934aa9bda481b29bb83ba045aba298c20f3825c6..a9f1cfd096ff1cec7f50745688a11651e45ff29a 100644 (file)
@@ -50,14 +50,12 @@ struct gianfar_platform_data {
 
        /* board specific information */
        u32 board_flags;
-       const char *bus_id;
+       u32 bus_id;
+       u32 phy_id;
        u8 mac_addr[6];
 };
 
 struct gianfar_mdio_data {
-       /* device specific information */
-       u32 paddr;
-
        /* board specific information */
        int irq[32];
 };
index cf5cfdf8d61304b319486724f9e0ec7515b63148..089bfb1fa01a771d7c11ee7de4227deb88d29b00 100644 (file)
@@ -49,8 +49,6 @@ struct hrtimer_base;
  * struct hrtimer - the basic hrtimer structure
  *
  * @node:      red black tree node for time ordered insertion
- * @list:      list head for easier access to the time ordered list,
- *             without walking the red black tree.
  * @expires:   the absolute expiry time in the hrtimers internal
  *             representation. The time is related to the clock on
  *             which the timer is based.
@@ -63,7 +61,6 @@ struct hrtimer_base;
  */
 struct hrtimer {
        struct rb_node          node;
-       struct list_head        list;
        ktime_t                 expires;
        enum hrtimer_state      state;
        int                     (*function)(void *);
@@ -78,7 +75,7 @@ struct hrtimer {
  *             to a base on another cpu.
  * @lock:      lock protecting the base and associated timers
  * @active:    red black tree root node for the active timers
- * @pending:   list of pending timers for simple time ordered access
+ * @first:     pointer to the timer node which expires first
  * @resolution:        the resolution of the clock, in nanoseconds
  * @get_time:  function to retrieve the current time of the clock
  * @curr_timer:        the timer which is executing a callback right now
@@ -87,8 +84,8 @@ struct hrtimer_base {
        clockid_t               index;
        spinlock_t              lock;
        struct rb_root          active;
-       struct list_head        pending;
-       unsigned long           resolution;
+       struct rb_node          *first;
+       ktime_t                 resolution;
        ktime_t                 (*get_time)(void);
        struct hrtimer          *curr_timer;
 };
@@ -125,8 +122,7 @@ static inline int hrtimer_active(const struct hrtimer *timer)
 }
 
 /* Forward a hrtimer so it expires after now: */
-extern unsigned long hrtimer_forward(struct hrtimer *timer,
-                                    const ktime_t interval);
+extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval);
 
 /* Precise sleep: */
 extern long hrtimer_nanosleep(struct timespec *rqtp,
index 9a8c05dbe4f38b6aafef22e48d8e46e6de742f15..110b3cfac021af3be75a1feb2c65fbec359fb72c 100644 (file)
@@ -983,8 +983,13 @@ typedef struct ide_driver_s {
        ide_startstop_t (*abort)(ide_drive_t *, struct request *rq);
        ide_proc_entry_t        *proc;
        struct device_driver    gen_driver;
+       int             (*probe)(ide_drive_t *);
+       void            (*remove)(ide_drive_t *);
+       void            (*shutdown)(ide_drive_t *);
 } ide_driver_t;
 
+#define to_ide_driver(drv) container_of(drv, ide_driver_t, gen_driver)
+
 int generic_ide_ioctl(ide_drive_t *, struct file *, struct block_device *, unsigned, unsigned long);
 
 /*
@@ -1002,7 +1007,6 @@ extern int noautodma;
 
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
-extern void ide_softirq_done(struct request *rq);
 
 /*
  * This is used on exit from the driver to designate the next irq handler
index e6ee2d95da7a3604d0a7c473ef7396896d21ead9..323924edb26a5f686361bfa6cb188f9b5b4d9641 100644 (file)
@@ -216,6 +216,7 @@ extern void dump_stack(void);
        ((unsigned char *)&addr)[1], \
        ((unsigned char *)&addr)[2], \
        ((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
 
 #define NIP6(addr) \
        ntohs((addr).s6_addr16[0]), \
@@ -226,6 +227,7 @@ extern void dump_stack(void);
        ntohs((addr).s6_addr16[5]), \
        ntohs((addr).s6_addr16[6]), \
        ntohs((addr).s6_addr16[7])
+#define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
 
 #if defined(__LITTLE_ENDIAN)
 #define HIPQUAD(addr) \
index 222a047cc145e8d25c8d68894c86bc390d2a929b..1bd6552cc34134c4e19d5565e992fc91b9785910 100644 (file)
@@ -272,8 +272,8 @@ static inline u64 ktime_to_ns(const ktime_t kt)
  * idea of the (in)accuracy of timers. Timer values are rounded up to
  * this resolution values.
  */
-#define KTIME_REALTIME_RES     (NSEC_PER_SEC/HZ)
-#define KTIME_MONOTONIC_RES    (NSEC_PER_SEC/HZ)
+#define KTIME_REALTIME_RES     (ktime_t){ .tv64 = TICK_NSEC }
+#define KTIME_MONOTONIC_RES    (ktime_t){ .tv64 = TICK_NSEC }
 
 /* Get the monotonic time in timespec format: */
 extern void ktime_get_ts(struct timespec *ts);
index 6d39b518486b94dcda7368a6791919403604ffc7..3ff88c8783083fc620a880ecfbdb44d7840cc8f2 100644 (file)
@@ -154,6 +154,9 @@ struct ip_conntrack_stat
        unsigned int expect_delete;
 };
 
+/* call to create an explicit dependency on nf_conntrack. */
+extern void need_conntrack(void);
+
 #endif /* __KERNEL__ */
 
 #endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
new file mode 100644 (file)
index 0000000..472f048
--- /dev/null
@@ -0,0 +1,224 @@
+#ifndef _X_TABLES_H
+#define _X_TABLES_H
+
+#define XT_FUNCTION_MAXNAMELEN 30
+#define XT_TABLE_MAXNAMELEN 32
+
+/* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
+ * kernel supports, if >= revision. */
+struct xt_get_revision
+{
+       char name[XT_FUNCTION_MAXNAMELEN-1];
+
+       u_int8_t revision;
+};
+
+/* CONTINUE verdict for targets */
+#define XT_CONTINUE 0xFFFFFFFF
+
+/* For standard target */
+#define XT_RETURN (-NF_REPEAT - 1)
+
+#define XT_ALIGN(s) (((s) + (__alignof__(void *)-1)) & ~(__alignof__(void *)-1))
+
+/* Standard return verdict, or do jump. */
+#define XT_STANDARD_TARGET ""
+/* Error verdict. */
+#define XT_ERROR_TARGET "ERROR"
+
+/*
+ * New IP firewall options for [gs]etsockopt at the RAW IP level.
+ * Unlike BSD Linux inherits IP options so you don't have to use a raw
+ * socket for this. Instead we check rights in the calls. */
+#define XT_BASE_CTL            64      /* base for firewall socket options */
+
+#define XT_SO_SET_REPLACE      (XT_BASE_CTL)
+#define XT_SO_SET_ADD_COUNTERS (XT_BASE_CTL + 1)
+#define XT_SO_SET_MAX          XT_SO_SET_ADD_COUNTERS
+
+#define XT_SO_GET_INFO                 (XT_BASE_CTL)
+#define XT_SO_GET_ENTRIES              (XT_BASE_CTL + 1)
+#define XT_SO_GET_REVISION_MATCH       (XT_BASE_CTL + 2)
+#define XT_SO_GET_REVISION_TARGET      (XT_BASE_CTL + 3)
+#define XT_SO_GET_MAX                  XT_SO_GET_REVISION_TARGET
+
+#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
+#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
+
+struct xt_counters
+{
+       u_int64_t pcnt, bcnt;                   /* Packet and byte counters */
+};
+
+/* The argument to IPT_SO_ADD_COUNTERS. */
+struct xt_counters_info
+{
+       /* Which table. */
+       char name[XT_TABLE_MAXNAMELEN];
+
+       unsigned int num_counters;
+
+       /* The counters (actually `number' of these). */
+       struct xt_counters counters[0];
+};
+
+#define XT_INV_PROTO           0x40    /* Invert the sense of PROTO. */
+
+#ifdef __KERNEL__
+
+#include <linux/netdevice.h>
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+#include <linux/netfilter_ipv4/listhelp.h>
+
+struct xt_match
+{
+       struct list_head list;
+
+       const char name[XT_FUNCTION_MAXNAMELEN-1];
+
+       u_int8_t revision;
+
+       /* Return true or false: return FALSE and set *hotdrop = 1 to
+           force immediate packet drop. */
+       /* Arguments changed since 2.6.9, as this must now handle
+          non-linear skb, using skb_header_pointer and
+          skb_ip_make_writable. */
+       int (*match)(const struct sk_buff *skb,
+                    const struct net_device *in,
+                    const struct net_device *out,
+                    const void *matchinfo,
+                    int offset,
+                    unsigned int protoff,
+                    int *hotdrop);
+
+       /* Called when user tries to insert an entry of this type. */
+       /* Should return true or false. */
+       int (*checkentry)(const char *tablename,
+                         const void *ip,
+                         void *matchinfo,
+                         unsigned int matchinfosize,
+                         unsigned int hook_mask);
+
+       /* Called when entry of this type deleted. */
+       void (*destroy)(void *matchinfo, unsigned int matchinfosize);
+
+       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+       struct module *me;
+};
+
+/* Registration hooks for targets. */
+struct xt_target
+{
+       struct list_head list;
+
+       const char name[XT_FUNCTION_MAXNAMELEN-1];
+
+       u_int8_t revision;
+
+       /* Returns verdict. Argument order changed since 2.6.9, as this
+          must now handle non-linear skbs, using skb_copy_bits and
+          skb_ip_make_writable. */
+       unsigned int (*target)(struct sk_buff **pskb,
+                              const struct net_device *in,
+                              const struct net_device *out,
+                              unsigned int hooknum,
+                              const void *targinfo,
+                              void *userdata);
+
+       /* Called when user tries to insert an entry of this type:
+           hook_mask is a bitmask of hooks from which it can be
+           called. */
+       /* Should return true or false. */
+       int (*checkentry)(const char *tablename,
+                         const void *entry,
+                         void *targinfo,
+                         unsigned int targinfosize,
+                         unsigned int hook_mask);
+
+       /* Called when entry of this type deleted. */
+       void (*destroy)(void *targinfo, unsigned int targinfosize);
+
+       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+       struct module *me;
+};
+
+/* Furniture shopping... */
+struct xt_table
+{
+       struct list_head list;
+
+       /* A unique name... */
+       char name[XT_TABLE_MAXNAMELEN];
+
+       /* What hooks you will enter on */
+       unsigned int valid_hooks;
+
+       /* Lock for the curtain */
+       rwlock_t lock;
+
+       /* Man behind the curtain... */
+       //struct ip6t_table_info *private;
+       void *private;
+
+       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
+       struct module *me;
+
+       int af;         /* address/protocol family */
+};
+
+#include <linux/netfilter_ipv4.h>
+
+/* The table itself */
+struct xt_table_info
+{
+       /* Size per table */
+       unsigned int size;
+       /* Number of entries: FIXME. --RR */
+       unsigned int number;
+       /* Initial number of entries. Needed for module usage count */
+       unsigned int initial_entries;
+
+       /* Entry points and underflows */
+       unsigned int hook_entry[NF_IP_NUMHOOKS];
+       unsigned int underflow[NF_IP_NUMHOOKS];
+
+       /* ipt_entry tables: one per CPU */
+       char *entries[NR_CPUS];
+};
+
+extern int xt_register_target(int af, struct xt_target *target);
+extern void xt_unregister_target(int af, struct xt_target *target);
+extern int xt_register_match(int af, struct xt_match *target);
+extern void xt_unregister_match(int af, struct xt_match *target);
+
+extern int xt_register_table(struct xt_table *table,
+                            struct xt_table_info *bootstrap,
+                            struct xt_table_info *newinfo);
+extern void *xt_unregister_table(struct xt_table *table);
+
+extern struct xt_table_info *xt_replace_table(struct xt_table *table,
+                                             unsigned int num_counters,
+                                             struct xt_table_info *newinfo,
+                                             int *error);
+
+extern struct xt_match *xt_find_match(int af, const char *name, u8 revision);
+extern struct xt_target *xt_find_target(int af, const char *name, u8 revision);
+extern struct xt_target *xt_request_find_target(int af, const char *name, 
+                                               u8 revision);
+extern int xt_find_revision(int af, const char *name, u8 revision, int target,
+                           int *err);
+
+extern struct xt_table *xt_find_table_lock(int af, const char *name);
+extern void xt_table_unlock(struct xt_table *t);
+
+extern int xt_proto_init(int af);
+extern void xt_proto_fini(int af);
+
+extern struct xt_table_info *xt_alloc_table_info(unsigned int size);
+extern void xt_free_table_info(struct xt_table_info *info);
+
+#endif /* __KERNEL__ */
+
+#endif /* _X_TABLES_H */
diff --git a/include/linux/netfilter/xt_CLASSIFY.h b/include/linux/netfilter/xt_CLASSIFY.h
new file mode 100644 (file)
index 0000000..5811135
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _XT_CLASSIFY_H
+#define _XT_CLASSIFY_H
+
+struct xt_classify_target_info {
+       u_int32_t priority;
+};
+
+#endif /*_XT_CLASSIFY_H */
diff --git a/include/linux/netfilter/xt_CONNMARK.h b/include/linux/netfilter/xt_CONNMARK.h
new file mode 100644 (file)
index 0000000..9f74468
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _XT_CONNMARK_H_target
+#define _XT_CONNMARK_H_target
+
+/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+enum {
+       XT_CONNMARK_SET = 0,
+       XT_CONNMARK_SAVE,
+       XT_CONNMARK_RESTORE
+};
+
+struct xt_connmark_target_info {
+       unsigned long mark;
+       unsigned long mask;
+       u_int8_t mode;
+};
+
+#endif /*_XT_CONNMARK_H_target*/
diff --git a/include/linux/netfilter/xt_MARK.h b/include/linux/netfilter/xt_MARK.h
new file mode 100644 (file)
index 0000000..b021e93
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _XT_MARK_H_target
+#define _XT_MARK_H_target
+
+/* Version 0 */
+struct xt_mark_target_info {
+       unsigned long mark;
+};
+
+/* Version 1 */
+enum {
+       XT_MARK_SET=0,
+       XT_MARK_AND,
+       XT_MARK_OR,
+};
+
+struct xt_mark_target_info_v1 {
+       unsigned long mark;
+       u_int8_t mode;
+};
+
+#endif /*_XT_MARK_H_target */
diff --git a/include/linux/netfilter/xt_NFQUEUE.h b/include/linux/netfilter/xt_NFQUEUE.h
new file mode 100644 (file)
index 0000000..9a9af79
--- /dev/null
@@ -0,0 +1,16 @@
+/* iptables module for using NFQUEUE mechanism
+ *
+ * (C) 2005 Harald Welte <laforge@netfilter.org>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ * 
+*/
+#ifndef _XT_NFQ_TARGET_H
+#define _XT_NFQ_TARGET_H
+
+/* target info */
+struct xt_NFQ_info {
+       u_int16_t queuenum;
+};
+
+#endif /* _XT_NFQ_TARGET_H */
diff --git a/include/linux/netfilter/xt_comment.h b/include/linux/netfilter/xt_comment.h
new file mode 100644 (file)
index 0000000..eacfedc
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _XT_COMMENT_H
+#define _XT_COMMENT_H
+
+#define XT_MAX_COMMENT_LEN 256
+
+struct xt_comment_info {
+       unsigned char comment[XT_MAX_COMMENT_LEN];
+};
+
+#endif /* XT_COMMENT_H */
diff --git a/include/linux/netfilter/xt_connbytes.h b/include/linux/netfilter/xt_connbytes.h
new file mode 100644 (file)
index 0000000..c022c98
--- /dev/null
@@ -0,0 +1,25 @@
+#ifndef _XT_CONNBYTES_H
+#define _XT_CONNBYTES_H
+
+enum xt_connbytes_what {
+       XT_CONNBYTES_PKTS,
+       XT_CONNBYTES_BYTES,
+       XT_CONNBYTES_AVGPKT,
+};
+
+enum xt_connbytes_direction {
+       XT_CONNBYTES_DIR_ORIGINAL,
+       XT_CONNBYTES_DIR_REPLY,
+       XT_CONNBYTES_DIR_BOTH,
+};
+
+struct xt_connbytes_info
+{
+       struct {
+               aligned_u64 from;       /* count to be matched */
+               aligned_u64 to;         /* count to be matched */
+       } count;
+       u_int8_t what;          /* ipt_connbytes_what */
+       u_int8_t direction;     /* ipt_connbytes_direction */
+};
+#endif
diff --git a/include/linux/netfilter/xt_connmark.h b/include/linux/netfilter/xt_connmark.h
new file mode 100644 (file)
index 0000000..c592f6a
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _XT_CONNMARK_H
+#define _XT_CONNMARK_H
+
+/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+struct xt_connmark_info {
+       unsigned long mark, mask;
+       u_int8_t invert;
+};
+
+#endif /*_XT_CONNMARK_H*/
diff --git a/include/linux/netfilter/xt_conntrack.h b/include/linux/netfilter/xt_conntrack.h
new file mode 100644 (file)
index 0000000..34f63cf
--- /dev/null
@@ -0,0 +1,63 @@
+/* Header file for kernel module to match connection tracking information.
+ * GPL (C) 2001  Marc Boucher (marc@mbsi.ca).
+ */
+
+#ifndef _XT_CONNTRACK_H
+#define _XT_CONNTRACK_H
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/in.h>
+
+#define XT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
+#define XT_CONNTRACK_STATE_INVALID (1 << 0)
+
+#define XT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1))
+#define XT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2))
+#define XT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3))
+
+/* flags, invflags: */
+#define XT_CONNTRACK_STATE     0x01
+#define XT_CONNTRACK_PROTO     0x02
+#define XT_CONNTRACK_ORIGSRC   0x04
+#define XT_CONNTRACK_ORIGDST   0x08
+#define XT_CONNTRACK_REPLSRC   0x10
+#define XT_CONNTRACK_REPLDST   0x20
+#define XT_CONNTRACK_STATUS    0x40
+#define XT_CONNTRACK_EXPIRES   0x80
+
+/* This is exposed to userspace, so remains frozen in time. */
+struct ip_conntrack_old_tuple
+{
+       struct {
+               __u32 ip;
+               union {
+                       __u16 all;
+               } u;
+       } src;
+
+       struct {
+               __u32 ip;
+               union {
+                       __u16 all;
+               } u;
+
+               /* The protocol. */
+               u16 protonum;
+       } dst;
+};
+
+struct xt_conntrack_info
+{
+       unsigned int statemask, statusmask;
+
+       struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
+       struct in_addr sipmsk[IP_CT_DIR_MAX], dipmsk[IP_CT_DIR_MAX];
+
+       unsigned long expires_min, expires_max;
+
+       /* Flags word */
+       u_int8_t flags;
+       /* Inverse flags */
+       u_int8_t invflags;
+};
+#endif /*_XT_CONNTRACK_H*/
diff --git a/include/linux/netfilter/xt_dccp.h b/include/linux/netfilter/xt_dccp.h
new file mode 100644 (file)
index 0000000..e0221b9
--- /dev/null
@@ -0,0 +1,23 @@
+#ifndef _XT_DCCP_H_
+#define _XT_DCCP_H_
+
+#define XT_DCCP_SRC_PORTS              0x01
+#define XT_DCCP_DEST_PORTS             0x02
+#define XT_DCCP_TYPE                   0x04
+#define XT_DCCP_OPTION                 0x08
+
+#define XT_DCCP_VALID_FLAGS            0x0f
+
+struct xt_dccp_info {
+       u_int16_t dpts[2];  /* Min, Max */
+       u_int16_t spts[2];  /* Min, Max */
+
+       u_int16_t flags;
+       u_int16_t invflags;
+
+       u_int16_t typemask;
+       u_int8_t option;
+};
+
+#endif /* _XT_DCCP_H_ */
+
diff --git a/include/linux/netfilter/xt_helper.h b/include/linux/netfilter/xt_helper.h
new file mode 100644 (file)
index 0000000..6b42763
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _XT_HELPER_H
+#define _XT_HELPER_H
+
+struct xt_helper_info {
+       int invert;
+       char name[30];
+};
+#endif /* _XT_HELPER_H */
diff --git a/include/linux/netfilter/xt_length.h b/include/linux/netfilter/xt_length.h
new file mode 100644 (file)
index 0000000..7c2b439
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _XT_LENGTH_H
+#define _XT_LENGTH_H
+
+struct xt_length_info {
+    u_int16_t  min, max;
+    u_int8_t   invert;
+};
+
+#endif /*_XT_LENGTH_H*/
diff --git a/include/linux/netfilter/xt_limit.h b/include/linux/netfilter/xt_limit.h
new file mode 100644 (file)
index 0000000..b3ce653
--- /dev/null
@@ -0,0 +1,21 @@
+#ifndef _XT_RATE_H
+#define _XT_RATE_H
+
+/* timings are in milliseconds. */
+#define XT_LIMIT_SCALE 10000
+
+/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
+   seconds, or one every 59 hours. */
+struct xt_rateinfo {
+       u_int32_t avg;    /* Average secs between packets * scale */
+       u_int32_t burst;  /* Period multiplier for upper limit. */
+
+       /* Used internally by the kernel */
+       unsigned long prev;
+       u_int32_t credit;
+       u_int32_t credit_cap, cost;
+
+       /* Ugly, ugly fucker. */
+       struct xt_rateinfo *master;
+};
+#endif /*_XT_RATE_H*/
diff --git a/include/linux/netfilter/xt_mac.h b/include/linux/netfilter/xt_mac.h
new file mode 100644 (file)
index 0000000..b892cdc
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _XT_MAC_H
+#define _XT_MAC_H
+
+struct xt_mac_info {
+    unsigned char srcaddr[ETH_ALEN];
+    int invert;
+};
+#endif /*_XT_MAC_H*/
diff --git a/include/linux/netfilter/xt_mark.h b/include/linux/netfilter/xt_mark.h
new file mode 100644 (file)
index 0000000..802dd48
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _XT_MARK_H
+#define _XT_MARK_H
+
+struct xt_mark_info {
+    unsigned long mark, mask;
+    u_int8_t invert;
+};
+
+#endif /*_XT_MARK_H*/
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
new file mode 100644 (file)
index 0000000..25a7a18
--- /dev/null
@@ -0,0 +1,24 @@
+#ifndef _XT_PHYSDEV_H
+#define _XT_PHYSDEV_H
+
+#ifdef __KERNEL__
+#include <linux/if.h>
+#endif
+
+#define XT_PHYSDEV_OP_IN               0x01
+#define XT_PHYSDEV_OP_OUT              0x02
+#define XT_PHYSDEV_OP_BRIDGED          0x04
+#define XT_PHYSDEV_OP_ISIN             0x08
+#define XT_PHYSDEV_OP_ISOUT            0x10
+#define XT_PHYSDEV_OP_MASK             (0x20 - 1)
+
+struct xt_physdev_info {
+       char physindev[IFNAMSIZ];
+       char in_mask[IFNAMSIZ];
+       char physoutdev[IFNAMSIZ];
+       char out_mask[IFNAMSIZ];
+       u_int8_t invert;
+       u_int8_t bitmask;
+};
+
+#endif /*_XT_PHYSDEV_H*/
diff --git a/include/linux/netfilter/xt_pkttype.h b/include/linux/netfilter/xt_pkttype.h
new file mode 100644 (file)
index 0000000..f265cf5
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef _XT_PKTTYPE_H
+#define _XT_PKTTYPE_H
+
+struct xt_pkttype_info {
+       int     pkttype;
+       int     invert;
+};
+#endif /*_XT_PKTTYPE_H*/
diff --git a/include/linux/netfilter/xt_realm.h b/include/linux/netfilter/xt_realm.h
new file mode 100644 (file)
index 0000000..220e872
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef _XT_REALM_H
+#define _XT_REALM_H
+
+struct xt_realm_info {
+       u_int32_t id;
+       u_int32_t mask;
+       u_int8_t invert;
+};
+
+#endif /* _XT_REALM_H */
diff --git a/include/linux/netfilter/xt_sctp.h b/include/linux/netfilter/xt_sctp.h
new file mode 100644 (file)
index 0000000..b157897
--- /dev/null
@@ -0,0 +1,107 @@
+#ifndef _XT_SCTP_H_
+#define _XT_SCTP_H_
+
+#define XT_SCTP_SRC_PORTS              0x01
+#define XT_SCTP_DEST_PORTS             0x02
+#define XT_SCTP_CHUNK_TYPES            0x04
+
+#define XT_SCTP_VALID_FLAGS            0x07
+
+#define ELEMCOUNT(x) (sizeof(x)/sizeof(x[0]))
+
+
+struct xt_sctp_flag_info {
+       u_int8_t chunktype;
+       u_int8_t flag;
+       u_int8_t flag_mask;
+};
+
+#define XT_NUM_SCTP_FLAGS      4
+
+struct xt_sctp_info {
+       u_int16_t dpts[2];  /* Min, Max */
+       u_int16_t spts[2];  /* Min, Max */
+
+       u_int32_t chunkmap[256 / sizeof (u_int32_t)];  /* Bit mask of chunks to be matched according to RFC 2960 */
+
+#define SCTP_CHUNK_MATCH_ANY   0x01  /* Match if any of the chunk types are present */
+#define SCTP_CHUNK_MATCH_ALL   0x02  /* Match if all of the chunk types are present */
+#define SCTP_CHUNK_MATCH_ONLY  0x04  /* Match if these are the only chunk types present */
+
+       u_int32_t chunk_match_type;
+       struct xt_sctp_flag_info flag_info[XT_NUM_SCTP_FLAGS];
+       int flag_count;
+
+       u_int32_t flags;
+       u_int32_t invflags;
+};
+
+#define bytes(type) (sizeof(type) * 8)
+
+#define SCTP_CHUNKMAP_SET(chunkmap, type)              \
+       do {                                            \
+               chunkmap[type / bytes(u_int32_t)] |=    \
+                       1 << (type % bytes(u_int32_t)); \
+       } while (0)
+
+#define SCTP_CHUNKMAP_CLEAR(chunkmap, type)                    \
+       do {                                                    \
+               chunkmap[type / bytes(u_int32_t)] &=            \
+                       ~(1 << (type % bytes(u_int32_t)));      \
+       } while (0)
+
+#define SCTP_CHUNKMAP_IS_SET(chunkmap, type)                   \
+({                                                             \
+       (chunkmap[type / bytes (u_int32_t)] &                   \
+               (1 << (type % bytes (u_int32_t)))) ? 1: 0;      \
+})
+
+#define SCTP_CHUNKMAP_RESET(chunkmap)                          \
+       do {                                                    \
+               int i;                                          \
+               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
+                       chunkmap[i] = 0;                        \
+       } while (0)
+
+#define SCTP_CHUNKMAP_SET_ALL(chunkmap)                        \
+       do {                                                    \
+               int i;                                          \
+               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
+                       chunkmap[i] = ~0;                       \
+       } while (0)
+
+#define SCTP_CHUNKMAP_COPY(destmap, srcmap)                    \
+       do {                                                    \
+               int i;                                          \
+               for (i = 0; i < ELEMCOUNT(chunkmap); i++)       \
+                       destmap[i] = srcmap[i];                 \
+       } while (0)
+
+#define SCTP_CHUNKMAP_IS_CLEAR(chunkmap)               \
+({                                                     \
+       int i;                                          \
+       int flag = 1;                                   \
+       for (i = 0; i < ELEMCOUNT(chunkmap); i++) {     \
+               if (chunkmap[i]) {                      \
+                       flag = 0;                       \
+                       break;                          \
+               }                                       \
+       }                                               \
+        flag;                                          \
+})
+
+#define SCTP_CHUNKMAP_IS_ALL_SET(chunkmap)             \
+({                                                     \
+       int i;                                          \
+       int flag = 1;                                   \
+       for (i = 0; i < ELEMCOUNT(chunkmap); i++) {     \
+               if (chunkmap[i] != ~0) {                \
+                       flag = 0;                       \
+                               break;                  \
+               }                                       \
+       }                                               \
+        flag;                                          \
+})
+
+#endif /* _XT_SCTP_H_ */
+
diff --git a/include/linux/netfilter/xt_state.h b/include/linux/netfilter/xt_state.h
new file mode 100644 (file)
index 0000000..c06f32e
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef _XT_STATE_H
+#define _XT_STATE_H
+
+#define XT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
+#define XT_STATE_INVALID (1 << 0)
+
+#define XT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
+
+struct xt_state_info
+{
+       unsigned int statemask;
+};
+#endif /*_XT_STATE_H*/
diff --git a/include/linux/netfilter/xt_string.h b/include/linux/netfilter/xt_string.h
new file mode 100644 (file)
index 0000000..3b3419f
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _XT_STRING_H
+#define _XT_STRING_H
+
+#define XT_STRING_MAX_PATTERN_SIZE 128
+#define XT_STRING_MAX_ALGO_NAME_SIZE 16
+
+struct xt_string_info
+{
+       u_int16_t from_offset;
+       u_int16_t to_offset;
+       char      algo[XT_STRING_MAX_ALGO_NAME_SIZE];
+       char      pattern[XT_STRING_MAX_PATTERN_SIZE];
+       u_int8_t  patlen;
+       u_int8_t  invert;
+       struct ts_config __attribute__((aligned(8))) *config;
+};
+
+#endif /*_XT_STRING_H*/
diff --git a/include/linux/netfilter/xt_tcpmss.h b/include/linux/netfilter/xt_tcpmss.h
new file mode 100644 (file)
index 0000000..e03274c
--- /dev/null
@@ -0,0 +1,9 @@
+#ifndef _XT_TCPMSS_MATCH_H
+#define _XT_TCPMSS_MATCH_H
+
+struct xt_tcpmss_match_info {
+    u_int16_t mss_min, mss_max;
+    u_int8_t invert;
+};
+
+#endif /*_XT_TCPMSS_MATCH_H*/
diff --git a/include/linux/netfilter/xt_tcpudp.h b/include/linux/netfilter/xt_tcpudp.h
new file mode 100644 (file)
index 0000000..78bc65f
--- /dev/null
@@ -0,0 +1,36 @@
+#ifndef _XT_TCPUDP_H
+#define _XT_TCPUDP_H
+
+/* TCP matching stuff */
+struct xt_tcp
+{
+       u_int16_t spts[2];                      /* Source port range. */
+       u_int16_t dpts[2];                      /* Destination port range. */
+       u_int8_t option;                        /* TCP Option iff non-zero*/
+       u_int8_t flg_mask;                      /* TCP flags mask byte */
+       u_int8_t flg_cmp;                       /* TCP flags compare byte */
+       u_int8_t invflags;                      /* Inverse flags */
+};
+
+/* Values for "inv" field in struct ipt_tcp. */
+#define XT_TCP_INV_SRCPT       0x01    /* Invert the sense of source ports. */
+#define XT_TCP_INV_DSTPT       0x02    /* Invert the sense of dest ports. */
+#define XT_TCP_INV_FLAGS       0x04    /* Invert the sense of TCP flags. */
+#define XT_TCP_INV_OPTION      0x08    /* Invert the sense of option test. */
+#define XT_TCP_INV_MASK                0x0F    /* All possible flags. */
+
+/* UDP matching stuff */
+struct xt_udp
+{
+       u_int16_t spts[2];                      /* Source port range. */
+       u_int16_t dpts[2];                      /* Destination port range. */
+       u_int8_t invflags;                      /* Inverse flags */
+};
+
+/* Values for "invflags" field in struct ipt_udp. */
+#define XT_UDP_INV_SRCPT       0x01    /* Invert the sense of source ports. */
+#define XT_UDP_INV_DSTPT       0x02    /* Invert the sense of dest ports. */
+#define XT_UDP_INV_MASK        0x03    /* All possible flags. */
+
+
+#endif
index e98a870a20be368e099a688dddd8e92f7dbec4f7..fd21796e513141af6f77239a9ddd9a1df06f956e 100644 (file)
 #include <linux/compiler.h>
 #include <linux/netfilter_arp.h>
 
-#define ARPT_FUNCTION_MAXNAMELEN 30
-#define ARPT_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
+#define arpt_target xt_target
+#define arpt_table xt_table
 
 #define ARPT_DEV_ADDR_LEN_MAX 16
 
@@ -91,11 +95,6 @@ struct arpt_standard_target
        int verdict;
 };
 
-struct arpt_counters
-{
-       u_int64_t pcnt, bcnt;                   /* Packet and byte counters */
-};
-
 /* Values for "flag" field in struct arpt_ip (general arp structure).
  * No flags defined yet.
  */
@@ -130,7 +129,7 @@ struct arpt_entry
        unsigned int comefrom;
 
        /* Packet and byte counters. */
-       struct arpt_counters counters;
+       struct xt_counters counters;
 
        /* The matches (if any), then the target. */
        unsigned char elems[0];
@@ -141,23 +140,24 @@ struct arpt_entry
  * Unlike BSD Linux inherits IP options so you don't have to use a raw
  * socket for this. Instead we check rights in the calls.
  */
-#define ARPT_BASE_CTL          96      /* base for firewall socket options */
+#define ARPT_CTL_OFFSET                32
+#define ARPT_BASE_CTL          (XT_BASE_CTL+ARPT_CTL_OFFSET)
 
-#define ARPT_SO_SET_REPLACE            (ARPT_BASE_CTL)
-#define ARPT_SO_SET_ADD_COUNTERS       (ARPT_BASE_CTL + 1)
-#define ARPT_SO_SET_MAX                        ARPT_SO_SET_ADD_COUNTERS
+#define ARPT_SO_SET_REPLACE            (XT_SO_SET_REPLACE+ARPT_CTL_OFFSET)
+#define ARPT_SO_SET_ADD_COUNTERS       (XT_SO_SET_ADD_COUNTERS+ARPT_CTL_OFFSET)
+#define ARPT_SO_SET_MAX                        (XT_SO_SET_MAX+ARPT_CTL_OFFSET)
 
-#define ARPT_SO_GET_INFO               (ARPT_BASE_CTL)
-#define ARPT_SO_GET_ENTRIES            (ARPT_BASE_CTL + 1)
-/* #define ARPT_SO_GET_REVISION_MATCH  (ARPT_BASE_CTL + 2)*/
-#define ARPT_SO_GET_REVISION_TARGET    (ARPT_BASE_CTL + 3)
-#define ARPT_SO_GET_MAX                        ARPT_SO_GET_REVISION_TARGET
+#define ARPT_SO_GET_INFO               (XT_SO_GET_INFO+ARPT_CTL_OFFSET)
+#define ARPT_SO_GET_ENTRIES            (XT_SO_GET_ENTRIES+ARPT_CTL_OFFSET)
+/* #define ARPT_SO_GET_REVISION_MATCH  XT_SO_GET_REVISION_MATCH  */
+#define ARPT_SO_GET_REVISION_TARGET    (XT_SO_GET_REVISION_TARGET+ARPT_CTL_OFFSET)
+#define ARPT_SO_GET_MAX                        (XT_SO_GET_REVISION_TARGET+ARPT_CTL_OFFSET)
 
 /* CONTINUE verdict for targets */
-#define ARPT_CONTINUE 0xFFFFFFFF
+#define ARPT_CONTINUE XT_CONTINUE
 
 /* For standard target */
-#define ARPT_RETURN (-NF_REPEAT - 1)
+#define ARPT_RETURN XT_RETURN
 
 /* The argument to ARPT_SO_GET_INFO */
 struct arpt_getinfo
@@ -208,23 +208,14 @@ struct arpt_replace
        /* Number of counters (must be equal to current number of entries). */
        unsigned int num_counters;
        /* The old entries' counters. */
-       struct arpt_counters __user *counters;
+       struct xt_counters __user *counters;
 
        /* The entries (hang off end: not really an array). */
        struct arpt_entry entries[0];
 };
 
 /* The argument to ARPT_SO_ADD_COUNTERS. */
-struct arpt_counters_info
-{
-       /* Which table. */
-       char name[ARPT_TABLE_MAXNAMELEN];
-
-       unsigned int num_counters;
-
-       /* The counters (actually `number' of these). */
-       struct arpt_counters counters[0];
-};
+#define arpt_counters_info xt_counters_info
 
 /* The argument to ARPT_SO_GET_ENTRIES. */
 struct arpt_get_entries
@@ -239,19 +230,10 @@ struct arpt_get_entries
        struct arpt_entry entrytable[0];
 };
 
-/* The argument to ARPT_SO_GET_REVISION_*.  Returns highest revision
- * kernel supports, if >= revision. */
-struct arpt_get_revision
-{
-       char name[ARPT_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-};
-
 /* Standard return verdict, or do jump. */
-#define ARPT_STANDARD_TARGET ""
+#define ARPT_STANDARD_TARGET XT_STANDARD_TARGET
 /* Error verdict. */
-#define ARPT_ERROR_TARGET "ERROR"
+#define ARPT_ERROR_TARGET XT_ERROR_TARGET
 
 /* Helper functions */
 static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e)
@@ -281,63 +263,8 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e
  */
 #ifdef __KERNEL__
 
-/* Registration hooks for targets. */
-struct arpt_target
-{
-       struct list_head list;
-
-       const char name[ARPT_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-
-       /* Returns verdict. */
-       unsigned int (*target)(struct sk_buff **pskb,
-                              unsigned int hooknum,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              const void *targinfo,
-                              void *userdata);
-
-       /* Called when user tries to insert an entry of this type:
-           hook_mask is a bitmask of hooks from which it can be
-           called. */
-       /* Should return true or false. */
-       int (*checkentry)(const char *tablename,
-                         const struct arpt_entry *e,
-                         void *targinfo,
-                         unsigned int targinfosize,
-                         unsigned int hook_mask);
-
-       /* Called when entry of this type deleted. */
-       void (*destroy)(void *targinfo, unsigned int targinfosize);
-
-       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-       struct module *me;
-};
-
-extern int arpt_register_target(struct arpt_target *target);
-extern void arpt_unregister_target(struct arpt_target *target);
-
-/* Furniture shopping... */
-struct arpt_table
-{
-       struct list_head list;
-
-       /* A unique name... */
-       char name[ARPT_TABLE_MAXNAMELEN];
-
-       /* What hooks you will enter on */
-       unsigned int valid_hooks;
-
-       /* Lock for the curtain */
-       rwlock_t lock;
-
-       /* Man behind the curtain... */
-       struct arpt_table_info *private;
-
-       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-       struct module *me;
-};
+#define arpt_register_target(tgt) xt_register_target(NF_ARP, tgt)
+#define arpt_unregister_target(tgt) xt_unregister_target(NF_ARP, tgt)
 
 extern int arpt_register_table(struct arpt_table *table,
                               const struct arpt_replace *repl);
index b3432ab59a175d23304f7dfb5739197b0168f739..215765f043e6cc23503108c9e016ae0e0058b8fe 100644 (file)
@@ -199,9 +199,6 @@ ip_conntrack_put(struct ip_conntrack *ct)
        nf_conntrack_put(&ct->ct_general);
 }
 
-/* call to create an explicit dependency on ip_conntrack. */
-extern void need_ip_conntrack(void);
-
 extern int invert_tuplepr(struct ip_conntrack_tuple *inverse,
                          const struct ip_conntrack_tuple *orig);
 
index d19d65cf453046ba28724c80f7cea09d2fda4dc0..76ba24b68515db87fb7e05ba635bb30926f4f915 100644 (file)
 #include <linux/compiler.h>
 #include <linux/netfilter_ipv4.h>
 
-#define IPT_FUNCTION_MAXNAMELEN 30
-#define IPT_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define IPT_TABLE_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define ipt_match xt_match
+#define ipt_target xt_target
+#define ipt_table xt_table
+#define ipt_get_revision xt_get_revision
 
 /* Yes, Virginia, you have to zero the padding. */
 struct ipt_ip {
@@ -102,10 +108,7 @@ struct ipt_standard_target
        int verdict;
 };
 
-struct ipt_counters
-{
-       u_int64_t pcnt, bcnt;                   /* Packet and byte counters */
-};
+#define ipt_counters xt_counters
 
 /* Values for "flag" field in struct ipt_ip (general ip structure). */
 #define IPT_F_FRAG             0x01    /* Set if rule is a fragment rule */
@@ -119,7 +122,7 @@ struct ipt_counters
 #define IPT_INV_SRCIP          0x08    /* Invert the sense of SRC IP. */
 #define IPT_INV_DSTIP          0x10    /* Invert the sense of DST OP. */
 #define IPT_INV_FRAG           0x20    /* Invert the sense of FRAG. */
-#define IPT_INV_PROTO          0x40    /* Invert the sense of PROTO. */
+#define IPT_INV_PROTO          XT_INV_PROTO
 #define IPT_INV_MASK           0x7F    /* All possible flag bits mask. */
 
 /* This structure defines each of the firewall rules.  Consists of 3
@@ -141,7 +144,7 @@ struct ipt_entry
        unsigned int comefrom;
 
        /* Packet and byte counters. */
-       struct ipt_counters counters;
+       struct xt_counters counters;
 
        /* The matches (if any), then the target. */
        unsigned char elems[0];
@@ -151,54 +154,34 @@ struct ipt_entry
  * New IP firewall options for [gs]etsockopt at the RAW IP level.
  * Unlike BSD Linux inherits IP options so you don't have to use a raw
  * socket for this. Instead we check rights in the calls. */
-#define IPT_BASE_CTL           64      /* base for firewall socket options */
+#define IPT_BASE_CTL           XT_BASE_CTL
 
-#define IPT_SO_SET_REPLACE     (IPT_BASE_CTL)
-#define IPT_SO_SET_ADD_COUNTERS        (IPT_BASE_CTL + 1)
-#define IPT_SO_SET_MAX         IPT_SO_SET_ADD_COUNTERS
+#define IPT_SO_SET_REPLACE     XT_SO_SET_REPLACE
+#define IPT_SO_SET_ADD_COUNTERS        XT_SO_SET_ADD_COUNTERS
+#define IPT_SO_SET_MAX         XT_SO_SET_MAX
 
-#define IPT_SO_GET_INFO                        (IPT_BASE_CTL)
-#define IPT_SO_GET_ENTRIES             (IPT_BASE_CTL + 1)
-#define IPT_SO_GET_REVISION_MATCH      (IPT_BASE_CTL + 2)
-#define IPT_SO_GET_REVISION_TARGET     (IPT_BASE_CTL + 3)
-#define IPT_SO_GET_MAX                 IPT_SO_GET_REVISION_TARGET
+#define IPT_SO_GET_INFO                        XT_SO_GET_INFO
+#define IPT_SO_GET_ENTRIES             XT_SO_GET_ENTRIES
+#define IPT_SO_GET_REVISION_MATCH      XT_SO_GET_REVISION_MATCH
+#define IPT_SO_GET_REVISION_TARGET     XT_SO_GET_REVISION_TARGET
+#define IPT_SO_GET_MAX                 XT_SO_GET_REVISION_TARGET
 
-/* CONTINUE verdict for targets */
-#define IPT_CONTINUE 0xFFFFFFFF
+#define IPT_CONTINUE XT_CONTINUE
+#define IPT_RETURN XT_RETURN
 
-/* For standard target */
-#define IPT_RETURN (-NF_REPEAT - 1)
+#include <linux/netfilter/xt_tcpudp.h>
+#define ipt_udp xt_udp
+#define ipt_tcp xt_tcp
 
-/* TCP matching stuff */
-struct ipt_tcp
-{
-       u_int16_t spts[2];                      /* Source port range. */
-       u_int16_t dpts[2];                      /* Destination port range. */
-       u_int8_t option;                        /* TCP Option iff non-zero*/
-       u_int8_t flg_mask;                      /* TCP flags mask byte */
-       u_int8_t flg_cmp;                       /* TCP flags compare byte */
-       u_int8_t invflags;                      /* Inverse flags */
-};
-
-/* Values for "inv" field in struct ipt_tcp. */
-#define IPT_TCP_INV_SRCPT      0x01    /* Invert the sense of source ports. */
-#define IPT_TCP_INV_DSTPT      0x02    /* Invert the sense of dest ports. */
-#define IPT_TCP_INV_FLAGS      0x04    /* Invert the sense of TCP flags. */
-#define IPT_TCP_INV_OPTION     0x08    /* Invert the sense of option test. */
-#define IPT_TCP_INV_MASK       0x0F    /* All possible flags. */
-
-/* UDP matching stuff */
-struct ipt_udp
-{
-       u_int16_t spts[2];                      /* Source port range. */
-       u_int16_t dpts[2];                      /* Destination port range. */
-       u_int8_t invflags;                      /* Inverse flags */
-};
+#define IPT_TCP_INV_SRCPT      XT_TCP_INV_SRCPT
+#define IPT_TCP_INV_DSTPT      XT_TCP_INV_DSTPT
+#define IPT_TCP_INV_FLAGS      XT_TCP_INV_FLAGS
+#define IPT_TCP_INV_OPTION     XT_TCP_INV_OPTION
+#define IPT_TCP_INV_MASK       XT_TCP_INV_MASK
 
-/* Values for "invflags" field in struct ipt_udp. */
-#define IPT_UDP_INV_SRCPT      0x01    /* Invert the sense of source ports. */
-#define IPT_UDP_INV_DSTPT      0x02    /* Invert the sense of dest ports. */
-#define IPT_UDP_INV_MASK       0x03    /* All possible flags. */
+#define IPT_UDP_INV_SRCPT      XT_UDP_INV_SRCPT
+#define IPT_UDP_INV_DSTPT      XT_UDP_INV_DSTPT
+#define IPT_UDP_INV_MASK       XT_UDP_INV_MASK
 
 /* ICMP matching stuff */
 struct ipt_icmp
@@ -260,23 +243,14 @@ struct ipt_replace
        /* Number of counters (must be equal to current number of entries). */
        unsigned int num_counters;
        /* The old entries' counters. */
-       struct ipt_counters __user *counters;
+       struct xt_counters __user *counters;
 
        /* The entries (hang off end: not really an array). */
        struct ipt_entry entries[0];
 };
 
 /* The argument to IPT_SO_ADD_COUNTERS. */
-struct ipt_counters_info
-{
-       /* Which table. */
-       char name[IPT_TABLE_MAXNAMELEN];
-
-       unsigned int num_counters;
-
-       /* The counters (actually `number' of these). */
-       struct ipt_counters counters[0];
-};
+#define ipt_counters_info xt_counters_info
 
 /* The argument to IPT_SO_GET_ENTRIES. */
 struct ipt_get_entries
@@ -291,19 +265,10 @@ struct ipt_get_entries
        struct ipt_entry entrytable[0];
 };
 
-/* The argument to IPT_SO_GET_REVISION_*.  Returns highest revision
- * kernel supports, if >= revision. */
-struct ipt_get_revision
-{
-       char name[IPT_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-};
-
 /* Standard return verdict, or do jump. */
-#define IPT_STANDARD_TARGET ""
+#define IPT_STANDARD_TARGET XT_STANDARD_TARGET
 /* Error verdict. */
-#define IPT_ERROR_TARGET "ERROR"
+#define IPT_ERROR_TARGET XT_ERROR_TARGET
 
 /* Helper functions */
 static __inline__ struct ipt_entry_target *
@@ -356,103 +321,18 @@ ipt_get_target(struct ipt_entry *e)
 #include <linux/init.h>
 extern void ipt_init(void) __init;
 
-struct ipt_match
-{
-       struct list_head list;
-
-       const char name[IPT_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-
-       /* Return true or false: return FALSE and set *hotdrop = 1 to
-           force immediate packet drop. */
-       /* Arguments changed since 2.4, as this must now handle
-           non-linear skbs, using skb_copy_bits and
-           skb_ip_make_writable. */
-       int (*match)(const struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    const void *matchinfo,
-                    int offset,
-                    int *hotdrop);
-
-       /* Called when user tries to insert an entry of this type. */
-       /* Should return true or false. */
-       int (*checkentry)(const char *tablename,
-                         const struct ipt_ip *ip,
-                         void *matchinfo,
-                         unsigned int matchinfosize,
-                         unsigned int hook_mask);
-
-       /* Called when entry of this type deleted. */
-       void (*destroy)(void *matchinfo, unsigned int matchinfosize);
-
-       /* Set this to THIS_MODULE. */
-       struct module *me;
-};
-
-/* Registration hooks for targets. */
-struct ipt_target
-{
-       struct list_head list;
-
-       const char name[IPT_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-
-       /* Called when user tries to insert an entry of this type:
-           hook_mask is a bitmask of hooks from which it can be
-           called. */
-       /* Should return true or false. */
-       int (*checkentry)(const char *tablename,
-                         const struct ipt_entry *e,
-                         void *targinfo,
-                         unsigned int targinfosize,
-                         unsigned int hook_mask);
-
-       /* Called when entry of this type deleted. */
-       void (*destroy)(void *targinfo, unsigned int targinfosize);
-
-       /* Returns verdict.  Argument order changed since 2.4, as this
-           must now handle non-linear skbs, using skb_copy_bits and
-           skb_ip_make_writable. */
-       unsigned int (*target)(struct sk_buff **pskb,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              unsigned int hooknum,
-                              const void *targinfo,
-                              void *userdata);
-
-       /* Set this to THIS_MODULE. */
-       struct module *me;
-};
+#define ipt_register_target(tgt) xt_register_target(AF_INET, tgt)
+#define ipt_unregister_target(tgt) xt_unregister_target(AF_INET, tgt)
 
-extern int ipt_register_target(struct ipt_target *target);
-extern void ipt_unregister_target(struct ipt_target *target);
+#define ipt_register_match(mtch) xt_register_match(AF_INET, mtch)
+#define ipt_unregister_match(mtch) xt_unregister_match(AF_INET, mtch)
 
-extern int ipt_register_match(struct ipt_match *match);
-extern void ipt_unregister_match(struct ipt_match *match);
+//#define ipt_register_table(tbl, repl) xt_register_table(AF_INET, tbl, repl)
+//#define ipt_unregister_table(tbl) xt_unregister_table(AF_INET, tbl)
 
-/* Furniture shopping... */
-struct ipt_table
-{
-       struct list_head list;
-
-       /* A unique name... */
-       char name[IPT_TABLE_MAXNAMELEN];
-
-       /* What hooks you will enter on */
-       unsigned int valid_hooks;
-
-       /* Lock for the curtain */
-       rwlock_t lock;
-
-       /* Man behind the curtain... */
-       struct ipt_table_info *private;
-
-       /* Set to THIS_MODULE. */
-       struct module *me;
-};
+extern int ipt_register_table(struct ipt_table *table,
+                             const struct ipt_replace *repl);
+extern void ipt_unregister_table(struct ipt_table *table);
 
 /* net/sched/ipt.c: Gimme access to your targets!  Gets target->me. */
 extern struct ipt_target *ipt_find_target(const char *name, u8 revision);
@@ -476,9 +356,6 @@ struct ipt_error
        struct ipt_error_target target;
 };
 
-extern int ipt_register_table(struct ipt_table *table,
-                             const struct ipt_replace *repl);
-extern void ipt_unregister_table(struct ipt_table *table);
 extern unsigned int ipt_do_table(struct sk_buff **pskb,
                                 unsigned int hook,
                                 const struct net_device *in,
@@ -486,6 +363,6 @@ extern unsigned int ipt_do_table(struct sk_buff **pskb,
                                 struct ipt_table *table,
                                 void *userdata);
 
-#define IPT_ALIGN(s) (((s) + (__alignof__(struct ipt_entry)-1)) & ~(__alignof__(struct ipt_entry)-1))
+#define IPT_ALIGN(s) XT_ALIGN(s)
 #endif /*__KERNEL__*/
 #endif /* _IPTABLES_H */
index 7596e3dd00cad645966f7cb289cc8e3bc5d41f01..a46d511b5c363f12e5b7f90a2450377900b19015 100644 (file)
@@ -1,8 +1,7 @@
 #ifndef _IPT_CLASSIFY_H
 #define _IPT_CLASSIFY_H
 
-struct ipt_classify_target_info {
-       u_int32_t priority;
-};
+#include <linux/netfilter/xt_CLASSIFY.h>
+#define ipt_classify_target_info xt_classify_target_info
 
 #endif /*_IPT_CLASSIFY_H */
index d3c02536fc4c471063f56930673cc24a94ede8cb..9ecfee0a9e33c487d26fb9debcee984086cb9e7d 100644 (file)
@@ -9,17 +9,11 @@
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  */
+#include <linux/netfilter/xt_CONNMARK.h>
+#define IPT_CONNMARK_SET       XT_CONNMARK_SET
+#define IPT_CONNMARK_SAVE      XT_CONNMARK_SAVE
+#define        IPT_CONNMARK_RESTORE    XT_CONNMARK_RESTORE
 
-enum {
-       IPT_CONNMARK_SET = 0,
-       IPT_CONNMARK_SAVE,
-       IPT_CONNMARK_RESTORE
-};
-
-struct ipt_connmark_target_info {
-       unsigned long mark;
-       unsigned long mask;
-       u_int8_t mode;
-};
+#define ipt_connmark_target_info xt_connmark_target_info
 
 #endif /*_IPT_CONNMARK_H_target*/
index f47485790ed440ea2a240500cb4874b712a2a0db..697a486a96d337aa2bdaa471b97c871ffdcff819 100644 (file)
@@ -1,20 +1,18 @@
 #ifndef _IPT_MARK_H_target
 #define _IPT_MARK_H_target
 
+/* Backwards compatibility for old userspace */
+
+#include <linux/netfilter/xt_MARK.h>
+
 /* Version 0 */
-struct ipt_mark_target_info {
-       unsigned long mark;
-};
+#define ipt_mark_target_info xt_mark_target_info
 
 /* Version 1 */
-enum {
-       IPT_MARK_SET=0,
-       IPT_MARK_AND,
-       IPT_MARK_OR
-};
+#define IPT_MARK_SET   XT_MARK_SET
+#define IPT_MARK_AND   XT_MARK_AND
+#define        IPT_MARK_OR     XT_MARK_OR
+
+#define ipt_mark_target_info_v1 xt_mark_target_info_v1
 
-struct ipt_mark_target_info_v1 {
-       unsigned long mark;
-       u_int8_t mode;
-};
 #endif /*_IPT_MARK_H_target*/
index b5b2943b0c6646b61dec2b12b49208cda310c6b0..97a2a7557cb908cbc4275a4205ef3898de58b6be 100644 (file)
@@ -8,9 +8,9 @@
 #ifndef _IPT_NFQ_TARGET_H
 #define _IPT_NFQ_TARGET_H
 
-/* target info */
-struct ipt_NFQ_info {
-       u_int16_t queuenum;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_NFQUEUE.h>
+
+#define ipt_NFQ_info xt_NFQ_info
 
 #endif /* _IPT_DSCP_TARGET_H */
index 85c1123c29ce1e026fc74961454ce1a5e0497fe9..ae2afc2f74810123f02e1f629307d5beb5e2cddf 100644 (file)
@@ -1,10 +1,10 @@
 #ifndef _IPT_COMMENT_H
 #define _IPT_COMMENT_H
 
-#define IPT_MAX_COMMENT_LEN 256
+#include <linux/netfilter/xt_comment.h>
 
-struct ipt_comment_info {
-       unsigned char comment[IPT_MAX_COMMENT_LEN];
-};
+#define IPT_MAX_COMMENT_LEN XT_MAX_COMMENT_LEN
+
+#define ipt_comment_info xt_comment_info
 
 #endif /* _IPT_COMMENT_H */
index 9e5532f8d8ac8c3595d33089e08dc6d18438a393..b04dfa3083c9527d59ceec1e6fe095ed63ebecde 100644 (file)
@@ -1,25 +1,18 @@
 #ifndef _IPT_CONNBYTES_H
 #define _IPT_CONNBYTES_H
 
-enum ipt_connbytes_what {
-       IPT_CONNBYTES_PKTS,
-       IPT_CONNBYTES_BYTES,
-       IPT_CONNBYTES_AVGPKT,
-};
+#include <net/netfilter/xt_connbytes.h>
+#define ipt_connbytes_what xt_connbytes_what
 
-enum ipt_connbytes_direction {
-       IPT_CONNBYTES_DIR_ORIGINAL,
-       IPT_CONNBYTES_DIR_REPLY,
-       IPT_CONNBYTES_DIR_BOTH,
-};
+#define IPT_CONNBYTES_PKTS     XT_CONNBYTES_PACKETS
+#define IPT_CONNBYTES_BYTES    XT_CONNBYTES_BYTES
+#define IPT_CONNBYTES_AVGPKT   XT_CONNBYTES_AVGPKT
+
+#define ipt_connbytes_direction        xt_connbytes_direction
+#define IPT_CONNBYTES_DIR_ORIGINAL     XT_CONNBYTES_DIR_ORIGINAL
+#define IPT_CONNBYTES_DIR_REPLY        XT_CONNBYTES_DIR_REPLY
+#define IPT_CONNBYTES_DIR_BOTH         XT_CONNBYTES_DIR_BOTH
+
+#define ipt_connbytes_info xt_connbytes_info
 
-struct ipt_connbytes_info
-{
-       struct {
-               aligned_u64 from;       /* count to be matched */
-               aligned_u64 to;         /* count to be matched */
-       } count;
-       u_int8_t what;          /* ipt_connbytes_what */
-       u_int8_t direction;     /* ipt_connbytes_direction */
-};
 #endif
index 46573270d9aa23781d0d2dd4a2bbf3688149b24b..c7ba6560d44c5d196322de703502c4dc71852730 100644 (file)
@@ -1,18 +1,7 @@
 #ifndef _IPT_CONNMARK_H
 #define _IPT_CONNMARK_H
 
-/* Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-struct ipt_connmark_info {
-       unsigned long mark, mask;
-       u_int8_t invert;
-};
+#include <linux/netfilter/xt_connmark.h>
+#define ipt_connmark_info xt_connmark_info
 
 #endif /*_IPT_CONNMARK_H*/
index 413c5658bd3adc7b2041107abc53b4b9f1ad48b6..cde6762949c5b4d4462cde281fc71325db3a9c57 100644 (file)
@@ -5,56 +5,24 @@
 #ifndef _IPT_CONNTRACK_H
 #define _IPT_CONNTRACK_H
 
-#define IPT_CONNTRACK_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
-#define IPT_CONNTRACK_STATE_INVALID (1 << 0)
+#include <linux/netfilter/xt_conntrack.h>
 
-#define IPT_CONNTRACK_STATE_SNAT (1 << (IP_CT_NUMBER + 1))
-#define IPT_CONNTRACK_STATE_DNAT (1 << (IP_CT_NUMBER + 2))
-#define IPT_CONNTRACK_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 3))
+#define IPT_CONNTRACK_STATE_BIT(ctinfo) XT_CONNTRACK_STATE_BIT(ctinfo)
+#define IPT_CONNTRACK_STATE_INVALID    XT_CONNTRACK_STATE_INVALID
 
-/* flags, invflags: */
-#define IPT_CONNTRACK_STATE    0x01
-#define IPT_CONNTRACK_PROTO    0x02
-#define IPT_CONNTRACK_ORIGSRC  0x04
-#define IPT_CONNTRACK_ORIGDST  0x08
-#define IPT_CONNTRACK_REPLSRC  0x10
-#define IPT_CONNTRACK_REPLDST  0x20
-#define IPT_CONNTRACK_STATUS   0x40
-#define IPT_CONNTRACK_EXPIRES  0x80
-
-/* This is exposed to userspace, so remains frozen in time. */
-struct ip_conntrack_old_tuple
-{
-       struct {
-               __u32 ip;
-               union {
-                       __u16 all;
-               } u;
-       } src;
-
-       struct {
-               __u32 ip;
-               union {
-                       __u16 all;
-               } u;
-
-               /* The protocol. */
-               u16 protonum;
-       } dst;
-};
+#define IPT_CONNTRACK_STATE_SNAT       XT_CONNTRACK_STATE_SNAT
+#define IPT_CONNTRACK_STATE_DNAT       XT_CONNTRACK_STATE_DNAT
+#define IPT_CONNTRACK_STATE_UNTRACKED  XT_CONNTRACK_STATE_UNTRACKED
 
-struct ipt_conntrack_info
-{
-       unsigned int statemask, statusmask;
-
-       struct ip_conntrack_old_tuple tuple[IP_CT_DIR_MAX];
-       struct in_addr sipmsk[IP_CT_DIR_MAX], dipmsk[IP_CT_DIR_MAX];
-
-       unsigned long expires_min, expires_max;
-
-       /* Flags word */
-       u_int8_t flags;
-       /* Inverse flags */
-       u_int8_t invflags;
-};
+/* flags, invflags: */
+#define IPT_CONNTRACK_STATE            XT_CONNTRACK_STATE
+#define IPT_CONNTRACK_PROTO            XT_CONNTRACK_PROTO
+#define IPT_CONNTRACK_ORIGSRC          XT_CONNTRACK_ORIGSRC
+#define IPT_CONNTRACK_ORIGDST          XT_CONNTRACK_ORIGDST
+#define IPT_CONNTRACK_REPLSRC          XT_CONNTRACK_REPLSRC
+#define IPT_CONNTRACK_REPLDST          XT_CONNTRACK_REPLDST
+#define IPT_CONNTRACK_STATUS           XT_CONNTRACK_STATUS
+#define IPT_CONNTRACK_EXPIRES          XT_CONNTRACK_EXPIRES
+
+#define ipt_conntrack_info             xt_conntrack_info
 #endif /*_IPT_CONNTRACK_H*/
index 3cb3a522e62b78cd32ecbcaee5a6a65816ae1822..e70d11e1f53cf8018805f7ecd5d12e12a22989e8 100644 (file)
@@ -1,23 +1,15 @@
 #ifndef _IPT_DCCP_H_
 #define _IPT_DCCP_H_
 
-#define IPT_DCCP_SRC_PORTS             0x01
-#define IPT_DCCP_DEST_PORTS            0x02
-#define IPT_DCCP_TYPE                  0x04
-#define IPT_DCCP_OPTION                        0x08
+#include <linux/netfilter/xt_dccp.h>
+#define IPT_DCCP_SRC_PORTS     XT_DCCP_SRC_PORTS
+#define IPT_DCCP_DEST_PORTS    XT_DCCP_DEST_PORTS
+#define IPT_DCCP_TYPE          XT_DCCP_TYPE
+#define IPT_DCCP_OPTION                XT_DCCP_OPTION
 
-#define IPT_DCCP_VALID_FLAGS           0x0f
+#define IPT_DCCP_VALID_FLAGS   XT_DCCP_VALID_FLAGS
 
-struct ipt_dccp_info {
-       u_int16_t dpts[2];  /* Min, Max */
-       u_int16_t spts[2];  /* Min, Max */
-
-       u_int16_t flags;
-       u_int16_t invflags;
-
-       u_int16_t typemask;
-       u_int8_t option;
-};
+#define ipt_dccp_info xt_dccp_info
 
 #endif /* _IPT_DCCP_H_ */
 
index 6f12ecb8c93df8d48dc9907b0ab5688f1a4f2a37..80452c218551f172dc61c77e2ba0eea588aa3cd3 100644 (file)
@@ -1,8 +1,7 @@
 #ifndef _IPT_HELPER_H
 #define _IPT_HELPER_H
 
-struct ipt_helper_info {
-       int invert;
-       char name[30];
-};
+#include <linux/netfilter/xt_helper.h>
+#define ipt_helper_info xt_helper_info
+
 #endif /* _IPT_HELPER_H */
index 6e0885229615c22c7e85963315462e5d5c33dff2..9b45206ffcef75610e5804a91ba5e4deedad4f08 100644 (file)
@@ -1,9 +1,7 @@
 #ifndef _IPT_LENGTH_H
 #define _IPT_LENGTH_H
 
-struct ipt_length_info {
-    u_int16_t  min, max;
-    u_int8_t   invert;
-};
+#include <linux/netfilter/xt_length.h>
+#define ipt_length_info xt_length_info
 
 #endif /*_IPT_LENGTH_H*/
index 256453409e21f87b26a623fb18a3fb05b45d781f..92f5cd07bbc40d45948fed145029a3a0ded7b3a2 100644 (file)
@@ -1,21 +1,8 @@
 #ifndef _IPT_RATE_H
 #define _IPT_RATE_H
 
-/* timings are in milliseconds. */
-#define IPT_LIMIT_SCALE 10000
+#include <linux/netfilter/xt_limit.h>
+#define IPT_LIMIT_SCALE XT_LIMIT_SCALE
+#define ipt_rateinfo xt_rateinfo
 
-/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
-   seconds, or one every 59 hours. */
-struct ipt_rateinfo {
-       u_int32_t avg;    /* Average secs between packets * scale */
-       u_int32_t burst;  /* Period multiplier for upper limit. */
-
-       /* Used internally by the kernel */
-       unsigned long prev;
-       u_int32_t credit;
-       u_int32_t credit_cap, cost;
-
-       /* Ugly, ugly fucker. */
-       struct ipt_rateinfo *master;
-};
 #endif /*_IPT_RATE_H*/
index f8d5b8e7ccdb69a6d8dad214fab1761c94735f2a..b186008a3c477df76921b6c92bff305d439dec54 100644 (file)
@@ -1,8 +1,7 @@
 #ifndef _IPT_MAC_H
 #define _IPT_MAC_H
 
-struct ipt_mac_info {
-    unsigned char srcaddr[ETH_ALEN];
-    int invert;
-};
+#include <linux/netfilter/xt_mac.h>
+#define ipt_mac_info xt_mac_info
+
 #endif /*_IPT_MAC_H*/
index f3952b563d4cbceb53f2da2c8192cc9e433943f6..bfde67c6122484bf9126b6a0e9997accaa19ecda 100644 (file)
@@ -1,9 +1,9 @@
 #ifndef _IPT_MARK_H
 #define _IPT_MARK_H
 
-struct ipt_mark_info {
-    unsigned long mark, mask;
-    u_int8_t invert;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_mark.h>
+
+#define ipt_mark_info xt_mark_info
 
 #endif /*_IPT_MARK_H*/
index 7538c8655ec046cdd99fd417fdf3d9d31c84990b..2400e7140f26e7afb669a7b67473bb84c4c1e2ac 100644 (file)
@@ -1,24 +1,17 @@
 #ifndef _IPT_PHYSDEV_H
 #define _IPT_PHYSDEV_H
 
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
+/* Backwards compatibility for old userspace */
 
-#define IPT_PHYSDEV_OP_IN              0x01
-#define IPT_PHYSDEV_OP_OUT             0x02
-#define IPT_PHYSDEV_OP_BRIDGED         0x04
-#define IPT_PHYSDEV_OP_ISIN            0x08
-#define IPT_PHYSDEV_OP_ISOUT           0x10
-#define IPT_PHYSDEV_OP_MASK            (0x20 - 1)
+#include <linux/netfilter/xt_physdev.h>
 
-struct ipt_physdev_info {
-       char physindev[IFNAMSIZ];
-       char in_mask[IFNAMSIZ];
-       char physoutdev[IFNAMSIZ];
-       char out_mask[IFNAMSIZ];
-       u_int8_t invert;
-       u_int8_t bitmask;
-};
+#define IPT_PHYSDEV_OP_IN              XT_PHYSDEV_OP_IN
+#define IPT_PHYSDEV_OP_OUT             XT_PHYSDEV_OP_OUT
+#define IPT_PHYSDEV_OP_BRIDGED         XT_PHYSDEV_OP_BRIDGED
+#define IPT_PHYSDEV_OP_ISIN            XT_PHYSDEV_OP_ISIN
+#define IPT_PHYSDEV_OP_ISOUT           XT_PHYSDEV_OP_ISOUT
+#define IPT_PHYSDEV_OP_MASK            XT_PHYSDEV_OP_MASK
+
+#define ipt_physdev_info xt_physdev_info
 
 #endif /*_IPT_PHYSDEV_H*/
index d53a65848683ccf97f214a889d77d839ac87f808..ff1fbc949a0c2e7c9ab0efa81b95188f208e7568 100644 (file)
@@ -1,8 +1,7 @@
 #ifndef _IPT_PKTTYPE_H
 #define _IPT_PKTTYPE_H
 
-struct ipt_pkttype_info {
-       int     pkttype;
-       int     invert;
-};
+#include <linux/netfilter/xt_pkttype.h>
+#define ipt_pkttype_info xt_pkttype_info
+
 #endif /*_IPT_PKTTYPE_H*/
index a4d6698723acf280c77cc362ab0d8f9396d5433b..b3996eaa0188aa1352e13699256e92ffa6f32edf 100644 (file)
@@ -1,10 +1,7 @@
 #ifndef _IPT_REALM_H
 #define _IPT_REALM_H
 
-struct ipt_realm_info {
-       u_int32_t id;
-       u_int32_t mask;
-       u_int8_t invert;
-};
+#include <linux/netfilter/xt_realm.h>
+#define ipt_realm_info xt_realm_info
 
 #endif /* _IPT_REALM_H */
index 5df37868933d9d91795dff8728f0b7a9c6000d81..a44a99cc28ccd2434d42d5b916bcf8d518a01aca 100644 (file)
@@ -1,13 +1,15 @@
 #ifndef _IPT_STATE_H
 #define _IPT_STATE_H
 
-#define IPT_STATE_BIT(ctinfo) (1 << ((ctinfo)%IP_CT_IS_REPLY+1))
-#define IPT_STATE_INVALID (1 << 0)
+/* Backwards compatibility for old userspace */
 
-#define IPT_STATE_UNTRACKED (1 << (IP_CT_NUMBER + 1))
+#include <linux/netfilter/xt_state.h>
+
+#define IPT_STATE_BIT          XT_STATE_BIT
+#define IPT_STATE_INVALID      XT_STATE_INVALID
+
+#define IPT_STATE_UNTRACKED    XT_STATE_UNTRACKED
+
+#define ipt_state_info         xt_state_info
 
-struct ipt_state_info
-{
-       unsigned int statemask;
-};
 #endif /*_IPT_STATE_H*/
index a265f6e44eabc0f54d27a8062b9803d5ff21a2c1..c26de3059903eeb21a91de18c067db084de67a68 100644 (file)
@@ -1,18 +1,10 @@
 #ifndef _IPT_STRING_H
 #define _IPT_STRING_H
 
-#define IPT_STRING_MAX_PATTERN_SIZE 128
-#define IPT_STRING_MAX_ALGO_NAME_SIZE 16
+#include <linux/netfilter/xt_string.h>
 
-struct ipt_string_info
-{
-       u_int16_t from_offset;
-       u_int16_t to_offset;
-       char      algo[IPT_STRING_MAX_ALGO_NAME_SIZE];
-       char      pattern[IPT_STRING_MAX_PATTERN_SIZE];
-       u_int8_t  patlen;
-       u_int8_t  invert;
-       struct ts_config __attribute__((aligned(8))) *config;
-};
+#define IPT_STRING_MAX_PATTERN_SIZE XT_STRING_MAX_PATTERN_SIZE
+#define IPT_STRING_MAX_ALGO_NAME_SIZE XT_STRING_MAX_ALGO_NAME_SIZE
+#define ipt_string_info xt_string_info
 
 #endif /*_IPT_STRING_H*/
index e2b14397f701b72e858ea942607a3c19176e5eb0..18bbc8e8e00937b67df04d661791fb5a58094541 100644 (file)
@@ -1,9 +1,7 @@
 #ifndef _IPT_TCPMSS_MATCH_H
 #define _IPT_TCPMSS_MATCH_H
 
-struct ipt_tcpmss_match_info {
-    u_int16_t mss_min, mss_max;
-    u_int8_t invert;
-};
+#include <linux/netfilter/xt_tcpmss.h>
+#define ipt_tcpmss_match_info xt_tcpmss_match_info
 
 #endif /*_IPT_TCPMSS_MATCH_H*/
index c163ba31aab7bc82745ab8d27f54ff7932ea4f21..f249b574f0fa0e5ef8cb7fb2ff4cc1fcd7c2278b 100644 (file)
 #include <linux/compiler.h>
 #include <linux/netfilter_ipv6.h>
 
-#define IP6T_FUNCTION_MAXNAMELEN 30
-#define IP6T_TABLE_MAXNAMELEN 32
+#include <linux/netfilter/x_tables.h>
+
+#define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN
+#define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN
+
+#define ip6t_match xt_match
+#define ip6t_target xt_target
+#define ip6t_table xt_table
+#define ip6t_get_revision xt_get_revision
 
 /* Yes, Virginia, you have to zero the padding. */
 struct ip6t_ip6 {
@@ -104,10 +111,7 @@ struct ip6t_standard_target
        int verdict;
 };
 
-struct ip6t_counters
-{
-       u_int64_t pcnt, bcnt;                   /* Packet and byte counters */
-};
+#define ip6t_counters  xt_counters
 
 /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */
 #define IP6T_F_PROTO           0x01    /* Set if rule cares about upper 
@@ -123,7 +127,7 @@ struct ip6t_counters
 #define IP6T_INV_SRCIP         0x08    /* Invert the sense of SRC IP. */
 #define IP6T_INV_DSTIP         0x10    /* Invert the sense of DST IP. */
 #define IP6T_INV_FRAG          0x20    /* Invert the sense of FRAG. */
-#define IP6T_INV_PROTO         0x40    /* Invert the sense of PROTO. */
+#define IP6T_INV_PROTO         XT_INV_PROTO
 #define IP6T_INV_MASK          0x7F    /* All possible flag bits mask. */
 
 /* This structure defines each of the firewall rules.  Consists of 3
@@ -145,7 +149,7 @@ struct ip6t_entry
        unsigned int comefrom;
 
        /* Packet and byte counters. */
-       struct ip6t_counters counters;
+       struct xt_counters counters;
 
        /* The matches (if any), then the target. */
        unsigned char elems[0];
@@ -155,54 +159,41 @@ struct ip6t_entry
  * New IP firewall options for [gs]etsockopt at the RAW IP level.
  * Unlike BSD Linux inherits IP options so you don't have to use
  * a raw socket for this. Instead we check rights in the calls. */
-#define IP6T_BASE_CTL                  64      /* base for firewall socket options */
+#define IP6T_BASE_CTL                  XT_BASE_CTL
 
-#define IP6T_SO_SET_REPLACE            (IP6T_BASE_CTL)
-#define IP6T_SO_SET_ADD_COUNTERS       (IP6T_BASE_CTL + 1)
-#define IP6T_SO_SET_MAX                        IP6T_SO_SET_ADD_COUNTERS
+#define IP6T_SO_SET_REPLACE            XT_SO_SET_REPLACE
+#define IP6T_SO_SET_ADD_COUNTERS       XT_SO_SET_ADD_COUNTERS
+#define IP6T_SO_SET_MAX                        XT_SO_SET_MAX
 
-#define IP6T_SO_GET_INFO               (IP6T_BASE_CTL)
-#define IP6T_SO_GET_ENTRIES            (IP6T_BASE_CTL + 1)
-#define        IP6T_SO_GET_REVISION_MATCH      (IP6T_BASE_CTL + 2)
-#define        IP6T_SO_GET_REVISION_TARGET     (IP6T_BASE_CTL + 3)
-#define IP6T_SO_GET_MAX                        IP6T_SO_GET_REVISION_TARGET
+#define IP6T_SO_GET_INFO               XT_SO_GET_INFO
+#define IP6T_SO_GET_ENTRIES            XT_SO_GET_ENTRIES
+#define        IP6T_SO_GET_REVISION_MATCH      XT_SO_GET_REVISION_MATCH
+#define        IP6T_SO_GET_REVISION_TARGET     XT_SO_GET_REVISION_TARGET
+#define IP6T_SO_GET_MAX                        XT_SO_GET_REVISION_TARGET
 
 /* CONTINUE verdict for targets */
-#define IP6T_CONTINUE 0xFFFFFFFF
+#define IP6T_CONTINUE XT_CONTINUE
 
 /* For standard target */
-#define IP6T_RETURN (-NF_REPEAT - 1)
+#define IP6T_RETURN XT_RETURN
 
-/* TCP matching stuff */
-struct ip6t_tcp
-{
-       u_int16_t spts[2];                      /* Source port range. */
-       u_int16_t dpts[2];                      /* Destination port range. */
-       u_int8_t option;                        /* TCP Option iff non-zero*/
-       u_int8_t flg_mask;                      /* TCP flags mask byte */
-       u_int8_t flg_cmp;                       /* TCP flags compare byte */
-       u_int8_t invflags;                      /* Inverse flags */
-};
+/* TCP/UDP matching stuff */
+#include <linux/netfilter/xt_tcpudp.h>
+
+#define ip6t_tcp xt_tcp
+#define ip6t_udp xt_udp
 
 /* Values for "inv" field in struct ipt_tcp. */
-#define IP6T_TCP_INV_SRCPT     0x01    /* Invert the sense of source ports. */
-#define IP6T_TCP_INV_DSTPT     0x02    /* Invert the sense of dest ports. */
-#define IP6T_TCP_INV_FLAGS     0x04    /* Invert the sense of TCP flags. */
-#define IP6T_TCP_INV_OPTION    0x08    /* Invert the sense of option test. */
-#define IP6T_TCP_INV_MASK      0x0F    /* All possible flags. */
-
-/* UDP matching stuff */
-struct ip6t_udp
-{
-       u_int16_t spts[2];                      /* Source port range. */
-       u_int16_t dpts[2];                      /* Destination port range. */
-       u_int8_t invflags;                      /* Inverse flags */
-};
+#define IP6T_TCP_INV_SRCPT     XT_TCP_INV_SRCPT
+#define IP6T_TCP_INV_DSTPT     XT_TCP_INV_DSTPT
+#define IP6T_TCP_INV_FLAGS     XT_TCP_INV_FLAGS
+#define IP6T_TCP_INV_OPTION    XT_TCP_INV_OPTION
+#define IP6T_TCP_INV_MASK      XT_TCP_INV_MASK
 
 /* Values for "invflags" field in struct ipt_udp. */
-#define IP6T_UDP_INV_SRCPT     0x01    /* Invert the sense of source ports. */
-#define IP6T_UDP_INV_DSTPT     0x02    /* Invert the sense of dest ports. */
-#define IP6T_UDP_INV_MASK      0x03    /* All possible flags. */
+#define IP6T_UDP_INV_SRCPT     XT_UDP_INV_SRCPT
+#define IP6T_UDP_INV_DSTPT     XT_UDP_INV_DSTPT
+#define IP6T_UDP_INV_MASK      XT_UDP_INV_MASK
 
 /* ICMP matching stuff */
 struct ip6t_icmp
@@ -264,23 +255,14 @@ struct ip6t_replace
        /* Number of counters (must be equal to current number of entries). */
        unsigned int num_counters;
        /* The old entries' counters. */
-       struct ip6t_counters __user *counters;
+       struct xt_counters __user *counters;
 
        /* The entries (hang off end: not really an array). */
        struct ip6t_entry entries[0];
 };
 
 /* The argument to IP6T_SO_ADD_COUNTERS. */
-struct ip6t_counters_info
-{
-       /* Which table. */
-       char name[IP6T_TABLE_MAXNAMELEN];
-
-       unsigned int num_counters;
-
-       /* The counters (actually `number' of these). */
-       struct ip6t_counters counters[0];
-};
+#define ip6t_counters_info xt_counters_info
 
 /* The argument to IP6T_SO_GET_ENTRIES. */
 struct ip6t_get_entries
@@ -295,19 +277,10 @@ struct ip6t_get_entries
        struct ip6t_entry entrytable[0];
 };
 
-/* The argument to IP6T_SO_GET_REVISION_*.  Returns highest revision
- * kernel supports, if >= revision. */
-struct ip6t_get_revision
-{
-       char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-};
-
 /* Standard return verdict, or do jump. */
-#define IP6T_STANDARD_TARGET ""
+#define IP6T_STANDARD_TARGET XT_STANDARD_TARGET
 /* Error verdict. */
-#define IP6T_ERROR_TARGET "ERROR"
+#define IP6T_ERROR_TARGET XT_ERROR_TARGET
 
 /* Helper functions */
 static __inline__ struct ip6t_entry_target *
@@ -361,104 +334,11 @@ ip6t_get_target(struct ip6t_entry *e)
 #include <linux/init.h>
 extern void ip6t_init(void) __init;
 
-struct ip6t_match
-{
-       struct list_head list;
-
-       const char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-
-       /* Return true or false: return FALSE and set *hotdrop = 1 to
-           force immediate packet drop. */
-       /* Arguments changed since 2.6.9, as this must now handle
-          non-linear skb, using skb_header_pointer and
-          skb_ip_make_writable. */
-       int (*match)(const struct sk_buff *skb,
-                    const struct net_device *in,
-                    const struct net_device *out,
-                    const void *matchinfo,
-                    int offset,
-                    unsigned int protoff,
-                    int *hotdrop);
-
-       /* Called when user tries to insert an entry of this type. */
-       /* Should return true or false. */
-       int (*checkentry)(const char *tablename,
-                         const struct ip6t_ip6 *ip,
-                         void *matchinfo,
-                         unsigned int matchinfosize,
-                         unsigned int hook_mask);
-
-       /* Called when entry of this type deleted. */
-       void (*destroy)(void *matchinfo, unsigned int matchinfosize);
-
-       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-       struct module *me;
-};
-
-/* Registration hooks for targets. */
-struct ip6t_target
-{
-       struct list_head list;
-
-       const char name[IP6T_FUNCTION_MAXNAMELEN-1];
-
-       u_int8_t revision;
-
-       /* Returns verdict. Argument order changed since 2.6.9, as this
-          must now handle non-linear skbs, using skb_copy_bits and
-          skb_ip_make_writable. */
-       unsigned int (*target)(struct sk_buff **pskb,
-                              const struct net_device *in,
-                              const struct net_device *out,
-                              unsigned int hooknum,
-                              const void *targinfo,
-                              void *userdata);
-
-       /* Called when user tries to insert an entry of this type:
-           hook_mask is a bitmask of hooks from which it can be
-           called. */
-       /* Should return true or false. */
-       int (*checkentry)(const char *tablename,
-                         const struct ip6t_entry *e,
-                         void *targinfo,
-                         unsigned int targinfosize,
-                         unsigned int hook_mask);
-
-       /* Called when entry of this type deleted. */
-       void (*destroy)(void *targinfo, unsigned int targinfosize);
-
-       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-       struct module *me;
-};
-
-extern int ip6t_register_target(struct ip6t_target *target);
-extern void ip6t_unregister_target(struct ip6t_target *target);
-
-extern int ip6t_register_match(struct ip6t_match *match);
-extern void ip6t_unregister_match(struct ip6t_match *match);
+#define ip6t_register_target(tgt) xt_register_target(AF_INET6, tgt)
+#define ip6t_unregister_target(tgt) xt_unregister_target(AF_INET6, tgt)
 
-/* Furniture shopping... */
-struct ip6t_table
-{
-       struct list_head list;
-
-       /* A unique name... */
-       char name[IP6T_TABLE_MAXNAMELEN];
-
-       /* What hooks you will enter on */
-       unsigned int valid_hooks;
-
-       /* Lock for the curtain */
-       rwlock_t lock;
-
-       /* Man behind the curtain... */
-       struct ip6t_table_info *private;
-
-       /* Set this to THIS_MODULE if you are a module, otherwise NULL */
-       struct module *me;
-};
+#define ip6t_register_match(match) xt_register_match(AF_INET6, match)
+#define ip6t_unregister_match(match) xt_unregister_match(AF_INET6, match)
 
 extern int ip6t_register_table(struct ip6t_table *table,
                               const struct ip6t_replace *repl);
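The IPv6 conversion mirrors the IPv4 one: ip6t_register_match() now resolves to xt_register_match(AF_INET6, ...), so existing match modules need no source changes either. A hedged sketch (names invented; the callback signature is the one from the ip6t_match definition removed above):

#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_ipv6/ip6_tables.h>

static int example_match(const struct sk_buff *skb,
                         const struct net_device *in,
                         const struct net_device *out,
                         const void *matchinfo,
                         int offset,
                         unsigned int protoff,
                         int *hotdrop)
{
        /* match every packet; a real match would inspect skb here */
        return 1;
}

static struct ip6t_match example_mt = {
        .name  = "example",
        .match = example_match,
        .me    = THIS_MODULE,
};

static int __init example_mt_init(void)
{
        /* now expands to xt_register_match(AF_INET6, &example_mt) */
        return ip6t_register_match(&example_mt);
}

static void __exit example_mt_exit(void)
{
        ip6t_unregister_match(&example_mt);
}

module_init(example_mt_init);
module_exit(example_mt_exit);
MODULE_LICENSE("GPL");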
index 7ade8d8f52468c5725249b296ab55a62d89f1cae..7cf629a8ab923a296eb46b4d3c1e315919f854ac 100644 (file)
@@ -1,8 +1,9 @@
 #ifndef _IP6T_MARK_H_target
 #define _IP6T_MARK_H_target
 
-struct ip6t_mark_target_info {
-       unsigned long mark;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_MARK.h>
 
-#endif /*_IPT_MARK_H_target*/
+#define ip6t_mark_target_info xt_mark_target_info
+
+#endif /*_IP6T_MARK_H_target*/
index 7fc09f9f9d639c85d530766d82e40f78d20944e9..9e9689d03ed742d5422198afc4041a09154f2f36 100644 (file)
@@ -1,10 +1,8 @@
 #ifndef _IP6T_LENGTH_H
 #define _IP6T_LENGTH_H
 
-struct ip6t_length_info {
-       u_int16_t  min, max;
-       u_int8_t   invert;
-};
+#include <linux/netfilter/xt_length.h>
+#define ip6t_length_info xt_length_info
 
 #endif /*_IP6T_LENGTH_H*/
        
index f2866e50f3b446ad3e233f950ccf777514502942..487e5ea342c6e1366976f0caaa6e79f9e1229ed2 100644 (file)
@@ -1,21 +1,8 @@
 #ifndef _IP6T_RATE_H
 #define _IP6T_RATE_H
 
-/* timings are in milliseconds. */
-#define IP6T_LIMIT_SCALE 10000
+#include <linux/netfilter/xt_limit.h>
+#define IP6T_LIMIT_SCALE XT_LIMIT_SCALE
+#define ip6t_rateinfo xt_rateinfo
 
-/* 1/10,000 sec period => max of 10,000/sec.  Min rate is then 429490
-   seconds, or one every 59 hours. */
-struct ip6t_rateinfo {
-       u_int32_t avg;    /* Average secs between packets * scale */
-       u_int32_t burst;  /* Period multiplier for upper limit. */
-
-       /* Used internally by the kernel */
-       unsigned long prev;
-       u_int32_t credit;
-       u_int32_t credit_cap, cost;
-
-       /* Ugly, ugly fucker. */
-       struct ip6t_rateinfo *master;
-};
-#endif /*_IPT_RATE_H*/
+#endif /*_IP6T_RATE_H*/
index 87c088c21848e23bc8efa86fbbdcf67e200b4fd6..ac58e83e9423e7556273e84f0ae39d52c921ae48 100644 (file)
@@ -1,8 +1,7 @@
 #ifndef _IP6T_MAC_H
 #define _IP6T_MAC_H
 
-struct ip6t_mac_info {
-    unsigned char srcaddr[ETH_ALEN];
-    int invert;
-};
-#endif /*_IPT_MAC_H*/
+#include <linux/netfilter/xt_mac.h>
+#define ip6t_mac_info xt_mac_info
+
+#endif /*_IP6T_MAC_H*/
index a734441e1c19a581d32c35b979f591ece86c895f..ff204951ddc3ad4196c5f1d42eb6bcb86cfaf739 100644 (file)
@@ -1,9 +1,9 @@
 #ifndef _IP6T_MARK_H
 #define _IP6T_MARK_H
 
-struct ip6t_mark_info {
-    unsigned long mark, mask;
-    u_int8_t invert;
-};
+/* Backwards compatibility for old userspace */
+#include <linux/netfilter/xt_mark.h>
+
+#define ip6t_mark_info xt_mark_info
 
 #endif /*_IPT_MARK_H*/
index c234731cd66bc33f7f1450f389b62090365d7aae..c161c0a81b55d458fa5db0ab87814acf3c46b86d 100644 (file)
@@ -1,24 +1,17 @@
 #ifndef _IP6T_PHYSDEV_H
 #define _IP6T_PHYSDEV_H
 
-#ifdef __KERNEL__
-#include <linux/if.h>
-#endif
+/* Backwards compatibility for old userspace */
 
-#define IP6T_PHYSDEV_OP_IN             0x01
-#define IP6T_PHYSDEV_OP_OUT            0x02
-#define IP6T_PHYSDEV_OP_BRIDGED                0x04
-#define IP6T_PHYSDEV_OP_ISIN           0x08
-#define IP6T_PHYSDEV_OP_ISOUT          0x10
-#define IP6T_PHYSDEV_OP_MASK           (0x20 - 1)
+#include <linux/netfilter/xt_physdev.h>
 
-struct ip6t_physdev_info {
-       char physindev[IFNAMSIZ];
-       char in_mask[IFNAMSIZ];
-       char physoutdev[IFNAMSIZ];
-       char out_mask[IFNAMSIZ];
-       u_int8_t invert;
-       u_int8_t bitmask;
-};
+#define IP6T_PHYSDEV_OP_IN             XT_PHYSDEV_OP_IN
+#define IP6T_PHYSDEV_OP_OUT            XT_PHYSDEV_OP_OUT
+#define IP6T_PHYSDEV_OP_BRIDGED                XT_PHYSDEV_OP_BRIDGED
+#define IP6T_PHYSDEV_OP_ISIN           XT_PHYSDEV_OP_ISIN
+#define IP6T_PHYSDEV_OP_ISOUT          XT_PHYSDEV_OP_ISOUT
+#define IP6T_PHYSDEV_OP_MASK           XT_PHYSDEV_OP_MASK
+
+#define ip6t_physdev_info xt_physdev_info
 
 #endif /*_IP6T_PHYSDEV_H*/
index 92a9696fdebe0ecb7bad1922bffe3c8e360026d5..331521a10a2d5072be85698afd37c1300016d53d 100644 (file)
@@ -53,6 +53,9 @@
 
 #define PHY_MAX_ADDR 32
 
+/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
+#define PHY_ID_FMT "%x:%02x"
+
 /* The Bus class for PHYs.  Devices which provide access to
  * PHYs should register using this structure */
 struct mii_bus {
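PHY_ID_FMT standardizes how the "mii bus id:phy device id" string is spelled. A hedged sketch of how a MAC driver might build such an id (the bus number and PHY address below are placeholders, not values from this patch):

        int mii_bus_id = 0;     /* placeholder: which MII bus the PHY sits on */
        int phy_addr   = 1;     /* placeholder: address on that bus, 0..PHY_MAX_ADDR-1 */
        char phy_id[20];

        /* produces "0:01", the form used when connecting to a specific PHY */
        snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, mii_bus_id, phy_addr);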
index 74488e49166d88c40018c91f247e5873ad842510..aa6322d4519828c4598cbab4c07116a2a615c385 100644 (file)
@@ -146,6 +146,11 @@ struct property;
 extern void proc_device_tree_init(void);
 extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
 extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
+extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
+                                        struct property *prop);
+extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
+                                        struct property *newprop,
+                                        struct property *oldprop);
 #endif /* CONFIG_PROC_DEVICETREE */
 
 extern struct proc_dir_entry *proc_symlink(const char *,
index 3b74c4bf2934b4e3a1bb561dbee934ebb79d787a..a72e17135421890ec73b11c8b89404652a6e6b43 100644 (file)
@@ -631,7 +631,14 @@ struct sched_domain {
 
 extern void partition_sched_domains(cpumask_t *partition1,
                                    cpumask_t *partition2);
-#endif /* CONFIG_SMP */
+
+/*
+ * Maximum cache size the migration-costs auto-tuning code will
+ * search from:
+ */
+extern unsigned int max_cache_size;
+
+#endif /* CONFIG_SMP */
 
 
 struct io_context;                     /* See blkdev.h */
@@ -689,8 +696,11 @@ struct task_struct {
 
        int lock_depth;         /* BKL lock depth */
 
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+       int last_waker_cpu;     /* CPU that last woke this task up */
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
        int oncpu;
+#endif
 #endif
        int prio, static_prio;
        struct list_head run_list;
@@ -1230,6 +1240,7 @@ static inline void task_unlock(struct task_struct *p)
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task) (task)->thread_info
+#define task_stack_page(task) ((void*)((task)->thread_info))
 
 static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
 {
index e3710d7e260aa6d31b6d5f46f811ca49f3c65b86..a8187c3c8a7b0606d4358ba392997011ea73d04c 100644 (file)
@@ -67,6 +67,9 @@
 /* Parisc type numbers. */
 #define PORT_MUX       48
 
+/* Atmel AT91RM9200 SoC */
+#define PORT_AT91RM9200 49
+
 /* Macintosh Zilog type numbers */
 #define PORT_MAC_ZILOG 50      /* m68k : not yet implemented */
 #define PORT_PMAC_ZILOG        51
index 9f4019156fd8e7045e442df7f7661d7032b0642e..b02dda4ee83d113f6e4783ba02a68629593fe5d9 100644 (file)
@@ -186,6 +186,7 @@ struct ucred {
 #define AF_PPPOX       24      /* PPPoX sockets                */
 #define AF_WANPIPE     25      /* Wanpipe API Sockets */
 #define AF_LLC         26      /* Linux LLC                    */
+#define AF_TIPC                30      /* TIPC sockets                 */
 #define AF_BLUETOOTH   31      /* Bluetooth sockets            */
 #define AF_MAX         32      /* For now.. */
 
@@ -218,6 +219,7 @@ struct ucred {
 #define PF_PPPOX       AF_PPPOX
 #define PF_WANPIPE     AF_WANPIPE
 #define PF_LLC         AF_LLC
+#define PF_TIPC                AF_TIPC
 #define PF_BLUETOOTH   AF_BLUETOOTH
 #define PF_MAX         AF_MAX
 
@@ -279,6 +281,7 @@ struct ucred {
 #define SOL_LLC                268
 #define SOL_DCCP       269
 #define SOL_NETLINK    270
+#define SOL_TIPC       271
 
 /* IPX options */
 #define IPX_TYPE       1
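AF_TIPC/PF_TIPC and SOL_TIPC reserve the address-family and socket-option numbers for the TIPC stack. As a hedged userspace sketch (assuming a libc exposing these constants and a kernel built with TIPC), opening a TIPC socket looks like any other socket() call; SOCK_RDM is one of the socket types TIPC offers:

#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
        int sk = socket(AF_TIPC, SOCK_RDM, 0);

        if (sk < 0)
                perror("socket(AF_TIPC)");  /* EAFNOSUPPORT without TIPC support */
        return sk < 0;
}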
diff --git a/include/linux/spi/ads7846.h b/include/linux/spi/ads7846.h
new file mode 100644 (file)
index 0000000..72261e0
--- /dev/null
@@ -0,0 +1,18 @@
+/* linux/spi/ads7846.h */
+
+/* Touchscreen characteristics vary between boards and models.  The
+ * platform_data for the device's "struct device" holds this information.
+ *
+ * It's OK if the min/max values are zero.
+ */
+struct ads7846_platform_data {
+       u16     model;                  /* 7843, 7845, 7846. */
+       u16     vref_delay_usecs;       /* 0 for external vref; etc */
+       u16     x_plate_ohms;
+       u16     y_plate_ohms;
+
+       u16     x_min, x_max;
+       u16     y_min, y_max;
+       u16     pressure_min, pressure_max;
+};
+
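Board code owns this structure; a hedged sketch of the platform data a hypothetical board file might supply (every value below is invented for illustration, and unset min/max fields simply stay zero, which the comment above allows):

static struct ads7846_platform_data board_ts_info = {
        .model        = 7846,
        .x_plate_ohms = 419,    /* plate resistances from the board's datasheet */
        .y_plate_ohms = 486,
        .pressure_max = 1000,
};

The board's SPI device table (struct spi_board_info, introduced in spi.h below) would then point its platform_data at board_ts_info.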
diff --git a/include/linux/spi/flash.h b/include/linux/spi/flash.h
new file mode 100644 (file)
index 0000000..3f22932
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef LINUX_SPI_FLASH_H
+#define LINUX_SPI_FLASH_H
+
+struct mtd_partition;
+
+/**
+ * struct flash_platform_data: board-specific flash data
+ * @name: optional flash device name (eg, as used with mtdparts=)
+ * @parts: optional array of mtd_partitions for static partitioning
+ * @nr_parts: number of mtd_partitions for static partitioning
+ * @type: optional flash device type (e.g. m25p80 vs m25p64), for use
+ *     with chips that can't be queried for JEDEC or other IDs
+ *
+ * Board init code (in arch/.../mach-xxx/board-yyy.c files) can
+ * provide information about SPI flash parts (such as DataFlash) to
+ * help set up the device and its appropriate default partitioning.
+ *
+ * Note that for DataFlash, sizes for pages, blocks, and sectors are
+ * rarely powers of two; and partitions should be sector-aligned.
+ */
+struct flash_platform_data {
+       char            *name;
+       struct mtd_partition *parts;
+       unsigned int    nr_parts;
+
+       char            *type;
+
+       /* we'll likely add more ... use JEDEC IDs, etc */
+};
+
+#endif
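Like the touchscreen data above, this is filled in by board init code. A hedged sketch for a hypothetical serial-flash chip with two static partitions (names, offsets, and sizes are made up; only the field layout comes from the header):

#include <linux/mtd/partitions.h>
#include <linux/spi/flash.h>

static struct mtd_partition board_flash_parts[] = {
        {
                .name   = "bootloader",
                .offset = 0,
                .size   = 0x00040000,   /* 256 KiB */
        },
        {
                .name   = "filesystem",
                .offset = 0x00040000,
                .size   = 0x003c0000,   /* remainder of a 4 MiB part */
        },
};

static struct flash_platform_data board_flash_data = {
        .name     = "board-flash",
        .parts    = board_flash_parts,
        .nr_parts = ARRAY_SIZE(board_flash_parts),
        .type     = "m25p80",
};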
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
new file mode 100644 (file)
index 0000000..b05f146
--- /dev/null
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_SPI_H
+#define __LINUX_SPI_H
+
+/*
+ * INTERFACES between SPI master-side drivers and SPI infrastructure.
+ * (There's no SPI slave support for Linux yet...)
+ */
+extern struct bus_type spi_bus_type;
+
+/**
+ * struct spi_device - Master side proxy for an SPI slave device
+ * @dev: Driver model representation of the device.
+ * @master: SPI controller used with the device.
+ * @max_speed_hz: Maximum clock rate to be used with this chip
+ *     (on this board); may be changed by the device's driver.
+ * @chip_select: Chipselect, distinguishing chips handled by "master".
+ * @mode: The spi mode defines how data is clocked out and in.
+ *     This may be changed by the device's driver.
+ * @bits_per_word: Data transfers involve one or more words; word sizes
+ *     like eight or 12 bits are common.  In-memory wordsizes are
+ *     powers of two bytes (e.g. 20 bit samples use 32 bits).
+ *     This may be changed by the device's driver.
+ * @irq: Negative, or the number passed to request_irq() to receive
+ *     interrupts from this device.
+ * @controller_state: Controller's runtime state
+ * @controller_data: Board-specific definitions for controller, such as
+ *     FIFO initialization parameters; from board_info.controller_data
+ *
+ * An spi_device is used to interchange data between an SPI slave
+ * (usually a discrete chip) and CPU memory.
+ *
+ * In "dev", the platform_data is used to hold information about this
+ * device that's meaningful to the device's protocol driver, but not
+ * to its controller.  One example might be an identifier for a chip
+ * variant with slightly different functionality.
+ */
+struct spi_device {
+       struct device           dev;
+       struct spi_master       *master;
+       u32                     max_speed_hz;
+       u8                      chip_select;
+       u8                      mode;
+#define        SPI_CPHA        0x01                    /* clock phase */
+#define        SPI_CPOL        0x02                    /* clock polarity */
+#define        SPI_MODE_0      (0|0)                   /* (original MicroWire) */
+#define        SPI_MODE_1      (0|SPI_CPHA)
+#define        SPI_MODE_2      (SPI_CPOL|0)
+#define        SPI_MODE_3      (SPI_CPOL|SPI_CPHA)
+#define        SPI_CS_HIGH     0x04                    /* chipselect active high? */
+       u8                      bits_per_word;
+       int                     irq;
+       void                    *controller_state;
+       void                    *controller_data;
+       const char              *modalias;
+
+       // likely need more hooks for more protocol options affecting how
+       // the controller talks to each chip, like:
+       //  - bit order (default is wordwise msb-first)
+       //  - memory packing (12 bit samples into low bits, others zeroed)
+       //  - priority
+       //  - drop chipselect after each word
+       //  - chipselect delays
+       //  - ...
+};
+
+static inline struct spi_device *to_spi_device(struct device *dev)
+{
+       return dev ? container_of(dev, struct spi_device, dev) : NULL;
+}
+
+/* most drivers won't need to care about device refcounting */
+static inline struct spi_device *spi_dev_get(struct spi_device *spi)
+{
+       return (spi && get_device(&spi->dev)) ? spi : NULL;
+}
+
+static inline void spi_dev_put(struct spi_device *spi)
+{
+       if (spi)
+               put_device(&spi->dev);
+}
+
+/* ctldata is for the bus_master driver's runtime state */
+static inline void *spi_get_ctldata(struct spi_device *spi)
+{
+       return spi->controller_state;
+}
+
+static inline void spi_set_ctldata(struct spi_device *spi, void *state)
+{
+       spi->controller_state = state;
+}
+
+
+struct spi_message;
+
+
+
+struct spi_driver {
+       int                     (*probe)(struct spi_device *spi);
+       int                     (*remove)(struct spi_device *spi);
+       void                    (*shutdown)(struct spi_device *spi);
+       int                     (*suspend)(struct spi_device *spi, pm_message_t mesg);
+       int                     (*resume)(struct spi_device *spi);
+       struct device_driver    driver;
+};
+
+static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
+{
+       return drv ? container_of(drv, struct spi_driver, driver) : NULL;
+}
+
+extern int spi_register_driver(struct spi_driver *sdrv);
+
+static inline void spi_unregister_driver(struct spi_driver *sdrv)
+{
+       if (!sdrv)
+               return;
+       driver_unregister(&sdrv->driver);
+}
+
+
+
+/**
+ * struct spi_master - interface to SPI master controller
+ * @cdev: class interface to this driver
+ * @bus_num: board-specific (and often SOC-specific) identifier for a
+ *     given SPI controller.
+ * @num_chipselect: chipselects are used to distinguish individual
+ *     SPI slaves, and are numbered from zero to num_chipselects.
+ *     each slave has a chipselect signal, but it's common that not
+ *     every chipselect is connected to a slave.
+ * @setup: updates the device mode and clocking records used by a
+ *     device's SPI controller; protocol code may call this.
+ * @transfer: adds a message to the controller's transfer queue.
+ * @cleanup: frees controller-specific state
+ *
+ * Each SPI master controller can communicate with one or more spi_device
+ * children.  These make a small bus, sharing MOSI, MISO and SCK signals
+ * but not chip select signals.  Each device may be configured to use a
+ * different clock rate, since those shared signals are ignored unless
+ * the chip is selected.
+ *
+ * The driver for an SPI controller manages access to those devices through
+ * a queue of spi_message transactions (copying data between CPU memory and
+ * an SPI slave device).  For each such message it queues, it calls the
+ * message's completion function when the transaction completes.
+ */
+struct spi_master {
+       struct class_device     cdev;
+
+       /* other than zero (== assign one dynamically), bus_num is fully
+        * board-specific.  usually that simplifies to being SOC-specific.
+        * example:  one SOC has three SPI controllers, numbered 1..3,
+        * and one board's schematics might show it using SPI-2.  software
+        * would normally use bus_num=2 for that controller.
+        */
+       u16                     bus_num;
+
+       /* chipselects will be integral to many controllers; some others
+        * might use board-specific GPIOs.
+        */
+       u16                     num_chipselect;
+
+       /* setup mode and clock, etc (spi driver may call many times) */
+       int                     (*setup)(struct spi_device *spi);
+
+       /* bidirectional bulk transfers
+        *
+        * + The transfer() method may not sleep; its main role is
+        *   just to add the message to the queue.
+        * + For now there's no remove-from-queue operation, or
+        *   any other request management
+        * + To a given spi_device, message queueing is pure fifo
+        *
+        * + The master's main job is to process its message queue,
+        *   selecting a chip then transferring data
+        * + If there are multiple spi_device children, the i/o queue
+        *   arbitration algorithm is unspecified (round robin, fifo,
+        *   priority, reservations, preemption, etc)
+        *
+        * + Chipselect stays active during the entire message
+        *   (unless modified by spi_transfer.cs_change != 0).
+        * + The message transfers use clock and SPI mode parameters
+        *   previously established by setup() for this device
+        */
+       int                     (*transfer)(struct spi_device *spi,
+                                               struct spi_message *mesg);
+
+       /* called on release() to free memory provided by spi_master */
+       void                    (*cleanup)(const struct spi_device *spi);
+};
+
+static inline void *spi_master_get_devdata(struct spi_master *master)
+{
+       return class_get_devdata(&master->cdev);
+}
+
+static inline void spi_master_set_devdata(struct spi_master *master, void *data)
+{
+       class_set_devdata(&master->cdev, data);
+}
+
+static inline struct spi_master *spi_master_get(struct spi_master *master)
+{
+       if (!master || !class_device_get(&master->cdev))
+               return NULL;
+       return master;
+}
+
+static inline void spi_master_put(struct spi_master *master)
+{
+       if (master)
+               class_device_put(&master->cdev);
+}
+
+
+/* the spi driver core manages memory for the spi_master classdev */
+extern struct spi_master *
+spi_alloc_master(struct device *host, unsigned size);
+
+extern int spi_register_master(struct spi_master *master);
+extern void spi_unregister_master(struct spi_master *master);
+
+extern struct spi_master *spi_busnum_to_master(u16 busnum);
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * I/O INTERFACE between SPI controller and protocol drivers
+ *
+ * Protocol drivers use a queue of spi_messages, each transferring data
+ * between the controller and memory buffers.
+ *
+ * The spi_messages themselves consist of a series of read+write transfer
+ * segments.  Those segments always read the same number of bits as they
+ * write; but one or the other is easily ignored by passing a null buffer
+ * pointer.  (This is unlike most types of I/O API, because SPI hardware
+ * is full duplex.)
+ *
+ * NOTE:  Allocation of spi_transfer and spi_message memory is entirely
+ * up to the protocol driver, which guarantees the integrity of both (as
+ * well as the data buffers) for as long as the message is queued.
+ */
+
+/**
+ * struct spi_transfer - a read/write buffer pair
+ * @tx_buf: data to be written (dma-safe memory), or NULL
+ * @rx_buf: data to be read (dma-safe memory), or NULL
+ * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped
+ * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped
+ * @len: size of rx and tx buffers (in bytes)
+ * @cs_change: affects chipselect after this transfer completes
+ * @delay_usecs: microseconds to delay after this transfer before
+ *     (optionally) changing the chipselect status, then starting
+ *     the next transfer or completing this spi_message.
+ * @transfer_list: transfers are sequenced through spi_message.transfers
+ *
+ * SPI transfers always write the same number of bytes as they read.
+ * Protocol drivers should always provide rx_buf and/or tx_buf.
+ * In some cases, they may also want to provide DMA addresses for
+ * the data being transferred; that may reduce overhead, when the
+ * underlying driver uses dma.
+ *
+ * If the transmit buffer is null, undefined data will be shifted out
+ * while filling rx_buf.  If the receive buffer is null, the data
+ * shifted in will be discarded.  Only "len" bytes shift out (or in).
+ * It's an error to try to shift out a partial word.  (For example, by
+ * shifting out three bytes with word size of sixteen or twenty bits;
+ * the former uses two bytes per word, the latter uses four bytes.)
+ *
+ * All SPI transfers start with the relevant chipselect active.  Normally
+ * it stays selected until after the last transfer in a message.  Drivers
+ * can affect the chipselect signal using cs_change:
+ *
+ * (i) If the transfer isn't the last one in the message, this flag is
+ * used to make the chipselect briefly go inactive in the middle of the
+ * message.  Toggling chipselect in this way may be needed to terminate
+ * a chip command, letting a single spi_message perform all of a group of
+ * chip transactions together.
+ *
+ * (ii) When the transfer is the last one in the message, the chip may
+ * stay selected until the next transfer.  This is purely a performance
+ * hint; the controller driver may need to select a different device
+ * for the next message.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates.  After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
+struct spi_transfer {
+       /* it's ok if tx_buf == rx_buf (right?)
+        * for MicroWire, one buffer must be null
+        * buffers must work with dma_*map_single() calls, unless
+        *   spi_message.is_dma_mapped reports a pre-existing mapping
+        */
+       const void      *tx_buf;
+       void            *rx_buf;
+       unsigned        len;
+
+       dma_addr_t      tx_dma;
+       dma_addr_t      rx_dma;
+
+       unsigned        cs_change:1;
+       u16             delay_usecs;
+
+       struct list_head transfer_list;
+};
+
+/**
+ * struct spi_message - one multi-segment SPI transaction
+ * @transfers: list of transfer segments in this transaction
+ * @spi: SPI device to which the transaction is queued
+ * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
+ *     addresses for each transfer buffer
+ * @complete: called to report transaction completions
+ * @context: the argument to complete() when it's called
+ * @actual_length: the total number of bytes that were transferred in all
+ *     successful segments
+ * @status: zero for success, else negative errno
+ * @queue: for use by whichever driver currently owns the message
+ * @state: for use by whichever driver currently owns the message
+ *
+ * An spi_message is used to execute an atomic sequence of data transfers,
+ * each represented by a struct spi_transfer.  The sequence is "atomic"
+ * in the sense that no other spi_message may use that SPI bus until that
+ * sequence completes.  On some systems, many such sequences can execute as
+ * a single programmed DMA transfer.  On all systems, these messages are
+ * queued, and might complete after transactions to other devices.  Messages
+ * sent to a given spi_device are always executed in FIFO order.
+ *
+ * The code that submits an spi_message (and its spi_transfers)
+ * to the lower layers is responsible for managing its memory.
+ * Zero-initialize every field you don't set up explicitly, to
+ * insulate against future API updates.  After you submit a message
+ * and its transfers, ignore them until its completion callback.
+ */
+struct spi_message {
+       struct list_head        transfers;
+
+       struct spi_device       *spi;
+
+       unsigned                is_dma_mapped:1;
+
+       /* REVISIT:  we might want a flag affecting the behavior of the
+        * last transfer ... allowing things like "read 16 bit length L"
+        * immediately followed by "read L bytes".  Basically imposing
+        * a specific message scheduling algorithm.
+        *
+        * Some controller drivers (message-at-a-time queue processing)
+        * could provide that as their default scheduling algorithm.  But
+        * others (with multi-message pipelines) could need a flag to
+        * tell them about such special cases.
+        */
+
+       /* completion is reported through a callback */
+       void                    (*complete)(void *context);
+       void                    *context;
+       unsigned                actual_length;
+       int                     status;
+
+       /* for optional use by whatever driver currently owns the
+        * spi_message ...  between calls to spi_async and then later
+        * complete(), that's the spi_master controller driver.
+        */
+       struct list_head        queue;
+       void                    *state;
+};
+
+static inline void spi_message_init(struct spi_message *m)
+{
+       memset(m, 0, sizeof *m);
+       INIT_LIST_HEAD(&m->transfers);
+}
+
+static inline void
+spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
+{
+       list_add_tail(&t->transfer_list, &m->transfers);
+}
+
+static inline void
+spi_transfer_del(struct spi_transfer *t)
+{
+       list_del(&t->transfer_list);
+}
+
+/* It's fine to embed message and transaction structures in other data
+ * structures so long as you don't free them while they're in use.
+ */
+
+static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
+{
+       struct spi_message *m;
+
+       m = kzalloc(sizeof(struct spi_message)
+                       + ntrans * sizeof(struct spi_transfer),
+                       flags);
+       if (m) {
+               int i;
+               struct spi_transfer *t = (struct spi_transfer *)(m + 1);
+
+               INIT_LIST_HEAD(&m->transfers);
+               for (i = 0; i < ntrans; i++, t++)
+                       spi_message_add_tail(t, m);
+       }
+       return m;
+}
+
+static inline void spi_message_free(struct spi_message *m)
+{
+       kfree(m);
+}
+
+/**
+ * spi_setup -- setup SPI mode and clock rate
+ * @spi: the device whose settings are being modified
+ *
+ * SPI protocol drivers may need to update the transfer mode if the
+ * device doesn't work with the mode 0 default.  They may likewise need
+ * to update clock rates or word sizes from initial values.  This function
+ * changes those settings, and must be called from a context that can sleep.
+ * The changes take effect the next time the device is selected and data
+ * is transferred to or from it.
+ */
+static inline int
+spi_setup(struct spi_device *spi)
+{
+       return spi->master->setup(spi);
+}
+
+
+/**
+ * spi_async -- asynchronous SPI transfer
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code.  After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some device might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+static inline int
+spi_async(struct spi_device *spi, struct spi_message *message)
+{
+       message->spi = spi;
+       return spi->master->transfer(spi, message);
+}
+
+/*---------------------------------------------------------------------------*/
+
+/* All these synchronous SPI transfer routines are utilities layered
+ * over the core async transfer primitive.  Here, "synchronous" means
+ * they will sleep uninterruptibly until the async transfer completes.
+ */
+
+extern int spi_sync(struct spi_device *spi, struct spi_message *message);
+
+/**
+ * spi_write - SPI synchronous write
+ * @spi: device to which data will be written
+ * @buf: data buffer
+ * @len: data buffer size
+ *
+ * This writes the buffer and returns zero or a negative error code.
+ * Callable only from contexts that can sleep.
+ */
+static inline int
+spi_write(struct spi_device *spi, const u8 *buf, size_t len)
+{
+       struct spi_transfer     t = {
+                       .tx_buf         = buf,
+                       .len            = len,
+               };
+       struct spi_message      m;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       return spi_sync(spi, &m);
+}
+
+/**
+ * spi_read - SPI synchronous read
+ * @spi: device from which data will be read
+ * @buf: data buffer
+ * @len: data buffer size
+ *
+ * This reads into the buffer and returns zero or a negative error code.
+ * Callable only from contexts that can sleep.
+ */
+static inline int
+spi_read(struct spi_device *spi, u8 *buf, size_t len)
+{
+       struct spi_transfer     t = {
+                       .rx_buf         = buf,
+                       .len            = len,
+               };
+       struct spi_message      m;
+
+       spi_message_init(&m);
+       spi_message_add_tail(&t, &m);
+       return spi_sync(spi, &m);
+}
+
+/* this copies txbuf and rxbuf data; for small transfers only! */
+extern int spi_write_then_read(struct spi_device *spi,
+               const u8 *txbuf, unsigned n_tx,
+               u8 *rxbuf, unsigned n_rx);
+
+/**
+ * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ *
+ * This returns the (unsigned) eight bit number returned by the
+ * device, or else a negative error code.  Callable only from
+ * contexts that can sleep.
+ */
+static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
+{
+       ssize_t                 status;
+       u8                      result;
+
+       status = spi_write_then_read(spi, &cmd, 1, &result, 1);
+
+       /* return negative errno or unsigned value */
+       return (status < 0) ? status : result;
+}
+
+/**
+ * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
+ * @spi: device with which data will be exchanged
+ * @cmd: command to be written before data is read back
+ *
+ * This returns the (unsigned) sixteen bit number returned by the
+ * device, or else a negative error code.  Callable only from
+ * contexts that can sleep.
+ *
+ * The number is returned in wire-order, which is at least sometimes
+ * big-endian.
+ */
+static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
+{
+       ssize_t                 status;
+       u16                     result;
+
+       status = spi_write_then_read(spi, &cmd, 1, (u8 *) &result, 2);
+
+       /* return negative errno or unsigned value */
+       return (status < 0) ? status : result;
+}
+
+/*---------------------------------------------------------------------------*/
+
+/*
+ * INTERFACE between board init code and SPI infrastructure.
+ *
+ * No SPI driver ever sees these SPI device table segments, but
+ * it's how the SPI core (or adapters that get hotplugged) grows
+ * the driver model tree.
+ *
+ * As a rule, SPI devices can't be probed.  Instead, board init code
+ * provides a table listing the devices which are present, with enough
+ * information to bind and set up the device's driver.  There's basic
+ * support for nonstatic configurations too; enough to handle adding
+ * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
+ */
+
+/* board-specific information about each SPI device */
+struct spi_board_info {
+       /* the device name and module name are coupled, like platform_bus;
+        * "modalias" is normally the driver name.
+        *
+        * platform_data goes to spi_device.dev.platform_data,
+        * controller_data goes to spi_device.controller_data,
+        * irq is copied too
+        */
+       char            modalias[KOBJ_NAME_LEN];
+       const void      *platform_data;
+       void            *controller_data;
+       int             irq;
+
+       /* slower signaling on noisy or low voltage boards */
+       u32             max_speed_hz;
+
+
+       /* bus_num is board specific and matches the bus_num of some
+        * spi_master that will probably be registered later.
+        *
+        * chip_select reflects how this chip is wired to that master;
+        * it's less than num_chipselect.
+        */
+       u16             bus_num;
+       u16             chip_select;
+
+       /* ... may need additional spi_device chip config data here.
+        * avoid stuff protocol drivers can set; but include stuff
+        * needed to behave without being bound to a driver:
+        *  - chipselect polarity
+        *  - quirks like clock rate mattering when not selected
+        */
+};
+
+#ifdef CONFIG_SPI
+extern int
+spi_register_board_info(struct spi_board_info const *info, unsigned n);
+#else
+/* board init code may ignore whether SPI is configured or not */
+static inline int
+spi_register_board_info(struct spi_board_info const *info, unsigned n)
+       { return 0; }
+#endif
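+
+/* Purely illustrative: board init code usually declares its SPI devices
+ * in a static table and registers them once during machine setup.  The
+ * modalias, clock rate, wiring and IRQ below are made-up values;
+ * ARRAY_SIZE() and __initdata come from the usual kernel headers.
+ */
+#if 0
+static struct spi_board_info example_spi_devices[] __initdata = {
+       {
+               .modalias       = "example-adc",        /* hypothetical driver */
+               .max_speed_hz   = 2500000,              /* 2.5 MHz on this board */
+               .bus_num        = 1,
+               .chip_select    = 0,
+               .irq            = 42,
+       },
+};
+
+static void __init example_board_init(void)
+{
+       spi_register_board_info(example_spi_devices,
+                       ARRAY_SIZE(example_spi_devices));
+}
+#endif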
+
+
+/* If you're hotplugging an adapter with devices (parport, usb, etc)
+ * use spi_new_device() to describe each device.  You can also call
+ * spi_unregister_device() to start making that device vanish, but
+ * normally that would be handled by spi_unregister_master().
+ */
+extern struct spi_device *
+spi_new_device(struct spi_master *, struct spi_board_info *);
+
+static inline void
+spi_unregister_device(struct spi_device *spi)
+{
+       if (spi)
+               device_unregister(&spi->dev);
+}
+
+#endif /* __LINUX_SPI_H */
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
new file mode 100644 (file)
index 0000000..c961fe9
--- /dev/null
@@ -0,0 +1,135 @@
+#ifndef        __SPI_BITBANG_H
+#define        __SPI_BITBANG_H
+
+/*
+ * Mix this utility code with some glue code to get one of several types of
+ * simple SPI master driver.  Two do polled word-at-a-time I/O:
+ *
+ *   - GPIO/parport bitbangers.  Provide chipselect() and txrx_word[](),
+ *     expanding the per-word routines from the inline templates below.
+ *
+ *   - Drivers for controllers resembling bare shift registers.  Provide
+ *     chipselect() and txrx_word[](), with custom setup()/cleanup() methods
+ *     that use your controller's clock and chipselect registers.
+ *
+ * Some hardware works well with requests at spi_transfer scope:
+ *
+ *   - Drivers leveraging smarter hardware, with fifos or DMA; or for half
+ *     duplex (MicroWire) controllers.  Provide chipselect() and txrx_bufs(),
+ *     and custom setup()/cleanup() methods.
+ */
+struct spi_bitbang {
+       struct workqueue_struct *workqueue;
+       struct work_struct      work;
+
+       spinlock_t              lock;
+       struct list_head        queue;
+       u8                      busy;
+       u8                      shutdown;
+       u8                      use_dma;
+
+       struct spi_master       *master;
+
+       void    (*chipselect)(struct spi_device *spi, int is_on);
+#define        BITBANG_CS_ACTIVE       1       /* normally nCS, active low */
+#define        BITBANG_CS_INACTIVE     0
+
+       /* txrx_bufs() may handle dma mapping for transfers that don't
+        * already have one (transfer.{tx,rx}_dma is zero), or use PIO
+        */
+       int     (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+
+       /* txrx_word[SPI_MODE_*]() just looks like a shift register */
+       u32     (*txrx_word[4])(struct spi_device *spi,
+                       unsigned nsecs,
+                       u32 word, u8 bits);
+};
+
+/* you can call these default bitbang->master methods from your custom
+ * methods, if you like.
+ */
+extern int spi_bitbang_setup(struct spi_device *spi);
+extern void spi_bitbang_cleanup(const struct spi_device *spi);
+extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m);
+
+/* start or stop queue processing */
+extern int spi_bitbang_start(struct spi_bitbang *spi);
+extern int spi_bitbang_stop(struct spi_bitbang *spi);
+
+#endif /* __SPI_BITBANG_H */
+
+/*-------------------------------------------------------------------------*/
+
+#ifdef EXPAND_BITBANG_TXRX
+
+/*
+ * The code that knows what GPIO pins do what should have declared four
+ * functions, ideally as inlines, before #defining EXPAND_BITBANG_TXRX
+ * and including this header:
+ *
+ *  void setsck(struct spi_device *, int is_on);
+ *  void setmosi(struct spi_device *, int is_on);
+ *  int getmiso(struct spi_device *);
+ *  void spidelay(unsigned);
+ *
+ * A non-inlined routine would call bitbang_txrx_*() routines.  The
+ * main loop could easily compile down to a handful of instructions,
+ * especially if the delay is a NOP (to run at peak speed).
+ *
+ * Since this is software, the timings may not be exactly what your board's
+ * chips need ... there may be several reasons you'd need to tweak timings
+ * in these routines, not just to make them faster or slower to match a
+ * particular CPU clock rate.
+ */
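+
+/* Purely illustrative: the glue a GPIO bitbanger might supply before
+ * expanding these templates.  The pin numbers and the generic
+ * gpio_set_value()/gpio_get_value()/ndelay() helpers are assumptions;
+ * substitute whatever accessors your platform provides.
+ */
+#if 0
+static inline void setsck(struct spi_device *spi, int is_on)
+{
+       gpio_set_value(EXAMPLE_SCK_PIN, is_on);
+}
+
+static inline void setmosi(struct spi_device *spi, int is_on)
+{
+       gpio_set_value(EXAMPLE_MOSI_PIN, is_on);
+}
+
+static inline int getmiso(struct spi_device *spi)
+{
+       return gpio_get_value(EXAMPLE_MISO_PIN);
+}
+
+#define spidelay(nsecs)        ndelay(nsecs)
+
+#define EXPAND_BITBANG_TXRX
+#include <linux/spi/spi_bitbang.h>
+#endif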
+
+static inline u32
+bitbang_txrx_be_cpha0(struct spi_device *spi,
+               unsigned nsecs, unsigned cpol,
+               u32 word, u8 bits)
+{
+       /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+       /* clock starts at inactive polarity */
+       for (word <<= (32 - bits); likely(bits); bits--) {
+
+               /* setup MSB (to slave) on trailing edge */
+               setmosi(spi, word & (1 << 31));
+               spidelay(nsecs);        /* T(setup) */
+
+               setsck(spi, !cpol);
+               spidelay(nsecs);
+
+               /* sample MSB (from slave) on leading edge */
+               word <<= 1;
+               word |= getmiso(spi);
+               setsck(spi, cpol);
+       }
+       return word;
+}
+
+static inline u32
+bitbang_txrx_be_cpha1(struct spi_device *spi,
+               unsigned nsecs, unsigned cpol,
+               u32 word, u8 bits)
+{
+       /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+       /* clock starts at inactive polarity */
+       for (word <<= (32 - bits); likely(bits); bits--) {
+
+               /* setup MSB (to slave) on leading edge */
+               setsck(spi, !cpol);
+               setmosi(spi, word & (1 << 31));
+               spidelay(nsecs); /* T(setup) */
+
+               setsck(spi, cpol);
+               spidelay(nsecs);
+
+               /* sample MSB (from slave) on trailing edge */
+               word <<= 1;
+               word |= getmiso(spi);
+       }
+       return word;
+}
+
+#endif /* EXPAND_BITBANG_TXRX */
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
new file mode 100644 (file)
index 0000000..243a15f
--- /dev/null
@@ -0,0 +1,212 @@
+/*
+ * include/linux/tipc.h: Include file for TIPC socket interface
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_H_
+#define _LINUX_TIPC_H_
+
+#include <linux/types.h>
+
+/*
+ * TIPC addressing primitives
+ */
+struct tipc_portid {
+       __u32 ref;
+       __u32 node;
+};
+
+struct tipc_name {
+       __u32 type;
+       __u32 instance;
+};
+
+struct tipc_name_seq {
+       __u32 type;
+       __u32 lower;
+       __u32 upper;
+};
+
+static inline __u32 tipc_addr(unsigned int zone,
+                             unsigned int cluster,
+                             unsigned int node)
+{
+       return (zone << 24) | (cluster << 12) | node;
+}
+
+static inline unsigned int tipc_zone(__u32 addr)
+{
+       return addr >> 24;
+}
+
+static inline unsigned int tipc_cluster(__u32 addr)
+{
+       return (addr >> 12) & 0xfff;
+}
+
+static inline unsigned int tipc_node(__u32 addr)
+{
+       return addr & 0xfff;
+}
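+
+/*
+ * Illustrative example: the network address <1.1.10> is composed and
+ * decomposed as follows (0x0100100a is just the result of the shifts
+ * above):
+ *
+ *     __u32 a = tipc_addr(1, 1, 10);          == 0x0100100a
+ *     tipc_zone(a) == 1, tipc_cluster(a) == 1, tipc_node(a) == 10
+ */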
+
+/*
+ * Application-accessible port name types
+ */
+
+#define TIPC_CFG_SRV           0       /* configuration service name type */
+#define TIPC_TOP_SRV           1       /* topology service name type */
+#define TIPC_RESERVED_TYPES    64      /* lowest user-publishable name type */
+
+/* 
+ * Publication scopes when binding port names and port name sequences
+ */
+
+#define TIPC_ZONE_SCOPE                1
+#define TIPC_CLUSTER_SCOPE     2
+#define TIPC_NODE_SCOPE                3
+
+/*
+ * Limiting values for messages
+ */
+
+#define TIPC_MAX_USER_MSG_SIZE 66000
+
+/*
+ * Message importance levels
+ */
+
+#define TIPC_LOW_IMPORTANCE            0  /* default */
+#define TIPC_MEDIUM_IMPORTANCE         1
+#define TIPC_HIGH_IMPORTANCE           2
+#define TIPC_CRITICAL_IMPORTANCE       3
+
+/* 
+ * Msg rejection/connection shutdown reasons
+ */
+
+#define TIPC_OK                        0
+#define TIPC_ERR_NO_NAME       1
+#define TIPC_ERR_NO_PORT       2
+#define TIPC_ERR_NO_NODE       3
+#define TIPC_ERR_OVERLOAD      4
+#define TIPC_CONN_SHUTDOWN     5
+
+/*
+ * TIPC topology subscription service definitions
+ */
+
+#define TIPC_SUB_PORTS         0x01    /* filter for port availability */
+#define TIPC_SUB_SERVICE       0x02    /* filter for service availability */
+#if 0
+/* The following filter options are not currently implemented */
+#define TIPC_SUB_NO_BIND_EVTS  0x04    /* filter out "publish" events */
+#define TIPC_SUB_NO_UNBIND_EVTS        0x08    /* filter out "withdraw" events */
+#define TIPC_SUB_SINGLE_EVT    0x10    /* expire after first event */
+#endif
+
+#define TIPC_WAIT_FOREVER      ~0      /* timeout for permanent subscription */
+
+struct tipc_subscr {
+       struct tipc_name_seq seq;       /* name sequence of interest */
+       __u32 timeout;                  /* subscription duration (in ms) */
+       __u32 filter;                   /* bitmask of filter options */
+       char usr_handle[8];             /* available for subscriber use */
+};
+
+#define TIPC_PUBLISHED         1       /* publication event */
+#define TIPC_WITHDRAWN         2       /* withdraw event */
+#define TIPC_SUBSCR_TIMEOUT    3       /* subscription timeout event */
+
+struct tipc_event {
+       __u32 event;                    /* event type */
+       __u32 found_lower;              /* matching name seq instances */
+       __u32 found_upper;              /*    "      "    "     "      */
+       struct tipc_portid port;        /* associated port */
+       struct tipc_subscr s;           /* associated subscription */
+};
+
+/*
+ * Socket API
+ */
+
+#ifndef AF_TIPC
+#define AF_TIPC                30
+#endif
+
+#ifndef PF_TIPC
+#define PF_TIPC                AF_TIPC
+#endif
+
+#ifndef SOL_TIPC
+#define SOL_TIPC       271
+#endif
+
+#define TIPC_ADDR_NAMESEQ      1
+#define TIPC_ADDR_MCAST                1
+#define TIPC_ADDR_NAME         2
+#define TIPC_ADDR_ID           3
+
+struct sockaddr_tipc {
+       unsigned short family;
+       unsigned char  addrtype;
+       signed   char  scope;
+       union {
+               struct tipc_portid id;
+               struct tipc_name_seq nameseq;
+               struct {
+                       struct tipc_name name;
+                       __u32 domain; /* 0: own zone */
+               } name;
+       } addr;
+};
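+
+/*
+ * Illustrative only: a message sent to port name {1000, 100} anywhere
+ * in the sender's own zone would be addressed like this (the type and
+ * instance values are made up):
+ *
+ *     struct sockaddr_tipc addr = {
+ *             .family                  = AF_TIPC,
+ *             .addrtype                = TIPC_ADDR_NAME,
+ *             .addr.name.name.type     = 1000,
+ *             .addr.name.name.instance = 100,
+ *             .addr.name.domain        = 0,   /* 0: own zone */
+ *     };
+ */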
+
+/*
+ * Ancillary data objects supported by recvmsg()
+ */
+
+#define TIPC_ERRINFO   1       /* error info */
+#define TIPC_RETDATA   2       /* returned data */
+#define TIPC_DESTNAME  3       /* destination name */
+
+/*
+ * TIPC-specific socket option values
+ */
+
+#define TIPC_IMPORTANCE                127     /* Default: TIPC_LOW_IMPORTANCE */
+#define TIPC_SRC_DROPPABLE     128     /* Default: 0 (resend congested msg) */
+#define TIPC_DEST_DROPPABLE    129     /* Default: based on socket type */
+#define TIPC_CONN_TIMEOUT      130     /* Default: 8000 (ms)  */
+
+#endif
diff --git a/include/linux/tipc_config.h b/include/linux/tipc_config.h
new file mode 100644 (file)
index 0000000..a52c8c6
--- /dev/null
@@ -0,0 +1,407 @@
+/*
+ * include/linux/tipc_config.h: Include file for TIPC configuration interface
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_TIPC_CONFIG_H_
+#define _LINUX_TIPC_CONFIG_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <asm/byteorder.h>
+
+/*
+ * Configuration
+ *
+ * All configuration management messaging involves sending a request message
+ * to the TIPC configuration service on a node, which sends a reply message
+ * back.  (In the future multi-message replies may be supported.)
+ *
+ * Both request and reply messages consist of a transport header and payload.
+ * The transport header contains info about the desired operation;
+ * the payload consists of zero or more type/length/value (TLV) items
+ * which specify parameters or results for the operation.
+ *
+ * For many operations, the request and reply messages have a fixed number
+ * of TLVs (usually zero or one); however, some reply messages may return 
+ * a variable number of TLVs.  A failed request is denoted by the presence
+ * of an "error string" TLV in the reply message instead of the TLV(s) the
+ * reply should contain if the request succeeds.
+ */
+/* 
+ * Public commands:
+ * May be issued by any process.
+ * Accepted by own node, or by remote node only if remote management is enabled.
+ */
+#define  TIPC_CMD_NOOP             0x0000    /* tx none, rx none */
+#define  TIPC_CMD_GET_NODES         0x0001    /* tx net_addr, rx node_info(s) */
+#define  TIPC_CMD_GET_MEDIA_NAMES   0x0002    /* tx none, rx media_name(s) */
+#define  TIPC_CMD_GET_BEARER_NAMES  0x0003    /* tx none, rx bearer_name(s) */
+#define  TIPC_CMD_GET_LINKS         0x0004    /* tx net_addr, rx link_info(s) */
+#define  TIPC_CMD_SHOW_NAME_TABLE   0x0005    /* tx name_tbl_query, rx ultra_string */
+#define  TIPC_CMD_SHOW_PORTS        0x0006    /* tx none, rx ultra_string */
+#define  TIPC_CMD_SHOW_LINK_STATS   0x000B    /* tx link_name, rx ultra_string */
+
+#if 0
+#define  TIPC_CMD_SHOW_PORT_STATS   0x0008    /* tx port_ref, rx ultra_string */
+#define  TIPC_CMD_RESET_PORT_STATS  0x0009    /* tx port_ref, rx none */
+#define  TIPC_CMD_GET_ROUTES        0x000A    /* tx ?, rx ? */
+#define  TIPC_CMD_GET_LINK_PEER     0x000D    /* tx link_name, rx ? */
+#endif
+
+/* 
+ * Protected commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node, or by remote node only if remote management is
+ * enabled and this node is the zone manager.
+ */
+
+#define  TIPC_CMD_GET_REMOTE_MNG    0x4003    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PORTS     0x4004    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_PUBL      0x4005    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SUBSCR    0x4006    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_ZONES     0x4007    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_CLUSTERS  0x4008    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_NODES     0x4009    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_MAX_SLAVES    0x400A    /* tx none, rx unsigned */
+#define  TIPC_CMD_GET_NETID         0x400B    /* tx none, rx unsigned */
+
+#define  TIPC_CMD_ENABLE_BEARER     0x4101    /* tx bearer_config, rx none */
+#define  TIPC_CMD_DISABLE_BEARER    0x4102    /* tx bearer_name, rx none */
+#define  TIPC_CMD_SET_LINK_TOL      0x4107    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_PRI      0x4108    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LINK_WINDOW   0x4109    /* tx link_config, rx none */
+#define  TIPC_CMD_SET_LOG_SIZE      0x410A    /* tx unsigned, rx none */
+#define  TIPC_CMD_DUMP_LOG          0x410B    /* tx none, rx ultra_string */
+#define  TIPC_CMD_RESET_LINK_STATS  0x410C    /* tx link_name, rx none */
+
+#if 0
+#define  TIPC_CMD_CREATE_LINK       0x4103    /* tx link_create, rx none */
+#define  TIPC_CMD_REMOVE_LINK       0x4104    /* tx link_name, rx none */
+#define  TIPC_CMD_BLOCK_LINK        0x4105    /* tx link_name, rx none */
+#define  TIPC_CMD_UNBLOCK_LINK      0x4106    /* tx link_name, rx none */
+#endif
+
+/* 
+ * Private commands:
+ * May only be issued by "network administration capable" process.
+ * Accepted by own node only; cannot be used on a remote node.                       
+ */
+
+#define  TIPC_CMD_SET_NODE_ADDR     0x8001    /* tx net_addr, rx none */
+#if 0
+#define  TIPC_CMD_SET_ZONE_MASTER   0x8002    /* tx none, rx none */
+#endif
+#define  TIPC_CMD_SET_REMOTE_MNG    0x8003    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PORTS     0x8004    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_PUBL      0x8005    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SUBSCR    0x8006    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_ZONES     0x8007    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_CLUSTERS  0x8008    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_NODES     0x8009    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_MAX_SLAVES    0x800A    /* tx unsigned, rx none */
+#define  TIPC_CMD_SET_NETID         0x800B    /* tx unsigned, rx none */
+
+/*
+ * TLV types defined for TIPC
+ */
+
+#define TIPC_TLV_NONE          0       /* no TLV present */
+#define TIPC_TLV_VOID          1       /* empty TLV (0 data bytes)*/
+#define TIPC_TLV_UNSIGNED      2       /* 32-bit integer */
+#define TIPC_TLV_STRING                3       /* char[128] (max) */
+#define TIPC_TLV_LARGE_STRING  4       /* char[2048] (max) */
+#define TIPC_TLV_ULTRA_STRING  5       /* char[32768] (max) */
+
+#define TIPC_TLV_ERROR_STRING  16      /* char[128] containing "error code" */
+#define TIPC_TLV_NET_ADDR      17      /* 32-bit integer denoting <Z.C.N> */
+#define TIPC_TLV_MEDIA_NAME    18      /* char[TIPC_MAX_MEDIA_NAME] */
+#define TIPC_TLV_BEARER_NAME   19      /* char[TIPC_MAX_BEARER_NAME] */
+#define TIPC_TLV_LINK_NAME     20      /* char[TIPC_MAX_LINK_NAME] */
+#define TIPC_TLV_NODE_INFO     21      /* struct tipc_node_info */
+#define TIPC_TLV_LINK_INFO     22      /* struct tipc_link_info */
+#define TIPC_TLV_BEARER_CONFIG  23     /* struct tipc_bearer_config */
+#define TIPC_TLV_LINK_CONFIG    24     /* struct tipc_link_config */
+#define TIPC_TLV_NAME_TBL_QUERY        25      /* struct tipc_name_table_query */
+#define TIPC_TLV_PORT_REF      26      /* 32-bit port reference */
+
+/*
+ * Maximum sizes of TIPC bearer-related names (including terminating NUL)
+ */ 
+
+#define TIPC_MAX_MEDIA_NAME    16      /* format = media */
+#define TIPC_MAX_IF_NAME       16      /* format = interface */
+#define TIPC_MAX_BEARER_NAME   32      /* format = media:interface */
+#define TIPC_MAX_LINK_NAME     60      /* format = Z.C.N:interface-Z.C.N:interface */
+
+/*
+ * Link priority limits (range from 0 to # priorities - 1)
+ */
+
+#define TIPC_NUM_LINK_PRI 32
+
+/*
+ * Link tolerance limits (min, default, max), in ms
+ */
+
+#define TIPC_MIN_LINK_TOL 50
+#define TIPC_DEF_LINK_TOL 1500
+#define TIPC_MAX_LINK_TOL 30000
+
+/*
+ * Link window limits (min, default, max), in packets
+ */
+
+#define TIPC_MIN_LINK_WIN 16
+#define TIPC_DEF_LINK_WIN 50
+#define TIPC_MAX_LINK_WIN 150
+
+
+struct tipc_node_info {
+       __u32 addr;                     /* network address of node */
+       __u32 up;                       /* 0=down, 1= up */
+};
+
+struct tipc_link_info {
+       __u32 dest;                     /* network address of peer node */
+       __u32 up;                       /* 0=down, 1=up */
+       char str[TIPC_MAX_LINK_NAME];   /* link name */
+};
+
+struct tipc_bearer_config {
+       __u32 priority;                 /* Range [1,31]. Override per link  */
+       __u32 detect_scope;     
+       char name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_link_config {
+       __u32 value;
+       char name[TIPC_MAX_LINK_NAME];
+};
+
+#define TIPC_NTQ_ALLTYPES 0x80000000
+
+struct tipc_name_table_query {
+       __u32 depth;    /* 1:type, 2:+name info, 3:+port info, 4+:+debug info */
+       __u32 type;     /* {t,l,u} info ignored if high bit of "depth" is set */
+       __u32 lowbound; /* (i.e. displays all entries of name table) */
+       __u32 upbound;
+};
+
+/*
+ * The error string TLV is a null-terminated string describing the cause 
+ * of the request failure.  To simplify error processing (and to save space)
+ * the first character of the string can be a special error code character
+ * (lying in the range 0x80 to 0xFF) which represents a pre-defined reason.
+ */
+
+#define TIPC_CFG_TLV_ERROR      "\x80"  /* request contains incorrect TLV(s) */
+#define TIPC_CFG_NOT_NET_ADMIN  "\x81" /* must be network administrator */
+#define TIPC_CFG_NOT_ZONE_MSTR "\x82"  /* must be zone master */
+#define TIPC_CFG_NO_REMOTE     "\x83"  /* remote management not enabled */
+#define TIPC_CFG_NOT_SUPPORTED  "\x84" /* request is not supported by TIPC */
+#define TIPC_CFG_INVALID_VALUE  "\x85"  /* request has invalid argument value */
+
+#if 0
+/* prototypes TLV structures for proposed commands */
+struct tipc_link_create {
+       __u32   domain;
+       struct tipc_media_addr peer_addr;
+       char bearer_name[TIPC_MAX_BEARER_NAME];
+};
+
+struct tipc_route_info {
+       __u32 dest;
+       __u32 router;
+};
+#endif
+
+/*
+ * A TLV consists of a descriptor, followed by the TLV value.
+ * TLV descriptor fields are stored in network byte order; 
+ * TLV values must also be stored in network byte order (where applicable).
+ * TLV descriptors must be aligned to addresses which are multiple of 4,
+ * so up to 3 bytes of padding may exist at the end of the TLV value area.
+ * There must not be any padding between the TLV descriptor and its value.
+ */
+
+struct tlv_desc {
+       __u16 tlv_len;          /* TLV length (descriptor + value) */
+       __u16 tlv_type;         /* TLV identifier */
+};
+
+#define TLV_ALIGNTO 4
+
+#define TLV_ALIGN(datalen) (((datalen)+(TLV_ALIGNTO-1)) & ~(TLV_ALIGNTO-1))
+#define TLV_LENGTH(datalen) (sizeof(struct tlv_desc) + (datalen))
+#define TLV_SPACE(datalen) (TLV_ALIGN(TLV_LENGTH(datalen)))
+#define TLV_DATA(tlv) ((void *)((char *)(tlv) + TLV_LENGTH(0)))
+
+static inline int TLV_OK(const void *tlv, __u16 space)
+{
+       /*
+        * Would also like to check that "tlv" is a multiple of 4,
+        * but don't know how to do this in a portable way.
+        * - Tried doing (!(tlv & (TLV_ALIGNTO-1))), but GCC compiler
+        *   won't allow binary "&" with a pointer.
+        * - Tried casting "tlv" to integer type, but causes warning about size
+        *   mismatch when pointer is bigger than chosen type (int, long, ...).
+        */
+
+       return (space >= TLV_SPACE(0)) &&
+               (ntohs(((struct tlv_desc *)tlv)->tlv_len) <= space);
+}
+
+static inline int TLV_CHECK(const void *tlv, __u16 space, __u16 exp_type)
+{
+       return TLV_OK(tlv, space) && 
+               (ntohs(((struct tlv_desc *)tlv)->tlv_type) == exp_type);
+}
+
+static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
+{
+       struct tlv_desc *tlv_ptr;
+       int tlv_len;
+
+       tlv_len = TLV_LENGTH(len);
+       tlv_ptr = (struct tlv_desc *)tlv;
+       tlv_ptr->tlv_type = htons(type);
+       tlv_ptr->tlv_len  = htons(tlv_len);
+       if (len && data)
+               memcpy(TLV_DATA(tlv_ptr), data, len);
+       return TLV_SPACE(len);
+}
+
+/*
+ * A TLV list descriptor simplifies processing of messages 
+ * containing multiple TLVs.
+ */
+
+struct tlv_list_desc {
+       struct tlv_desc *tlv_ptr;       /* ptr to current TLV */
+       __u32 tlv_space;                /* # bytes from curr TLV to list end */
+};
+
+static inline void TLV_LIST_INIT(struct tlv_list_desc *list, 
+                                void *data, __u32 space)
+{
+       list->tlv_ptr = (struct tlv_desc *)data;
+       list->tlv_space = space;
+}
+            
+static inline int TLV_LIST_EMPTY(struct tlv_list_desc *list)
+{ 
+       return (list->tlv_space == 0);
+}
+
+static inline int TLV_LIST_CHECK(struct tlv_list_desc *list, __u16 exp_type)
+{
+       return TLV_CHECK(list->tlv_ptr, list->tlv_space, exp_type);
+}
+
+static inline void *TLV_LIST_DATA(struct tlv_list_desc *list)
+{
+       return TLV_DATA(list->tlv_ptr);
+}
+
+static inline void TLV_LIST_STEP(struct tlv_list_desc *list)
+{
+       __u16 tlv_space = TLV_ALIGN(ntohs(list->tlv_ptr->tlv_len));
+
+       list->tlv_ptr = (struct tlv_desc *)((char *)list->tlv_ptr + tlv_space);
+       list->tlv_space -= tlv_space;
+}
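+
+/*
+ * Illustrative only: a reply carrying several TLVs of one type can be
+ * walked like this (reply_buf, reply_len and process_node_info() are
+ * hypothetical):
+ *
+ *     struct tlv_list_desc list;
+ *
+ *     TLV_LIST_INIT(&list, reply_buf, reply_len);
+ *     while (!TLV_LIST_EMPTY(&list)) {
+ *             if (!TLV_LIST_CHECK(&list, TIPC_TLV_NODE_INFO))
+ *                     break;
+ *             process_node_info(TLV_LIST_DATA(&list));
+ *             TLV_LIST_STEP(&list);
+ *     }
+ */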
+
+/*
+ * Configuration messages exchanged via NETLINK_GENERIC use the following
+ * family id, name, version and command.
+ */
+#define TIPC_GENL_NAME         "TIPC"
+#define TIPC_GENL_VERSION      0x1
+#define TIPC_GENL_CMD          0x1
+
+/*
+ * TIPC specific header used in NETLINK_GENERIC requests.
+ */
+struct tipc_genlmsghdr {
+       __u32 dest;             /* Destination address */
+       __u16 cmd;              /* Command */
+       __u16 reserved;         /* Unused */
+};
+
+#define TIPC_GENL_HDRLEN       NLMSG_ALIGN(sizeof(struct tipc_genlmsghdr))
+
+/*
+ * Configuration messages exchanged via TIPC sockets use the TIPC configuration 
+ * message header, which is defined below.  This structure is analogous 
+ * to the Netlink message header, but fields are stored in network byte order 
+ * and no padding is permitted between the header and the message data 
+ * that follows.
+ */
+
+struct tipc_cfg_msg_hdr
+{
+       __u32 tcm_len;          /* Message length (including header) */
+       __u16 tcm_type;         /* Command type */
+       __u16 tcm_flags;        /* Additional flags */
+       char  tcm_reserved[8];  /* Unused */
+};
+
+#define TCM_F_REQUEST  0x1     /* Flag: Request message */
+#define TCM_F_MORE     0x2     /* Flag: Message to be continued */
+
+#define TCM_ALIGN(datalen)  (((datalen)+3) & ~3)
+#define TCM_LENGTH(datalen) (sizeof(struct tipc_cfg_msg_hdr) + datalen)
+#define TCM_SPACE(datalen)  (TCM_ALIGN(TCM_LENGTH(datalen)))
+#define TCM_DATA(tcm_hdr)   ((void *)((char *)(tcm_hdr) + TCM_LENGTH(0)))
+
+static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
+                         void *data, __u16 data_len)
+{
+       struct tipc_cfg_msg_hdr *tcm_hdr;
+       int msg_len;
+
+       msg_len = TCM_LENGTH(data_len);
+       tcm_hdr = (struct tipc_cfg_msg_hdr *)msg;
+       tcm_hdr->tcm_len   = htonl(msg_len);
+       tcm_hdr->tcm_type  = htons(cmd);
+       tcm_hdr->tcm_flags = htons(flags);
+       if (data_len && data)
+               memcpy(TCM_DATA(msg), data, data_len);
+       return TCM_SPACE(data_len);
+}
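+
+/*
+ * Illustrative only: a request carrying one unsigned TLV (here
+ * TIPC_CMD_SET_NETID with a made-up value) can be assembled by first
+ * building the TLV and then wrapping it in a configuration message:
+ *
+ *     char tlv_area[TLV_SPACE(sizeof(__u32))];
+ *     char msg[TCM_SPACE(TLV_SPACE(sizeof(__u32)))];
+ *     __u32 netid = htonl(4711);
+ *     int tlv_space, msg_space;
+ *
+ *     tlv_space = TLV_SET(tlv_area, TIPC_TLV_UNSIGNED,
+ *                         &netid, sizeof(netid));
+ *     msg_space = TCM_SET(msg, TIPC_CMD_SET_NETID, TCM_F_REQUEST,
+ *                         tlv_area, tlv_space);
+ */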
+
+#endif
index 3df1d474e5c56dcd4ce2ca4d371ad86fc945368b..315a5163d6a01a7f891251550feaf89716219803 100644 (file)
@@ -86,7 +86,6 @@
        .max_interval           = 2,                    \
        .busy_factor            = 8,                    \
        .imbalance_pct          = 110,                  \
-       .cache_hot_time         = 0,                    \
        .cache_nice_tries       = 0,                    \
        .per_cpu_gain           = 25,                   \
        .busy_idx               = 0,                    \
        .max_interval           = 4,                    \
        .busy_factor            = 64,                   \
        .imbalance_pct          = 125,                  \
-       .cache_hot_time         = (5*1000000/2),        \
        .cache_nice_tries       = 1,                    \
        .per_cpu_gain           = 100,                  \
        .busy_idx               = 2,                    \
index c5b96b2b81554e452630774254c1a2ae4e9a9f34..805de50df00da8255f02edc3757e440654d16762 100644 (file)
@@ -22,7 +22,6 @@ struct genl_family
        char                    name[GENL_NAMSIZ];
        unsigned int            version;
        unsigned int            maxattr;
-       struct module *         owner;
        struct nlattr **        attrbuf;        /* private */
        struct list_head        ops_list;       /* private */
        struct list_head        family_list;    /* private */
index cde2f4f4f501775d968a343d75ec8a2b0859c7e5..df05f468fa5c312c028f45c9dc1ad81cbd1a2336 100644 (file)
@@ -363,8 +363,9 @@ enum ieee80211_reasoncode {
 #define IEEE80211_OFDM_SHIFT_MASK_A         4
 
 /* NOTE: This data is for statistical purposes; not all hardware provides this
- *       information for frames received.  Not setting these will not cause
- *       any adverse affects. */
+ *       information for frames received.
+ *       For ieee80211_rx_mgt, at least the 'len' field must be set.
+ */
 struct ieee80211_rx_stats {
        u32 mac_time;
        s8 rssi;
@@ -1088,6 +1089,7 @@ extern int ieee80211_tx_frame(struct ieee80211_device *ieee,
 /* ieee80211_rx.c */
 extern int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
                        struct ieee80211_rx_stats *rx_stats);
+/* make sure to set stats->len */
 extern void ieee80211_rx_mgt(struct ieee80211_device *ieee,
                             struct ieee80211_hdr_4addr *header,
                             struct ieee80211_rx_stats *stats);
index 25b081a730e60716fe6b010161f41ca9c3b7ae15..91684436af8e96dfa004f5d3510edf99b4aa9593 100644 (file)
@@ -37,7 +37,4 @@ struct nf_conntrack_ipv4 {
 struct sk_buff *
 nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
 
-/* call to create an explicit dependency on nf_conntrack_l3proto_ipv4. */
-extern void need_ip_conntrack(void);
-
 #endif /*_NF_CONNTRACK_IPV4_H*/
index 64b82b74a65089d3f66dbf8a98c563dacb5e0359..6d075ca16e6eb1cf7b1a7a9e111a39dda0f2e3fd 100644 (file)
@@ -221,9 +221,6 @@ extern void nf_ct_helper_put(struct nf_conntrack_helper *helper);
 extern struct nf_conntrack_helper *
 __nf_conntrack_helper_find_byname(const char *name);
 
-/* call to create an explicit dependency on nf_conntrack. */
-extern void need_nf_conntrack(void);
-
 extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
                                const struct nf_conntrack_tuple *orig);
 
index 14ce790e5c65c81163ede74f7b7a02cb18b43b06..530ef1f752836df846a49812982c4ba5635bd8ab 100644 (file)
@@ -111,7 +111,7 @@ struct nf_conntrack_tuple
 #ifdef __KERNEL__
 
 #define NF_CT_DUMP_TUPLE(tp)                                               \
-DEBUGP("tuple %p: %u %u %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu -> %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu\n",                                     \
+DEBUGP("tuple %p: %u %u " NIP6_FMT " %hu -> " NIP6_FMT " %hu\n",           \
        (tp), (tp)->src.l3num, (tp)->dst.protonum,                          \
        NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \
        NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all))
index 8f241216f46bdb23e29425b58b308bea5a6b5092..a553f39f6aee66ec66e56c920fa46569251911e3 100644 (file)
@@ -225,13 +225,13 @@ extern int sctp_debug_flag;
        if (sctp_debug_flag) { \
                if (saddr->sa.sa_family == AF_INET6) { \
                        printk(KERN_DEBUG \
-                              lead "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x" trail, \
+                              lead NIP6_FMT trail, \
                               leadparm, \
                               NIP6(saddr->v6.sin6_addr), \
                               otherparms); \
                } else { \
                        printk(KERN_DEBUG \
-                              lead "%u.%u.%u.%u" trail, \
+                              lead NIPQUAD_FMT trail, \
                               leadparm, \
                               NIPQUAD(saddr->v4.sin_addr.s_addr), \
                               otherparms); \
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
new file mode 100644 (file)
index 0000000..9566608
--- /dev/null
@@ -0,0 +1,257 @@
+/*
+ * include/net/tipc/tipc.h: Main include file for TIPC users
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_H_
+#define _NET_TIPC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+
+/* 
+ * Native API
+ */
+
+/*
+ * TIPC operating mode routines
+ */
+
+u32 tipc_get_addr(void);
+
+#define TIPC_NOT_RUNNING  0
+#define TIPC_NODE_MODE    1
+#define TIPC_NET_MODE     2
+
+typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
+
+int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
+
+void tipc_detach(unsigned int userref);
+
+int tipc_get_mode(void);
+
+/*
+ * TIPC port manipulation routines
+ */
+
+typedef void (*tipc_msg_err_event) (void *usr_handle,
+                                   u32 portref,
+                                   struct sk_buff **buf,
+                                   unsigned char const *data,
+                                   unsigned int size,
+                                   int reason, 
+                                   struct tipc_portid const *attmpt_destid);
+
+typedef void (*tipc_named_msg_err_event) (void *usr_handle,
+                                         u32 portref,
+                                         struct sk_buff **buf,
+                                         unsigned char const *data,
+                                         unsigned int size,
+                                         int reason, 
+                                         struct tipc_name_seq const *attmpt_dest);
+
+typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
+                                         u32 portref,
+                                         struct sk_buff **buf,
+                                         unsigned char const *data,
+                                         unsigned int size,
+                                         int reason);
+
+typedef void (*tipc_msg_event) (void *usr_handle,
+                               u32 portref,
+                               struct sk_buff **buf,
+                               unsigned char const *data,
+                               unsigned int size,
+                               unsigned int importance, 
+                               struct tipc_portid const *origin);
+
+typedef void (*tipc_named_msg_event) (void *usr_handle,
+                                     u32 portref,
+                                     struct sk_buff **buf,
+                                     unsigned char const *data,
+                                     unsigned int size,
+                                     unsigned int importance, 
+                                     struct tipc_portid const *orig,
+                                     struct tipc_name_seq const *dest);
+
+typedef void (*tipc_conn_msg_event) (void *usr_handle,
+                                    u32 portref,
+                                    struct sk_buff **buf,
+                                    unsigned char const *data,
+                                    unsigned int size);
+
+typedef void (*tipc_continue_event) (void *usr_handle, 
+                                    u32 portref);
+
+int tipc_createport(unsigned int tipc_user, 
+                   void *usr_handle, 
+                   unsigned int importance, 
+                   tipc_msg_err_event error_cb, 
+                   tipc_named_msg_err_event named_error_cb, 
+                   tipc_conn_shutdown_event conn_error_cb, 
+                   tipc_msg_event message_cb, 
+                   tipc_named_msg_event named_message_cb, 
+                   tipc_conn_msg_event conn_message_cb, 
+                   tipc_continue_event continue_event_cb,/* May be zero */
+                   u32 *portref);
+
+int tipc_deleteport(u32 portref);
+
+int tipc_ownidentity(u32 portref, struct tipc_portid *port);
+
+int tipc_portimportance(u32 portref, unsigned int *importance);
+int tipc_set_portimportance(u32 portref, unsigned int importance);
+
+int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
+int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+
+int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
+int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+
+int tipc_publish(u32 portref, unsigned int scope, 
+                struct tipc_name_seq const *name_seq);
+int tipc_withdraw(u32 portref, unsigned int scope,
+                 struct tipc_name_seq const *name_seq); /* 0: all */
+
+int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+
+int tipc_disconnect(u32 portref);
+
+int tipc_shutdown(u32 ref); /* Sends SHUTDOWN msg */
+
+int tipc_isconnected(u32 portref, int *isconnected);
+
+int tipc_peer(u32 portref, struct tipc_portid *peer);
+
+int tipc_ref_valid(u32 portref); 
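+
+/*
+ * Illustrative only: a minimal native user might create a port and
+ * publish one name instance roughly as follows; my_userref (obtained
+ * from tipc_attach()) and the my_msg_cb callback are hypothetical:
+ *
+ *     u32 portref;
+ *     struct tipc_name_seq seq = { .type = 1000, .lower = 1, .upper = 1 };
+ *
+ *     tipc_createport(my_userref, NULL, TIPC_LOW_IMPORTANCE,
+ *                     NULL, NULL, NULL,
+ *                     my_msg_cb, NULL, NULL, NULL, &portref);
+ *     tipc_publish(portref, TIPC_NODE_SCOPE, &seq);
+ */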
+
+/*
+ * TIPC messaging routines
+ */
+
+#define TIPC_PORT_IMPORTANCE 100       /* send using current port setting */
+
+
+int tipc_send(u32 portref,
+             unsigned int num_sect,
+             struct iovec const *msg_sect);
+
+int tipc_send_buf(u32 portref,
+                 struct sk_buff *buf,
+                 unsigned int dsz);
+
+int tipc_send2name(u32 portref, 
+                  struct tipc_name const *name, 
+                  u32 domain,  /* 0:own zone */
+                  unsigned int num_sect,
+                  struct iovec const *msg_sect);
+
+int tipc_send_buf2name(u32 portref,
+                      struct tipc_name const *name,
+                      u32 domain,
+                      struct sk_buff *buf,
+                      unsigned int dsz);
+
+int tipc_forward2name(u32 portref, 
+                     struct tipc_name const *name, 
+                     u32 domain,   /*0: own zone */
+                     unsigned int section_count,
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *origin,
+                     unsigned int importance);
+
+int tipc_forward_buf2name(u32 portref,
+                         struct tipc_name const *name,
+                         u32 domain,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance);
+
+int tipc_send2port(u32 portref,
+                  struct tipc_portid const *dest,
+                  unsigned int num_sect,
+                  struct iovec const *msg_sect);
+
+int tipc_send_buf2port(u32 portref,
+                      struct tipc_portid const *dest,
+                      struct sk_buff *buf,
+                      unsigned int dsz);
+
+int tipc_forward2port(u32 portref,
+                     struct tipc_portid const *dest,
+                     unsigned int num_sect,
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *origin,
+                     unsigned int importance);
+
+int tipc_forward_buf2port(u32 portref,
+                         struct tipc_portid const *dest,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance);
+
+int tipc_multicast(u32 portref, 
+                  struct tipc_name_seq const *seq, 
+                  u32 domain,  /* 0:own zone */
+                  unsigned int section_count,
+                  struct iovec const *msg);
+
+#if 0
+int tipc_multicast_buf(u32 portref, 
+                      struct tipc_name_seq const *seq, 
+                      u32 domain,      /* 0:own zone */
+                      void *buf,
+                      unsigned int size);
+#endif
+
+/*
+ * TIPC subscription routines
+ */
+
+int tipc_ispublished(struct tipc_name const *name);
+
+/*
+ * Get number of available nodes within specified domain (excluding own node)
+ */
+
+unsigned int tipc_available_nodes(const u32 domain);
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
new file mode 100644 (file)
index 0000000..098607c
--- /dev/null
@@ -0,0 +1,121 @@
+/*
+ * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_BEARER_H_
+#define _NET_TIPC_BEARER_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc_config.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/*
+ * Identifiers of supported TIPC media types
+ */
+
+#define TIPC_MEDIA_TYPE_ETH    1
+
+struct tipc_media_addr {
+       __u32  type;
+       union {
+               __u8   eth_addr[6];     /* Ethernet bearer */ 
+#if 0
+               /* Prototypes for other possible bearer types */
+
+               struct {
+                       __u16 sin_family;
+                       __u16 sin_port;
+                       struct {
+                               __u32 s_addr;
+                       } sin_addr;
+                       char pad[4];
+               } addr_in;              /* IP-based bearer */
+               __u16  sock_descr;      /* generic socket bearer */
+#endif
+       } dev_addr;
+};
+
+/**
+ * struct tipc_bearer - TIPC bearer info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ * 
+ * Note: TIPC initializes "name" and "lock" fields; user is responsible for
+ * initializing all other fields when a bearer is enabled.
+ */
+
+struct tipc_bearer {
+       void *usr_handle;
+       u32 mtu;
+       int blocked;
+       spinlock_t lock;
+       struct tipc_media_addr addr;
+       char name[TIPC_MAX_BEARER_NAME];
+};
+
+
+int  tipc_register_media(u32 media_type,
+                        char *media_name, 
+                        int (*enable)(struct tipc_bearer *), 
+                        void (*disable)(struct tipc_bearer *), 
+                        int (*send_msg)(struct sk_buff *, 
+                                        struct tipc_bearer *,
+                                        struct tipc_media_addr *), 
+                        char *(*addr2str)(struct tipc_media_addr *a,
+                                          char *str_buf,
+                                          int str_size),
+                        struct tipc_media_addr *bcast_addr,
+                        const u32 bearer_priority,
+                        const u32 link_tolerance,  /* [ms] */
+                        const u32 send_window_limit); 
+
+void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+
+int  tipc_block_bearer(const char *name);
+void tipc_continue(struct tipc_bearer *tb_ptr); 
+
+int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_disable_bearer(const char *name);
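+
+/*
+ * Illustrative only: an Ethernet-style media driver would register
+ * itself roughly like this.  The my_*() callbacks and the priority
+ * value 10 are placeholders; the tolerance and window defaults come
+ * from linux/tipc_config.h:
+ *
+ *     static struct tipc_media_addr eth_bcast = {
+ *             .type = TIPC_MEDIA_TYPE_ETH,
+ *             .dev_addr.eth_addr = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
+ *     };
+ *
+ *     tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+ *                         my_enable, my_disable, my_send_msg,
+ *                         my_addr2str, &eth_bcast,
+ *                         10, TIPC_DEF_LINK_TOL, TIPC_DEF_LINK_WIN);
+ */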
+
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
new file mode 100644 (file)
index 0000000..4d096ee
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_MSG_H_
+#define _NET_TIPC_MSG_H_
+
+#ifdef __KERNEL__
+
+struct tipc_msg {
+       u32 hdr[15];
+};
+
+
+/*
+               TIPC user data message header format, version 2:
+
+
+       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w0:|vers | user  |hdr sz |n|d|s|-|          message size           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w1:|mstyp| error |rer cnt|lsc|opt p|      broadcast ack no         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w2:|        link level ack no      |   broadcast/link level seq no |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w3:|                       previous node                           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w4:|                      originating port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w5:|                      destination port                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   w6:|                      originating node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w7:|                      destination node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w8:|            name type / transport sequence number              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w9:|              name instance/multicast lower bound              |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+   wA:|                    multicast upper bound                      |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
+      /                                                               /
+      \                           options                             \
+      /                                                               /
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+*/
+
+#define TIPC_CONN_MSG  0
+#define TIPC_MCAST_MSG 1
+#define TIPC_NAMED_MSG 2
+#define TIPC_DIRECT_MSG        3
+
+
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+       return ntohl(m->hdr[pos]);
+}
+
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+       return (msg_word(m, w) >> pos) & mask;
+}
+
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
+static inline int msg_short(struct tipc_msg *m)
+{
+       return (msg_hdr_sz(m) == 24);
+}
+
+static inline u32 msg_size(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+       return (msg_size(m) - msg_hdr_sz(m));
+}
+
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+       return ((unchar *)m) + msg_hdr_sz(m);
+}
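+
+/*
+ * Illustrative only: given a pointer to a received header (here taken
+ * from a hypothetical sk_buff "buf" whose data area starts with the
+ * header), the user data is delimited by:
+ *
+ *     struct tipc_msg *m = (struct tipc_msg *)buf->data;
+ *     unchar *payload = msg_data(m);
+ *     u32 payload_len = msg_data_sz(m);
+ */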
+
+static inline u32 msg_type(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 29, 0x7);
+}
+
+static inline u32 msg_direct(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_DIRECT_MSG);
+}
+
+static inline u32 msg_named(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_NAMED_MSG);
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_MCAST_MSG);
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+       return (msg_type(m) == TIPC_CONN_MSG);
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 25, 0xf);
+}
+
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+       return msg_word(m, 3);
+}
+
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+       return msg_word(m, 4);
+}
+
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+       return msg_word(m, 5);
+}
+
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+       return msg_word(m, 5);
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+       if (likely(msg_short(m)))
+               return msg_prevnode(m);
+       return msg_word(m, 6);
+}
+
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+       return msg_word(m, 7);
+}
+
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+       return msg_word(m, 8);
+}
+
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+       return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+       return msg_nameinst(m);
+}
+
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+       return msg_word(m, 10);
+}
+
+static inline char *msg_options(struct tipc_msg *m, u32 *len)
+{
+       u32 pos = msg_bits(m, 1, 16, 0x7);
+
+       if (!pos)
+               return NULL;
+       pos = (pos * 4) + 28;
+       *len = msg_hdr_sz(m) - pos;
+       return (char *)&m->hdr[pos/4];
+}
+
+#endif
+
+#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
new file mode 100644 (file)
index 0000000..333bba6
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
+ * 
+ * Copyright (c) 1994-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _NET_TIPC_PORT_H_
+#define _NET_TIPC_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/tipc.h>
+#include <linux/skbuff.h>
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_FLOW_CONTROL_WIN 512
+
+/**
+ * struct tipc_port - native TIPC port info available to privileged users
+ * @usr_handle: pointer to additional user-defined information about port
+ * @lock: pointer to spinlock for controlling access to port
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @conn_unacked: number of unacknowledged messages received from peer port
+ * @published: non-zero if port has one or more associated names
+ * @congested: non-zero if cannot send because of link or port congestion
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ */
+
+struct tipc_port {
+       void *usr_handle;
+       spinlock_t *lock;
+       int connected;
+       u32 conn_type;
+       u32 conn_instance;
+       u32 conn_unacked;
+       int published;
+       u32 congested;
+       u32 ref;
+       struct tipc_msg phdr;
+};
+
+
+/**
+ * tipc_createport_raw - create a native TIPC port and return its reference
+ *
+ * Note: 'dispatcher' and 'wakeup' deliver a locked port.
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+                       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+                       void (*wakeup)(struct tipc_port *),
+                       const u32 importance);
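
A hedged sketch of how an in-kernel user of this native API might create a port with the declaration above. The callback bodies, the my_* names and the assumption that a zero return value means failure are illustrative only; the importance constant comes from <linux/tipc.h>.

    /* hypothetical caller - not part of this patch */
    static u32 my_dispatcher(struct tipc_port *p, struct sk_buff *buf)
    {
            /* the port arrives locked, per the note above */
            kfree_skb(buf);
            return 0;
    }

    static void my_wakeup(struct tipc_port *p)
    {
            /* port is no longer congested; resume sending */
    }

    static int my_open(void *handle)
    {
            u32 ref = tipc_createport_raw(handle, my_dispatcher, my_wakeup,
                                          TIPC_LOW_IMPORTANCE);

            if (!ref)               /* assumed failure convention */
                    return -ENOMEM;
            return 0;
    }
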
+
+/*
+ * tipc_set_msg_option(): port must be locked.
+ */
+int tipc_set_msg_option(struct tipc_port *tp_ptr,
+                       const char *opt,
+                       const u32 len);
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err);
+
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
+
+void tipc_acknowledge(u32 port_ref, u32 ack);
+
+struct tipc_port *tipc_get_port(const u32 ref);
+
+void *tipc_get_handle(const u32 ref);
+
+
+#endif
+
+#endif
+
index a7f4c355a91f76f0fbb8b41cc4edc0edb7b3d7e6..22fc886b9695f94fdca32e4d57b36a2391de7bc4 100644 (file)
@@ -88,7 +88,6 @@ enum ib_atomic_cap {
 
 struct ib_device_attr {
        u64                     fw_ver;
-       __be64                  node_guid;
        __be64                  sys_image_guid;
        u64                     max_mr_size;
        u64                     page_size_cap;
@@ -951,6 +950,7 @@ struct ib_device {
        u64                          uverbs_cmd_mask;
        int                          uverbs_abi_ver;
 
+       __be64                       node_guid;
        u8                           node_type;
        u8                           phys_port_cnt;
 };
index f073a2461faa0b2090c4094215aa9d7804882eae..04ccab099e84e3ce01125a540e02f49d216f5943 100644 (file)
@@ -275,7 +275,7 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  * The number of overruns is added to the overrun field.
  */
 unsigned long
-hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
+hrtimer_forward(struct hrtimer *timer, ktime_t interval)
 {
        unsigned long orun = 1;
        ktime_t delta, now;
@@ -287,6 +287,9 @@ hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
        if (delta.tv64 < 0)
                return 0;
 
+       if (interval.tv64 < timer->base->resolution.tv64)
+               interval.tv64 = timer->base->resolution.tv64;
+
        if (unlikely(delta.tv64 >= interval.tv64)) {
                nsec_t incr = ktime_to_ns(interval);
 
@@ -314,7 +317,6 @@ hrtimer_forward(struct hrtimer *timer, const ktime_t interval)
 static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
        struct rb_node **link = &base->active.rb_node;
-       struct list_head *prev = &base->pending;
        struct rb_node *parent = NULL;
        struct hrtimer *entry;
 
@@ -330,22 +332,23 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
                 */
                if (timer->expires.tv64 < entry->expires.tv64)
                        link = &(*link)->rb_left;
-               else {
+               else
                        link = &(*link)->rb_right;
-                       prev = &entry->list;
-               }
        }
 
        /*
-        * Insert the timer to the rbtree and to the sorted list:
+        * Insert the timer into the rbtree and check whether it
+        * replaces the first pending timer
         */
        rb_link_node(&timer->node, parent, link);
        rb_insert_color(&timer->node, &base->active);
-       list_add(&timer->list, prev);
 
        timer->state = HRTIMER_PENDING;
-}
 
+       if (!base->first || timer->expires.tv64 <
+           rb_entry(base->first, struct hrtimer, node)->expires.tv64)
+               base->first = &timer->node;
+}
 
 /*
  * __remove_hrtimer - internal function to remove a timer
@@ -355,9 +358,11 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 {
        /*
-        * Remove the timer from the sorted list and from the rbtree:
+        * Remove the timer from the rbtree and replace the
+        * first entry pointer if necessary.
         */
-       list_del(&timer->list);
+       if (base->first == &timer->node)
+               base->first = rb_next(&timer->node);
        rb_erase(&timer->node, &base->active);
 }
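
The two hunks above replace the separate sorted 'pending' list with the rbtree alone plus a cached pointer to its leftmost (earliest-expiring) node. A generic sketch of that pattern, using the same <linux/rbtree.h> primitives, is shown here; 'struct item' and 'struct queue' are hypothetical stand-ins for hrtimer and hrtimer_base.

    struct item {
            struct rb_node node;
            u64 key;                        /* expiry analogue */
    };

    struct queue {
            struct rb_root active;
            struct rb_node *first;          /* leftmost node, NULL if empty */
    };

    static void queue_insert(struct queue *q, struct item *it)
    {
            struct rb_node **link = &q->active.rb_node, *parent = NULL;

            while (*link) {
                    struct item *entry = rb_entry(*link, struct item, node);

                    parent = *link;
                    link = it->key < entry->key ? &(*link)->rb_left
                                                : &(*link)->rb_right;
            }
            rb_link_node(&it->node, parent, link);
            rb_insert_color(&it->node, &q->active);

            /* earliest element so far? then it becomes the cached first node */
            if (!q->first || it->key < rb_entry(q->first, struct item, node)->key)
                    q->first = &it->node;
    }

    static void queue_remove(struct queue *q, struct item *it)
    {
            /* advance the cache before unlinking, as __remove_hrtimer() does */
            if (q->first == &it->node)
                    q->first = rb_next(&it->node);
            rb_erase(&it->node, &q->active);
    }
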
 
@@ -516,9 +521,8 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
        struct hrtimer_base *bases;
 
-       tp->tv_sec = 0;
        bases = per_cpu(hrtimer_bases, raw_smp_processor_id());
-       tp->tv_nsec = bases[which_clock].resolution;
+       *tp = ktime_to_timespec(bases[which_clock].resolution);
 
        return 0;
 }
@@ -529,16 +533,17 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 static inline void run_hrtimer_queue(struct hrtimer_base *base)
 {
        ktime_t now = base->get_time();
+       struct rb_node *node;
 
        spin_lock_irq(&base->lock);
 
-       while (!list_empty(&base->pending)) {
+       while ((node = base->first)) {
                struct hrtimer *timer;
                int (*fn)(void *);
                int restart;
                void *data;
 
-               timer = list_entry(base->pending.next, struct hrtimer, list);
+               timer = rb_entry(node, struct hrtimer, node);
                if (now.tv64 <= timer->expires.tv64)
                        break;
 
@@ -732,7 +737,6 @@ static void __devinit init_hrtimers_cpu(int cpu)
 
        for (i = 0; i < MAX_HRTIMER_BASES; i++) {
                spin_lock_init(&base->lock);
-               INIT_LIST_HEAD(&base->pending);
                base++;
        }
 }
index c0c60c926d5eafe1ebd57a0f298324933cb46507..c9dec2aa19760c9dd54905556181692054ade31e 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/notifier.h>
 #include <linux/profile.h>
 #include <linux/suspend.h>
+#include <linux/vmalloc.h>
 #include <linux/blkdev.h>
 #include <linux/delay.h>
 #include <linux/smp.h>
@@ -1289,6 +1290,9 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
                }
        }
 
+       if (p->last_waker_cpu != this_cpu)
+               goto out_set_cpu;
+
        if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
                goto out_set_cpu;
 
@@ -1359,6 +1363,8 @@ out_set_cpu:
                cpu = task_cpu(p);
        }
 
+       p->last_waker_cpu = this_cpu;
+
 out_activate:
 #endif /* CONFIG_SMP */
        if (old_state == TASK_UNINTERRUPTIBLE) {
@@ -1440,9 +1446,12 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
        memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP)
+       p->last_waker_cpu = cpu;
+#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
        p->oncpu = 0;
 #endif
+#endif
 #ifdef CONFIG_PREEMPT
        /* Want to start with kernel preemption disabled. */
        task_thread_info(p)->preempt_count = 1;
@@ -5082,7 +5091,470 @@ static void init_sched_build_groups(struct sched_group groups[], cpumask_t span,
 
 #define SD_NODES_PER_DOMAIN 16
 
+/*
+ * Self-tuning task migration cost measurement between source and target CPUs.
+ *
+ * This is done by measuring the cost of manipulating buffers of varying
+ * sizes. For a given buffer-size here are the steps that are taken:
+ *
+ * 1) the source CPU reads+dirties a shared buffer
+ * 2) the target CPU reads+dirties the same shared buffer
+ *
+ * We measure how long they take, in the following 4 scenarios:
+ *
+ *  - source: CPU1, target: CPU2 | cost1
+ *  - source: CPU2, target: CPU1 | cost2
+ *  - source: CPU1, target: CPU1 | cost3
+ *  - source: CPU2, target: CPU2 | cost4
+ *
+ * We then calculate the (cost1+cost2) - (cost3+cost4) difference - this is
+ * the cost of migration.
+ *
+ * We then start off from a small buffer-size and iterate up to larger
+ * buffer sizes, in 5% steps - measuring each buffer-size separately, and
+ * doing a maximum search for the cost. (The maximum cost for a migration
+ * normally occurs when the working set size is around the effective cache
+ * size.)
+ */
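
A made-up numeric example of the subtraction described above, matching the way measure_cost() below subtracts the same-CPU cost from the cross-CPU cost; the nanosecond values are invented purely for illustration.

    #include <stdio.h>

    int main(void)
    {
            long long cost1 = 900000;       /* ns, source CPU1 -> target CPU2 */
            long long cost2 = 880000;       /* ns, source CPU2 -> target CPU1 */
            long long cost3 = 400000;       /* ns, source CPU1 -> target CPU1 */
            long long cost4 = 410000;       /* ns, source CPU2 -> target CPU2 */

            /* cross-CPU runs minus same-CPU runs ~= cost of one migration */
            printf("migration cost ~ %lld ns\n",
                   (cost1 + cost2) - (cost3 + cost4));
            return 0;
    }
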
+#define SEARCH_SCOPE           2
+#define MIN_CACHE_SIZE         (64*1024U)
+#define DEFAULT_CACHE_SIZE     (5*1024*1024U)
+#define ITERATIONS             2
+#define SIZE_THRESH            130
+#define COST_THRESH            130
+
+/*
+ * The migration cost is a function of 'domain distance'. Domain
+ * distance is the number of steps a CPU has to iterate down its
+ * domain tree to share a domain with the other CPU. The farther
+ * two CPUs are from each other, the larger the distance gets.
+ *
+ * Note that we use the distance only to cache measurement results,
+ * the distance value is not used numerically otherwise. When two
+ * CPUs have the same distance it is assumed that the migration
+ * cost is the same. (this is a simplification but quite practical)
+ */
+#define MAX_DOMAIN_DISTANCE 32
+
+static unsigned long long migration_cost[MAX_DOMAIN_DISTANCE] =
+               { [ 0 ... MAX_DOMAIN_DISTANCE-1 ] = -1LL };
+
+/*
+ * Allow override of migration cost - in units of microseconds.
+ * E.g. migration_cost=1000,2000,3000 will set up a level-1 cost
+ * of 1 msec, level-2 cost of 2 msecs and level-3 cost of 3 msecs:
+ */
+static int __init migration_cost_setup(char *str)
+{
+       int ints[MAX_DOMAIN_DISTANCE+1], i;
+
+       str = get_options(str, ARRAY_SIZE(ints), ints);
+
+       printk("#ints: %d\n", ints[0]);
+       for (i = 1; i <= ints[0]; i++) {
+               migration_cost[i-1] = (unsigned long long)ints[i]*1000;
+               printk("migration_cost[%d]: %Ld\n", i-1, migration_cost[i-1]);
+       }
+       return 1;
+}
+
+__setup ("migration_cost=", migration_cost_setup);
+
+/*
+ * Global multiplier (divisor) for migration-cutoff values,
+ * in percentiles. E.g. use a value of 150 to get 1.5 times
+ * longer cache-hot cutoff times.
+ *
+ * (We scale it from 100 to 128 to make long long handling easier.)
+ */
+
+#define MIGRATION_FACTOR_SCALE 128
+
+static unsigned int migration_factor = MIGRATION_FACTOR_SCALE;
+
+static int __init setup_migration_factor(char *str)
+{
+       get_option(&str, &migration_factor);
+       migration_factor = migration_factor * MIGRATION_FACTOR_SCALE / 100;
+       return 1;
+}
+
+__setup("migration_factor=", setup_migration_factor);
+
+/*
+ * Estimated distance of two CPUs, measured via the number of domains
+ * we have to pass for the two CPUs to be in the same span:
+ */
+static unsigned long domain_distance(int cpu1, int cpu2)
+{
+       unsigned long distance = 0;
+       struct sched_domain *sd;
+
+       for_each_domain(cpu1, sd) {
+               WARN_ON(!cpu_isset(cpu1, sd->span));
+               if (cpu_isset(cpu2, sd->span))
+                       return distance;
+               distance++;
+       }
+       if (distance >= MAX_DOMAIN_DISTANCE) {
+               WARN_ON(1);
+               distance = MAX_DOMAIN_DISTANCE-1;
+       }
+
+       return distance;
+}
+
+static unsigned int migration_debug;
+
+static int __init setup_migration_debug(char *str)
+{
+       get_option(&str, &migration_debug);
+       return 1;
+}
+
+__setup("migration_debug=", setup_migration_debug);
+
+/*
+ * Maximum cache-size that the scheduler should try to measure.
+ * Architectures with larger caches should tune this up during
+ * bootup. Gets used in the domain-setup code (i.e. during SMP
+ * bootup).
+ */
+unsigned int max_cache_size;
+
+static int __init setup_max_cache_size(char *str)
+{
+       get_option(&str, &max_cache_size);
+       return 1;
+}
+
+__setup("max_cache_size=", setup_max_cache_size);
+
+/*
+ * Dirty a big buffer in a hard-to-predict (for the L2 cache) way. This
+ * is the operation that is timed, so we try to generate unpredictable
+ * cachemisses that still end up filling the L2 cache:
+ */
+static void touch_cache(void *__cache, unsigned long __size)
+{
+       unsigned long size = __size/sizeof(long), chunk1 = size/3,
+                       chunk2 = 2*size/3;
+       unsigned long *cache = __cache;
+       int i;
+
+       for (i = 0; i < size/6; i += 8) {
+               switch (i % 6) {
+                       case 0: cache[i]++;
+                       case 1: cache[size-1-i]++;
+                       case 2: cache[chunk1-i]++;
+                       case 3: cache[chunk1+i]++;
+                       case 4: cache[chunk2-i]++;
+                       case 5: cache[chunk2+i]++;
+               }
+       }
+}
+
+/*
+ * Measure the cache-cost of one task migration. Returns in units of nsec.
+ */
+static unsigned long long measure_one(void *cache, unsigned long size,
+                                     int source, int target)
+{
+       cpumask_t mask, saved_mask;
+       unsigned long long t0, t1, t2, t3, cost;
+
+       saved_mask = current->cpus_allowed;
+
+       /*
+        * Flush source caches to RAM and invalidate them:
+        */
+       sched_cacheflush();
+
+       /*
+        * Migrate to the source CPU:
+        */
+       mask = cpumask_of_cpu(source);
+       set_cpus_allowed(current, mask);
+       WARN_ON(smp_processor_id() != source);
+
+       /*
+        * Dirty the working set:
+        */
+       t0 = sched_clock();
+       touch_cache(cache, size);
+       t1 = sched_clock();
+
+       /*
+        * Migrate to the target CPU, dirty the L2 cache and access
+        * the shared buffer. (which represents the working set
+        * of a migrated task.)
+        */
+       mask = cpumask_of_cpu(target);
+       set_cpus_allowed(current, mask);
+       WARN_ON(smp_processor_id() != target);
+
+       t2 = sched_clock();
+       touch_cache(cache, size);
+       t3 = sched_clock();
+
+       cost = t1-t0 + t3-t2;
+
+       if (migration_debug >= 2)
+               printk("[%d->%d]: %8Ld %8Ld %8Ld => %10Ld.\n",
+                       source, target, t1-t0, t2-t1, t3-t2, cost);
+       /*
+        * Flush target caches to RAM and invalidate them:
+        */
+       sched_cacheflush();
+
+       set_cpus_allowed(current, saved_mask);
+
+       return cost;
+}
+
+/*
+ * Measure a series of task migrations and return the average
+ * result. Since this code runs early during bootup the system
+ * is 'undisturbed' and the average latency makes sense.
+ *
+ * The algorithm in essence auto-detects the relevant cache-size,
+ * so it will properly detect different cachesizes for different
+ * cache-hierarchies, depending on how the CPUs are connected.
+ *
+ * Architectures can prime the upper limit of the search range via
+ * max_cache_size, otherwise the search range defaults to 10MB...64K.
+ */
+static unsigned long long
+measure_cost(int cpu1, int cpu2, void *cache, unsigned int size)
+{
+       unsigned long long cost1, cost2;
+       int i;
+
+       /*
+        * Measure the migration cost of 'size' bytes, over an
+        * average of 10 runs:
+        *
+        * (We perturb the cache size by a small (0..4k)
+        *  value to compensate size/alignment related artifacts.
+        *  We also subtract the cost of the operation done on
+        *  the same CPU.)
+        */
+       cost1 = 0;
+
+       /*
+        * dry run, to make sure we start off cache-cold on cpu1,
+        * and to get any vmalloc pagefaults in advance:
+        */
+       measure_one(cache, size, cpu1, cpu2);
+       for (i = 0; i < ITERATIONS; i++)
+               cost1 += measure_one(cache, size - i*1024, cpu1, cpu2);
+
+       measure_one(cache, size, cpu2, cpu1);
+       for (i = 0; i < ITERATIONS; i++)
+               cost1 += measure_one(cache, size - i*1024, cpu2, cpu1);
+
+       /*
+        * (We measure the non-migrating [cached] cost on both
+        *  cpu1 and cpu2, to handle CPUs with different speeds)
+        */
+       cost2 = 0;
+
+       measure_one(cache, size, cpu1, cpu1);
+       for (i = 0; i < ITERATIONS; i++)
+               cost2 += measure_one(cache, size - i*1024, cpu1, cpu1);
+
+       measure_one(cache, size, cpu2, cpu2);
+       for (i = 0; i < ITERATIONS; i++)
+               cost2 += measure_one(cache, size - i*1024, cpu2, cpu2);
+
+       /*
+        * Get the per-iteration migration cost:
+        */
+       do_div(cost1, 2*ITERATIONS);
+       do_div(cost2, 2*ITERATIONS);
+
+       return cost1 - cost2;
+}
+
+static unsigned long long measure_migration_cost(int cpu1, int cpu2)
+{
+       unsigned long long max_cost = 0, fluct = 0, avg_fluct = 0;
+       unsigned int max_size, size, size_found = 0;
+       long long cost = 0, prev_cost;
+       void *cache;
+
+       /*
+        * Search from max_cache_size*SEARCH_SCOPE down to 64K - the real
+        * relevant cachesize has to lie somewhere in between.
+        */
+       if (max_cache_size) {
+               max_size = max(max_cache_size * SEARCH_SCOPE, MIN_CACHE_SIZE);
+               size = max(max_cache_size / SEARCH_SCOPE, MIN_CACHE_SIZE);
+       } else {
+               /*
+                * Since we have no estimate of the relevant search
+                * range, fall back to the built-in defaults:
+                */
+               max_size = DEFAULT_CACHE_SIZE * SEARCH_SCOPE;
+               size = MIN_CACHE_SIZE;
+       }
+
+       if (!cpu_online(cpu1) || !cpu_online(cpu2)) {
+               printk("cpu %d and %d not both online!\n", cpu1, cpu2);
+               return 0;
+       }
+
+       /*
+        * Allocate the working set:
+        */
+       cache = vmalloc(max_size);
+       if (!cache) {
+               printk("could not vmalloc %d bytes for cache!\n", max_size);
+               return 1000000; /* return 1 msec on very small boxen */
+       }
+
+       while (size <= max_size) {
+               prev_cost = cost;
+               cost = measure_cost(cpu1, cpu2, cache, size);
+
+               /*
+                * Update the max:
+                */
+               if (cost > 0) {
+                       if (max_cost < cost) {
+                               max_cost = cost;
+                               size_found = size;
+                       }
+               }
+               /*
+                * Calculate average fluctuation, we use this to prevent
+                * noise from triggering an early break out of the loop:
+                */
+               fluct = abs(cost - prev_cost);
+               avg_fluct = (avg_fluct + fluct)/2;
+
+               if (migration_debug)
+                       printk("-> [%d][%d][%7d] %3ld.%ld [%3ld.%ld] (%ld): (%8Ld %8Ld)\n",
+                               cpu1, cpu2, size,
+                               (long)cost / 1000000,
+                               ((long)cost / 100000) % 10,
+                               (long)max_cost / 1000000,
+                               ((long)max_cost / 100000) % 10,
+                               domain_distance(cpu1, cpu2),
+                               cost, avg_fluct);
+
+               /*
+                * If we iterated at least 20% past the previous maximum,
+                * and the cost has dropped by more than 20% already,
+                * (taking fluctuations into account) then we assume to
+                * have found the maximum and break out of the loop early:
+                */
+               if (size_found && (size*100 > size_found*SIZE_THRESH))
+                       if (cost+avg_fluct <= 0 ||
+                               max_cost*100 > (cost+avg_fluct)*COST_THRESH) {
+
+                               if (migration_debug)
+                                       printk("-> found max.\n");
+                               break;
+                       }
+               /*
+                * Increase the cachesize in 5% steps:
+                */
+               size = size * 20 / 19;
+       }
+
+       if (migration_debug)
+               printk("[%d][%d] working set size found: %d, cost: %Ld\n",
+                       cpu1, cpu2, size_found, max_cost);
+
+       vfree(cache);
+
+       /*
+        * A task is considered 'cache cold' if at least 2 times
+        * the worst-case cost of migration has passed.
+        *
+        * (this limit is only listened to if the load-balancing
+        * situation is 'nice' - if there is a large imbalance we
+        * ignore it for the sake of CPU utilization and
+        * processing fairness.)
+        */
+       return 2 * max_cost * migration_factor / MIGRATION_FACTOR_SCALE;
+}
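
For example (illustrative numbers only): with the default migration_factor of 128 (i.e. 100%), a measured worst-case max_cost of 500 usecs gives a cache-hot cutoff of 2 * 500 = 1000 usecs, which the second pass of calibrate_migration_costs() below writes into sd->cache_hot_time; booting with migration_factor=150 stores a factor of 192/128 and stretches the same measurement to roughly 1500 usecs.
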
+
+static void calibrate_migration_costs(const cpumask_t *cpu_map)
+{
+       int cpu1 = -1, cpu2 = -1, cpu, orig_cpu = raw_smp_processor_id();
+       unsigned long j0, j1, distance, max_distance = 0;
+       struct sched_domain *sd;
+
+       j0 = jiffies;
+
+       /*
+        * First pass - calculate the cacheflush times:
+        */
+       for_each_cpu_mask(cpu1, *cpu_map) {
+               for_each_cpu_mask(cpu2, *cpu_map) {
+                       if (cpu1 == cpu2)
+                               continue;
+                       distance = domain_distance(cpu1, cpu2);
+                       max_distance = max(max_distance, distance);
+                       /*
+                        * No result cached yet?
+                        */
+                       if (migration_cost[distance] == -1LL)
+                               migration_cost[distance] =
+                                       measure_migration_cost(cpu1, cpu2);
+               }
+       }
+       /*
+        * Second pass - update the sched domain hierarchy with
+        * the new cache-hot-time estimations:
+        */
+       for_each_cpu_mask(cpu, *cpu_map) {
+               distance = 0;
+               for_each_domain(cpu, sd) {
+                       sd->cache_hot_time = migration_cost[distance];
+                       distance++;
+               }
+       }
+       /*
+        * Print the matrix:
+        */
+       if (migration_debug)
+               printk("migration: max_cache_size: %d, cpu: %d MHz:\n",
+                       max_cache_size,
+#ifdef CONFIG_X86
+                       cpu_khz/1000
+#else
+                       -1
+#endif
+               );
+       printk("migration_cost=");
+       for (distance = 0; distance <= max_distance; distance++) {
+               if (distance)
+                       printk(",");
+               printk("%ld", (long)migration_cost[distance] / 1000);
+       }
+       printk("\n");
+       j1 = jiffies;
+       if (migration_debug)
+               printk("migration: %ld seconds\n", (j1-j0)/HZ);
+
+       /*
+        * Move back to the original CPU. NUMA-Q gets confused
+        * if we migrate to another quad during bootup.
+        */
+       if (raw_smp_processor_id() != orig_cpu) {
+               cpumask_t mask = cpumask_of_cpu(orig_cpu),
+                       saved_mask = current->cpus_allowed;
+
+               set_cpus_allowed(current, mask);
+               set_cpus_allowed(current, saved_mask);
+       }
+}
+
 #ifdef CONFIG_NUMA
+
 /**
  * find_next_best_node - find the next node to include in a sched_domain
  * @node: node whose sched_domain we're building
@@ -5448,6 +5920,10 @@ next_sg:
 #endif
                cpu_attach_domain(sd, i);
        }
+       /*
+        * Tune cache-hot values:
+        */
+       calibrate_migration_costs(cpu_map);
 }
 /*
  * Set up scheduler domains and groups.  Callers must hold the hotplug lock.
index 1850d0aef4ac3aba3abc99caf9b479c75319e368..b62cab575a84bb241dad5c1c91717d97a97d2c4d 100644 (file)
@@ -208,6 +208,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                page = vm_normal_page(vma, addr, *pte);
                if (!page)
                        continue;
+               if (PageReserved(page))
+                       continue;
                nid = page_to_nid(page);
                if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
                        continue;
@@ -290,7 +292,7 @@ static inline int check_pgd_range(struct vm_area_struct *vma,
 static inline int vma_migratable(struct vm_area_struct *vma)
 {
        if (vma->vm_flags & (
-               VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP))
+               VM_LOCKED|VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
                return 0;
        return 1;
 }
index d41a0662d4da6abbc596cdbbce461d6af92c7688..8c960b469593a9a6818fa4dd9c08a4b591916c86 100644 (file)
@@ -1742,7 +1742,7 @@ void __devinit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
        unsigned long end_pfn = start_pfn + size;
        unsigned long pfn;
 
-       for (pfn = start_pfn; pfn < end_pfn; pfn++, page++) {
+       for (pfn = start_pfn; pfn < end_pfn; pfn++) {
                if (!early_pfn_valid(pfn))
                        continue;
                page = pfn_to_page(pfn);
index ee6d71ccfa56fe77173bc7806797b5fc01ec1112..cbb48e721ab9f21fd56b380eed49c8c0e4a21031 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -384,6 +384,8 @@ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
        return pagevec_count(pvec);
 }
 
+EXPORT_SYMBOL(pagevec_lookup);
+
 unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                pgoff_t *index, int tag, unsigned nr_pages)
 {
index cdc6d431972b74c8aacd9e07ec32f95e73d5d55e..f9d6a9cc91c4bc806700c6119f45e34283abdeb7 100644 (file)
@@ -90,7 +90,7 @@ struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags)
        file->f_mode = FMODE_WRITE | FMODE_READ;
 
        /* notify everyone as to the change of file size */
-       error = do_truncate(dentry, size, file);
+       error = do_truncate(dentry, size, 0, file);
        if (error < 0)
                goto close_file;
 
index 60f6f321bd7667a9d57ed03edb17c88ac0e4a0dc..9296b269d675771861bf1b2fc596dd31284faeeb 100644 (file)
@@ -159,6 +159,7 @@ source "net/ipx/Kconfig"
 source "drivers/net/appletalk/Kconfig"
 source "net/x25/Kconfig"
 source "net/lapb/Kconfig"
+source "net/tipc/Kconfig"
 
 config NET_DIVERT
        bool "Frame Diverter (EXPERIMENTAL)"
index f5141b9d4f38ff94e7a696438bbaa2e9de9f3214..065796f5fb17701f550bcd5197fe30d35ec986cb 100644 (file)
@@ -45,6 +45,7 @@ obj-$(CONFIG_VLAN_8021Q)      += 8021q/
 obj-$(CONFIG_IP_DCCP)          += dccp/
 obj-$(CONFIG_IP_SCTP)          += sctp/
 obj-$(CONFIG_IEEE80211)                += ieee80211/
+obj-$(CONFIG_TIPC)             += tipc/
 
 ifeq ($(CONFIG_NET),y)
 obj-$(CONFIG_SYSCTL)           += sysctl_net.o
index 9f6e0193ae100cef83b4952b7a3fc20c70b7a1ae..a29c1232c4204e5a66c45e251039c14575930c97 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/netfilter.h>
 #include <linux/module.h>
 #include <linux/ip.h>
+#include <linux/in.h>
 #include <linux/if_arp.h>
 #include <linux/spinlock.h>
 
index 9eb9d0017a01810e37c42f142bbdadbdb3cfee3e..a52665f752240a6e48300d89403d1706675c6ee5 100644 (file)
@@ -287,7 +287,9 @@ load_b:
  * no references or jumps that are out of range, no illegal
  * instructions, and must end with a RET instruction.
  *
- * Returns 0 if the rule set is legal or a negative errno code if not.
+ * All jumps are forward as they are not signed.
+ *
+ * Returns 0 if the rule set is legal or -EINVAL if not.
  */
 int sk_chk_filter(struct sock_filter *filter, int flen)
 {
@@ -299,7 +301,6 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
 
        /* check the filter code now */
        for (pc = 0; pc < flen; pc++) {
-               /* all jumps are forward as they are not signed */
                ftest = &filter[pc];
 
                /* Only allow valid instructions */
@@ -383,11 +384,6 @@ int sk_chk_filter(struct sock_filter *filter, int flen)
                }
        }
 
-       /*
-        * The program must end with a return. We don't care where they
-        * jumped within the script (its always forwards) but in the end
-        * they _will_ hit this.
-        */
         return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
 }
 
index a9893ec03e029e8b4a656c980399d66bbed5a753..db783036e4d8fbdf691418c114acf39396cba258 100644 (file)
@@ -182,6 +182,7 @@ config IP_NF_QUEUE
 
 config IP_NF_IPTABLES
        tristate "IP tables support (required for filtering/masq/NAT)"
+       depends on NETFILTER_XTABLES
        help
          iptables is a general, extensible packet identification framework.
          The packet filtering and full NAT (masquerading, port forwarding,
@@ -191,16 +192,6 @@ config IP_NF_IPTABLES
          To compile it as a module, choose M here.  If unsure, say N.
 
 # The matches.
-config IP_NF_MATCH_LIMIT
-       tristate "limit match support"
-       depends on IP_NF_IPTABLES
-       help
-         limit matching allows you to control the rate at which a rule can be
-         matched: mainly useful in combination with the LOG target ("LOG
-         target support", below) and to avoid some Denial of Service attacks.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_IPRANGE
        tristate "IP range match support"
        depends on IP_NF_IPTABLES
@@ -210,37 +201,6 @@ config IP_NF_MATCH_IPRANGE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_MATCH_MAC
-       tristate "MAC address match support"
-       depends on IP_NF_IPTABLES
-       help
-         MAC matching allows you to match packets based on the source
-         Ethernet address of the packet.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_MATCH_PKTTYPE
-       tristate "Packet type match support"
-       depends on IP_NF_IPTABLES
-       help
-         Packet type matching allows you to match a packet by
-         its "class", eg. BROADCAST, MULTICAST, ...
-
-         Typical usage:
-         iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_MATCH_MARK
-       tristate "netfilter MARK match support"
-       depends on IP_NF_IPTABLES
-       help
-         Netfilter mark matching allows you to match packets based on the
-         `nfmark' value in the packet.  This can be set by the MARK target
-         (see below).
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_MULTIPORT
        tristate "Multiple port match support"
        depends on IP_NF_IPTABLES
@@ -301,15 +261,6 @@ config IP_NF_MATCH_AH_ESP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_MATCH_LENGTH
-       tristate "LENGTH match support"
-       depends on IP_NF_IPTABLES
-       help
-         This option allows you to match the length of a packet against a
-         specific value or range of values.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_TTL
        tristate "TTL match support"
        depends on IP_NF_IPTABLES
@@ -319,50 +270,6 @@ config IP_NF_MATCH_TTL
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_MATCH_TCPMSS
-       tristate "tcpmss match support"
-       depends on IP_NF_IPTABLES
-       help
-         This option adds a `tcpmss' match, which allows you to examine the
-         MSS value of TCP SYN packets, which control the maximum packet size
-         for that connection.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_MATCH_HELPER
-       tristate "Helper match support"
-       depends on IP_NF_IPTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
-       help
-         Helper matching allows you to match packets in dynamic connections
-         tracked by a conntrack-helper, ie. ip_conntrack_ftp
-
-         To compile it as a module, choose M here.  If unsure, say Y.
-
-config IP_NF_MATCH_STATE
-       tristate "Connection state match support"
-       depends on IP_NF_IPTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
-       help
-         Connection state matching allows you to match packets based on their
-         relationship to a tracked connection (ie. previous packets).  This
-         is a powerful tool for packet classification.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_MATCH_CONNTRACK
-       tristate "Connection tracking match support"
-       depends on IP_NF_IPTABLES
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
-       help
-         This is a general conntrack match module, a superset of the state match.
-
-         It allows matching on additional conntrack information, which is
-         useful in complex configurations, such as NAT gateways with multiple
-         internet links or tunnels.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_OWNER
        tristate "Owner match support"
        depends on IP_NF_IPTABLES
@@ -372,15 +279,6 @@ config IP_NF_MATCH_OWNER
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_MATCH_PHYSDEV
-       tristate "Physdev match support"
-       depends on IP_NF_IPTABLES && BRIDGE_NETFILTER
-       help
-         Physdev packet matching matches against the physical bridge ports
-         the IP packet arrived on or will leave by.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_ADDRTYPE
        tristate  'address type match support'
        depends on IP_NF_IPTABLES
@@ -391,75 +289,6 @@ config IP_NF_MATCH_ADDRTYPE
          If you want to compile it as a module, say M here and read
          <file:Documentation/modules.txt>.  If unsure, say `N'.
 
-config IP_NF_MATCH_REALM
-       tristate  'realm match support'
-       depends on IP_NF_IPTABLES
-       select NET_CLS_ROUTE
-       help
-         This option adds a `realm' match, which allows you to use the realm
-         key from the routing subsystem inside iptables.
-       
-         This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option 
-         in tc world.
-       
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-config IP_NF_MATCH_SCTP
-       tristate  'SCTP protocol match support'
-       depends on IP_NF_IPTABLES
-       help
-         With this option enabled, you will be able to use the iptables
-         `sctp' match in order to match on SCTP source/destination ports
-         and SCTP chunk types.
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-config IP_NF_MATCH_DCCP
-       tristate  'DCCP protocol match support'
-       depends on IP_NF_IPTABLES
-       help
-         With this option enabled, you will be able to use the iptables
-         `dccp' match in order to match on DCCP source/destination ports
-         and DCCP flags.
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-config IP_NF_MATCH_COMMENT
-       tristate  'comment match support'
-       depends on IP_NF_IPTABLES
-       help
-         This option adds a `comment' dummy-match, which allows you to put
-         comments in your iptables ruleset.
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-config IP_NF_MATCH_CONNMARK
-       tristate  'Connection mark match support'
-       depends on IP_NF_IPTABLES
-       depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
-       help
-         This option adds a `connmark' match, which allows you to match the
-         connection mark value previously set for the session by `CONNMARK'. 
-       
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  The module will be called
-         ipt_connmark.o.  If unsure, say `N'.
-
-config IP_NF_MATCH_CONNBYTES
-       tristate  'Connection byte/packet counter match support'
-       depends on IP_NF_IPTABLES
-       depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || (NF_CT_ACCT && NF_CONNTRACK_IPV4)
-       help
-         This option adds a `connbytes' match, which allows you to match the
-         number of bytes and/or packets for each direction within a connection.
-
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
 config IP_NF_MATCH_HASHLIMIT
        tristate  'hashlimit match support'
        depends on IP_NF_IPTABLES
@@ -474,19 +303,6 @@ config IP_NF_MATCH_HASHLIMIT
          destination IP' or `500pps from any given source IP'  with a single
          IPtables rule.
 
-config IP_NF_MATCH_STRING
-       tristate  'string match support'
-       depends on IP_NF_IPTABLES 
-       select TEXTSEARCH
-       select TEXTSEARCH_KMP
-       select TEXTSEARCH_BM
-       select TEXTSEARCH_FSM
-       help
-         This option adds a `string' match, which allows you to look for
-         pattern matchings in packets.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_MATCH_POLICY
        tristate "IPsec policy match support"
        depends on IP_NF_IPTABLES && XFRM
@@ -572,17 +388,6 @@ config IP_NF_TARGET_TCPMSS
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_TARGET_NFQUEUE
-       tristate "NFQUEUE Target Support"
-       depends on IP_NF_IPTABLES
-       help
-         This Target replaced the old obsolete QUEUE target.
-
-         As opposed to QUEUE, it supports 65535 different queues,
-         not just one.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 # NAT + specific targets
 config IP_NF_NAT
        tristate "Full NAT"
@@ -735,31 +540,6 @@ config IP_NF_TARGET_DSCP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_TARGET_MARK
-       tristate "MARK target support"
-       depends on IP_NF_MANGLE
-       help
-         This option adds a `MARK' target, which allows you to create rules
-         in the `mangle' table which alter the netfilter mark (nfmark) field
-         associated with the packet prior to routing. This can change
-         the routing method (see `Use netfilter MARK value as routing
-         key') and can also be used by other subsystems to change their
-         behavior.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP_NF_TARGET_CLASSIFY
-       tristate "CLASSIFY target support"
-       depends on IP_NF_MANGLE
-       help
-         This option adds a `CLASSIFY' target, which enables the user to set
-         the priority of a packet. Some qdiscs can use this value for
-         classification, among these are:
-
-         atm, cbq, dsmark, pfifo_fast, htb, prio
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP_NF_TARGET_TTL
        tristate  'TTL target support'
        depends on IP_NF_MANGLE
@@ -774,19 +554,6 @@ config IP_NF_TARGET_TTL
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP_NF_TARGET_CONNMARK
-       tristate  'CONNMARK target support'
-       depends on IP_NF_MANGLE
-       depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
-       help
-         This option adds a `CONNMARK' target, which allows one to manipulate
-         the connection mark value.  Similar to the MARK target, but
-         affects the connection mark value rather than the packet mark value.
-       
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  The module will be called
-         ipt_CONNMARK.o.  If unsure, say `N'.
-
 config IP_NF_TARGET_CLUSTERIP
        tristate "CLUSTERIP target support (EXPERIMENTAL)"
        depends on IP_NF_MANGLE && EXPERIMENTAL
@@ -810,23 +577,10 @@ config IP_NF_RAW
          If you want to compile it as a module, say M here and read
          <file:Documentation/modules.txt>.  If unsure, say `N'.
 
-config IP_NF_TARGET_NOTRACK
-       tristate  'NOTRACK target support'
-       depends on IP_NF_RAW
-       depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
-       help
-         The NOTRACK target allows a select rule to specify
-         which packets *not* to enter the conntrack/NAT
-         subsystem with all the consequences (no ICMP error tracking,
-         no protocol helpers for the selected packets).
-       
-         If you want to compile it as a module, say M here and read
-         <file:Documentation/modules.txt>.  If unsure, say `N'.
-
-
 # ARP tables
 config IP_NF_ARPTABLES
        tristate "ARP tables support"
+       depends on NETFILTER_XTABLES
        help
          arptables is a general, extensible packet identification framework.
          The ARP packet filtering and mangling (manipulation) subsystems
index 549b01a648b31e41f755621b849c5c5164e4afb6..bcefe64b93177c7e4705065c6f909f46eb7a06e2 100644 (file)
@@ -47,14 +47,8 @@ obj-$(CONFIG_IP_NF_RAW) += iptable_raw.o
 
 # matches
 obj-$(CONFIG_IP_NF_MATCH_HELPER) += ipt_helper.o
-obj-$(CONFIG_IP_NF_MATCH_LIMIT) += ipt_limit.o
 obj-$(CONFIG_IP_NF_MATCH_HASHLIMIT) += ipt_hashlimit.o
-obj-$(CONFIG_IP_NF_MATCH_SCTP) += ipt_sctp.o
-obj-$(CONFIG_IP_NF_MATCH_DCCP) += ipt_dccp.o
-obj-$(CONFIG_IP_NF_MATCH_MARK) += ipt_mark.o
-obj-$(CONFIG_IP_NF_MATCH_MAC) += ipt_mac.o
 obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
-obj-$(CONFIG_IP_NF_MATCH_PKTTYPE) += ipt_pkttype.o
 obj-$(CONFIG_IP_NF_MATCH_MULTIPORT) += ipt_multiport.o
 obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
 obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
@@ -62,40 +56,25 @@ obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
 obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
 obj-$(CONFIG_IP_NF_MATCH_DSCP) += ipt_dscp.o
 obj-$(CONFIG_IP_NF_MATCH_AH_ESP) += ipt_ah.o ipt_esp.o
-obj-$(CONFIG_IP_NF_MATCH_LENGTH) += ipt_length.o
 obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
-obj-$(CONFIG_IP_NF_MATCH_STATE) += ipt_state.o
-obj-$(CONFIG_IP_NF_MATCH_CONNMARK) += ipt_connmark.o
-obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
-obj-$(CONFIG_IP_NF_MATCH_CONNBYTES) += ipt_connbytes.o
-obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
-obj-$(CONFIG_IP_NF_MATCH_REALM) += ipt_realm.o
 obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
-obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
 obj-$(CONFIG_IP_NF_MATCH_POLICY) += ipt_policy.o
-obj-$(CONFIG_IP_NF_MATCH_COMMENT) += ipt_comment.o
-obj-$(CONFIG_IP_NF_MATCH_STRING) += ipt_string.o
 
 # targets
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
 obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
 obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_DSCP) += ipt_DSCP.o
-obj-$(CONFIG_IP_NF_TARGET_MARK) += ipt_MARK.o
 obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
 obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
 obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
-obj-$(CONFIG_IP_NF_TARGET_CLASSIFY) += ipt_CLASSIFY.o
 obj-$(CONFIG_IP_NF_NAT_SNMP_BASIC) += ip_nat_snmp_basic.o
 obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
-obj-$(CONFIG_IP_NF_TARGET_CONNMARK) += ipt_CONNMARK.o
 obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 obj-$(CONFIG_IP_NF_TARGET_TCPMSS) += ipt_TCPMSS.o
-obj-$(CONFIG_IP_NF_TARGET_NOTRACK) += ipt_NOTRACK.o
 obj-$(CONFIG_IP_NF_TARGET_CLUSTERIP) += ipt_CLUSTERIP.o
 obj-$(CONFIG_IP_NF_TARGET_TTL) += ipt_TTL.o
-obj-$(CONFIG_IP_NF_TARGET_NFQUEUE) += ipt_NFQUEUE.o
 
 # generic ARP tables
 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
index b6d5284c8020760685bcbf529e65525a915b8af4..afe3d8f8177d7df83df0264e20f3072077ff1317 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/uaccess.h>
 #include <asm/semaphore.h>
 
+#include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_arp/arp_tables.h>
 
 MODULE_LICENSE("GPL");
@@ -55,28 +56,9 @@ do {                                                         \
 #else
 #define ARP_NF_ASSERT(x)
 #endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
-static DECLARE_MUTEX(arpt_mutex);
-
-#define ASSERT_READ_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) ARP_NF_ASSERT(down_trylock(&arpt_mutex) != 0)
 #include <linux/netfilter_ipv4/listhelp.h>
 
-struct arpt_table_info {
-       unsigned int size;
-       unsigned int number;
-       unsigned int initial_entries;
-       unsigned int hook_entry[NF_ARP_NUMHOOKS];
-       unsigned int underflow[NF_ARP_NUMHOOKS];
-       void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(arpt_target);
-static LIST_HEAD(arpt_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
 static inline int arp_devaddr_compare(const struct arpt_devaddr_info *ap,
                                      char *hdr_addr, int len)
 {
@@ -223,9 +205,9 @@ static inline int arp_checkentry(const struct arpt_arp *arp)
 }
 
 static unsigned int arpt_error(struct sk_buff **pskb,
-                              unsigned int hooknum,
                               const struct net_device *in,
                               const struct net_device *out,
+                              unsigned int hooknum,
                               const void *targinfo,
                               void *userinfo)
 {
@@ -254,6 +236,7 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
        struct arpt_entry *e, *back;
        const char *indev, *outdev;
        void *table_base;
+       struct xt_table_info *private = table->private;
 
        /* ARP header, plus 2 device addresses, plus 2 IP addresses.  */
        if (!pskb_may_pull((*pskb), (sizeof(struct arphdr) +
@@ -265,9 +248,9 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
        outdev = out ? out->name : nulldevname;
 
        read_lock_bh(&table->lock);
-       table_base = (void *)table->private->entries[smp_processor_id()];
-       e = get_entry(table_base, table->private->hook_entry[hook]);
-       back = get_entry(table_base, table->private->underflow[hook]);
+       table_base = (void *)private->entries[smp_processor_id()];
+       e = get_entry(table_base, private->hook_entry[hook]);
+       back = get_entry(table_base, private->underflow[hook]);
 
        arp = (*pskb)->nh.arph;
        do {
@@ -315,8 +298,8 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
                                 * abs. verdicts
                                 */
                                verdict = t->u.kernel.target->target(pskb,
-                                                                    hook,
                                                                     in, out,
+                                                                    hook,
                                                                     t->data,
                                                                     userdata);
 
@@ -341,106 +324,6 @@ unsigned int arpt_do_table(struct sk_buff **pskb,
                return verdict;
 }
 
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
-static inline struct arpt_table *find_table_lock(const char *name)
-{
-       struct arpt_table *t;
-
-       if (down_interruptible(&arpt_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &arpt_tables, list)
-               if (strcmp(t->name, name) == 0 && try_module_get(t->me))
-                       return t;
-       up(&arpt_mutex);
-       return NULL;
-}
-
-
-/* Find target, grabs ref.  Returns ERR_PTR() on error. */
-static inline struct arpt_target *find_target(const char *name, u8 revision)
-{
-       struct arpt_target *t;
-       int err = 0;
-
-       if (down_interruptible(&arpt_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &arpt_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision == revision) {
-                               if (try_module_get(t->me)) {
-                                       up(&arpt_mutex);
-                                       return t;
-                               }
-                       } else
-                               err = -EPROTOTYPE; /* Found something. */
-               }
-       }
-       up(&arpt_mutex);
-       return ERR_PTR(err);
-}
-
-struct arpt_target *arpt_find_target(const char *name, u8 revision)
-{
-       struct arpt_target *target;
-
-       target = try_then_request_module(find_target(name, revision),
-                                        "arpt_%s", name);
-       if (IS_ERR(target) || !target)
-               return NULL;
-       return target;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
-       struct arpt_target *t;
-       int have_rev = 0;
-
-       list_for_each_entry(t, &arpt_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision > *bestp)
-                               *bestp = t->revision;
-                       if (t->revision == revision)
-                               have_rev =1;
-               }
-       }
-       return have_rev;
-}
-
-/* Returns true or false (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
-                               int (*revfn)(const char *, u8, int *),
-                               int *err)
-{
-       int have_rev, best = -1;
-
-       if (down_interruptible(&arpt_mutex) != 0) {
-               *err = -EINTR;
-               return 1;
-       }
-       have_rev = revfn(name, revision, &best);
-       up(&arpt_mutex);
-
-       /* Nothing at all?  Return 0 to try loading module. */
-       if (best == -1) {
-               *err = -ENOENT;
-               return 0;
-       }
-
-       *err = best;
-       if (!have_rev)
-               *err = -EPROTONOSUPPORT;
-       return 1;
-}
-
-
 /* All zeroes == unconditional rule. */
 static inline int unconditional(const struct arpt_arp *arp)
 {
@@ -456,7 +339,7 @@ static inline int unconditional(const struct arpt_arp *arp)
 /* Figures out from what hook each rule can be called: returns 0 if
  * there are loops.  Puts hook bitmask in comefrom.
  */
-static int mark_source_chains(struct arpt_table_info *newinfo,
+static int mark_source_chains(struct xt_table_info *newinfo,
                              unsigned int valid_hooks, void *entry0)
 {
        unsigned int hook;
@@ -587,8 +470,8 @@ static inline int check_entry(struct arpt_entry *e, const char *name, unsigned i
        }
 
        t = arpt_get_target(e);
-       target = try_then_request_module(find_target(t->u.user.name,
-                                                    t->u.user.revision),
+       target = try_then_request_module(xt_find_target(NF_ARP, t->u.user.name,
+                                                       t->u.user.revision),
                                         "arpt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("check_entry: `%s' not found\n", t->u.user.name);
@@ -622,7 +505,7 @@ out:
 }
 
 static inline int check_entry_size_and_hooks(struct arpt_entry *e,
-                                            struct arpt_table_info *newinfo,
+                                            struct xt_table_info *newinfo,
                                             unsigned char *base,
                                             unsigned char *limit,
                                             const unsigned int *hook_entries,
@@ -656,7 +539,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
            < 0 (not ARPT_RETURN). --RR */
 
        /* Clear counters and comefrom */
-       e->counters = ((struct arpt_counters) { 0, 0 });
+       e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;
 
        (*i)++;
@@ -683,7 +566,7 @@ static inline int cleanup_entry(struct arpt_entry *e, unsigned int *i)
  */
 static int translate_table(const char *name,
                           unsigned int valid_hooks,
-                          struct arpt_table_info *newinfo,
+                          struct xt_table_info *newinfo,
                           void *entry0,
                           unsigned int size,
                           unsigned int number,
@@ -764,34 +647,9 @@ static int translate_table(const char *name,
        return ret;
 }
 
-static struct arpt_table_info *replace_table(struct arpt_table *table,
-                                            unsigned int num_counters,
-                                            struct arpt_table_info *newinfo,
-                                            int *error)
-{
-       struct arpt_table_info *oldinfo;
-
-       /* Do the substitution. */
-       write_lock_bh(&table->lock);
-       /* Check inside lock: is the old number correct? */
-       if (num_counters != table->private->number) {
-               duprintf("num_counters != table->private->number (%u/%u)\n",
-                        num_counters, table->private->number);
-               write_unlock_bh(&table->lock);
-               *error = -EAGAIN;
-               return NULL;
-       }
-       oldinfo = table->private;
-       table->private = newinfo;
-       newinfo->initial_entries = oldinfo->initial_entries;
-       write_unlock_bh(&table->lock);
-
-       return oldinfo;
-}
-
 /* Gets counters. */
 static inline int add_entry_to_counter(const struct arpt_entry *e,
-                                      struct arpt_counters total[],
+                                      struct xt_counters total[],
                                       unsigned int *i)
 {
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -801,7 +659,7 @@ static inline int add_entry_to_counter(const struct arpt_entry *e,
 }
 
 static inline int set_entry_to_counter(const struct arpt_entry *e,
-                                      struct arpt_counters total[],
+                                      struct xt_counters total[],
                                       unsigned int *i)
 {
        SET_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -810,8 +668,8 @@ static inline int set_entry_to_counter(const struct arpt_entry *e,
        return 0;
 }
 
-static void get_counters(const struct arpt_table_info *t,
-                        struct arpt_counters counters[])
+static void get_counters(const struct xt_table_info *t,
+                        struct xt_counters counters[])
 {
        unsigned int cpu;
        unsigned int i;
@@ -849,7 +707,8 @@ static int copy_entries_to_user(unsigned int total_size,
 {
        unsigned int off, num, countersize;
        struct arpt_entry *e;
-       struct arpt_counters *counters;
+       struct xt_counters *counters;
+       struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;
 
@@ -857,18 +716,18 @@ static int copy_entries_to_user(unsigned int total_size,
         * (other than comefrom, which userspace doesn't care
         * about).
         */
-       countersize = sizeof(struct arpt_counters) * table->private->number;
-       counters = vmalloc(countersize);
+       countersize = sizeof(struct xt_counters) * private->number;
+       counters = vmalloc_node(countersize, numa_node_id());
 
        if (counters == NULL)
                return -ENOMEM;
 
        /* First, sum counters... */
        write_lock_bh(&table->lock);
-       get_counters(table->private, counters);
+       get_counters(private, counters);
        write_unlock_bh(&table->lock);
 
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        /* ... then copy entire thing ... */
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
@@ -911,75 +770,34 @@ static int get_entries(const struct arpt_get_entries *entries,
        int ret;
        struct arpt_table *t;
 
-       t = find_table_lock(entries->name);
+       t = xt_find_table_lock(NF_ARP, entries->name);
        if (t || !IS_ERR(t)) {
+               struct xt_table_info *private = t->private;
                duprintf("t->private->number = %u\n",
-                        t->private->number);
-               if (entries->size == t->private->size)
-                       ret = copy_entries_to_user(t->private->size,
+                        private->number);
+               if (entries->size == private->size)
+                       ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
                else {
                        duprintf("get_entries: I've got %u not %u!\n",
-                                t->private->size,
-                                entries->size);
+                                private->size, entries->size);
                        ret = -EINVAL;
                }
                module_put(t->me);
-               up(&arpt_mutex);
+               xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;
 
        return ret;
 }
 
-static void free_table_info(struct arpt_table_info *info)
-{
-       int cpu;
-       for_each_cpu(cpu) {
-               if (info->size <= PAGE_SIZE)
-                       kfree(info->entries[cpu]);
-               else
-                       vfree(info->entries[cpu]);
-       }
-       kfree(info);
-}
-
-static struct arpt_table_info *alloc_table_info(unsigned int size)
-{
-       struct arpt_table_info *newinfo;
-       int cpu;
-       
-       newinfo = kzalloc(sizeof(struct arpt_table_info), GFP_KERNEL);
-       if (!newinfo)
-               return NULL;
-
-       newinfo->size = size;
-
-       for_each_cpu(cpu) {
-               if (size <= PAGE_SIZE)
-                       newinfo->entries[cpu] = kmalloc_node(size,
-                                                       GFP_KERNEL,
-                                                       cpu_to_node(cpu));
-               else
-                       newinfo->entries[cpu] = vmalloc_node(size,
-                                                            cpu_to_node(cpu));
-
-               if (newinfo->entries[cpu] == NULL) {
-                       free_table_info(newinfo);
-                       return NULL;
-               }
-       }
-
-       return newinfo;
-}
-
 static int do_replace(void __user *user, unsigned int len)
 {
        int ret;
        struct arpt_replace tmp;
        struct arpt_table *t;
-       struct arpt_table_info *newinfo, *oldinfo;
-       struct arpt_counters *counters;
+       struct xt_table_info *newinfo, *oldinfo;
+       struct xt_counters *counters;
        void *loc_cpu_entry, *loc_cpu_old_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
@@ -989,11 +807,7 @@ static int do_replace(void __user *user, unsigned int len)
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;
 
-       /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-       if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
-               return -ENOMEM;
-
-       newinfo = alloc_table_info(tmp.size);
+       newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
 
@@ -1005,7 +819,7 @@ static int do_replace(void __user *user, unsigned int len)
                goto free_newinfo;
        }
 
-       counters = vmalloc(tmp.num_counters * sizeof(struct arpt_counters));
+       counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
        if (!counters) {
                ret = -ENOMEM;
                goto free_newinfo;
@@ -1019,7 +833,7 @@ static int do_replace(void __user *user, unsigned int len)
 
        duprintf("arp_tables: Translated table\n");
 
-       t = try_then_request_module(find_table_lock(tmp.name),
+       t = try_then_request_module(xt_find_table_lock(NF_ARP, tmp.name),
                                    "arptable_%s", tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1034,7 +848,7 @@ static int do_replace(void __user *user, unsigned int len)
                goto put_module;
        }
 
-       oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+       oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
        if (!oldinfo)
                goto put_module;
 
@@ -1054,23 +868,23 @@ static int do_replace(void __user *user, unsigned int len)
        loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
        ARPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
 
-       free_table_info(oldinfo);
+       xt_free_table_info(oldinfo);
        if (copy_to_user(tmp.counters, counters,
-                        sizeof(struct arpt_counters) * tmp.num_counters) != 0)
+                        sizeof(struct xt_counters) * tmp.num_counters) != 0)
                ret = -EFAULT;
        vfree(counters);
-       up(&arpt_mutex);
+       xt_table_unlock(t);
        return ret;
 
  put_module:
        module_put(t->me);
-       up(&arpt_mutex);
+       xt_table_unlock(t);
  free_newinfo_counters_untrans:
        ARPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry, NULL);
  free_newinfo_counters:
        vfree(counters);
  free_newinfo:
-       free_table_info(newinfo);
+       xt_free_table_info(newinfo);
        return ret;
 }
 
@@ -1078,7 +892,7 @@ static int do_replace(void __user *user, unsigned int len)
  * and everything is OK.
  */
 static inline int add_counter_to_entry(struct arpt_entry *e,
-                                      const struct arpt_counters addme[],
+                                      const struct xt_counters addme[],
                                       unsigned int *i)
 {
 
@@ -1091,15 +905,16 @@ static inline int add_counter_to_entry(struct arpt_entry *e,
 static int do_add_counters(void __user *user, unsigned int len)
 {
        unsigned int i;
-       struct arpt_counters_info tmp, *paddc;
+       struct xt_counters_info tmp, *paddc;
        struct arpt_table *t;
+       struct xt_table_info *private;
        int ret = 0;
        void *loc_cpu_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;
 
-       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct arpt_counters))
+       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
                return -EINVAL;
 
        paddc = vmalloc(len);
@@ -1111,29 +926,30 @@ static int do_add_counters(void __user *user, unsigned int len)
                goto free;
        }
 
-       t = find_table_lock(tmp.name);
+       t = xt_find_table_lock(NF_ARP, tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
        }
 
        write_lock_bh(&t->lock);
-       if (t->private->number != paddc->num_counters) {
+       private = t->private;
+       if (private->number != paddc->num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
 
        i = 0;
        /* Choose the copy that is on our node */
-       loc_cpu_entry = t->private->entries[smp_processor_id()];
+       loc_cpu_entry = private->entries[smp_processor_id()];
        ARPT_ENTRY_ITERATE(loc_cpu_entry,
-                          t->private->size,
+                          private->size,
                           add_counter_to_entry,
                           paddc->counters,
                           &i);
  unlock_up_free:
        write_unlock_bh(&t->lock);
-       up(&arpt_mutex);
+       xt_table_unlock(t);
        module_put(t->me);
  free:
        vfree(paddc);
@@ -1190,25 +1006,26 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
                }
                name[ARPT_TABLE_MAXNAMELEN-1] = '\0';
 
-               t = try_then_request_module(find_table_lock(name),
+               t = try_then_request_module(xt_find_table_lock(NF_ARP, name),
                                            "arptable_%s", name);
                if (t && !IS_ERR(t)) {
                        struct arpt_getinfo info;
+                       struct xt_table_info *private = t->private;
 
                        info.valid_hooks = t->valid_hooks;
-                       memcpy(info.hook_entry, t->private->hook_entry,
+                       memcpy(info.hook_entry, private->hook_entry,
                               sizeof(info.hook_entry));
-                       memcpy(info.underflow, t->private->underflow,
+                       memcpy(info.underflow, private->underflow,
                               sizeof(info.underflow));
-                       info.num_entries = t->private->number;
-                       info.size = t->private->size;
+                       info.num_entries = private->number;
+                       info.size = private->size;
                        strcpy(info.name, name);
 
                        if (copy_to_user(user, &info, *len) != 0)
                                ret = -EFAULT;
                        else
                                ret = 0;
-                       up(&arpt_mutex);
+                       xt_table_unlock(t);
                        module_put(t->me);
                } else
                        ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1233,7 +1050,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
        }
 
        case ARPT_SO_GET_REVISION_TARGET: {
-               struct arpt_get_revision rev;
+               struct xt_get_revision rev;
 
                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
@@ -1244,8 +1061,8 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
                        break;
                }
 
-               try_then_request_module(find_revision(rev.name, rev.revision,
-                                                     target_revfn, &ret),
+               try_then_request_module(xt_find_revision(NF_ARP, rev.name,
+                                                        rev.revision, 1, &ret),
                                        "arpt_%s", rev.name);
                break;
        }
@@ -1258,38 +1075,16 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
        return ret;
 }
 
-/* Registration hooks for targets. */
-int arpt_register_target(struct arpt_target *target)
-{
-       int ret;
-
-       ret = down_interruptible(&arpt_mutex);
-       if (ret != 0)
-               return ret;
-
-       list_add(&target->list, &arpt_target);
-       up(&arpt_mutex);
-
-       return ret;
-}
-
-void arpt_unregister_target(struct arpt_target *target)
-{
-       down(&arpt_mutex);
-       LIST_DELETE(&arpt_target, target);
-       up(&arpt_mutex);
-}
-
 int arpt_register_table(struct arpt_table *table,
                        const struct arpt_replace *repl)
 {
        int ret;
-       struct arpt_table_info *newinfo;
-       static struct arpt_table_info bootstrap
+       struct xt_table_info *newinfo;
+       static struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;
 
-       newinfo = alloc_table_info(repl->size);
+       newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo) {
                ret = -ENOMEM;
                return ret;
@@ -1304,60 +1099,33 @@ int arpt_register_table(struct arpt_table *table,
                              repl->num_entries,
                              repl->hook_entry,
                              repl->underflow);
+
        duprintf("arpt_register_table: translate table gives %d\n", ret);
        if (ret != 0) {
-               free_table_info(newinfo);
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       ret = down_interruptible(&arpt_mutex);
-       if (ret != 0) {
-               free_table_info(newinfo);
+       if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       /* Don't autoload: we'd eat our tail... */
-       if (list_named_find(&arpt_tables, table->name)) {
-               ret = -EEXIST;
-               goto free_unlock;
-       }
-
-       /* Simplifies replace_table code. */
-       table->private = &bootstrap;
-       if (!replace_table(table, 0, newinfo, &ret))
-               goto free_unlock;
-
-       duprintf("table->private->number = %u\n",
-                table->private->number);
-       
-       /* save number of initial entries */
-       table->private->initial_entries = table->private->number;
-
-       rwlock_init(&table->lock);
-       list_prepend(&arpt_tables, table);
-
- unlock:
-       up(&arpt_mutex);
-       return ret;
-
- free_unlock:
-       free_table_info(newinfo);
-       goto unlock;
+       return 0;
 }
 
 void arpt_unregister_table(struct arpt_table *table)
 {
+       struct xt_table_info *private;
        void *loc_cpu_entry;
 
-       down(&arpt_mutex);
-       LIST_DELETE(&arpt_tables, table);
-       up(&arpt_mutex);
+       private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
-       ARPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       ARPT_ENTRY_ITERATE(loc_cpu_entry, private->size,
                           cleanup_entry, NULL);
-       free_table_info(table->private);
+       xt_free_table_info(private);
 }
 
 /* The built-in targets: standard (NULL) and error. */
@@ -1380,52 +1148,15 @@ static struct nf_sockopt_ops arpt_sockopts = {
        .get            = do_arpt_get_ctl,
 };
 
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const struct arpt_table *t,
-                            off_t start_offset, char *buffer, int length,
-                            off_t *pos, unsigned int *count)
-{
-       if ((*count)++ >= start_offset) {
-               unsigned int namelen;
-
-               namelen = sprintf(buffer + *pos, "%s\n", t->name);
-               if (*pos + namelen > length) {
-                       /* Stop iterating */
-                       return 1;
-               }
-               *pos += namelen;
-       }
-       return 0;
-}
-
-static int arpt_get_tables(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&arpt_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&arpt_tables, print_name, struct arpt_table *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&arpt_mutex);
-
-       /* `start' hack - see fs/proc/generic.c line ~105 */
-       *start=(char *)((unsigned long)count-offset);
-       return pos;
-}
-#endif /*CONFIG_PROC_FS*/
-
 static int __init init(void)
 {
        int ret;
 
+       xt_proto_init(NF_ARP);
+
        /* Noone else will be downing sem now, so we won't sleep */
-       down(&arpt_mutex);
-       list_append(&arpt_target, &arpt_standard_target);
-       list_append(&arpt_target, &arpt_error_target);
-       up(&arpt_mutex);
+       xt_register_target(NF_ARP, &arpt_standard_target);
+       xt_register_target(NF_ARP, &arpt_error_target);
 
        /* Register setsockopt */
        ret = nf_register_sockopt(&arpt_sockopts);
@@ -1434,19 +1165,6 @@ static int __init init(void)
                return ret;
        }
 
-#ifdef CONFIG_PROC_FS
-       {
-               struct proc_dir_entry *proc;
-
-               proc = proc_net_create("arp_tables_names", 0, arpt_get_tables);
-               if (!proc) {
-                       nf_unregister_sockopt(&arpt_sockopts);
-                       return -ENOMEM;
-               }
-               proc->owner = THIS_MODULE;
-       }
-#endif
-
        printk("arp_tables: (C) 2002 David S. Miller\n");
        return 0;
 }
@@ -1454,16 +1172,12 @@ static int __init init(void)
 static void __exit fini(void)
 {
        nf_unregister_sockopt(&arpt_sockopts);
-#ifdef CONFIG_PROC_FS
-       proc_net_remove("arp_tables_names");
-#endif
+       xt_proto_fini(NF_ARP);
 }
 
 EXPORT_SYMBOL(arpt_register_table);
 EXPORT_SYMBOL(arpt_unregister_table);
 EXPORT_SYMBOL(arpt_do_table);
-EXPORT_SYMBOL(arpt_register_target);
-EXPORT_SYMBOL(arpt_unregister_target);
 
 module_init(init);
 module_exit(fini);
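
The arp_tables portion of the patch ends here. Taken together, its hunks swap the module-local bookkeeping (arpt_mutex, the target and table lists, alloc_table_info()/free_table_info(), replace_table() and the /proc/net/arp_tables_names handler) for the shared x_tables helpers, keyed by family (NF_ARP). The sketch below condenses the lookup and registration pattern the converted code now follows; it is illustrative only. The xt_* helpers, NF_ARP and the struct fields are the ones visible in the hunks above, while the example_* function names are invented for the sketch.

/*
 * Illustrative sketch, not part of the patch.
 */
#include <linux/err.h>
#include <linux/module.h>
#include <linux/netfilter_arp/arp_tables.h>
#include <linux/netfilter/x_tables.h>

/* Look a table up by name: the xt layer takes the per-family mutex
 * and a module reference for us. */
static int example_lookup(const char *name)
{
	struct arpt_table *t = xt_find_table_lock(NF_ARP, name);
	struct xt_table_info *private;
	int entries;

	if (!t || IS_ERR(t))
		return t ? PTR_ERR(t) : -ENOENT;

	private = t->private;	/* now a shared struct xt_table_info */
	entries = private->number;

	module_put(t->me);
	xt_table_unlock(t);	/* replaces up(&arpt_mutex) */
	return entries;
}

/* Register a table: x_tables owns duplicate detection and locking. */
static int example_register(struct arpt_table *table,
			    const struct arpt_replace *repl)
{
	static struct xt_table_info bootstrap
		= { 0, 0, 0, { 0 }, { 0 }, { } };
	struct xt_table_info *newinfo = xt_alloc_table_info(repl->size);
	int ret;

	if (!newinfo)
		return -ENOMEM;

	/* ... copy the rule blob and run translate_table() as above ... */

	ret = xt_register_table(table, &bootstrap, newinfo);
	if (ret != 0) {
		xt_free_table_info(newinfo);
		return ret;
	}
	return 0;
}

Teardown is symmetric: xt_unregister_table() hands the old xt_table_info back so the caller can run cleanup_entry() over a per-cpu copy and release it with xt_free_table_info(), exactly as arpt_unregister_table() does above.
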
index 3e592ec864826b1aef8aa2997d3410fb54d3af88..c97650a16a5b61607b1c0f7463fb43deb2ba3b79 100644 (file)
@@ -8,8 +8,9 @@ MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
 MODULE_DESCRIPTION("arptables arp payload mangle target");
 
 static unsigned int
-target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
-   const struct net_device *out, const void *targinfo, void *userinfo)
+target(struct sk_buff **pskb, const struct net_device *in,
+   const struct net_device *out, unsigned int hooknum, const void *targinfo,
+   void *userinfo)
 {
        const struct arpt_mangle *mangle = targinfo;
        struct arphdr *arp;
@@ -64,7 +65,7 @@ target(struct sk_buff **pskb, unsigned int hooknum, const struct net_device *in,
 }
 
 static int
-checkentry(const char *tablename, const struct arpt_entry *e, void *targinfo,
+checkentry(const char *tablename, const void *e, void *targinfo,
    unsigned int targinfosize, unsigned int hook_mask)
 {
        const struct arpt_mangle *mangle = targinfo;
index 0d759f5a4ef0deb605f5463399e0ed773a43ab94..f6ab45f48681dca680d4fddfea7695907fc406cf 100644 (file)
@@ -145,6 +145,7 @@ static struct arpt_table packet_filter = {
        .lock           = RW_LOCK_UNLOCKED,
        .private        = NULL,
        .me             = THIS_MODULE,
+       .af             = NF_ARP,
 };
 
 /* The work comes in here from netfilter.c */
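
The table definitions pick up one new field for the shared registry: an address-family tag, .af = NF_ARP here and .af = AF_INET for the NAT table below, which is what xt_find_table_lock() and the other xt_* helpers key on. A minimal sketch of a table carrying the new field follows; apart from .af and the .lock/.private/.me initializers visible in the hunks, the values are placeholders invented for the example.

/*
 * Sketch only: an arptables table definition after the patch.  The
 * .name and .valid_hooks values are placeholders; .af is the new field.
 */
static struct arpt_table example_table = {
	.name		= "example",
	.valid_hooks	= 0,
	.lock		= RW_LOCK_UNLOCKED,
	.private	= NULL,
	.me		= THIS_MODULE,
	.af		= NF_ARP,	/* lets x_tables file the table per family */
};
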
index 9dec1293f67aac7b4ee33156d13af9f3a08e6f5d..833fcb4be5e7df4d3519989c3ec8d3260b7e7a55 100644 (file)
@@ -944,7 +944,7 @@ module_exit(fini);
 
 /* Some modules need us, but don't depend directly on any symbol.
    They should call this. */
-void need_ip_conntrack(void)
+void need_conntrack(void)
 {
 }
 
@@ -962,7 +962,7 @@ EXPORT_SYMBOL(ip_ct_get_tuple);
 EXPORT_SYMBOL(invert_tuplepr);
 EXPORT_SYMBOL(ip_conntrack_alter_reply);
 EXPORT_SYMBOL(ip_conntrack_destroyed);
-EXPORT_SYMBOL(need_ip_conntrack);
+EXPORT_SYMBOL(need_conntrack);
 EXPORT_SYMBOL(ip_conntrack_helper_register);
 EXPORT_SYMBOL(ip_conntrack_helper_unregister);
 EXPORT_SYMBOL(ip_ct_iterate_cleanup);
index cb66b8bddeb3d63eb2ef23ee359a2491c6511c63..1de86282d23249c4181f92c80106c1dafcb81f8f 100644 (file)
@@ -95,6 +95,7 @@ static struct ipt_table nat_table = {
        .valid_hooks    = NAT_VALID_HOOKS,
        .lock           = RW_LOCK_UNLOCKED,
        .me             = THIS_MODULE,
+       .af             = AF_INET,
 };
 
 /* Source NAT */
@@ -168,7 +169,7 @@ static unsigned int ipt_dnat_target(struct sk_buff **pskb,
 }
 
 static int ipt_snat_checkentry(const char *tablename,
-                              const struct ipt_entry *e,
+                              const void *entry,
                               void *targinfo,
                               unsigned int targinfosize,
                               unsigned int hook_mask)
@@ -201,7 +202,7 @@ static int ipt_snat_checkentry(const char *tablename,
 }
 
 static int ipt_dnat_checkentry(const char *tablename,
-                              const struct ipt_entry *e,
+                              const void *entry,
                               void *targinfo,
                               unsigned int targinfosize,
                               unsigned int hook_mask)
index 8b8a1f00bbf4407e2a8f09d17831dae33fcd656f..ad438fb185b8943dfafde63c13fcbb405a5caf46 100644 (file)
@@ -364,7 +364,7 @@ static int init_or_cleanup(int init)
 {
        int ret = 0;
 
-       need_ip_conntrack();
+       need_conntrack();
 
        if (!init) goto cleanup;
 
index 877bc96d333684fae54d3af394d2e13aaf73d598..2371b2062c2d812468ad62f4fe19d4360e748f41 100644 (file)
@@ -2,7 +2,7 @@
  * Packet matching code.
  *
  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
- * Copyright (C) 2000-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -11,6 +11,8 @@
  * 19 Jan 2002 Harald Welte <laforge@gnumonks.org>
  *     - increase module usage count as soon as we have rules inside
  *       a table
+ * 08 Oct 2005 Harald Welte <lafore@netfilter.org>
+ *     - Generalize into "x_tables" layer and "{ip,ip6,arp}_tables"
  */
 #include <linux/config.h>
 #include <linux/cache.h>
@@ -20,8 +22,6 @@
 #include <linux/vmalloc.h>
 #include <linux/netdevice.h>
 #include <linux/module.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
 #include <linux/icmp.h>
 #include <net/ip.h>
 #include <asm/uaccess.h>
@@ -30,6 +30,7 @@
 #include <linux/err.h>
 #include <linux/cpumask.h>
 
+#include <linux/netfilter/x_tables.h>
 #include <linux/netfilter_ipv4/ip_tables.h>
 
 MODULE_LICENSE("GPL");
@@ -62,14 +63,6 @@ do {                                                         \
 #else
 #define IP_NF_ASSERT(x)
 #endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
-
-static DECLARE_MUTEX(ipt_mutex);
-
-/* Must have mutex */
-#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ipt_mutex) != 0)
-#include <linux/netfilter_ipv4/listhelp.h>
 
 #if 0
 /* All the better to debug you with... */
@@ -86,36 +79,6 @@ static DECLARE_MUTEX(ipt_mutex);
 
    Hence the start of any table is given by get_table() below.  */
 
-/* The table itself */
-struct ipt_table_info
-{
-       /* Size per table */
-       unsigned int size;
-       /* Number of entries: FIXME. --RR */
-       unsigned int number;
-       /* Initial number of entries. Needed for module usage count */
-       unsigned int initial_entries;
-
-       /* Entry points and underflows */
-       unsigned int hook_entry[NF_IP_NUMHOOKS];
-       unsigned int underflow[NF_IP_NUMHOOKS];
-
-       /* ipt_entry tables: one per CPU */
-       void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(ipt_target);
-static LIST_HEAD(ipt_match);
-static LIST_HEAD(ipt_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
-#if 0
-#define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
-#define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
-#define up(x) do { printk("UP:%u:" #x "\n", __LINE__); up(x); } while(0)
-#endif
-
 /* Returns whether matches rule or not. */
 static inline int
 ip_packet_match(const struct iphdr *ip,
@@ -234,7 +197,8 @@ int do_match(struct ipt_entry_match *m,
             int *hotdrop)
 {
        /* Stop iteration if it doesn't match */
-       if (!m->u.kernel.match->match(skb, in, out, m->data, offset, hotdrop))
+       if (!m->u.kernel.match->match(skb, in, out, m->data, offset, 
+           skb->nh.iph->ihl*4, hotdrop))
                return 1;
        else
                return 0;
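
Besides the table bookkeeping, the series changes the match interface itself: do_match() now passes skb->nh.iph->ihl*4 as an extra argument, and the per-protocol matches (see icmp_match() further down) gain a matching unsigned int protoff parameter instead of re-deriving the transport-header offset from the IP header. Below is a hedged sketch of a match written against the new signature; only skb_header_pointer() and the parameter list are taken from the diff, while example_match and its one-byte payload test are invented for illustration.

/*
 * Illustrative only: a match callback in the post-conversion style.
 * The parameter list mirrors icmp_match() below.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>

static int example_match(const struct sk_buff *skb,
			 const struct net_device *in,
			 const struct net_device *out,
			 const void *matchinfo,
			 int offset,
			 unsigned int protoff,	/* new: transport header offset */
			 int *hotdrop)
{
	u_int8_t _byte, *p;

	/* Only look at the first fragment, as the real matches do. */
	if (offset)
		return 0;

	/* protoff replaces the open-coded skb->nh.iph->ihl*4 */
	p = skb_header_pointer(skb, protoff, sizeof(_byte), &_byte);
	if (p == NULL) {
		*hotdrop = 1;	/* truncated header: drop the packet */
		return 0;
	}
	return *p != 0;	/* match if the first payload byte is non-zero */
}
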
@@ -265,6 +229,7 @@ ipt_do_table(struct sk_buff **pskb,
        const char *indev, *outdev;
        void *table_base;
        struct ipt_entry *e, *back;
+       struct xt_table_info *private = table->private;
 
        /* Initialization */
        ip = (*pskb)->nh.iph;
@@ -281,24 +246,11 @@ ipt_do_table(struct sk_buff **pskb,
 
        read_lock_bh(&table->lock);
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-       table_base = (void *)table->private->entries[smp_processor_id()];
-       e = get_entry(table_base, table->private->hook_entry[hook]);
-
-#ifdef CONFIG_NETFILTER_DEBUG
-       /* Check noone else using our table */
-       if (((struct ipt_entry *)table_base)->comefrom != 0xdead57ac
-           && ((struct ipt_entry *)table_base)->comefrom != 0xeeeeeeec) {
-               printk("ASSERT: CPU #%u, %s comefrom(%p) = %X\n",
-                      smp_processor_id(),
-                      table->name,
-                      &((struct ipt_entry *)table_base)->comefrom,
-                      ((struct ipt_entry *)table_base)->comefrom);
-       }
-       ((struct ipt_entry *)table_base)->comefrom = 0x57acc001;
-#endif
+       table_base = (void *)private->entries[smp_processor_id()];
+       e = get_entry(table_base, private->hook_entry[hook]);
 
        /* For return from builtin chain */
-       back = get_entry(table_base, table->private->underflow[hook]);
+       back = get_entry(table_base, private->underflow[hook]);
 
        do {
                IP_NF_ASSERT(e);
@@ -384,9 +336,6 @@ ipt_do_table(struct sk_buff **pskb,
                }
        } while (!hotdrop);
 
-#ifdef CONFIG_NETFILTER_DEBUG
-       ((struct ipt_entry *)table_base)->comefrom = 0xdead57ac;
-#endif
        read_unlock_bh(&table->lock);
 
 #ifdef DEBUG_ALLOW_ALL
@@ -398,145 +347,6 @@ ipt_do_table(struct sk_buff **pskb,
 #endif
 }
 
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
-static inline struct ipt_table *find_table_lock(const char *name)
-{
-       struct ipt_table *t;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &ipt_tables, list)
-               if (strcmp(t->name, name) == 0 && try_module_get(t->me))
-                       return t;
-       up(&ipt_mutex);
-       return NULL;
-}
-
-/* Find match, grabs ref.  Returns ERR_PTR() on error. */
-static inline struct ipt_match *find_match(const char *name, u8 revision)
-{
-       struct ipt_match *m;
-       int err = 0;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(m, &ipt_match, list) {
-               if (strcmp(m->name, name) == 0) {
-                       if (m->revision == revision) {
-                               if (try_module_get(m->me)) {
-                                       up(&ipt_mutex);
-                                       return m;
-                               }
-                       } else
-                               err = -EPROTOTYPE; /* Found something. */
-               }
-       }
-       up(&ipt_mutex);
-       return ERR_PTR(err);
-}
-
-/* Find target, grabs ref.  Returns ERR_PTR() on error. */
-static inline struct ipt_target *find_target(const char *name, u8 revision)
-{
-       struct ipt_target *t;
-       int err = 0;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &ipt_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision == revision) {
-                               if (try_module_get(t->me)) {
-                                       up(&ipt_mutex);
-                                       return t;
-                               }
-                       } else
-                               err = -EPROTOTYPE; /* Found something. */
-               }
-       }
-       up(&ipt_mutex);
-       return ERR_PTR(err);
-}
-
-struct ipt_target *ipt_find_target(const char *name, u8 revision)
-{
-       struct ipt_target *target;
-
-       target = try_then_request_module(find_target(name, revision),
-                                        "ipt_%s", name);
-       if (IS_ERR(target) || !target)
-               return NULL;
-       return target;
-}
-
-static int match_revfn(const char *name, u8 revision, int *bestp)
-{
-       struct ipt_match *m;
-       int have_rev = 0;
-
-       list_for_each_entry(m, &ipt_match, list) {
-               if (strcmp(m->name, name) == 0) {
-                       if (m->revision > *bestp)
-                               *bestp = m->revision;
-                       if (m->revision == revision)
-                               have_rev = 1;
-               }
-       }
-       return have_rev;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
-       struct ipt_target *t;
-       int have_rev = 0;
-
-       list_for_each_entry(t, &ipt_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision > *bestp)
-                               *bestp = t->revision;
-                       if (t->revision == revision)
-                               have_rev = 1;
-               }
-       }
-       return have_rev;
-}
-
-/* Returns true or false (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
-                               int (*revfn)(const char *, u8, int *),
-                               int *err)
-{
-       int have_rev, best = -1;
-
-       if (down_interruptible(&ipt_mutex) != 0) {
-               *err = -EINTR;
-               return 1;
-       }
-       have_rev = revfn(name, revision, &best);
-       up(&ipt_mutex);
-
-       /* Nothing at all?  Return 0 to try loading module. */
-       if (best == -1) {
-               *err = -ENOENT;
-               return 0;
-       }
-
-       *err = best;
-       if (!have_rev)
-               *err = -EPROTONOSUPPORT;
-       return 1;
-}
-
-
 /* All zeroes == unconditional rule. */
 static inline int
 unconditional(const struct ipt_ip *ip)
@@ -553,7 +363,7 @@ unconditional(const struct ipt_ip *ip)
 /* Figures out from what hook each rule can be called: returns 0 if
    there are loops.  Puts hook bitmask in comefrom. */
 static int
-mark_source_chains(struct ipt_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
 {
        unsigned int hook;
@@ -699,7 +509,7 @@ check_match(struct ipt_entry_match *m,
 {
        struct ipt_match *match;
 
-       match = try_then_request_module(find_match(m->u.user.name,
+       match = try_then_request_module(xt_find_match(AF_INET, m->u.user.name,
                                                   m->u.user.revision),
                                        "ipt_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
@@ -744,7 +554,8 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
                goto cleanup_matches;
 
        t = ipt_get_target(e);
-       target = try_then_request_module(find_target(t->u.user.name,
+       target = try_then_request_module(xt_find_target(AF_INET,
+                                                    t->u.user.name,
                                                     t->u.user.revision),
                                         "ipt_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
@@ -781,7 +592,7 @@ check_entry(struct ipt_entry *e, const char *name, unsigned int size,
 
 static inline int
 check_entry_size_and_hooks(struct ipt_entry *e,
-                          struct ipt_table_info *newinfo,
+                          struct xt_table_info *newinfo,
                           unsigned char *base,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
@@ -815,7 +626,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
            < 0 (not IPT_RETURN). --RR */
 
        /* Clear counters and comefrom */
-       e->counters = ((struct ipt_counters) { 0, 0 });
+       e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;
 
        (*i)++;
@@ -845,7 +656,7 @@ cleanup_entry(struct ipt_entry *e, unsigned int *i)
 static int
 translate_table(const char *name,
                unsigned int valid_hooks,
-               struct ipt_table_info *newinfo,
+               struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
@@ -922,48 +733,10 @@ translate_table(const char *name,
        return ret;
 }
 
-static struct ipt_table_info *
-replace_table(struct ipt_table *table,
-             unsigned int num_counters,
-             struct ipt_table_info *newinfo,
-             int *error)
-{
-       struct ipt_table_info *oldinfo;
-
-#ifdef CONFIG_NETFILTER_DEBUG
-       {
-               int cpu;
-
-               for_each_cpu(cpu) {
-                       struct ipt_entry *table_base = newinfo->entries[cpu];
-                       if (table_base)
-                               table_base->comefrom = 0xdead57ac;
-               }
-       }
-#endif
-
-       /* Do the substitution. */
-       write_lock_bh(&table->lock);
-       /* Check inside lock: is the old number correct? */
-       if (num_counters != table->private->number) {
-               duprintf("num_counters != table->private->number (%u/%u)\n",
-                        num_counters, table->private->number);
-               write_unlock_bh(&table->lock);
-               *error = -EAGAIN;
-               return NULL;
-       }
-       oldinfo = table->private;
-       table->private = newinfo;
-       newinfo->initial_entries = oldinfo->initial_entries;
-       write_unlock_bh(&table->lock);
-
-       return oldinfo;
-}
-
 /* Gets counters. */
 static inline int
 add_entry_to_counter(const struct ipt_entry *e,
-                    struct ipt_counters total[],
+                    struct xt_counters total[],
                     unsigned int *i)
 {
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -984,8 +757,8 @@ set_entry_to_counter(const struct ipt_entry *e,
 }
 
 static void
-get_counters(const struct ipt_table_info *t,
-            struct ipt_counters counters[])
+get_counters(const struct xt_table_info *t,
+            struct xt_counters counters[])
 {
        unsigned int cpu;
        unsigned int i;
@@ -1024,14 +797,15 @@ copy_entries_to_user(unsigned int total_size,
 {
        unsigned int off, num, countersize;
        struct ipt_entry *e;
-       struct ipt_counters *counters;
+       struct xt_counters *counters;
+       struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
           about). */
-       countersize = sizeof(struct ipt_counters) * table->private->number;
+       countersize = sizeof(struct xt_counters) * private->number;
        counters = vmalloc_node(countersize, numa_node_id());
 
        if (counters == NULL)
@@ -1039,14 +813,14 @@ copy_entries_to_user(unsigned int total_size,
 
        /* First, sum counters... */
        write_lock_bh(&table->lock);
-       get_counters(table->private, counters);
+       get_counters(private, counters);
        write_unlock_bh(&table->lock);
 
        /* choose the copy that is on our node/cpu, ...
         * This choice is lazy (because current thread is
         * allowed to migrate to another cpu)
         */
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        /* ... then copy entire thing ... */
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
@@ -1108,74 +882,36 @@ get_entries(const struct ipt_get_entries *entries,
        int ret;
        struct ipt_table *t;
 
-       t = find_table_lock(entries->name);
+       t = xt_find_table_lock(AF_INET, entries->name);
        if (t && !IS_ERR(t)) {
+               struct xt_table_info *private = t->private;
                duprintf("t->private->number = %u\n",
-                        t->private->number);
-               if (entries->size == t->private->size)
-                       ret = copy_entries_to_user(t->private->size,
+                        private->number);
+               if (entries->size == private->size)
+                       ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
                else {
                        duprintf("get_entries: I've got %u not %u!\n",
-                                t->private->size,
+                                private->size,
                                 entries->size);
                        ret = -EINVAL;
                }
                module_put(t->me);
-               up(&ipt_mutex);
+               xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;
 
        return ret;
 }
 
-static void free_table_info(struct ipt_table_info *info)
-{
-       int cpu;
-       for_each_cpu(cpu) {
-               if (info->size <= PAGE_SIZE)
-                       kfree(info->entries[cpu]);
-               else
-                       vfree(info->entries[cpu]);
-       }
-       kfree(info);
-}
-
-static struct ipt_table_info *alloc_table_info(unsigned int size)
-{
-       struct ipt_table_info *newinfo;
-       int cpu;
-
-       newinfo = kzalloc(sizeof(struct ipt_table_info), GFP_KERNEL);
-       if (!newinfo)
-               return NULL;
-
-       newinfo->size = size;
-
-       for_each_cpu(cpu) {
-               if (size <= PAGE_SIZE)
-                       newinfo->entries[cpu] = kmalloc_node(size,
-                               GFP_KERNEL,
-                               cpu_to_node(cpu));
-               else
-                       newinfo->entries[cpu] = vmalloc_node(size, cpu_to_node(cpu));
-               if (newinfo->entries[cpu] == 0) {
-                       free_table_info(newinfo);
-                       return NULL;
-               }
-       }
-
-       return newinfo;
-}
-
 static int
 do_replace(void __user *user, unsigned int len)
 {
        int ret;
        struct ipt_replace tmp;
        struct ipt_table *t;
-       struct ipt_table_info *newinfo, *oldinfo;
-       struct ipt_counters *counters;
+       struct xt_table_info *newinfo, *oldinfo;
+       struct xt_counters *counters;
        void *loc_cpu_entry, *loc_cpu_old_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
@@ -1185,11 +921,7 @@ do_replace(void __user *user, unsigned int len)
        if (len != sizeof(tmp) + tmp.size)
                return -ENOPROTOOPT;
 
-       /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-       if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
-               return -ENOMEM;
-
-       newinfo = alloc_table_info(tmp.size);
+       newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
 
@@ -1201,7 +933,7 @@ do_replace(void __user *user, unsigned int len)
                goto free_newinfo;
        }
 
-       counters = vmalloc(tmp.num_counters * sizeof(struct ipt_counters));
+       counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
        if (!counters) {
                ret = -ENOMEM;
                goto free_newinfo;
@@ -1215,7 +947,7 @@ do_replace(void __user *user, unsigned int len)
 
        duprintf("ip_tables: Translated table\n");
 
-       t = try_then_request_module(find_table_lock(tmp.name),
+       t = try_then_request_module(xt_find_table_lock(AF_INET, tmp.name),
                                    "iptable_%s", tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1230,7 +962,7 @@ do_replace(void __user *user, unsigned int len)
                goto put_module;
        }
 
-       oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+       oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
        if (!oldinfo)
                goto put_module;
 
@@ -1249,23 +981,23 @@ do_replace(void __user *user, unsigned int len)
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
-       free_table_info(oldinfo);
+       xt_free_table_info(oldinfo);
        if (copy_to_user(tmp.counters, counters,
-                        sizeof(struct ipt_counters) * tmp.num_counters) != 0)
+                        sizeof(struct xt_counters) * tmp.num_counters) != 0)
                ret = -EFAULT;
        vfree(counters);
-       up(&ipt_mutex);
+       xt_table_unlock(t);
        return ret;
 
  put_module:
        module_put(t->me);
-       up(&ipt_mutex);
+       xt_table_unlock(t);
  free_newinfo_counters_untrans:
        IPT_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
  free_newinfo_counters:
        vfree(counters);
  free_newinfo:
-       free_table_info(newinfo);
+       xt_free_table_info(newinfo);
        return ret;
 }
 
@@ -1273,7 +1005,7 @@ do_replace(void __user *user, unsigned int len)
  * and everything is OK. */
 static inline int
 add_counter_to_entry(struct ipt_entry *e,
-                    const struct ipt_counters addme[],
+                    const struct xt_counters addme[],
                     unsigned int *i)
 {
 #if 0
@@ -1295,15 +1027,16 @@ static int
 do_add_counters(void __user *user, unsigned int len)
 {
        unsigned int i;
-       struct ipt_counters_info tmp, *paddc;
+       struct xt_counters_info tmp, *paddc;
        struct ipt_table *t;
+       struct xt_table_info *private;
        int ret = 0;
        void *loc_cpu_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;
 
-       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ipt_counters))
+       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
                return -EINVAL;
 
        paddc = vmalloc_node(len, numa_node_id());
@@ -1315,29 +1048,30 @@ do_add_counters(void __user *user, unsigned int len)
                goto free;
        }
 
-       t = find_table_lock(tmp.name);
+       t = xt_find_table_lock(AF_INET, tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
        }
 
        write_lock_bh(&t->lock);
-       if (t->private->number != paddc->num_counters) {
+       private = t->private;
+       if (private->number != paddc->num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
 
        i = 0;
        /* Choose the copy that is on our node */
-       loc_cpu_entry = t->private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        IPT_ENTRY_ITERATE(loc_cpu_entry,
-                         t->private->size,
+                         private->size,
                          add_counter_to_entry,
                          paddc->counters,
                          &i);
  unlock_up_free:
        write_unlock_bh(&t->lock);
-       up(&ipt_mutex);
+       xt_table_unlock(t);
        module_put(t->me);
  free:
        vfree(paddc);
@@ -1396,25 +1130,26 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                }
                name[IPT_TABLE_MAXNAMELEN-1] = '\0';
 
-               t = try_then_request_module(find_table_lock(name),
+               t = try_then_request_module(xt_find_table_lock(AF_INET, name),
                                            "iptable_%s", name);
                if (t && !IS_ERR(t)) {
                        struct ipt_getinfo info;
+                       struct xt_table_info *private = t->private;
 
                        info.valid_hooks = t->valid_hooks;
-                       memcpy(info.hook_entry, t->private->hook_entry,
+                       memcpy(info.hook_entry, private->hook_entry,
                               sizeof(info.hook_entry));
-                       memcpy(info.underflow, t->private->underflow,
+                       memcpy(info.underflow, private->underflow,
                               sizeof(info.underflow));
-                       info.num_entries = t->private->number;
-                       info.size = t->private->size;
+                       info.num_entries = private->number;
+                       info.size = private->size;
                        memcpy(info.name, name, sizeof(info.name));
 
                        if (copy_to_user(user, &info, *len) != 0)
                                ret = -EFAULT;
                        else
                                ret = 0;
-                       up(&ipt_mutex);
+                       xt_table_unlock(t);
                        module_put(t->me);
                } else
                        ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1441,7 +1176,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        case IPT_SO_GET_REVISION_MATCH:
        case IPT_SO_GET_REVISION_TARGET: {
                struct ipt_get_revision rev;
-               int (*revfn)(const char *, u8, int *);
+               int target;
 
                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
@@ -1453,12 +1188,13 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                }
 
                if (cmd == IPT_SO_GET_REVISION_TARGET)
-                       revfn = target_revfn;
+                       target = 1;
                else
-                       revfn = match_revfn;
+                       target = 0;
 
-               try_then_request_module(find_revision(rev.name, rev.revision,
-                                                     revfn, &ret),
+               try_then_request_module(xt_find_revision(AF_INET, rev.name,
+                                                        rev.revision,
+                                                        target, &ret),
                                        "ipt_%s", rev.name);
                break;
        }
@@ -1471,60 +1207,15 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        return ret;
 }
 
-/* Registration hooks for targets. */
-int
-ipt_register_target(struct ipt_target *target)
+int ipt_register_table(struct xt_table *table, const struct ipt_replace *repl)
 {
        int ret;
-
-       ret = down_interruptible(&ipt_mutex);
-       if (ret != 0)
-               return ret;
-       list_add(&target->list, &ipt_target);
-       up(&ipt_mutex);
-       return ret;
-}
-
-void
-ipt_unregister_target(struct ipt_target *target)
-{
-       down(&ipt_mutex);
-       LIST_DELETE(&ipt_target, target);
-       up(&ipt_mutex);
-}
-
-int
-ipt_register_match(struct ipt_match *match)
-{
-       int ret;
-
-       ret = down_interruptible(&ipt_mutex);
-       if (ret != 0)
-               return ret;
-
-       list_add(&match->list, &ipt_match);
-       up(&ipt_mutex);
-
-       return ret;
-}
-
-void
-ipt_unregister_match(struct ipt_match *match)
-{
-       down(&ipt_mutex);
-       LIST_DELETE(&ipt_match, match);
-       up(&ipt_mutex);
-}
-
-int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
-{
-       int ret;
-       struct ipt_table_info *newinfo;
-       static struct ipt_table_info bootstrap
+       struct xt_table_info *newinfo;
+       static struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;
 
-       newinfo = alloc_table_info(repl->size);
+       newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo)
                return -ENOMEM;
 
@@ -1540,246 +1231,29 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl)
                              repl->hook_entry,
                              repl->underflow);
        if (ret != 0) {
-               free_table_info(newinfo);
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       ret = down_interruptible(&ipt_mutex);
-       if (ret != 0) {
-               free_table_info(newinfo);
+       if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       /* Don't autoload: we'd eat our tail... */
-       if (list_named_find(&ipt_tables, table->name)) {
-               ret = -EEXIST;
-               goto free_unlock;
-       }
-
-       /* Simplifies replace_table code. */
-       table->private = &bootstrap;
-       if (!replace_table(table, 0, newinfo, &ret))
-               goto free_unlock;
-
-       duprintf("table->private->number = %u\n",
-                table->private->number);
-       
-       /* save number of initial entries */
-       table->private->initial_entries = table->private->number;
-
-       rwlock_init(&table->lock);
-       list_prepend(&ipt_tables, table);
-
- unlock:
-       up(&ipt_mutex);
-       return ret;
-
- free_unlock:
-       free_table_info(newinfo);
-       goto unlock;
+       return 0;
 }
 
 void ipt_unregister_table(struct ipt_table *table)
 {
+       struct xt_table_info *private;
        void *loc_cpu_entry;
 
-       down(&ipt_mutex);
-       LIST_DELETE(&ipt_tables, table);
-       up(&ipt_mutex);
+       private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
-       IPT_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
-                         cleanup_entry, NULL);
-       free_table_info(table->private);
-}
-
-/* Returns 1 if the port is matched by the range, 0 otherwise */
-static inline int
-port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
-{
-       int ret;
-
-       ret = (port >= min && port <= max) ^ invert;
-       return ret;
-}
-
-static int
-tcp_find_option(u_int8_t option,
-               const struct sk_buff *skb,
-               unsigned int optlen,
-               int invert,
-               int *hotdrop)
-{
-       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
-       u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
-       unsigned int i;
-
-       duprintf("tcp_match: finding option\n");
-
-       if (!optlen)
-               return invert;
-
-       /* If we don't have the whole header, drop packet. */
-       op = skb_header_pointer(skb,
-                               skb->nh.iph->ihl*4 + sizeof(struct tcphdr),
-                               optlen, _opt);
-       if (op == NULL) {
-               *hotdrop = 1;
-               return 0;
-       }
-
-       for (i = 0; i < optlen; ) {
-               if (op[i] == option) return !invert;
-               if (op[i] < 2) i++;
-               else i += op[i+1]?:1;
-       }
-
-       return invert;
-}
-
-static int
-tcp_match(const struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         const void *matchinfo,
-         int offset,
-         int *hotdrop)
-{
-       struct tcphdr _tcph, *th;
-       const struct ipt_tcp *tcpinfo = matchinfo;
-
-       if (offset) {
-               /* To quote Alan:
-
-                  Don't allow a fragment of TCP 8 bytes in. Nobody normal
-                  causes this. Its a cracker trying to break in by doing a
-                  flag overwrite to pass the direction checks.
-               */
-               if (offset == 1) {
-                       duprintf("Dropping evil TCP offset=1 frag.\n");
-                       *hotdrop = 1;
-               }
-               /* Must not be a fragment. */
-               return 0;
-       }
-
-#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
-
-       th = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               /* We've been asked to examine this packet, and we
-                  can't.  Hence, no choice but to drop. */
-               duprintf("Dropping evil TCP offset=0 tinygram.\n");
-               *hotdrop = 1;
-               return 0;
-       }
-
-       if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
-                       ntohs(th->source),
-                       !!(tcpinfo->invflags & IPT_TCP_INV_SRCPT)))
-               return 0;
-       if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
-                       ntohs(th->dest),
-                       !!(tcpinfo->invflags & IPT_TCP_INV_DSTPT)))
-               return 0;
-       if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
-                     == tcpinfo->flg_cmp,
-                     IPT_TCP_INV_FLAGS))
-               return 0;
-       if (tcpinfo->option) {
-               if (th->doff * 4 < sizeof(_tcph)) {
-                       *hotdrop = 1;
-                       return 0;
-               }
-               if (!tcp_find_option(tcpinfo->option, skb,
-                                    th->doff*4 - sizeof(_tcph),
-                                    tcpinfo->invflags & IPT_TCP_INV_OPTION,
-                                    hotdrop))
-                       return 0;
-       }
-       return 1;
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-tcp_checkentry(const char *tablename,
-              const struct ipt_ip *ip,
-              void *matchinfo,
-              unsigned int matchsize,
-              unsigned int hook_mask)
-{
-       const struct ipt_tcp *tcpinfo = matchinfo;
-
-       /* Must specify proto == TCP, and no unknown invflags */
-       return ip->proto == IPPROTO_TCP
-               && !(ip->invflags & IPT_INV_PROTO)
-               && matchsize == IPT_ALIGN(sizeof(struct ipt_tcp))
-               && !(tcpinfo->invflags & ~IPT_TCP_INV_MASK);
-}
-
-static int
-udp_match(const struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         const void *matchinfo,
-         int offset,
-         int *hotdrop)
-{
-       struct udphdr _udph, *uh;
-       const struct ipt_udp *udpinfo = matchinfo;
-
-       /* Must not be a fragment. */
-       if (offset)
-               return 0;
-
-       uh = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-                               sizeof(_udph), &_udph);
-       if (uh == NULL) {
-               /* We've been asked to examine this packet, and we
-                  can't.  Hence, no choice but to drop. */
-               duprintf("Dropping evil UDP tinygram.\n");
-               *hotdrop = 1;
-               return 0;
-       }
-
-       return port_match(udpinfo->spts[0], udpinfo->spts[1],
-                         ntohs(uh->source),
-                         !!(udpinfo->invflags & IPT_UDP_INV_SRCPT))
-               && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
-                             ntohs(uh->dest),
-                             !!(udpinfo->invflags & IPT_UDP_INV_DSTPT));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-udp_checkentry(const char *tablename,
-              const struct ipt_ip *ip,
-              void *matchinfo,
-              unsigned int matchinfosize,
-              unsigned int hook_mask)
-{
-       const struct ipt_udp *udpinfo = matchinfo;
-
-       /* Must specify proto == UDP, and no unknown invflags */
-       if (ip->proto != IPPROTO_UDP || (ip->invflags & IPT_INV_PROTO)) {
-               duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
-                        IPPROTO_UDP);
-               return 0;
-       }
-       if (matchinfosize != IPT_ALIGN(sizeof(struct ipt_udp))) {
-               duprintf("ipt_udp: matchsize %u != %u\n",
-                        matchinfosize, IPT_ALIGN(sizeof(struct ipt_udp)));
-               return 0;
-       }
-       if (udpinfo->invflags & ~IPT_UDP_INV_MASK) {
-               duprintf("ipt_udp: unknown flags %X\n",
-                        udpinfo->invflags);
-               return 0;
-       }
-
-       return 1;
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       IPT_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+       xt_free_table_info(private);
 }
 
 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
@@ -1798,6 +1272,7 @@ icmp_match(const struct sk_buff *skb,
           const struct net_device *out,
           const void *matchinfo,
           int offset,
+          unsigned int protoff,
           int *hotdrop)
 {
        struct icmphdr _icmph, *ic;
@@ -1807,8 +1282,7 @@ icmp_match(const struct sk_buff *skb,
        if (offset)
                return 0;
 
-       ic = skb_header_pointer(skb, skb->nh.iph->ihl*4,
-                               sizeof(_icmph), &_icmph);
+       ic = skb_header_pointer(skb, protoff, sizeof(_icmph), &_icmph);
        if (ic == NULL) {
                /* We've been asked to examine this packet, and we
                 * can't.  Hence, no choice but to drop.
@@ -1828,11 +1302,12 @@ icmp_match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 icmp_checkentry(const char *tablename,
-          const struct ipt_ip *ip,
+          const void *info,
           void *matchinfo,
           unsigned int matchsize,
           unsigned int hook_mask)
 {
+       const struct ipt_ip *ip = info;
        const struct ipt_icmp *icmpinfo = matchinfo;
 
        /* Must specify proto == ICMP, and no unknown invflags */
@@ -1862,123 +1337,22 @@ static struct nf_sockopt_ops ipt_sockopts = {
        .get            = do_ipt_get_ctl,
 };
 
-static struct ipt_match tcp_matchstruct = {
-       .name           = "tcp",
-       .match          = &tcp_match,
-       .checkentry     = &tcp_checkentry,
-};
-
-static struct ipt_match udp_matchstruct = {
-       .name           = "udp",
-       .match          = &udp_match,
-       .checkentry     = &udp_checkentry,
-};
-
 static struct ipt_match icmp_matchstruct = {
        .name           = "icmp",
        .match          = &icmp_match,
        .checkentry     = &icmp_checkentry,
 };
 
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const char *i,
-                            off_t start_offset, char *buffer, int length,
-                            off_t *pos, unsigned int *count)
-{
-       if ((*count)++ >= start_offset) {
-               unsigned int namelen;
-
-               namelen = sprintf(buffer + *pos, "%s\n",
-                                 i + sizeof(struct list_head));
-               if (*pos + namelen > length) {
-                       /* Stop iterating */
-                       return 1;
-               }
-               *pos += namelen;
-       }
-       return 0;
-}
-
-static inline int print_target(const struct ipt_target *t,
-                               off_t start_offset, char *buffer, int length,
-                               off_t *pos, unsigned int *count)
-{
-       if (t == &ipt_standard_target || t == &ipt_error_target)
-               return 0;
-       return print_name((char *)t, start_offset, buffer, length, pos, count);
-}
-
-static int ipt_get_tables(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&ipt_tables, print_name, void *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&ipt_mutex);
-
-       /* `start' hack - see fs/proc/generic.c line ~105 */
-       *start=(char *)((unsigned long)count-offset);
-       return pos;
-}
-
-static int ipt_get_targets(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&ipt_target, print_target, struct ipt_target *,
-                 offset, buffer, length, &pos, &count);
-       
-       up(&ipt_mutex);
-
-       *start = (char *)((unsigned long)count - offset);
-       return pos;
-}
-
-static int ipt_get_matches(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ipt_mutex) != 0)
-               return 0;
-       
-       LIST_FIND(&ipt_match, print_name, void *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&ipt_mutex);
-
-       *start = (char *)((unsigned long)count - offset);
-       return pos;
-}
-
-static const struct { char *name; get_info_t *get_info; } ipt_proc_entry[] =
-{ { "ip_tables_names", ipt_get_tables },
-  { "ip_tables_targets", ipt_get_targets },
-  { "ip_tables_matches", ipt_get_matches },
-  { NULL, NULL} };
-#endif /*CONFIG_PROC_FS*/
-
 static int __init init(void)
 {
        int ret;
 
+       xt_proto_init(AF_INET);
+
        /* No one else will be downing sem now, so we won't sleep */
-       down(&ipt_mutex);
-       list_append(&ipt_target, &ipt_standard_target);
-       list_append(&ipt_target, &ipt_error_target);
-       list_append(&ipt_match, &tcp_matchstruct);
-       list_append(&ipt_match, &udp_matchstruct);
-       list_append(&ipt_match, &icmp_matchstruct);
-       up(&ipt_mutex);
+       xt_register_target(AF_INET, &ipt_standard_target);
+       xt_register_target(AF_INET, &ipt_error_target);
+       xt_register_match(AF_INET, &icmp_matchstruct);
 
        /* Register setsockopt */
        ret = nf_register_sockopt(&ipt_sockopts);
@@ -1987,49 +1361,23 @@ static int __init init(void)
                return ret;
        }
 
-#ifdef CONFIG_PROC_FS
-       {
-       struct proc_dir_entry *proc;
-       int i;
-
-       for (i = 0; ipt_proc_entry[i].name; i++) {
-               proc = proc_net_create(ipt_proc_entry[i].name, 0,
-                                      ipt_proc_entry[i].get_info);
-               if (!proc) {
-                       while (--i >= 0)
-                               proc_net_remove(ipt_proc_entry[i].name);
-                       nf_unregister_sockopt(&ipt_sockopts);
-                       return -ENOMEM;
-               }
-               proc->owner = THIS_MODULE;
-       }
-       }
-#endif
-
-       printk("ip_tables: (C) 2000-2002 Netfilter core team\n");
+       printk("ip_tables: (C) 2000-2006 Netfilter Core Team\n");
        return 0;
 }
 
 static void __exit fini(void)
 {
        nf_unregister_sockopt(&ipt_sockopts);
-#ifdef CONFIG_PROC_FS
-       {
-       int i;
-       for (i = 0; ipt_proc_entry[i].name; i++)
-               proc_net_remove(ipt_proc_entry[i].name);
-       }
-#endif
+
+       xt_unregister_match(AF_INET, &icmp_matchstruct);
+       xt_unregister_target(AF_INET, &ipt_error_target);
+       xt_unregister_target(AF_INET, &ipt_standard_target);
+
+       xt_proto_fini(AF_INET);
 }
 
 EXPORT_SYMBOL(ipt_register_table);
 EXPORT_SYMBOL(ipt_unregister_table);
-EXPORT_SYMBOL(ipt_register_match);
-EXPORT_SYMBOL(ipt_unregister_match);
 EXPORT_SYMBOL(ipt_do_table);
-EXPORT_SYMBOL(ipt_register_target);
-EXPORT_SYMBOL(ipt_unregister_target);
-EXPORT_SYMBOL(ipt_find_target);
-
 module_init(init);
 module_exit(fini);
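
The init()/fini() conversion above drops the private ipt_mutex/list_append bookkeeping and registers the built-in targets and the icmp match with the generic x_tables layer instead. As a rough sketch (not part of the patch; every "example" name is hypothetical and the handlers do nothing useful), an AF_INET match module written against this interface reduces to:

/*
 * Hypothetical, self-contained AF_INET match skeleton mirroring the
 * xt_register_match()/xt_unregister_match() calls in the hunk above.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>

static int
example_match_fn(const struct sk_buff *skb,
		 const struct net_device *in,
		 const struct net_device *out,
		 const void *matchinfo,
		 int offset,
		 unsigned int protoff,
		 int *hotdrop)
{
	return 1;	/* match every packet */
}

static int
example_checkentry(const char *tablename,
		   const void *ip,
		   void *matchinfo,
		   unsigned int matchsize,
		   unsigned int hook_mask)
{
	return 1;	/* nothing to validate */
}

static struct ipt_match example_reg = {
	.name		= "example",
	.match		= example_match_fn,
	.checkentry	= example_checkentry,
	.me		= THIS_MODULE,
};

static int __init example_init(void)
{
	return xt_register_match(AF_INET, &example_reg);
}

static void __exit example_fini(void)
{
	xt_unregister_match(AF_INET, &example_reg);
}

module_init(example_init);
module_exit(example_fini);
MODULE_LICENSE("GPL");

Returning the xt_register_match() result directly lets module load fail cleanly if the core rejects the registration.
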
diff --git a/net/ipv4/netfilter/ipt_CLASSIFY.c b/net/ipv4/netfilter/ipt_CLASSIFY.c
deleted file mode 100644 (file)
index dab78d8..0000000
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * This is a module which is used for setting the skb->priority field
- * of an skb for qdisc classification.
- */
-
-/* (C) 2001-2002 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_CLASSIFY.h>
-
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("iptables qdisc classification target module");
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       const struct ipt_classify_target_info *clinfo = targinfo;
-
-       if((*pskb)->priority != clinfo->priority) 
-               (*pskb)->priority = clinfo->priority;
-
-       return IPT_CONTINUE;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ipt_entry *e,
-           void *targinfo,
-           unsigned int targinfosize,
-           unsigned int hook_mask)
-{
-       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_classify_target_info))){
-               printk(KERN_ERR "CLASSIFY: invalid size (%u != %Zu).\n",
-                      targinfosize,
-                      IPT_ALIGN(sizeof(struct ipt_classify_target_info)));
-               return 0;
-       }
-       
-       if (hook_mask & ~((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
-                         (1 << NF_IP_POST_ROUTING))) {
-               printk(KERN_ERR "CLASSIFY: only valid in LOCAL_OUT, FORWARD "
-                               "and POST_ROUTING.\n");
-               return 0;
-       }
-
-       if (strcmp(tablename, "mangle") != 0) {
-               printk(KERN_ERR "CLASSIFY: can only be called from "
-                               "\"mangle\" table, not \"%s\".\n",
-                               tablename);
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_target ipt_classify_reg = { 
-       .name           = "CLASSIFY", 
-       .target         = target,
-       .checkentry     = checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_target(&ipt_classify_reg);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_target(&ipt_classify_reg);
-}
-
-module_init(init);
-module_exit(fini);
index 45c52d8f4d99750c0407b8d8cd3510792e85ef3f..d9bc971f03afd677907bf9651e2551f227f34e18 100644 (file)
@@ -379,12 +379,13 @@ target(struct sk_buff **pskb,
 
 static int
 checkentry(const char *tablename,
-          const struct ipt_entry *e,
+          const void *e_void,
            void *targinfo,
            unsigned int targinfosize,
            unsigned int hook_mask)
 {
        struct ipt_clusterip_tgt_info *cipinfo = targinfo;
+       const struct ipt_entry *e = e_void;
 
        struct clusterip_config *config;
 
diff --git a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/ipv4/netfilter/ipt_CONNMARK.c
deleted file mode 100644 (file)
index 8acac5a..0000000
+++ /dev/null
@@ -1,122 +0,0 @@
-/* This kernel module is used to modify the connection mark values, or
- * to optionally restore the skb nfmark from the connection mark
- *
- * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
-MODULE_DESCRIPTION("IP tables CONNMARK matching module");
-MODULE_LICENSE("GPL");
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_CONNMARK.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       const struct ipt_connmark_target_info *markinfo = targinfo;
-       u_int32_t diff;
-       u_int32_t nfmark;
-       u_int32_t newmark;
-       u_int32_t ctinfo;
-       u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
-
-       if (ctmark) {
-           switch(markinfo->mode) {
-           case IPT_CONNMARK_SET:
-               newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
-               if (newmark != *ctmark)
-                   *ctmark = newmark;
-               break;
-           case IPT_CONNMARK_SAVE:
-               newmark = (*ctmark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
-               if (*ctmark != newmark)
-                   *ctmark = newmark;
-               break;
-           case IPT_CONNMARK_RESTORE:
-               nfmark = (*pskb)->nfmark;
-               diff = (*ctmark ^ nfmark) & markinfo->mask;
-               if (diff != 0)
-                   (*pskb)->nfmark = nfmark ^ diff;
-               break;
-           }
-       }
-
-       return IPT_CONTINUE;
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_entry *e,
-          void *targinfo,
-          unsigned int targinfosize,
-          unsigned int hook_mask)
-{
-       struct ipt_connmark_target_info *matchinfo = targinfo;
-       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_connmark_target_info))) {
-               printk(KERN_WARNING "CONNMARK: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IPT_ALIGN(sizeof(struct ipt_connmark_target_info)));
-               return 0;
-       }
-
-       if (matchinfo->mode == IPT_CONNMARK_RESTORE) {
-           if (strcmp(tablename, "mangle") != 0) {
-                   printk(KERN_WARNING "CONNMARK: restore can only be called from \"mangle\" table, not \"%s\"\n", tablename);
-                   return 0;
-           }
-       }
-
-       if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) {
-               printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_target ipt_connmark_reg = {
-       .name = "CONNMARK",
-       .target = &target,
-       .checkentry = &checkentry,
-       .me = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       need_ip_conntrack();
-       return ipt_register_target(&ipt_connmark_reg);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_target(&ipt_connmark_reg);
-}
-
-module_init(init);
-module_exit(fini);
index 6e319570a28caf01107dff4e825f6844aa52d992..898cdf79ce18258eb75dc073ee5e228c81a21b60 100644 (file)
@@ -57,7 +57,7 @@ target(struct sk_buff **pskb,
 
 static int
 checkentry(const char *tablename,
-          const struct ipt_entry *e,
+          const void *e_void,
            void *targinfo,
            unsigned int targinfosize,
            unsigned int hook_mask)
index a1319693f648c2beba63fea33070c8c79fe04fef..706445426a6dfc4096d97e76d33cfe27a74ea665 100644 (file)
@@ -113,12 +113,13 @@ target(struct sk_buff **pskb,
 
 static int
 checkentry(const char *tablename,
-          const struct ipt_entry *e,
+          const void *e_void,
            void *targinfo,
            unsigned int targinfosize,
            unsigned int hook_mask)
 {
        const struct ipt_ECN_info *einfo = (struct ipt_ECN_info *)targinfo;
+       const struct ipt_entry *e = e_void;
 
        if (targinfosize != IPT_ALIGN(sizeof(struct ipt_ECN_info))) {
                printk(KERN_WARNING "ECN: targinfosize %u != %Zu\n",
index 30be0f1dae370f6212de7955a972e87272854bef..6606ddb66a29e6ea0044c24ce596bb9767aae424 100644 (file)
@@ -431,7 +431,7 @@ ipt_log_target(struct sk_buff **pskb,
 }
 
 static int ipt_log_checkentry(const char *tablename,
-                             const struct ipt_entry *e,
+                             const void *e,
                              void *targinfo,
                              unsigned int targinfosize,
                              unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_MARK.c b/net/ipv4/netfilter/ipt_MARK.c
deleted file mode 100644 (file)
index 52b4f2c..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-/* This is a module which is used for setting the NFMARK field of an skb. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_MARK.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables MARK modification module");
-
-static unsigned int
-target_v0(struct sk_buff **pskb,
-         const struct net_device *in,
-         const struct net_device *out,
-         unsigned int hooknum,
-         const void *targinfo,
-         void *userinfo)
-{
-       const struct ipt_mark_target_info *markinfo = targinfo;
-
-       if((*pskb)->nfmark != markinfo->mark)
-               (*pskb)->nfmark = markinfo->mark;
-
-       return IPT_CONTINUE;
-}
-
-static unsigned int
-target_v1(struct sk_buff **pskb,
-         const struct net_device *in,
-         const struct net_device *out,
-         unsigned int hooknum,
-         const void *targinfo,
-         void *userinfo)
-{
-       const struct ipt_mark_target_info_v1 *markinfo = targinfo;
-       int mark = 0;
-
-       switch (markinfo->mode) {
-       case IPT_MARK_SET:
-               mark = markinfo->mark;
-               break;
-               
-       case IPT_MARK_AND:
-               mark = (*pskb)->nfmark & markinfo->mark;
-               break;
-               
-       case IPT_MARK_OR:
-               mark = (*pskb)->nfmark | markinfo->mark;
-               break;
-       }
-
-       if((*pskb)->nfmark != mark)
-               (*pskb)->nfmark = mark;
-
-       return IPT_CONTINUE;
-}
-
-
-static int
-checkentry_v0(const char *tablename,
-             const struct ipt_entry *e,
-             void *targinfo,
-             unsigned int targinfosize,
-             unsigned int hook_mask)
-{
-       struct ipt_mark_target_info *markinfo = targinfo;
-
-       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info))) {
-               printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IPT_ALIGN(sizeof(struct ipt_mark_target_info)));
-               return 0;
-       }
-
-       if (strcmp(tablename, "mangle") != 0) {
-               printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
-               return 0;
-       }
-
-       if (markinfo->mark > 0xffffffff) {
-               printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static int
-checkentry_v1(const char *tablename,
-             const struct ipt_entry *e,
-             void *targinfo,
-             unsigned int targinfosize,
-             unsigned int hook_mask)
-{
-       struct ipt_mark_target_info_v1 *markinfo = targinfo;
-
-       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1))){
-               printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IPT_ALIGN(sizeof(struct ipt_mark_target_info_v1)));
-               return 0;
-       }
-
-       if (strcmp(tablename, "mangle") != 0) {
-               printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
-               return 0;
-       }
-
-       if (markinfo->mode != IPT_MARK_SET
-           && markinfo->mode != IPT_MARK_AND
-           && markinfo->mode != IPT_MARK_OR) {
-               printk(KERN_WARNING "MARK: unknown mode %u\n",
-                      markinfo->mode);
-               return 0;
-       }
-
-       if (markinfo->mark > 0xffffffff) {
-               printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_target ipt_mark_reg_v0 = {
-       .name           = "MARK",
-       .target         = target_v0,
-       .checkentry     = checkentry_v0,
-       .me             = THIS_MODULE,
-       .revision       = 0,
-};
-
-static struct ipt_target ipt_mark_reg_v1 = {
-       .name           = "MARK",
-       .target         = target_v1,
-       .checkentry     = checkentry_v1,
-       .me             = THIS_MODULE,
-       .revision       = 1,
-};
-
-static int __init init(void)
-{
-       int err;
-
-       err = ipt_register_target(&ipt_mark_reg_v0);
-       if (!err) {
-               err = ipt_register_target(&ipt_mark_reg_v1);
-               if (err)
-                       ipt_unregister_target(&ipt_mark_reg_v0);
-       }
-       return err;
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_target(&ipt_mark_reg_v0);
-       ipt_unregister_target(&ipt_mark_reg_v1);
-}
-
-module_init(init);
-module_exit(fini);
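
The deleted MARK module registered two revisions of the same target name. Condensed from its init() above (and reusing its ipt_mark_reg_v0/v1 structures), the rollback pattern it relied on is simply:

/* Condensed restatement of the deleted init(): register revision 0,
 * then revision 1, and unregister revision 0 again if the second
 * registration fails, so the module is never left half-registered. */
static int __init mark_register_both(void)
{
	int err;

	err = ipt_register_target(&ipt_mark_reg_v0);	/* .revision = 0 */
	if (err)
		return err;

	err = ipt_register_target(&ipt_mark_reg_v1);	/* .revision = 1 */
	if (err)
		ipt_unregister_target(&ipt_mark_reg_v0);

	return err;
}
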
index 27860510ca6dacb4464f65352b5f4e9c917c9921..12c56d3343ca3b7d38bd5153f21e7a2b23eb0fd1 100644 (file)
@@ -40,7 +40,7 @@ static DEFINE_RWLOCK(masq_lock);
 /* FIXME: Multiple targets. --RR */
 static int
 masquerade_check(const char *tablename,
-                const struct ipt_entry *e,
+                const void *e,
                 void *targinfo,
                 unsigned int targinfosize,
                 unsigned int hook_mask)
index e6e7b6095363db60b760b93196c5d289a3796da0..b074467fe67b642c0345c152c989363dd617c9bc 100644 (file)
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("iptables 1:1 NAT mapping of IP networks target");
 
 static int
 check(const char *tablename,
-      const struct ipt_entry *e,
+      const void *e,
       void *targinfo,
       unsigned int targinfosize,
       unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_NFQUEUE.c b/net/ipv4/netfilter/ipt_NFQUEUE.c
deleted file mode 100644 (file)
index 3cedc9b..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* iptables module for using new netfilter netlink queue
- *
- * (C) 2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as 
- * published by the Free Software Foundation.
- * 
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("iptables NFQUEUE target");
-MODULE_LICENSE("GPL");
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       const struct ipt_NFQ_info *tinfo = targinfo;
-
-       return NF_QUEUE_NR(tinfo->queuenum);
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_entry *e,
-           void *targinfo,
-           unsigned int targinfosize,
-           unsigned int hook_mask)
-{
-       if (targinfosize != IPT_ALIGN(sizeof(struct ipt_NFQ_info))) {
-               printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IPT_ALIGN(sizeof(struct ipt_NFQ_info)));
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_target ipt_NFQ_reg = {
-       .name           = "NFQUEUE",
-       .target         = target,
-       .checkentry     = checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_target(&ipt_NFQ_reg);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_target(&ipt_NFQ_reg);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_NOTRACK.c b/net/ipv4/netfilter/ipt_NOTRACK.c
deleted file mode 100644 (file)
index e3c69d0..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* This is a module which is used for setting up fake conntracks
- * on packets so that they are not seen by the conntrack/NAT code.
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       /* Previously seen (loopback)? Ignore. */
-       if ((*pskb)->nfct != NULL)
-               return IPT_CONTINUE;
-
-       /* Attach fake conntrack entry. 
-          If there is a real ct entry correspondig to this packet, 
-          it'll hang aroun till timing out. We don't deal with it
-          for performance reasons. JK */
-       nf_ct_untrack(*pskb);
-       (*pskb)->nfctinfo = IP_CT_NEW;
-       nf_conntrack_get((*pskb)->nfct);
-
-       return IPT_CONTINUE;
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_entry *e,
-           void *targinfo,
-           unsigned int targinfosize,
-           unsigned int hook_mask)
-{
-       if (targinfosize != 0) {
-               printk(KERN_WARNING "NOTRACK: targinfosize %u != 0\n",
-                      targinfosize);
-               return 0;
-       }
-
-       if (strcmp(tablename, "raw") != 0) {
-               printk(KERN_WARNING "NOTRACK: can only be called from \"raw\" table, not \"%s\"\n", tablename);
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_target ipt_notrack_reg = { 
-       .name = "NOTRACK", 
-       .target = target, 
-       .checkentry = checkentry,
-       .me = THIS_MODULE 
-};
-
-static int __init init(void)
-{
-       if (ipt_register_target(&ipt_notrack_reg))
-               return -EINVAL;
-
-       return 0;
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_target(&ipt_notrack_reg);
-}
-
-module_init(init);
-module_exit(fini);
-MODULE_LICENSE("GPL");
index 5245bfd33d526472f543756af058b87ab214b4fd..140be51f2f01970772d3112d678962fde4c448ea 100644 (file)
@@ -33,7 +33,7 @@ MODULE_DESCRIPTION("iptables REDIRECT target module");
 /* FIXME: Take multiple ranges --RR */
 static int
 redirect_check(const char *tablename,
-              const struct ipt_entry *e,
+              const void *e,
               void *targinfo,
               unsigned int targinfosize,
               unsigned int hook_mask)
index 6693526ae128715cca526a332cf3edde758a8fdd..3eb47aae78c5b4cf1e215d9975f9c4e6c3440067 100644 (file)
@@ -282,12 +282,13 @@ static unsigned int reject(struct sk_buff **pskb,
 }
 
 static int check(const char *tablename,
-                const struct ipt_entry *e,
+                const void *e_void,
                 void *targinfo,
                 unsigned int targinfosize,
                 unsigned int hook_mask)
 {
        const struct ipt_reject_info *rejinfo = targinfo;
+       const struct ipt_entry *e = e_void;
 
        if (targinfosize != IPT_ALIGN(sizeof(struct ipt_reject_info))) {
                DEBUGP("REJECT: targinfosize %u != 0\n", targinfosize);
index 7a0536d864acf15125ef4eb3ff2cb2e869f140a7..a22de59bba0e0581ea29552300550a4788d1e057 100644 (file)
@@ -49,7 +49,7 @@ MODULE_DESCRIPTION("iptables special SNAT module for consistent sourceip");
 
 static int
 same_check(const char *tablename,
-             const struct ipt_entry *e,
+             const void *e,
              void *targinfo,
              unsigned int targinfosize,
              unsigned int hook_mask)
index 8db70d6908c33947917babc2c6cf2df11ce871a3..c122841e182c965d0c8629eb26987af736830001 100644 (file)
@@ -210,12 +210,13 @@ static inline int find_syn_match(const struct ipt_entry_match *m)
 /* Must specify -p tcp --syn/--tcp-flags SYN */
 static int
 ipt_tcpmss_checkentry(const char *tablename,
-                     const struct ipt_entry *e,
+                     const void *e_void,
                      void *targinfo,
                      unsigned int targinfosize,
                      unsigned int hook_mask)
 {
        const struct ipt_tcpmss_info *tcpmssinfo = targinfo;
+       const struct ipt_entry *e = e_void;
 
        if (targinfosize != IPT_ALIGN(sizeof(struct ipt_tcpmss_info))) {
                DEBUGP("ipt_tcpmss_checkentry: targinfosize %u != %u\n",
index deadb36d442805aefba5d96465f5a478aa016fa3..3a44a56db2397352c6b012867a9bb683dd2969f9 100644 (file)
@@ -52,7 +52,7 @@ target(struct sk_buff **pskb,
 
 static int
 checkentry(const char *tablename,
-          const struct ipt_entry *e,
+          const void *e_void,
            void *targinfo,
            unsigned int targinfosize,
            unsigned int hook_mask)
index b9ae6a9382f35cd4eef6b5c175e2f0c438677647..b769eb231970ba82a4737e6cd6654254f0e1a848 100644 (file)
@@ -66,7 +66,7 @@ ipt_ttl_target(struct sk_buff **pskb, const struct net_device *in,
 }
 
 static int ipt_ttl_checkentry(const char *tablename,
-               const struct ipt_entry *e,
+               const void *e,
                void *targinfo,
                unsigned int targinfosize,
                unsigned int hook_mask)
index 38641cd061234683edf4dbbeeea4e3bc05465d09..641dbc477650f6059e9577386a1664d6c9ea0206 100644 (file)
@@ -330,7 +330,7 @@ static void ipt_logfn(unsigned int pf,
 }
 
 static int ipt_ulog_checkentry(const char *tablename,
-                              const struct ipt_entry *e,
+                              const void *e,
                               void *targinfo,
                               unsigned int targinfosize,
                               unsigned int hookmask)
index e19c2a52d00cb537e5783aeaa3e0fdd25005b744..d6b83a976518c1bd0352e40a926b7f9ac7bdbe1b 100644 (file)
@@ -29,7 +29,7 @@ static inline int match_type(u_int32_t addr, u_int16_t mask)
 
 static int match(const struct sk_buff *skb, const struct net_device *in,
                 const struct net_device *out, const void *matchinfo,
-                int offset, int *hotdrop)
+                int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_addrtype_info *info = matchinfo;
        const struct iphdr *iph = skb->nh.iph;
@@ -43,7 +43,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
        return ret;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip,
                      void *matchinfo, unsigned int matchsize,
                      unsigned int hook_mask)
 {
index a0fea847cb7282642f16b8beee14f01eb0610155..144adfec13cccff549bb4e716c979b7057e02037 100644 (file)
@@ -41,6 +41,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        struct ip_auth_hdr _ahdr, *ah;
@@ -50,7 +51,7 @@ match(const struct sk_buff *skb,
        if (offset)
                return 0;
 
-       ah = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+       ah = skb_header_pointer(skb, protoff,
                                sizeof(_ahdr), &_ahdr);
        if (ah == NULL) {
                /* We've been asked to examine this packet, and we
@@ -69,12 +70,13 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ipt_ip *ip,
+          const void *ip_void,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
 {
        const struct ipt_ah *ahinfo = matchinfo;
+       const struct ipt_ip *ip = ip_void;
 
        /* Must specify proto == AH, and no unknown invflags */
        if (ip->proto != IPPROTO_AH || (ip->invflags & IPT_INV_PROTO)) {
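
Alongside the const void * change, every match() now receives protoff, the transport-header offset supplied by the table traversal code, so header access becomes skb_header_pointer(skb, protoff, ...) instead of the open-coded skb->nh.iph->ihl * 4 seen in the removed lines. A hypothetical fragment using the new argument (assuming the usual linux/skbuff.h and linux/udp.h includes; the port test is purely illustrative):

static int
example_l4_match(const struct sk_buff *skb,
		 const struct net_device *in,
		 const struct net_device *out,
		 const void *matchinfo,
		 int offset,
		 unsigned int protoff,	/* transport header offset from the core */
		 int *hotdrop)
{
	struct udphdr _hdr, *hp;

	/* Never match non-first fragments: the ports are not there. */
	if (offset)
		return 0;

	hp = skb_header_pointer(skb, protoff, sizeof(_hdr), &_hdr);
	if (hp == NULL) {
		/* Truncated header: asked to examine it but cannot. */
		*hotdrop = 1;
		return 0;
	}

	return ntohs(hp->dest) == 53;	/* illustrative test only */
}
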
diff --git a/net/ipv4/netfilter/ipt_comment.c b/net/ipv4/netfilter/ipt_comment.c
deleted file mode 100644 (file)
index 6b76a1e..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Implements a dummy match to allow attaching comments to rules
- *
- * 2003-05-13 Brad Fisher (brad@info-link.net)
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_comment.h>
-
-MODULE_AUTHOR("Brad Fisher <brad@info-link.net>");
-MODULE_DESCRIPTION("iptables comment match module");
-MODULE_LICENSE("GPL");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       /* We always match */
-       return 1;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ipt_ip *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       /* Check the size */
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_comment_info)))
-               return 0;
-       return 1;
-}
-
-static struct ipt_match comment_match = {
-       .name           = "comment",
-       .match          = match,
-       .checkentry     = checkentry,
-       .me             = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&comment_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&comment_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_connbytes.c b/net/ipv4/netfilter/ipt_connbytes.c
deleted file mode 100644 (file)
index d68a048..0000000
+++ /dev/null
@@ -1,161 +0,0 @@
-/* Kernel module to match connection tracking byte counter.
- * GPL (C) 2002 Martin Devera (devik@cdi.cz).
- *
- * 2004-07-20 Harald Welte <laforge@netfilter.org>
- *     - reimplemented to use per-connection accounting counters
- *     - add functionality to match number of packets
- *     - add functionality to match average packet size
- *     - add support to match directions seperately
- *
- */
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_connbytes.h>
-
-#include <asm/div64.h>
-#include <asm/bitops.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
-
-/* 64bit divisor, dividend and result. dynamic precision */
-static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
-{
-       u_int32_t d = divisor;
-
-       if (divisor > 0xffffffffULL) {
-               unsigned int shift = fls(divisor >> 32);
-
-               d = divisor >> shift;
-               dividend >>= shift;
-       }
-
-       do_div(dividend, d);
-       return dividend;
-}
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_connbytes_info *sinfo = matchinfo;
-       u_int64_t what = 0;     /* initialize to make gcc happy */
-       const struct ip_conntrack_counter *counters;
-
-       if (!(counters = nf_ct_get_counters(skb)))
-               return 0; /* no match */
-
-       switch (sinfo->what) {
-       case IPT_CONNBYTES_PKTS:
-               switch (sinfo->direction) {
-               case IPT_CONNBYTES_DIR_ORIGINAL:
-                       what = counters[IP_CT_DIR_ORIGINAL].packets;
-                       break;
-               case IPT_CONNBYTES_DIR_REPLY:
-                       what = counters[IP_CT_DIR_REPLY].packets;
-                       break;
-               case IPT_CONNBYTES_DIR_BOTH:
-                       what = counters[IP_CT_DIR_ORIGINAL].packets;
-                       what += counters[IP_CT_DIR_REPLY].packets;
-                       break;
-               }
-               break;
-       case IPT_CONNBYTES_BYTES:
-               switch (sinfo->direction) {
-               case IPT_CONNBYTES_DIR_ORIGINAL:
-                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
-                       break;
-               case IPT_CONNBYTES_DIR_REPLY:
-                       what = counters[IP_CT_DIR_REPLY].bytes;
-                       break;
-               case IPT_CONNBYTES_DIR_BOTH:
-                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
-                       what += counters[IP_CT_DIR_REPLY].bytes;
-                       break;
-               }
-               break;
-       case IPT_CONNBYTES_AVGPKT:
-               switch (sinfo->direction) {
-               case IPT_CONNBYTES_DIR_ORIGINAL:
-                       what = div64_64(counters[IP_CT_DIR_ORIGINAL].bytes,
-                                       counters[IP_CT_DIR_ORIGINAL].packets);
-                       break;
-               case IPT_CONNBYTES_DIR_REPLY:
-                       what = div64_64(counters[IP_CT_DIR_REPLY].bytes,
-                                       counters[IP_CT_DIR_REPLY].packets);
-                       break;
-               case IPT_CONNBYTES_DIR_BOTH:
-                       {
-                               u_int64_t bytes;
-                               u_int64_t pkts;
-                               bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
-                                       counters[IP_CT_DIR_REPLY].bytes;
-                               pkts = counters[IP_CT_DIR_ORIGINAL].packets+
-                                       counters[IP_CT_DIR_REPLY].packets;
-
-                               /* FIXME_THEORETICAL: what to do if sum
-                                * overflows ? */
-
-                               what = div64_64(bytes, pkts);
-                       }
-                       break;
-               }
-               break;
-       }
-
-       if (sinfo->count.to)
-               return (what <= sinfo->count.to && what >= sinfo->count.from);
-       else
-               return (what >= sinfo->count.from);
-}
-
-static int check(const char *tablename,
-                const struct ipt_ip *ip,
-                void *matchinfo,
-                unsigned int matchsize,
-                unsigned int hook_mask)
-{
-       const struct ipt_connbytes_info *sinfo = matchinfo;
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_connbytes_info)))
-               return 0;
-
-       if (sinfo->what != IPT_CONNBYTES_PKTS &&
-           sinfo->what != IPT_CONNBYTES_BYTES &&
-           sinfo->what != IPT_CONNBYTES_AVGPKT)
-               return 0;
-
-       if (sinfo->direction != IPT_CONNBYTES_DIR_ORIGINAL &&
-           sinfo->direction != IPT_CONNBYTES_DIR_REPLY &&
-           sinfo->direction != IPT_CONNBYTES_DIR_BOTH)
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match state_match = {
-       .name           = "connbytes",
-       .match          = &match,
-       .checkentry     = &check,
-       .me             = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&state_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&state_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_connmark.c b/net/ipv4/netfilter/ipt_connmark.c
deleted file mode 100644 (file)
index 5306ef2..0000000
+++ /dev/null
@@ -1,88 +0,0 @@
-/* This kernel module matches connection mark values set by the
- * CONNMARK target
- *
- * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
- * by Henrik Nordstrom <hno@marasystems.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
-MODULE_DESCRIPTION("IP tables connmark match module");
-MODULE_LICENSE("GPL");
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_connmark.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_connmark_info *info = matchinfo;
-       u_int32_t ctinfo;
-       const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
-       if (!ctmark)
-               return 0;
-
-       return (((*ctmark) & info->mask) == info->mark) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_ip *ip,
-          void *matchinfo,
-          unsigned int matchsize,
-          unsigned int hook_mask)
-{
-       struct ipt_connmark_info *cm = 
-                               (struct ipt_connmark_info *)matchinfo;
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_connmark_info)))
-               return 0;
-
-       if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
-               printk(KERN_WARNING "connmark: only support 32bit mark\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_match connmark_match = {
-       .name = "connmark",
-       .match = &match,
-       .checkentry = &checkentry,
-       .me = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&connmark_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&connmark_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_conntrack.c b/net/ipv4/netfilter/ipt_conntrack.c
deleted file mode 100644 (file)
index c8d1870..0000000
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Kernel module to match connection tracking information.
- * Superset of Rusty's minimalistic state match.
- *
- * (C) 2001  Marc Boucher (marc@mbsi.ca).
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
-#else
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_conntrack.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables connection tracking match module");
-
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_conntrack_info *sinfo = matchinfo;
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       unsigned int statebit;
-
-       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
-
-#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
-
-       if (ct == &ip_conntrack_untracked)
-               statebit = IPT_CONNTRACK_STATE_UNTRACKED;
-       else if (ct)
-               statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
-       else
-               statebit = IPT_CONNTRACK_STATE_INVALID;
-       if(sinfo->flags & IPT_CONNTRACK_STATE) {
-               if (ct) {
-                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip !=
-                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip)
-                               statebit |= IPT_CONNTRACK_STATE_SNAT;
-
-                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip !=
-                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip)
-                               statebit |= IPT_CONNTRACK_STATE_DNAT;
-               }
-
-               if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_PROTO) {
-               if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_STATUS) {
-               if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
-               unsigned long expires;
-
-               if(!ct)
-                       return 0;
-
-               expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
-
-               if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
-                       return 0;
-       }
-
-       return 1;
-}
-
-#else /* CONFIG_IP_NF_CONNTRACK */
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_conntrack_info *sinfo = matchinfo;
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       unsigned int statebit;
-
-       ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
-
-#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
-
-       if (ct == &nf_conntrack_untracked)
-               statebit = IPT_CONNTRACK_STATE_UNTRACKED;
-       else if (ct)
-               statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
-       else
-               statebit = IPT_CONNTRACK_STATE_INVALID;
-       if(sinfo->flags & IPT_CONNTRACK_STATE) {
-               if (ct) {
-                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip !=
-                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip)
-                               statebit |= IPT_CONNTRACK_STATE_SNAT;
-
-                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip !=
-                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip)
-                               statebit |= IPT_CONNTRACK_STATE_DNAT;
-               }
-
-               if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_PROTO) {
-               if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
-               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_STATUS) {
-               if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
-                       return 0;
-       }
-
-       if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
-               unsigned long expires;
-
-               if(!ct)
-                       return 0;
-
-               expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
-
-               if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
-                       return 0;
-       }
-
-       return 1;
-}
-
-#endif /* CONFIG_NF_IP_CONNTRACK */
-
-static int check(const char *tablename,
-                const struct ipt_ip *ip,
-                void *matchinfo,
-                unsigned int matchsize,
-                unsigned int hook_mask)
-{
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_conntrack_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match conntrack_match = {
-       .name           = "conntrack",
-       .match          = &match,
-       .checkentry     = &check,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       need_ip_conntrack();
-       return ipt_register_match(&conntrack_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&conntrack_match);
-}
-
-module_init(init);
-module_exit(fini);
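
The FWINV()-style macros in the removed code above (and FWINVTCP earlier in ip_tables.c) all implement "test, optionally inverted" the same way: the raw condition is XOR-ed with whether the corresponding inversion flag is set. A self-contained restatement of the idiom:

/*
 * EXAMPLE_INV(cond, invflags, flag) is 1 when the condition holds and
 * the flag is clear, or when the condition fails and the flag is set:
 *
 *	cond	flag set	result
 *	 0	   0		  0
 *	 1	   0		  1
 *	 0	   1		  1
 *	 1	   1		  0
 */
#define EXAMPLE_INV(cond, invflags, flag) \
	((cond) ^ !!((invflags) & (flag)))
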
diff --git a/net/ipv4/netfilter/ipt_dccp.c b/net/ipv4/netfilter/ipt_dccp.c
deleted file mode 100644 (file)
index ad3278b..0000000
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * iptables module for DCCP protocol header matching
- *
- * (C) 2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <net/ip.h>
-#include <linux/dccp.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_dccp.h>
-
-#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
-                                 || (!!((invflag) & (option)) ^ (cond)))
-
-static unsigned char *dccp_optbuf;
-static DEFINE_SPINLOCK(dccp_buflock);
-
-static inline int
-dccp_find_option(u_int8_t option,
-                const struct sk_buff *skb,
-                const struct dccp_hdr *dh,
-                int *hotdrop)
-{
-       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
-       unsigned char *op;
-       unsigned int optoff = __dccp_hdr_len(dh);
-       unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh);
-       unsigned int i;
-
-       if (dh->dccph_doff * 4 < __dccp_hdr_len(dh)) {
-               *hotdrop = 1;
-               return 0;
-       }
-
-       if (!optlen)
-               return 0;
-
-       spin_lock_bh(&dccp_buflock);
-       op = skb_header_pointer(skb,
-                               skb->nh.iph->ihl*4 + optoff,
-                               optlen, dccp_optbuf);
-       if (op == NULL) {
-               /* If we don't have the whole header, drop packet. */
-               spin_unlock_bh(&dccp_buflock);
-               *hotdrop = 1;
-               return 0;
-       }
-
-       for (i = 0; i < optlen; ) {
-               if (op[i] == option) {
-                       spin_unlock_bh(&dccp_buflock);
-                       return 1;
-               }
-
-               if (op[i] < 2) 
-                       i++;
-               else 
-                       i += op[i+1]?:1;
-       }
-
-       spin_unlock_bh(&dccp_buflock);
-       return 0;
-}
-
-
-static inline int
-match_types(const struct dccp_hdr *dh, u_int16_t typemask)
-{
-       return (typemask & (1 << dh->dccph_type));
-}
-
-static inline int
-match_option(u_int8_t option, const struct sk_buff *skb,
-            const struct dccp_hdr *dh, int *hotdrop)
-{
-       return dccp_find_option(option, skb, dh, hotdrop);
-}
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_dccp_info *info = 
-                               (const struct ipt_dccp_info *)matchinfo;
-       struct dccp_hdr _dh, *dh;
-
-       if (offset)
-               return 0;
-       
-       dh = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_dh), &_dh);
-       if (dh == NULL) {
-               *hotdrop = 1;
-               return 0;
-       }
-
-       return  DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0]) 
-                       && (ntohs(dh->dccph_sport) <= info->spts[1])), 
-                       IPT_DCCP_SRC_PORTS, info->flags, info->invflags)
-               && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0]) 
-                       && (ntohs(dh->dccph_dport) <= info->dpts[1])), 
-                       IPT_DCCP_DEST_PORTS, info->flags, info->invflags)
-               && DCCHECK(match_types(dh, info->typemask),
-                          IPT_DCCP_TYPE, info->flags, info->invflags)
-               && DCCHECK(match_option(info->option, skb, dh, hotdrop),
-                          IPT_DCCP_OPTION, info->flags, info->invflags);
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_ip *ip,
-          void *matchinfo,
-          unsigned int matchsize,
-          unsigned int hook_mask)
-{
-       const struct ipt_dccp_info *info;
-
-       info = (const struct ipt_dccp_info *)matchinfo;
-
-       return ip->proto == IPPROTO_DCCP
-               && !(ip->invflags & IPT_INV_PROTO)
-               && matchsize == IPT_ALIGN(sizeof(struct ipt_dccp_info))
-               && !(info->flags & ~IPT_DCCP_VALID_FLAGS)
-               && !(info->invflags & ~IPT_DCCP_VALID_FLAGS)
-               && !(info->invflags & ~info->flags);
-}
-
-static struct ipt_match dccp_match = 
-{ 
-       .name           = "dccp",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       int ret;
-
-       /* doff is 8 bits, so the maximum option size is (4*256).  Don't put
-        * this in BSS since DaveM is worried about locked TLB's for kernel
-        * BSS. */
-       dccp_optbuf = kmalloc(256 * 4, GFP_KERNEL);
-       if (!dccp_optbuf)
-               return -ENOMEM;
-       ret = ipt_register_match(&dccp_match);
-       if (ret)
-               kfree(dccp_optbuf);
-
-       return ret;
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&dccp_match);
-       kfree(dccp_optbuf);
-}
-
-module_init(init);
-module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("Match for DCCP protocol packets");
-
index 5df52a64a5d4016b43c6952d2a30177a69235e63..92063b4f8602c8de8c34b2dcf33426fcf7ce7353 100644 (file)
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
 
 static int match(const struct sk_buff *skb, const struct net_device *in,
                 const struct net_device *out, const void *matchinfo,
-                int offset, int *hotdrop)
+                int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_dscp_info *info = matchinfo;
        const struct iphdr *iph = skb->nh.iph;
@@ -31,7 +31,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
        return ((iph->tos&IPT_DSCP_MASK) == sh_dscp) ^ info->invert;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip,
                      void *matchinfo, unsigned int matchsize,
                      unsigned int hook_mask)
 {
index b6f7181e89ccf0e48806a15aacc40fb711f06e81..e68b0c7981f0c7d4594d3e42854914873a43562a 100644 (file)
@@ -67,7 +67,7 @@ static inline int match_tcp(const struct sk_buff *skb,
 
 static int match(const struct sk_buff *skb, const struct net_device *in,
                 const struct net_device *out, const void *matchinfo,
-                int offset, int *hotdrop)
+                int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_ecn_info *info = matchinfo;
 
@@ -85,11 +85,12 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
        return 1;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void *ip_void,
                      void *matchinfo, unsigned int matchsize,
                      unsigned int hook_mask)
 {
        const struct ipt_ecn_info *info = matchinfo;
+       const struct ipt_ip *ip = ip_void;
 
        if (matchsize != IPT_ALIGN(sizeof(struct ipt_ecn_info)))
                return 0;
index e1d0dd31e11740a9ed4578bfdfc11471dadbb860..9de191a8162da78e3177a5e0b356d128a39363b2 100644 (file)
@@ -42,6 +42,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        struct ip_esp_hdr _esp, *eh;
@@ -51,7 +52,7 @@ match(const struct sk_buff *skb,
        if (offset)
                return 0;
 
-       eh = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+       eh = skb_header_pointer(skb, protoff,
                                sizeof(_esp), &_esp);
        if (eh == NULL) {
                /* We've been asked to examine this packet, and we
@@ -70,12 +71,13 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ipt_ip *ip,
+          const void *ip_void,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
 {
        const struct ipt_esp *espinfo = matchinfo;
+       const struct ipt_ip *ip = ip_void;
 
        /* Must specify proto == ESP, and no unknown invflags */
        if (ip->proto != IPPROTO_ESP || (ip->invflags & IPT_INV_PROTO)) {
index 2dd1cccbdab9e938f20920c9355ab1a834bdc0ff..4fe48c1bd5f3f2b08ac6e600eb43f13d6e5dc4fb 100644 (file)
@@ -429,6 +429,7 @@ hashlimit_match(const struct sk_buff *skb,
                const struct net_device *out,
                const void *matchinfo,
                int offset,
+               unsigned int protoff,
                int *hotdrop)
 {
        struct ipt_hashlimit_info *r = 
@@ -504,7 +505,7 @@ hashlimit_match(const struct sk_buff *skb,
 
 static int
 hashlimit_checkentry(const char *tablename,
-                    const struct ipt_ip *ip,
+                    const void *inf,
                     void *matchinfo,
                     unsigned int matchsize,
                     unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_helper.c b/net/ipv4/netfilter/ipt_helper.c
deleted file mode 100644 (file)
index aef649e..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-/* iptables module to match on related connections */
-/*
- * (C) 2001 Martin Josefsson <gandalf@wlug.westbo.se>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *   19 Mar 2002 Harald Welte <laforge@gnumonks.org>:
- *              - Port to newnat infrastructure
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter.h>
-#include <linux/interrupt.h>
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-#include <linux/netfilter_ipv4/ip_conntrack.h>
-#include <linux/netfilter_ipv4/ip_conntrack_core.h>
-#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
-#else
-#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_conntrack_helper.h>
-#endif
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_helper.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>");
-MODULE_DESCRIPTION("iptables helper match module");
-
-#if 0
-#define DEBUGP printk
-#else
-#define DEBUGP(format, args...)
-#endif
-
-#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_helper_info *info = matchinfo;
-       struct ip_conntrack *ct;
-       enum ip_conntrack_info ctinfo;
-       int ret = info->invert;
-       
-       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
-       if (!ct) {
-               DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
-               return ret;
-       }
-
-       if (!ct->master) {
-               DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
-               return ret;
-       }
-
-       read_lock_bh(&ip_conntrack_lock);
-       if (!ct->master->helper) {
-               DEBUGP("ipt_helper: master ct %p has no helper\n", 
-                       exp->expectant);
-               goto out_unlock;
-       }
-
-       DEBUGP("master's name = %s , info->name = %s\n", 
-               ct->master->helper->name, info->name);
-
-       if (info->name[0] == '\0')
-               ret ^= 1;
-       else
-               ret ^= !strncmp(ct->master->helper->name, info->name, 
-                               strlen(ct->master->helper->name));
-out_unlock:
-       read_unlock_bh(&ip_conntrack_lock);
-       return ret;
-}
-
-#else /* CONFIG_IP_NF_CONNTRACK */
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_helper_info *info = matchinfo;
-       struct nf_conn *ct;
-       enum ip_conntrack_info ctinfo;
-       int ret = info->invert;
-       
-       ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
-       if (!ct) {
-               DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
-               return ret;
-       }
-
-       if (!ct->master) {
-               DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
-               return ret;
-       }
-
-       read_lock_bh(&nf_conntrack_lock);
-       if (!ct->master->helper) {
-               DEBUGP("ipt_helper: master ct %p has no helper\n", 
-                       exp->expectant);
-               goto out_unlock;
-       }
-
-       DEBUGP("master's name = %s , info->name = %s\n", 
-               ct->master->helper->name, info->name);
-
-       if (info->name[0] == '\0')
-               ret ^= 1;
-       else
-               ret ^= !strncmp(ct->master->helper->name, info->name, 
-                               strlen(ct->master->helper->name));
-out_unlock:
-       read_unlock_bh(&nf_conntrack_lock);
-       return ret;
-}
-#endif
-
-static int check(const char *tablename,
-                const struct ipt_ip *ip,
-                void *matchinfo,
-                unsigned int matchsize,
-                unsigned int hook_mask)
-{
-       struct ipt_helper_info *info = matchinfo;
-
-       info->name[29] = '\0';
-
-       /* verify size */
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_helper_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match helper_match = {
-       .name           = "helper",
-       .match          = &match,
-       .checkentry     = &check,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       need_ip_conntrack();
-       return ipt_register_match(&helper_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&helper_match);
-}
-
-module_init(init);
-module_exit(fini);
-
index b835b7b2e56044b2991875418205a95d44a39a73..13fb16fb78923d2bcdbde8bca839e396a8682256 100644 (file)
@@ -28,7 +28,7 @@ match(const struct sk_buff *skb,
       const struct net_device *in,
       const struct net_device *out,
       const void *matchinfo,
-      int offset, int *hotdrop)
+      int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_iprange_info *info = matchinfo;
        const struct iphdr *iph = skb->nh.iph;
@@ -63,7 +63,7 @@ match(const struct sk_buff *skb,
 }
 
 static int check(const char *tablename,
-                const struct ipt_ip *ip,
+                const void *inf,
                 void *matchinfo,
                 unsigned int matchsize,
                 unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_length.c b/net/ipv4/netfilter/ipt_length.c
deleted file mode 100644 (file)
index 4eabcfb..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Kernel module to match packet length. */
-/* (C) 1999-2001 James Morris <jmorris@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ipt_length.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("IP tables packet length matching module");
-MODULE_LICENSE("GPL");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_length_info *info = matchinfo;
-       u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);
-       
-       return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ipt_ip *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_length_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match length_match = {
-       .name           = "length",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&length_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&length_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_limit.c b/net/ipv4/netfilter/ipt_limit.c
deleted file mode 100644 (file)
index 0c24dcc..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/* Kernel module to control the rate
- *
- * 2 September 1999: Changed from the target RATE to the match
- *                   `limit', removed logging.  Did I mention that
- *                   Alexey is a fucking genius?
- *                   Rusty Russell (rusty@rustcorp.com.au).  */
-
-/* (C) 1999 Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
- * (C) 1999 Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_limit.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
-MODULE_DESCRIPTION("iptables rate limit match");
-
-/* The algorithm used is the Simple Token Bucket Filter (TBF)
- * see net/sched/sch_tbf.c in the linux source tree
- */
-
-static DEFINE_SPINLOCK(limit_lock);
-
-/* Rusty: This is my (non-mathematically-inclined) understanding of
-   this algorithm.  The `average rate' in jiffies becomes your initial
-   amount of credit `credit' and the most credit you can ever have
-   `credit_cap'.  The `peak rate' becomes the cost of passing the
-   test, `cost'.
-
-   `prev' tracks the last packet hit: you gain one credit per jiffy.
-   If you get credit balance more than this, the extra credit is
-   discarded.  Every time the match passes, you lose `cost' credits;
-   if you don't have that many, the test fails.
-
-   See Alexey's formal explanation in net/sched/sch_tbf.c.
-
-   To get the maximum range, we multiply by this factor (ie. you get N
-   credits per jiffy).  We want to allow a rate as low as 1 per day
-   (slowest userspace tool allows), which means
-   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32. ie. */
-#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
-
-/* Repeated shift and or gives us all 1s, final shift and add 1 gives
- * us the power of 2 below the theoretical max, so GCC simply does a
- * shift. */
-#define _POW2_BELOW2(x) ((x)|((x)>>1))
-#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
-#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
-#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
-#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
-#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
-
-#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
-
-static int
-ipt_limit_match(const struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               const void *matchinfo,
-               int offset,
-               int *hotdrop)
-{
-       struct ipt_rateinfo *r = ((struct ipt_rateinfo *)matchinfo)->master;
-       unsigned long now = jiffies;
-
-       spin_lock_bh(&limit_lock);
-       r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY;
-       if (r->credit > r->credit_cap)
-               r->credit = r->credit_cap;
-
-       if (r->credit >= r->cost) {
-               /* We're not limited. */
-               r->credit -= r->cost;
-               spin_unlock_bh(&limit_lock);
-               return 1;
-       }
-
-       spin_unlock_bh(&limit_lock);
-       return 0;
-}
-
-/* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
-{
-       /* If multiplying would overflow... */
-       if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
-               /* Divide first. */
-               return (user / IPT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
-
-       return (user * HZ * CREDITS_PER_JIFFY) / IPT_LIMIT_SCALE;
-}
-
-static int
-ipt_limit_checkentry(const char *tablename,
-                    const struct ipt_ip *ip,
-                    void *matchinfo,
-                    unsigned int matchsize,
-                    unsigned int hook_mask)
-{
-       struct ipt_rateinfo *r = matchinfo;
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_rateinfo)))
-               return 0;
-
-       /* Check for overflow. */
-       if (r->burst == 0
-           || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
-               printk("Overflow in ipt_limit, try lower: %u/%u\n",
-                      r->avg, r->burst);
-               return 0;
-       }
-
-       /* User avg in seconds * IPT_LIMIT_SCALE: convert to jiffies *
-          128. */
-       r->prev = jiffies;
-       r->credit = user2credits(r->avg * r->burst);     /* Credits full. */
-       r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
-       r->cost = user2credits(r->avg);
-
-       /* For SMP, we only want to use one set of counters. */
-       r->master = r;
-
-       return 1;
-}
-
-static struct ipt_match ipt_limit_reg = {
-       .name           = "limit",
-       .match          = ipt_limit_match,
-       .checkentry     = ipt_limit_checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       if (ipt_register_match(&ipt_limit_reg))
-               return -EINVAL;
-       return 0;
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&ipt_limit_reg);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_mac.c b/net/ipv4/netfilter/ipt_mac.c
deleted file mode 100644 (file)
index 1b9bb45..0000000
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Kernel module to match MAC address parameters. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h>
-#include <linux/etherdevice.h>
-
-#include <linux/netfilter_ipv4/ipt_mac.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("iptables mac matching module");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-    const struct ipt_mac_info *info = matchinfo;
-
-    /* Is mac pointer valid? */
-    return (skb->mac.raw >= skb->head
-           && (skb->mac.raw + ETH_HLEN) <= skb->data
-           /* If so, compare... */
-           && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
-               ^ info->invert));
-}
-
-static int
-ipt_mac_checkentry(const char *tablename,
-                  const struct ipt_ip *ip,
-                  void *matchinfo,
-                  unsigned int matchsize,
-                  unsigned int hook_mask)
-{
-       /* FORWARD isn't always valid, but it's nice to be able to do --RR */
-       if (hook_mask
-           & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN)
-               | (1 << NF_IP_FORWARD))) {
-               printk("ipt_mac: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
-               return 0;
-       }
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_mac_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match mac_match = {
-       .name           = "mac",
-       .match          = &match,
-       .checkentry     = &ipt_mac_checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&mac_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&mac_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_mark.c b/net/ipv4/netfilter/ipt_mark.c
deleted file mode 100644 (file)
index 00bef6c..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Kernel module to match NFMARK values. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv4/ipt_mark.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables mark matching module");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_mark_info *info = matchinfo;
-
-       return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ipt_ip *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       struct ipt_mark_info *minfo = (struct ipt_mark_info *) matchinfo;
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_mark_info)))
-               return 0;
-
-       if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
-               printk(KERN_WARNING "mark: only supports 32bit mark\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_match mark_match = {
-       .name           = "mark",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&mark_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&mark_match);
-}
-
-module_init(init);
-module_exit(fini);
index 99e8188162e25b3aaf00ef3e01e9aadc6d4a2cee..2d52326553f1fa1b36cea95ada0685bdd8115e51 100644 (file)
@@ -97,6 +97,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        u16 _ports[2], *pptr;
@@ -105,7 +106,7 @@ match(const struct sk_buff *skb,
        if (offset)
                return 0;
 
-       pptr = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+       pptr = skb_header_pointer(skb, protoff,
                                  sizeof(_ports), _ports);
        if (pptr == NULL) {
                /* We've been asked to examine this packet, and we
@@ -128,6 +129,7 @@ match_v1(const struct sk_buff *skb,
         const struct net_device *out,
         const void *matchinfo,
         int offset,
+        unsigned int protoff,
         int *hotdrop)
 {
        u16 _ports[2], *pptr;
@@ -136,7 +138,7 @@ match_v1(const struct sk_buff *skb,
        if (offset)
                return 0;
 
-       pptr = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
+       pptr = skb_header_pointer(skb, protoff,
                                  sizeof(_ports), _ports);
        if (pptr == NULL) {
                /* We've been asked to examine this packet, and we
@@ -154,7 +156,7 @@ match_v1(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ipt_ip *ip,
+          const void *ip,
           void *matchinfo,
           unsigned int matchsize,
           unsigned int hook_mask)
@@ -164,7 +166,7 @@ checkentry(const char *tablename,
 
 static int
 checkentry_v1(const char *tablename,
-             const struct ipt_ip *ip,
+             const void *ip,
              void *matchinfo,
              unsigned int matchsize,
              unsigned int hook_mask)
index 0cee2862ed85f46d3fbbbe96c908c16aeb92489d..4843d0c9734f28c26a3d0bf35f4acb86f4204442 100644 (file)
@@ -27,6 +27,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        const struct ipt_owner_info *info = matchinfo;
@@ -51,7 +52,7 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const struct ipt_ip *ip,
+           const void *ip,
            void *matchinfo,
            unsigned int matchsize,
            unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_physdev.c b/net/ipv4/netfilter/ipt_physdev.c
deleted file mode 100644 (file)
index 03f5548..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Kernel module to match the bridge port in and
- * out device for IP packets coming into contact with a bridge. */
-
-/* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ipt_physdev.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_bridge.h>
-#define MATCH   1
-#define NOMATCH 0
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("iptables bridge physical device match module");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       int i;
-       static const char nulldevname[IFNAMSIZ];
-       const struct ipt_physdev_info *info = matchinfo;
-       unsigned int ret;
-       const char *indev, *outdev;
-       struct nf_bridge_info *nf_bridge;
-
-       /* Not a bridged IP packet or no info available yet:
-        * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
-        * the destination device will be a bridge. */
-       if (!(nf_bridge = skb->nf_bridge)) {
-               /* Return MATCH if the invert flags of the used options are on */
-               if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
-                   !(info->invert & IPT_PHYSDEV_OP_BRIDGED))
-                       return NOMATCH;
-               if ((info->bitmask & IPT_PHYSDEV_OP_ISIN) &&
-                   !(info->invert & IPT_PHYSDEV_OP_ISIN))
-                       return NOMATCH;
-               if ((info->bitmask & IPT_PHYSDEV_OP_ISOUT) &&
-                   !(info->invert & IPT_PHYSDEV_OP_ISOUT))
-                       return NOMATCH;
-               if ((info->bitmask & IPT_PHYSDEV_OP_IN) &&
-                   !(info->invert & IPT_PHYSDEV_OP_IN))
-                       return NOMATCH;
-               if ((info->bitmask & IPT_PHYSDEV_OP_OUT) &&
-                   !(info->invert & IPT_PHYSDEV_OP_OUT))
-                       return NOMATCH;
-               return MATCH;
-       }
-
-       /* This only makes sense in the FORWARD and POSTROUTING chains */
-       if ((info->bitmask & IPT_PHYSDEV_OP_BRIDGED) &&
-           (!!(nf_bridge->mask & BRNF_BRIDGED) ^
-           !(info->invert & IPT_PHYSDEV_OP_BRIDGED)))
-               return NOMATCH;
-
-       if ((info->bitmask & IPT_PHYSDEV_OP_ISIN &&
-           (!nf_bridge->physindev ^ !!(info->invert & IPT_PHYSDEV_OP_ISIN))) ||
-           (info->bitmask & IPT_PHYSDEV_OP_ISOUT &&
-           (!nf_bridge->physoutdev ^ !!(info->invert & IPT_PHYSDEV_OP_ISOUT))))
-               return NOMATCH;
-
-       if (!(info->bitmask & IPT_PHYSDEV_OP_IN))
-               goto match_outdev;
-       indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
-       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
-               ret |= (((const unsigned int *)indev)[i]
-                       ^ ((const unsigned int *)info->physindev)[i])
-                       & ((const unsigned int *)info->in_mask)[i];
-       }
-
-       if ((ret == 0) ^ !(info->invert & IPT_PHYSDEV_OP_IN))
-               return NOMATCH;
-
-match_outdev:
-       if (!(info->bitmask & IPT_PHYSDEV_OP_OUT))
-               return MATCH;
-       outdev = nf_bridge->physoutdev ?
-                nf_bridge->physoutdev->name : nulldevname;
-       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
-               ret |= (((const unsigned int *)outdev)[i]
-                       ^ ((const unsigned int *)info->physoutdev)[i])
-                       & ((const unsigned int *)info->out_mask)[i];
-       }
-
-       return (ret != 0) ^ !(info->invert & IPT_PHYSDEV_OP_OUT);
-}
-
-static int
-checkentry(const char *tablename,
-                      const struct ipt_ip *ip,
-                      void *matchinfo,
-                      unsigned int matchsize,
-                      unsigned int hook_mask)
-{
-       const struct ipt_physdev_info *info = matchinfo;
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_physdev_info)))
-               return 0;
-       if (!(info->bitmask & IPT_PHYSDEV_OP_MASK) ||
-           info->bitmask & ~IPT_PHYSDEV_OP_MASK)
-               return 0;
-       return 1;
-}
-
-static struct ipt_match physdev_match = {
-       .name           = "physdev",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&physdev_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&physdev_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_pkttype.c b/net/ipv4/netfilter/ipt_pkttype.c
deleted file mode 100644 (file)
index 8ddb1dc..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* (C) 1999-2001 Michal Ludvig <michal@logix.cz>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h>
-#include <linux/if_packet.h>
-
-#include <linux/netfilter_ipv4/ipt_pkttype.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Michal Ludvig <michal@logix.cz>");
-MODULE_DESCRIPTION("IP tables match to match on linklayer packet type");
-
-static int match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-    const struct ipt_pkttype_info *info = matchinfo;
-
-    return (skb->pkt_type == info->pkttype) ^ info->invert;
-}
-
-static int checkentry(const char *tablename,
-                  const struct ipt_ip *ip,
-                  void *matchinfo,
-                  unsigned int matchsize,
-                  unsigned int hook_mask)
-{
-/*
-       if (hook_mask
-           & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN)
-               | (1 << NF_IP_FORWARD))) {
-               printk("ipt_pkttype: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
-               return 0;
-       }
-*/
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_pkttype_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match pkttype_match = {
-       .name           = "pkttype",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&pkttype_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&pkttype_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_realm.c b/net/ipv4/netfilter/ipt_realm.c
deleted file mode 100644 (file)
index 54a6897..0000000
+++ /dev/null
@@ -1,76 +0,0 @@
-/* IP tables module for matching the routing realm
- *
- * $Id: ipt_realm.c,v 1.3 2004/03/05 13:25:40 laforge Exp $
- *
- * (C) 2003 by Sampsa Ranta <sampsa@netsonic.fi>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/route.h>
-
-#include <linux/netfilter_ipv4/ipt_realm.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("iptables realm match");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_realm_info *info = matchinfo;
-       struct dst_entry *dst = skb->dst;
-    
-       return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
-}
-
-static int check(const char *tablename,
-                 const struct ipt_ip *ip,
-                 void *matchinfo,
-                 unsigned int matchsize,
-                 unsigned int hook_mask)
-{
-       if (hook_mask
-           & ~((1 << NF_IP_POST_ROUTING) | (1 << NF_IP_FORWARD) |
-               (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_LOCAL_IN))) {
-               printk("ipt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
-                      "LOCAL_IN or FORWARD.\n");
-               return 0;
-       }
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_realm_info))) {
-               printk("ipt_realm: invalid matchsize.\n");
-               return 0;
-       }
-       return 1;
-}
-
-static struct ipt_match realm_match = {
-       .name           = "realm",
-       .match          = match, 
-       .checkentry     = check,
-       .me             = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&realm_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&realm_match);
-}
-
-module_init(init);
-module_exit(fini);
index 5ddccb18c65e41f0326af34494dd2f76be6a02cd..44611d6d14f5c4d95e906773b2006d7ff6e40294 100644 (file)
@@ -104,6 +104,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop);
 
 /* Function to hash a given address into the hash table of table_size size */
@@ -317,7 +318,7 @@ static int ip_recent_ctrl(struct file *file, const char __user *input, unsigned
        skb->nh.iph->daddr = 0;
        /* Clear ttl since we have no way of knowing it */
        skb->nh.iph->ttl = 0;
-       match(skb,NULL,NULL,info,0,NULL);
+       match(skb,NULL,NULL,info,0,0,NULL);
 
        kfree(skb->nh.iph);
 out_free_skb:
@@ -357,6 +358,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        int pkt_count, hits_found, ans;
@@ -654,7 +656,7 @@ match(const struct sk_buff *skb,
  */
 static int
 checkentry(const char *tablename,
-           const struct ipt_ip *ip,
+           const void *ip,
            void *matchinfo,
            unsigned int matchsize,
            unsigned int hook_mask)
diff --git a/net/ipv4/netfilter/ipt_sctp.c b/net/ipv4/netfilter/ipt_sctp.c
deleted file mode 100644 (file)
index fe2b327..0000000
+++ /dev/null
@@ -1,203 +0,0 @@
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <net/ip.h>
-#include <linux/sctp.h>
-
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_sctp.h>
-
-#ifdef DEBUG_SCTP
-#define duprintf(format, args...) printk(format , ## args)
-#else
-#define duprintf(format, args...)
-#endif
-
-#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
-                                             || (!!((invflag) & (option)) ^ (cond)))
-
-static int
-match_flags(const struct ipt_sctp_flag_info *flag_info,
-           const int flag_count,
-           u_int8_t chunktype,
-           u_int8_t chunkflags)
-{
-       int i;
-
-       for (i = 0; i < flag_count; i++) {
-               if (flag_info[i].chunktype == chunktype) {
-                       return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag;
-               }
-       }
-
-       return 1;
-}
-
-static int
-match_packet(const struct sk_buff *skb,
-            const u_int32_t *chunkmap,
-            int chunk_match_type,
-            const struct ipt_sctp_flag_info *flag_info,
-            const int flag_count,
-            int *hotdrop)
-{
-       int offset;
-       u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
-       sctp_chunkhdr_t _sch, *sch;
-
-#ifdef DEBUG_SCTP
-       int i = 0;
-#endif
-
-       if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) {
-               SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
-       }
-
-       offset = skb->nh.iph->ihl * 4 + sizeof (sctp_sctphdr_t);
-       do {
-               sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
-               if (sch == NULL) {
-                       duprintf("Dropping invalid SCTP packet.\n");
-                       *hotdrop = 1;
-                       return 0;
-               }
-
-               duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", 
-                               ++i, offset, sch->type, htons(sch->length), sch->flags);
-
-               offset += (htons(sch->length) + 3) & ~3;
-
-               duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
-
-               if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch->type)) {
-                       switch (chunk_match_type) {
-                       case SCTP_CHUNK_MATCH_ANY:
-                               if (match_flags(flag_info, flag_count, 
-                                       sch->type, sch->flags)) {
-                                       return 1;
-                               }
-                               break;
-
-                       case SCTP_CHUNK_MATCH_ALL:
-                               if (match_flags(flag_info, flag_count, 
-                                       sch->type, sch->flags)) {
-                                       SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
-                               }
-                               break;
-
-                       case SCTP_CHUNK_MATCH_ONLY:
-                               if (!match_flags(flag_info, flag_count, 
-                                       sch->type, sch->flags)) {
-                                       return 0;
-                               }
-                               break;
-                       }
-               } else {
-                       switch (chunk_match_type) {
-                       case SCTP_CHUNK_MATCH_ONLY:
-                               return 0;
-                       }
-               }
-       } while (offset < skb->len);
-
-       switch (chunk_match_type) {
-       case SCTP_CHUNK_MATCH_ALL:
-               return SCTP_CHUNKMAP_IS_CLEAR(chunkmap);
-       case SCTP_CHUNK_MATCH_ANY:
-               return 0;
-       case SCTP_CHUNK_MATCH_ONLY:
-               return 1;
-       }
-
-       /* This will never be reached, but required to stop compiler whine */
-       return 0;
-}
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_sctp_info *info;
-       sctp_sctphdr_t _sh, *sh;
-
-       info = (const struct ipt_sctp_info *)matchinfo;
-
-       if (offset) {
-               duprintf("Dropping non-first fragment.. FIXME\n");
-               return 0;
-       }
-       
-       sh = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_sh), &_sh);
-       if (sh == NULL) {
-               duprintf("Dropping evil TCP offset=0 tinygram.\n");
-               *hotdrop = 1;
-               return 0;
-       }
-       duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
-
-       return  SCCHECK(((ntohs(sh->source) >= info->spts[0]) 
-                       && (ntohs(sh->source) <= info->spts[1])), 
-                       IPT_SCTP_SRC_PORTS, info->flags, info->invflags)
-               && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 
-                       && (ntohs(sh->dest) <= info->dpts[1])), 
-                       IPT_SCTP_DEST_PORTS, info->flags, info->invflags)
-               && SCCHECK(match_packet(skb, info->chunkmap, info->chunk_match_type,
-                                       info->flag_info, info->flag_count, 
-                                       hotdrop),
-                          IPT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ipt_ip *ip,
-          void *matchinfo,
-          unsigned int matchsize,
-          unsigned int hook_mask)
-{
-       const struct ipt_sctp_info *info;
-
-       info = (const struct ipt_sctp_info *)matchinfo;
-
-       return ip->proto == IPPROTO_SCTP
-               && !(ip->invflags & IPT_INV_PROTO)
-               && matchsize == IPT_ALIGN(sizeof(struct ipt_sctp_info))
-               && !(info->flags & ~IPT_SCTP_VALID_FLAGS)
-               && !(info->invflags & ~IPT_SCTP_VALID_FLAGS)
-               && !(info->invflags & ~info->flags)
-               && ((!(info->flags & IPT_SCTP_CHUNK_TYPES)) || 
-                       (info->chunk_match_type &
-                               (SCTP_CHUNK_MATCH_ALL 
-                               | SCTP_CHUNK_MATCH_ANY
-                               | SCTP_CHUNK_MATCH_ONLY)));
-}
-
-static struct ipt_match sctp_match = 
-{ 
-       .list = { NULL, NULL},
-       .name = "sctp",
-       .match = &match,
-       .checkentry = &checkentry,
-       .destroy = NULL,
-       .me = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&sctp_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&sctp_match);
-}
-
-module_init(init);
-module_exit(fini);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Kiran Kumar Immidi");
-MODULE_DESCRIPTION("Match for SCTP protocol packets");
-
diff --git a/net/ipv4/netfilter/ipt_state.c b/net/ipv4/netfilter/ipt_state.c
deleted file mode 100644 (file)
index 4d7f16b..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Kernel module to match connection tracking information. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <net/netfilter/nf_conntrack_compat.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_state.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
-MODULE_DESCRIPTION("iptables connection tracking state match module");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_state_info *sinfo = matchinfo;
-       enum ip_conntrack_info ctinfo;
-       unsigned int statebit;
-
-       if (nf_ct_is_untracked(skb))
-               statebit = IPT_STATE_UNTRACKED;
-       else if (!nf_ct_get_ctinfo(skb, &ctinfo))
-               statebit = IPT_STATE_INVALID;
-       else
-               statebit = IPT_STATE_BIT(ctinfo);
-
-       return (sinfo->statemask & statebit);
-}
-
-static int check(const char *tablename,
-                const struct ipt_ip *ip,
-                void *matchinfo,
-                unsigned int matchsize,
-                unsigned int hook_mask)
-{
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_state_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ipt_match state_match = {
-       .name           = "state",
-       .match          = &match,
-       .checkentry     = &check,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       need_ip_conntrack();
-       return ipt_register_match(&state_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&state_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_string.c b/net/ipv4/netfilter/ipt_string.c
deleted file mode 100644 (file)
index b5def20..0000000
+++ /dev/null
@@ -1,91 +0,0 @@
-/* String matching match for iptables
- * 
- * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ipt_string.h>
-#include <linux/textsearch.h>
-
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@eurodev.net>");
-MODULE_DESCRIPTION("IP tables string match module");
-MODULE_LICENSE("GPL");
-
-static int match(const struct sk_buff *skb,
-                const struct net_device *in,
-                const struct net_device *out,
-                const void *matchinfo,
-                int offset,
-                int *hotdrop)
-{
-       struct ts_state state;
-       struct ipt_string_info *conf = (struct ipt_string_info *) matchinfo;
-
-       memset(&state, 0, sizeof(struct ts_state));
-
-       return (skb_find_text((struct sk_buff *)skb, conf->from_offset, 
-                            conf->to_offset, conf->config, &state) 
-                            != UINT_MAX) && !conf->invert;
-}
-
-#define STRING_TEXT_PRIV(m) ((struct ipt_string_info *) m)
-
-static int checkentry(const char *tablename,
-                     const struct ipt_ip *ip,
-                     void *matchinfo,
-                     unsigned int matchsize,
-                     unsigned int hook_mask)
-{
-       struct ipt_string_info *conf = matchinfo;
-       struct ts_config *ts_conf;
-
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_string_info)))
-               return 0;
-
-       /* Damn, can't handle this case properly with iptables... */
-       if (conf->from_offset > conf->to_offset)
-               return 0;
-
-       ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
-                                    GFP_KERNEL, TS_AUTOLOAD);
-       if (IS_ERR(ts_conf))
-               return 0;
-
-       conf->config = ts_conf;
-
-       return 1;
-}
-
-static void destroy(void *matchinfo, unsigned int matchsize)
-{
-       textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config);
-}
-
-static struct ipt_match string_match = {
-       .name           = "string",
-       .match          = match,
-       .checkentry     = checkentry,
-       .destroy        = destroy,
-       .me             = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&string_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&string_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv4/netfilter/ipt_tcpmss.c b/net/ipv4/netfilter/ipt_tcpmss.c
deleted file mode 100644 (file)
index 4dc9b16..0000000
+++ /dev/null
@@ -1,127 +0,0 @@
-/* Kernel module to match TCP MSS values. */
-
-/* Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <net/tcp.h>
-
-#include <linux/netfilter_ipv4/ipt_tcpmss.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
-
-#define TH_SYN 0x02
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
-MODULE_DESCRIPTION("iptables TCP MSS match module");
-
-/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */
-static inline int
-mssoption_match(u_int16_t min, u_int16_t max,
-               const struct sk_buff *skb,
-               int invert,
-               int *hotdrop)
-{
-       struct tcphdr _tcph, *th;
-       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
-       u8 _opt[15 * 4 - sizeof(_tcph)], *op;
-       unsigned int i, optlen;
-
-       /* If we don't have the whole header, drop packet. */
-       th = skb_header_pointer(skb, skb->nh.iph->ihl * 4,
-                               sizeof(_tcph), &_tcph);
-       if (th == NULL)
-               goto dropit;
-
-       /* Malformed. */
-       if (th->doff*4 < sizeof(*th))
-               goto dropit;
-
-       optlen = th->doff*4 - sizeof(*th);
-       if (!optlen)
-               goto out;
-
-       /* Truncated options. */
-       op = skb_header_pointer(skb, skb->nh.iph->ihl * 4 + sizeof(*th),
-                               optlen, _opt);
-       if (op == NULL)
-               goto dropit;
-
-       for (i = 0; i < optlen; ) {
-               if (op[i] == TCPOPT_MSS
-                   && (optlen - i) >= TCPOLEN_MSS
-                   && op[i+1] == TCPOLEN_MSS) {
-                       u_int16_t mssval;
-
-                       mssval = (op[i+2] << 8) | op[i+3];
-                       
-                       return (mssval >= min && mssval <= max) ^ invert;
-               }
-               if (op[i] < 2) i++;
-               else i += op[i+1]?:1;
-       }
-out:
-       return invert;
-
- dropit:
-       *hotdrop = 1;
-       return 0;
-}
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      int *hotdrop)
-{
-       const struct ipt_tcpmss_match_info *info = matchinfo;
-
-       return mssoption_match(info->mss_min, info->mss_max, skb,
-                              info->invert, hotdrop);
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ipt_ip *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       if (matchsize != IPT_ALIGN(sizeof(struct ipt_tcpmss_match_info)))
-               return 0;
-
-       /* Must specify -p tcp */
-       if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
-               printk("tcpmss: Only works on TCP packets\n");
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ipt_match tcpmss_match = {
-       .name           = "tcpmss",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ipt_register_match(&tcpmss_match);
-}
-
-static void __exit fini(void)
-{
-       ipt_unregister_match(&tcpmss_match);
-}
-
-module_init(init);
-module_exit(fini);
index 086a1bb61e3ea8e5b24dd74db3210391da2eab04..9ab765e126f24db8aa5f4cfab815b5a3ddaf1593 100644 (file)
@@ -23,6 +23,7 @@ match(const struct sk_buff *skb,
       const struct net_device *out,
       const void *matchinfo,
       int offset,
+      unsigned int protoff,
       int *hotdrop)
 {
        const struct ipt_tos_info *info = matchinfo;
@@ -32,7 +33,7 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const struct ipt_ip *ip,
+           const void *ip,
            void *matchinfo,
            unsigned int matchsize,
            unsigned int hook_mask)
index 219aa9de88cca63c4530075b801e9381f0af67d3..82da53f430ab27d0b35e74220525434876fa5001 100644 (file)
@@ -21,7 +21,7 @@ MODULE_LICENSE("GPL");
 
 static int match(const struct sk_buff *skb, const struct net_device *in,
                 const struct net_device *out, const void *matchinfo,
-                int offset, int *hotdrop)
+                int offset, unsigned int protoff, int *hotdrop)
 {
        const struct ipt_ttl_info *info = matchinfo;
 
@@ -47,7 +47,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
        return 0;
 }
 
-static int checkentry(const char *tablename, const struct ipt_ip *ip,
+static int checkentry(const char *tablename, const void  *ip,
                      void *matchinfo, unsigned int matchsize,
                      unsigned int hook_mask)
 {
index 260a4f0a2a9065b3ae855f678e217739abccae27..212a3079085b2e4ce2d4098c9fc6451be13086b4 100644 (file)
@@ -78,7 +78,8 @@ static struct ipt_table packet_filter = {
        .name           = "filter",
        .valid_hooks    = FILTER_VALID_HOOKS,
        .lock           = RW_LOCK_UNLOCKED,
-       .me             = THIS_MODULE
+       .me             = THIS_MODULE,
+       .af             = AF_INET,
 };
 
 /* The work comes in here from netfilter.c. */
index 160eb11b6e2fa7ff3fde67c0c298e7e2a9ebb721..3212a5cc4b6b0fdbdbd9110dc6cae169042048c6 100644 (file)
@@ -109,6 +109,7 @@ static struct ipt_table packet_mangler = {
        .valid_hooks    = MANGLE_VALID_HOOKS,
        .lock           = RW_LOCK_UNLOCKED,
        .me             = THIS_MODULE,
+       .af             = AF_INET,
 };
 
 /* The work comes in here from netfilter.c. */
index 47449ba83eb9410280a8463b477501d839ff32bb..fdb9e9c81e812b1e1bd785349e6cb13b609a67a7 100644 (file)
@@ -83,7 +83,8 @@ static struct ipt_table packet_raw = {
        .name = "raw", 
        .valid_hooks =  RAW_VALID_HOOKS, 
        .lock = RW_LOCK_UNLOCKED, 
-       .me = THIS_MODULE
+       .me = THIS_MODULE,
+       .af = AF_INET,
 };
 
 /* The work comes in here from netfilter.c. */
index 0c56c52a38317eb7be8f46eb900f2f252501c8ca..167619f638c6543aa50cc76fb1e94fefb2638ebc 100644 (file)
@@ -575,7 +575,7 @@ MODULE_LICENSE("GPL");
 
 static int __init init(void)
 {
-       need_nf_conntrack();
+       need_conntrack();
        return init_or_cleanup(1);
 }
 
@@ -587,9 +587,4 @@ static void __exit fini(void)
 module_init(init);
 module_exit(fini);
 
-void need_ip_conntrack(void)
-{
-}
-
-EXPORT_SYMBOL(need_ip_conntrack);
 EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
index d23e07fc81facafe1c7a34e10c9a0d5fc75a4168..dbabf81a9b7b4d19389c8d85e3b686026e33dd33 100644 (file)
@@ -42,6 +42,21 @@ __xfrm4_init_tempsel(struct xfrm_state *x, struct flowi *fl,
        x->props.saddr = tmpl->saddr;
        if (x->props.saddr.a4 == 0)
                x->props.saddr.a4 = saddr->a4;
+       if (tmpl->mode && x->props.saddr.a4 == 0) {
+               struct rtable *rt;
+               struct flowi fl_tunnel = {
+                       .nl_u = {
+                               .ip4_u = {
+                                       .daddr = x->id.daddr.a4,
+                               }
+                       }
+               };
+               if (!xfrm_dst_lookup((struct xfrm_dst **)&rt,
+                                    &fl_tunnel, AF_INET)) {
+                       x->props.saddr.a4 = rt->rt_src;
+                       dst_release(&rt->u.dst);
+               }
+       }
        x->props.mode = tmpl->mode;
        x->props.reqid = tmpl->reqid;
        x->props.family = AF_INET;
index 7129d423975561c0eb0b3195651929a15c5ea6f8..dfb4f145a139af35f9ef2ebb61723d5ae3edd5d1 100644 (file)
@@ -2644,7 +2644,7 @@ static int if6_seq_show(struct seq_file *seq, void *v)
 {
        struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
        seq_printf(seq,
-                  "%04x%04x%04x%04x%04x%04x%04x%04x %02x %02x %02x %02x %8s\n",
+                  NIP6_FMT " %02x %02x %02x %02x %8s\n",
                   NIP6(ifp->addr),
                   ifp->idev->dev->ifindex,
                   ifp->prefix_len,
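
This hunk and the IPv6 hunks that follow replace hand-rolled "%04x:..." format runs with the shared NIP6_FMT/NIP6 helpers. Their definitions are not part of this diff; assuming the <linux/kernel.h> helpers of this era, they expand roughly as below, so every converted call site passes the eight host-order 16-bit halves of an in6_addr.

    /* Assumed expansion of the helpers used above (not shown in this series). */
    #define NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
    #define NIP6(addr) \
            ntohs((addr).s6_addr16[0]), ntohs((addr).s6_addr16[1]), \
            ntohs((addr).s6_addr16[2]), ntohs((addr).s6_addr16[3]), \
            ntohs((addr).s6_addr16[4]), ntohs((addr).s6_addr16[5]), \
            ntohs((addr).s6_addr16[6]), ntohs((addr).s6_addr16[7])

    /* Typical converted call site, as in the ah6/esp6 hunks below: */
    printk(KERN_DEBUG "pmtu discovery on SA AH/%08x/" NIP6_FMT "\n",
           ntohl(ah->spi), NIP6(iph->daddr));
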
index 13cc7f89558373994ef23d8cf3dd2d09884c003a..c7932cb420a5c91987fa4034abacee84c1ffb76b 100644 (file)
@@ -332,8 +332,7 @@ static void ah6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!x)
                return;
 
-       NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/"
-                "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+       NETDEBUG(KERN_DEBUG "pmtu discovery on SA AH/%08x/" NIP6_FMT "\n",
                 ntohl(ah->spi), NIP6(iph->daddr));
 
        xfrm_state_put(x);
index 65e73ac0d6d0d5603c1466d9780058a80a377b95..72bd08af2dfb0ac1c1f1d3fe0fe94cf7cdf9363d 100644 (file)
@@ -532,9 +532,7 @@ static int ac6_seq_show(struct seq_file *seq, void *v)
        struct ac6_iter_state *state = ac6_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s "
-                  "%04x%04x%04x%04x%04x%04x%04x%04x "
-                  "%5d\n",
+                  "%-4d %-15s " NIP6_FMT " %5d\n",
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->aca_addr),
                   im->aca_users);
index 6de8ee1a5ad9ec85227d9527b9b1374ca73e8952..7b5b94f13902cbf4ec2ad8ce229ff838ba7fbe0a 100644 (file)
@@ -266,8 +266,7 @@ static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        x = xfrm_state_lookup((xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET6);
        if (!x)
                return;
-       printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/"
-                       "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", 
+       printk(KERN_DEBUG "pmtu discovery on SA ESP/%08x/" NIP6_FMT "\n", 
                        ntohl(esph->spi), NIP6(iph->daddr));
        xfrm_state_put(x);
 }
index 53c81fcd20ba23b6573c1525a412f700ba10cccd..fcf883183cefef002c56cdf1e4565b5382ec3ba5 100644 (file)
@@ -607,7 +607,7 @@ static int icmpv6_rcv(struct sk_buff **pskb)
                skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
                                             IPPROTO_ICMPV6, 0);
                if (__skb_checksum_complete(skb)) {
-                       LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
+                       LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [" NIP6_FMT " > " NIP6_FMT "]\n",
                                       NIP6(*saddr), NIP6(*daddr));
                        goto discard_it;
                }
index 964ad9d1276d56f01a47d83eeec3ffb54447ff48..4183c8dac7f6e16c448bd7e74f1f67ab649507fc 100644 (file)
@@ -629,9 +629,7 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 {
        while(fl) {
                seq_printf(seq,
-                          "%05X %-1d %-6d %-6d %-6ld %-8ld "
-                          "%02x%02x%02x%02x%02x%02x%02x%02x "
-                          "%-4d\n",
+                          "%05X %-1d %-6d %-6d %-6ld %-8ld " NIP6_FMT " %-4d\n",
                           (unsigned)ntohl(fl->label),
                           fl->share,
                           (unsigned)fl->owner,
@@ -647,8 +645,8 @@ static void ip6fl_fl_seq_show(struct seq_file *seq, struct ip6_flowlabel *fl)
 static int ip6fl_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "Label S Owner  Users  Linger Expires  "
-                             "Dst                              Opt\n");
+               seq_printf(seq, "%-5s %-1s %-6s %-6s %-6s %-8s %-39s %s\n",
+                          "Label", "S", "Owner", "Users", "Linger", "Expires", "Dst", "Opt");
        else
                ip6fl_fl_seq_show(seq, v);
        return 0;
index 626dd39685f2c43675871206fb1d7d360b273359..d511a884dad07504fb73aeb5bf134086aff3c152 100644 (file)
@@ -212,8 +212,7 @@ static void ipcomp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        if (!x)
                return;
 
-       printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/"
-                       "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+       printk(KERN_DEBUG "pmtu discovery on SA IPCOMP/%08x/" NIP6_FMT "\n",
                        spi, NIP6(iph->daddr));
        xfrm_state_put(x);
 }
index cc3e9f5608671d4726816551c9320c80c377d494..0e03eabfb9da3fea0541736e1dc742fb9052de36 100644 (file)
@@ -2373,7 +2373,7 @@ static int igmp6_mc_seq_show(struct seq_file *seq, void *v)
        struct igmp6_mc_iter_state *state = igmp6_mc_seq_private(seq);
 
        seq_printf(seq,
-                  "%-4d %-15s %04x%04x%04x%04x%04x%04x%04x%04x %5d %08X %ld\n", 
+                  "%-4d %-15s " NIP6_FMT " %5d %08X %ld\n", 
                   state->dev->ifindex, state->dev->name,
                   NIP6(im->mca_addr),
                   im->mca_users, im->mca_flags,
@@ -2542,15 +2542,12 @@ static int igmp6_mcf_seq_show(struct seq_file *seq, void *v)
        if (v == SEQ_START_TOKEN) {
                seq_printf(seq, 
                           "%3s %6s "
-                          "%32s %32s %6s %6s\n", "Idx",
+                          "%39s %39s %6s %6s\n", "Idx",
                           "Device", "Multicast Address",
                           "Source Address", "INC", "EXC");
        } else {
                seq_printf(seq,
-                          "%3d %6.6s "
-                          "%04x%04x%04x%04x%04x%04x%04x%04x "
-                          "%04x%04x%04x%04x%04x%04x%04x%04x "
-                          "%6lu %6lu\n",
+                          "%3d %6.6s " NIP6_FMT " " NIP6_FMT " %6lu %6lu\n",
                           state->dev->ifindex, state->dev->name,
                           NIP6(state->im->mca_addr),
                           NIP6(psf->sf_addr),
index 305d9ee6d7dbc3f154f6cb11c2b6a997c25bf0cb..cb8856b1d9518ba251f43fbb5b4ab64ba2bed6da 100644 (file)
@@ -692,7 +692,7 @@ static void ndisc_solicit(struct neighbour *neigh, struct sk_buff *skb)
                if (!(neigh->nud_state & NUD_VALID)) {
                        ND_PRINTK1(KERN_DEBUG
                                   "%s(): trying to ucast probe in NUD_INVALID: "
-                                  "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                                  NIP6_FMT "\n",
                                   __FUNCTION__,
                                   NIP6(*target));
                }
index 105dd69ee9fb54cbe4419ffac31a71cf1205f7ed..2d6f8ecbc27bb421ba1ee2513ffd09f3b7c91aae 100644 (file)
@@ -41,6 +41,7 @@ config IP6_NF_QUEUE
 
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering/masq/NAT)"
+       depends on NETFILTER_XTABLES
        help
          ip6tables is a general, extensible packet identification framework.
          Currently only the packet filtering and packet mangling subsystem
@@ -50,25 +51,6 @@ config IP6_NF_IPTABLES
          To compile it as a module, choose M here.  If unsure, say N.
 
 # The simple matches.
-config IP6_NF_MATCH_LIMIT
-       tristate "limit match support"
-       depends on IP6_NF_IPTABLES
-       help
-         limit matching allows you to control the rate at which a rule can be
-         matched: mainly useful in combination with the LOG target ("LOG
-         target support", below) and to avoid some Denial of Service attacks.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
-config IP6_NF_MATCH_MAC
-       tristate "MAC address match support"
-       depends on IP6_NF_IPTABLES
-       help
-         mac matching allows you to match packets based on the source
-         Ethernet address of the packet.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_MATCH_RT
        tristate "Routing header match support"
        depends on IP6_NF_IPTABLES
@@ -124,16 +106,6 @@ config IP6_NF_MATCH_OWNER
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_MATCH_MARK
-       tristate "netfilter MARK match support"
-       depends on IP6_NF_IPTABLES
-       help
-         Netfilter mark matching allows you to match packets based on the
-         `nfmark' value in the packet.  This can be set by the MARK target
-         (see below).
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_MATCH_IPV6HEADER
        tristate "IPv6 Extension Headers Match"
        depends on IP6_NF_IPTABLES
@@ -151,15 +123,6 @@ config IP6_NF_MATCH_AHESP
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_MATCH_LENGTH
-       tristate "Packet Length match support"
-       depends on IP6_NF_IPTABLES
-       help
-         This option allows you to match the length of a packet against a
-         specific value or range of values.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_MATCH_EUI64
        tristate "EUI64 address check"
        depends on IP6_NF_IPTABLES
@@ -170,15 +133,6 @@ config IP6_NF_MATCH_EUI64
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_MATCH_PHYSDEV
-       tristate "Physdev match support"
-       depends on IP6_NF_IPTABLES && BRIDGE_NETFILTER
-       help
-         Physdev packet matching matches against the physical bridge ports
-         the IP packet arrived on or will leave by.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_MATCH_POLICY
        tristate "IPsec policy match support"
        depends on IP6_NF_IPTABLES && XFRM
@@ -219,17 +173,6 @@ config IP6_NF_TARGET_REJECT
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_TARGET_NFQUEUE
-       tristate "NFQUEUE Target Support"
-       depends on IP6_NF_IPTABLES
-       help
-         This Target replaced the old obsolete QUEUE target.
-
-         As opposed to QUEUE, it supports 65535 different queues,
-         not just one.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_MANGLE
        tristate "Packet mangling"
        depends on IP6_NF_IPTABLES
@@ -240,19 +183,6 @@ config IP6_NF_MANGLE
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config IP6_NF_TARGET_MARK
-       tristate "MARK target support"
-       depends on IP6_NF_MANGLE
-       help
-         This option adds a `MARK' target, which allows you to create rules
-         in the `mangle' table which alter the netfilter mark (nfmark) field
-         associated with the packet prior to routing. This can change
-         the routing method (see `Use netfilter MARK value as routing
-         key') and can also be used by other subsystems to change their
-         behavior.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config IP6_NF_TARGET_HL
        tristate  'HL (hoplimit) target support'
        depends on IP6_NF_MANGLE
index c0c809b426e87f244267103342e612fc1c7fcbe5..663b4749820d7d5e9ffcb955970457e7e7098470 100644 (file)
@@ -4,10 +4,7 @@
 
 # Link order matters here.
 obj-$(CONFIG_IP6_NF_IPTABLES) += ip6_tables.o
-obj-$(CONFIG_IP6_NF_MATCH_LIMIT) += ip6t_limit.o
-obj-$(CONFIG_IP6_NF_MATCH_MARK) += ip6t_mark.o
 obj-$(CONFIG_IP6_NF_MATCH_LENGTH) += ip6t_length.o
-obj-$(CONFIG_IP6_NF_MATCH_MAC) += ip6t_mac.o
 obj-$(CONFIG_IP6_NF_MATCH_RT) += ip6t_rt.o
 obj-$(CONFIG_IP6_NF_MATCH_OPTS) += ip6t_hbh.o ip6t_dst.o
 obj-$(CONFIG_IP6_NF_MATCH_IPV6HEADER) += ip6t_ipv6header.o
@@ -17,12 +14,9 @@ obj-$(CONFIG_IP6_NF_MATCH_POLICY) += ip6t_policy.o
 obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o
 obj-$(CONFIG_IP6_NF_MATCH_MULTIPORT) += ip6t_multiport.o
 obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
-obj-$(CONFIG_IP6_NF_MATCH_PHYSDEV) += ip6t_physdev.o
 obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
 obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
-obj-$(CONFIG_IP6_NF_TARGET_MARK) += ip6t_MARK.o
 obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
-obj-$(CONFIG_IP6_NF_TARGET_NFQUEUE) += ip6t_NFQUEUE.o
 obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
 obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
 obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
index 1390370186d975ff1c98bab91d65ee0ca85985b9..847068fd33676cfd3b6bc510be992baf4ab8d06d 100644 (file)
@@ -13,6 +13,9 @@
  *       a table
  * 06 Jun 2002 Andras Kis-Szabo <kisza@sch.bme.hu>
  *      - new extension header parser code
+ * 15 Oct 2005 Harald Welte <laforge@netfilter.org>
+ *     - Unification of {ip,ip6}_tables into x_tables
+ *     - Removed tcp and udp code, since it's not ipv6 specific
  */
 
 #include <linux/capability.h>
@@ -23,8 +26,6 @@
 #include <linux/vmalloc.h>
 #include <linux/netdevice.h>
 #include <linux/module.h>
-#include <linux/tcp.h>
-#include <linux/udp.h>
 #include <linux/icmpv6.h>
 #include <net/ipv6.h>
 #include <asm/uaccess.h>
@@ -33,6 +34,7 @@
 #include <linux/cpumask.h>
 
 #include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/x_tables.h>
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
@@ -67,13 +69,8 @@ do {                                                         \
 #else
 #define IP_NF_ASSERT(x)
 #endif
-#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
 
-static DECLARE_MUTEX(ip6t_mutex);
 
-/* Must have mutex */
-#define ASSERT_READ_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
-#define ASSERT_WRITE_LOCK(x) IP_NF_ASSERT(down_trylock(&ip6t_mutex) != 0)
 #include <linux/netfilter_ipv4/listhelp.h>
 
 #if 0
@@ -91,30 +88,6 @@ static DECLARE_MUTEX(ip6t_mutex);
 
    Hence the start of any table is given by get_table() below.  */
 
-/* The table itself */
-struct ip6t_table_info
-{
-       /* Size per table */
-       unsigned int size;
-       /* Number of entries: FIXME. --RR */
-       unsigned int number;
-       /* Initial number of entries. Needed for module usage count */
-       unsigned int initial_entries;
-
-       /* Entry points and underflows */
-       unsigned int hook_entry[NF_IP6_NUMHOOKS];
-       unsigned int underflow[NF_IP6_NUMHOOKS];
-
-       /* ip6t_entry tables: one per CPU */
-       void *entries[NR_CPUS];
-};
-
-static LIST_HEAD(ip6t_target);
-static LIST_HEAD(ip6t_match);
-static LIST_HEAD(ip6t_tables);
-#define SET_COUNTER(c,b,p) do { (c).bcnt = (b); (c).pcnt = (p); } while(0)
-#define ADD_COUNTER(c,b,p) do { (c).bcnt += (b); (c).pcnt += (p); } while(0)
-
 #if 0
 #define down(x) do { printk("DOWN:%u:" #x "\n", __LINE__); down(x); } while(0)
 #define down_interruptible(x) ({ int __r; printk("DOWNi:%u:" #x "\n", __LINE__); __r = down_interruptible(x); if (__r != 0) printk("ABORT-DOWNi:%u\n", __LINE__); __r; })
@@ -297,7 +270,7 @@ ip6t_do_table(struct sk_buff **pskb,
              unsigned int hook,
              const struct net_device *in,
              const struct net_device *out,
-             struct ip6t_table *table,
+             struct xt_table *table,
              void *userdata)
 {
        static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
@@ -309,6 +282,7 @@ ip6t_do_table(struct sk_buff **pskb,
        const char *indev, *outdev;
        void *table_base;
        struct ip6t_entry *e, *back;
+       struct xt_table_info *private;
 
        /* Initialization */
        indev = in ? in->name : nulldevname;
@@ -321,9 +295,10 @@ ip6t_do_table(struct sk_buff **pskb,
         * match it. */
 
        read_lock_bh(&table->lock);
+       private = table->private;
        IP_NF_ASSERT(table->valid_hooks & (1 << hook));
-       table_base = (void *)table->private->entries[smp_processor_id()];
-       e = get_entry(table_base, table->private->hook_entry[hook]);
+       table_base = (void *)private->entries[smp_processor_id()];
+       e = get_entry(table_base, private->hook_entry[hook]);
 
 #ifdef CONFIG_NETFILTER_DEBUG
        /* Check no one else is using our table */
@@ -339,7 +314,7 @@ ip6t_do_table(struct sk_buff **pskb,
 #endif
 
        /* For return from builtin chain */
-       back = get_entry(table_base, table->private->underflow[hook]);
+       back = get_entry(table_base, private->underflow[hook]);
 
        do {
                IP_NF_ASSERT(e);
@@ -439,145 +414,6 @@ ip6t_do_table(struct sk_buff **pskb,
 #endif
 }
 
-/*
- * These are weird, but module loading must not be done with mutex
- * held (since they will register), and we have to have a single
- * function to use try_then_request_module().
- */
-
-/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
-static inline struct ip6t_table *find_table_lock(const char *name)
-{
-       struct ip6t_table *t;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &ip6t_tables, list)
-               if (strcmp(t->name, name) == 0 && try_module_get(t->me))
-                       return t;
-       up(&ip6t_mutex);
-       return NULL;
-}
-
-/* Find match, grabs ref.  Returns ERR_PTR() on error. */
-static inline struct ip6t_match *find_match(const char *name, u8 revision)
-{
-       struct ip6t_match *m;
-       int err = 0;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(m, &ip6t_match, list) {
-               if (strcmp(m->name, name) == 0) {
-                       if (m->revision == revision) {
-                               if (try_module_get(m->me)) {
-                                       up(&ip6t_mutex);
-                                       return m;
-                               }
-                       } else
-                               err = -EPROTOTYPE; /* Found something. */
-               }
-       }
-       up(&ip6t_mutex);
-       return ERR_PTR(err);
-}
-
-/* Find target, grabs ref.  Returns ERR_PTR() on error. */
-static inline struct ip6t_target *find_target(const char *name, u8 revision)
-{
-       struct ip6t_target *t;
-       int err = 0;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return ERR_PTR(-EINTR);
-
-       list_for_each_entry(t, &ip6t_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision == revision) {
-                               if (try_module_get(t->me)) {
-                                       up(&ip6t_mutex);
-                                       return t;
-                               }
-                       } else
-                               err = -EPROTOTYPE; /* Found something. */
-               }
-       }
-       up(&ip6t_mutex);
-       return ERR_PTR(err);
-}
-
-struct ip6t_target *ip6t_find_target(const char *name, u8 revision)
-{
-       struct ip6t_target *target;
-
-       target = try_then_request_module(find_target(name, revision),
-                                        "ip6t_%s", name);
-       if (IS_ERR(target) || !target)
-               return NULL;
-       return target;
-}
-
-static int match_revfn(const char *name, u8 revision, int *bestp)
-{
-       struct ip6t_match *m;
-       int have_rev = 0;
-
-       list_for_each_entry(m, &ip6t_match, list) {
-               if (strcmp(m->name, name) == 0) {
-                       if (m->revision > *bestp)
-                               *bestp = m->revision;
-                       if (m->revision == revision)
-                               have_rev = 1;
-               }
-       }
-       return have_rev;
-}
-
-static int target_revfn(const char *name, u8 revision, int *bestp)
-{
-       struct ip6t_target *t;
-       int have_rev = 0;
-
-       list_for_each_entry(t, &ip6t_target, list) {
-               if (strcmp(t->name, name) == 0) {
-                       if (t->revision > *bestp)
-                               *bestp = t->revision;
-                       if (t->revision == revision)
-                               have_rev = 1;
-               }
-       }
-       return have_rev;
-}
-
-/* Returns true or false (if no such extension at all) */
-static inline int find_revision(const char *name, u8 revision,
-                               int (*revfn)(const char *, u8, int *),
-                               int *err)
-{
-       int have_rev, best = -1;
-
-       if (down_interruptible(&ip6t_mutex) != 0) {
-               *err = -EINTR;
-               return 1;
-       }
-       have_rev = revfn(name, revision, &best);
-       up(&ip6t_mutex);
-
-       /* Nothing at all?  Return 0 to try loading module. */
-       if (best == -1) {
-               *err = -ENOENT;
-               return 0;
-       }
-
-       *err = best;
-       if (!have_rev)
-               *err = -EPROTONOSUPPORT;
-       return 1;
-}
-
-
 /* All zeroes == unconditional rule. */
 static inline int
 unconditional(const struct ip6t_ip6 *ipv6)
@@ -594,7 +430,7 @@ unconditional(const struct ip6t_ip6 *ipv6)
 /* Figures out from what hook each rule can be called: returns 0 if
    there are loops.  Puts hook bitmask in comefrom. */
 static int
-mark_source_chains(struct ip6t_table_info *newinfo,
+mark_source_chains(struct xt_table_info *newinfo,
                   unsigned int valid_hooks, void *entry0)
 {
        unsigned int hook;
@@ -740,11 +576,11 @@ check_match(struct ip6t_entry_match *m,
 {
        struct ip6t_match *match;
 
-       match = try_then_request_module(find_match(m->u.user.name,
-                                                  m->u.user.revision),
+       match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
+                                       m->u.user.revision),
                                        "ip6t_%s", m->u.user.name);
        if (IS_ERR(match) || !match) {
-               duprintf("check_match: `%s' not found\n", m->u.user.name);
+               duprintf("check_match: `%s' not found\n", m->u.user.name);
                return match ? PTR_ERR(match) : -ENOENT;
        }
        m->u.kernel.match = match;
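
Together with the removal of the private ip6t_match/ip6t_target lists and their find_*/revision helpers earlier in this file, check_match() above (and check_entry() below) now resolve extensions through the family-aware x_tables registry. The lookup-with-autoload pattern, using only calls that appear in the converted code, is roughly:

    /* Resolve a match by name/revision, loading ip6t_<name>.ko on demand. */
    struct ip6t_match *match;

    match = try_then_request_module(xt_find_match(AF_INET6, m->u.user.name,
                                                  m->u.user.revision),
                                    "ip6t_%s", m->u.user.name);
    if (IS_ERR(match) || !match)
            return match ? PTR_ERR(match) : -ENOENT;
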
@@ -785,8 +621,9 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
                goto cleanup_matches;
 
        t = ip6t_get_target(e);
-       target = try_then_request_module(find_target(t->u.user.name,
-                                                    t->u.user.revision),
+       target = try_then_request_module(xt_find_target(AF_INET6,
+                                                       t->u.user.name,
+                                                       t->u.user.revision),
                                         "ip6t_%s", t->u.user.name);
        if (IS_ERR(target) || !target) {
                duprintf("check_entry: `%s' not found\n", t->u.user.name);
@@ -822,7 +659,7 @@ check_entry(struct ip6t_entry *e, const char *name, unsigned int size,
 
 static inline int
 check_entry_size_and_hooks(struct ip6t_entry *e,
-                          struct ip6t_table_info *newinfo,
+                          struct xt_table_info *newinfo,
                           unsigned char *base,
                           unsigned char *limit,
                           const unsigned int *hook_entries,
@@ -856,7 +693,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
            < 0 (not IP6T_RETURN). --RR */
 
        /* Clear counters and comefrom */
-       e->counters = ((struct ip6t_counters) { 0, 0 });
+       e->counters = ((struct xt_counters) { 0, 0 });
        e->comefrom = 0;
 
        (*i)++;
@@ -886,7 +723,7 @@ cleanup_entry(struct ip6t_entry *e, unsigned int *i)
 static int
 translate_table(const char *name,
                unsigned int valid_hooks,
-               struct ip6t_table_info *newinfo,
+               struct xt_table_info *newinfo,
                void *entry0,
                unsigned int size,
                unsigned int number,
@@ -963,48 +800,10 @@ translate_table(const char *name,
        return ret;
 }
 
-static struct ip6t_table_info *
-replace_table(struct ip6t_table *table,
-             unsigned int num_counters,
-             struct ip6t_table_info *newinfo,
-             int *error)
-{
-       struct ip6t_table_info *oldinfo;
-
-#ifdef CONFIG_NETFILTER_DEBUG
-       {
-               int cpu;
-
-               for_each_cpu(cpu) {
-                       struct ip6t_entry *table_base = newinfo->entries[cpu];
-                       if (table_base)
-                               table_base->comefrom = 0xdead57ac;
-               }
-       }
-#endif
-
-       /* Do the substitution. */
-       write_lock_bh(&table->lock);
-       /* Check inside lock: is the old number correct? */
-       if (num_counters != table->private->number) {
-               duprintf("num_counters != table->private->number (%u/%u)\n",
-                        num_counters, table->private->number);
-               write_unlock_bh(&table->lock);
-               *error = -EAGAIN;
-               return NULL;
-       }
-       oldinfo = table->private;
-       table->private = newinfo;
-       newinfo->initial_entries = oldinfo->initial_entries;
-       write_unlock_bh(&table->lock);
-
-       return oldinfo;
-}
-
 /* Gets counters. */
 static inline int
 add_entry_to_counter(const struct ip6t_entry *e,
-                    struct ip6t_counters total[],
+                    struct xt_counters total[],
                     unsigned int *i)
 {
        ADD_COUNTER(total[*i], e->counters.bcnt, e->counters.pcnt);
@@ -1025,8 +824,8 @@ set_entry_to_counter(const struct ip6t_entry *e,
 }
 
 static void
-get_counters(const struct ip6t_table_info *t,
-            struct ip6t_counters counters[])
+get_counters(const struct xt_table_info *t,
+            struct xt_counters counters[])
 {
        unsigned int cpu;
        unsigned int i;
@@ -1060,19 +859,20 @@ get_counters(const struct ip6t_table_info *t,
 
 static int
 copy_entries_to_user(unsigned int total_size,
-                    struct ip6t_table *table,
+                    struct xt_table *table,
                     void __user *userptr)
 {
        unsigned int off, num, countersize;
        struct ip6t_entry *e;
-       struct ip6t_counters *counters;
+       struct xt_counters *counters;
+       struct xt_table_info *private = table->private;
        int ret = 0;
        void *loc_cpu_entry;
 
        /* We need atomic snapshot of counters: rest doesn't change
           (other than comefrom, which userspace doesn't care
           about). */
-       countersize = sizeof(struct ip6t_counters) * table->private->number;
+       countersize = sizeof(struct xt_counters) * private->number;
        counters = vmalloc(countersize);
 
        if (counters == NULL)
@@ -1080,11 +880,11 @@ copy_entries_to_user(unsigned int total_size,
 
        /* First, sum counters... */
        write_lock_bh(&table->lock);
-       get_counters(table->private, counters);
+       get_counters(private, counters);
        write_unlock_bh(&table->lock);
 
        /* choose the copy that is on our node/cpu */
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
        if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
                ret = -EFAULT;
                goto free_counters;
@@ -1143,87 +943,42 @@ get_entries(const struct ip6t_get_entries *entries,
            struct ip6t_get_entries __user *uptr)
 {
        int ret;
-       struct ip6t_table *t;
+       struct xt_table *t;
 
-       t = find_table_lock(entries->name);
+       t = xt_find_table_lock(AF_INET6, entries->name);
        if (t && !IS_ERR(t)) {
-               duprintf("t->private->number = %u\n",
-                        t->private->number);
-               if (entries->size == t->private->size)
-                       ret = copy_entries_to_user(t->private->size,
+               struct xt_table_info *private = t->private;
+               duprintf("t->private->number = %u\n", private->number);
+               if (entries->size == private->size)
+                       ret = copy_entries_to_user(private->size,
                                                   t, uptr->entrytable);
                else {
                        duprintf("get_entries: I've got %u not %u!\n",
-                                t->private->size,
-                                entries->size);
+                                private->size, entries->size);
                        ret = -EINVAL;
                }
                module_put(t->me);
-               up(&ip6t_mutex);
+               xt_table_unlock(t);
        } else
                ret = t ? PTR_ERR(t) : -ENOENT;
 
        return ret;
 }
 
-static void free_table_info(struct ip6t_table_info *info)
-{
-       int cpu;
-       for_each_cpu(cpu) {
-               if (info->size <= PAGE_SIZE)
-                       kfree(info->entries[cpu]);
-               else
-                       vfree(info->entries[cpu]);
-       }
-       kfree(info);
-}
-
-static struct ip6t_table_info *alloc_table_info(unsigned int size)
-{
-       struct ip6t_table_info *newinfo;
-       int cpu;
-
-       newinfo = kzalloc(sizeof(struct ip6t_table_info), GFP_KERNEL);
-       if (!newinfo)
-               return NULL;
-
-       newinfo->size = size;
-
-       for_each_cpu(cpu) {
-               if (size <= PAGE_SIZE)
-                       newinfo->entries[cpu] = kmalloc_node(size,
-                                                       GFP_KERNEL,
-                                                       cpu_to_node(cpu));
-               else
-                       newinfo->entries[cpu] = vmalloc_node(size,
-                                                            cpu_to_node(cpu));
-               if (newinfo->entries[cpu] == NULL) {
-                       free_table_info(newinfo);
-                       return NULL;
-               }
-       }
-
-       return newinfo;
-}
-
 static int
 do_replace(void __user *user, unsigned int len)
 {
        int ret;
        struct ip6t_replace tmp;
-       struct ip6t_table *t;
-       struct ip6t_table_info *newinfo, *oldinfo;
-       struct ip6t_counters *counters;
+       struct xt_table *t;
+       struct xt_table_info *newinfo, *oldinfo;
+       struct xt_counters *counters;
        void *loc_cpu_entry, *loc_cpu_old_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;
 
-       /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
-       if ((SMP_ALIGN(tmp.size) >> PAGE_SHIFT) + 2 > num_physpages)
-               return -ENOMEM;
-
-       newinfo = alloc_table_info(tmp.size);
+       newinfo = xt_alloc_table_info(tmp.size);
        if (!newinfo)
                return -ENOMEM;
 
@@ -1235,7 +990,7 @@ do_replace(void __user *user, unsigned int len)
                goto free_newinfo;
        }
 
-       counters = vmalloc(tmp.num_counters * sizeof(struct ip6t_counters));
+       counters = vmalloc(tmp.num_counters * sizeof(struct xt_counters));
        if (!counters) {
                ret = -ENOMEM;
                goto free_newinfo;
@@ -1249,7 +1004,7 @@ do_replace(void __user *user, unsigned int len)
 
        duprintf("ip_tables: Translated table\n");
 
-       t = try_then_request_module(find_table_lock(tmp.name),
+       t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
                                    "ip6table_%s", tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1264,7 +1019,7 @@ do_replace(void __user *user, unsigned int len)
                goto put_module;
        }
 
-       oldinfo = replace_table(t, tmp.num_counters, newinfo, &ret);
+       oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
        if (!oldinfo)
                goto put_module;
 
@@ -1283,23 +1038,23 @@ do_replace(void __user *user, unsigned int len)
        /* Decrease module usage counts and free resource */
        loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
        IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry,NULL);
-       free_table_info(oldinfo);
+       xt_free_table_info(oldinfo);
        if (copy_to_user(tmp.counters, counters,
-                        sizeof(struct ip6t_counters) * tmp.num_counters) != 0)
+                        sizeof(struct xt_counters) * tmp.num_counters) != 0)
                ret = -EFAULT;
        vfree(counters);
-       up(&ip6t_mutex);
+       xt_table_unlock(t);
        return ret;
 
  put_module:
        module_put(t->me);
-       up(&ip6t_mutex);
+       xt_table_unlock(t);
  free_newinfo_counters_untrans:
        IP6T_ENTRY_ITERATE(loc_cpu_entry, newinfo->size, cleanup_entry,NULL);
  free_newinfo_counters:
        vfree(counters);
  free_newinfo:
-       free_table_info(newinfo);
+       xt_free_table_info(newinfo);
        return ret;
 }
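
With the per-table bookkeeping moved into struct xt_table_info, do_replace() above becomes a thin wrapper around the shared helpers. A condensed sketch of its successful path (locking, userspace copies and error handling elided; argument lists follow the hunks):

    /* Condensed happy path of do_replace(). */
    newinfo = xt_alloc_table_info(tmp.size);            /* per-CPU rule copies */
    loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
    /* ... copy the new ruleset from userspace into loc_cpu_entry ... */
    translate_table(tmp.name, tmp.valid_hooks, newinfo, loc_cpu_entry,
                    tmp.size, tmp.num_entries, tmp.hook_entry, tmp.underflow);
    t = try_then_request_module(xt_find_table_lock(AF_INET6, tmp.name),
                                "ip6table_%s", tmp.name);
    oldinfo = xt_replace_table(t, tmp.num_counters, newinfo, &ret);
    get_counters(oldinfo, counters);                    /* snapshot old stats  */
    xt_free_table_info(oldinfo);
    xt_table_unlock(t);
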
 
@@ -1307,7 +1062,7 @@ do_replace(void __user *user, unsigned int len)
  * and everything is OK. */
 static inline int
 add_counter_to_entry(struct ip6t_entry *e,
-                    const struct ip6t_counters addme[],
+                    const struct xt_counters addme[],
                     unsigned int *i)
 {
 #if 0
@@ -1329,15 +1084,16 @@ static int
 do_add_counters(void __user *user, unsigned int len)
 {
        unsigned int i;
-       struct ip6t_counters_info tmp, *paddc;
-       struct ip6t_table *t;
+       struct xt_counters_info tmp, *paddc;
+       struct xt_table_info *private;
+       struct xt_table *t;
        int ret = 0;
        void *loc_cpu_entry;
 
        if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
                return -EFAULT;
 
-       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct ip6t_counters))
+       if (len != sizeof(tmp) + tmp.num_counters*sizeof(struct xt_counters))
                return -EINVAL;
 
        paddc = vmalloc(len);
@@ -1349,29 +1105,30 @@ do_add_counters(void __user *user, unsigned int len)
                goto free;
        }
 
-       t = find_table_lock(tmp.name);
+       t = xt_find_table_lock(AF_INET6, tmp.name);
        if (!t || IS_ERR(t)) {
                ret = t ? PTR_ERR(t) : -ENOENT;
                goto free;
        }
 
        write_lock_bh(&t->lock);
-       if (t->private->number != paddc->num_counters) {
+       private = t->private;
+       if (private->number != paddc->num_counters) {
                ret = -EINVAL;
                goto unlock_up_free;
        }
 
        i = 0;
        /* Choose the copy that is on our node */
-       loc_cpu_entry = t->private->entries[smp_processor_id()];
+       loc_cpu_entry = private->entries[smp_processor_id()];
        IP6T_ENTRY_ITERATE(loc_cpu_entry,
-                         t->private->size,
+                         private->size,
                          add_counter_to_entry,
                          paddc->counters,
                          &i);
  unlock_up_free:
        write_unlock_bh(&t->lock);
-       up(&ip6t_mutex);
+       xt_table_unlock(t);
        module_put(t->me);
  free:
        vfree(paddc);
@@ -1415,7 +1172,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        switch (cmd) {
        case IP6T_SO_GET_INFO: {
                char name[IP6T_TABLE_MAXNAMELEN];
-               struct ip6t_table *t;
+               struct xt_table *t;
 
                if (*len != sizeof(struct ip6t_getinfo)) {
                        duprintf("length %u != %u\n", *len,
@@ -1430,25 +1187,26 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                }
                name[IP6T_TABLE_MAXNAMELEN-1] = '\0';
 
-               t = try_then_request_module(find_table_lock(name),
+               t = try_then_request_module(xt_find_table_lock(AF_INET6, name),
                                            "ip6table_%s", name);
                if (t && !IS_ERR(t)) {
                        struct ip6t_getinfo info;
+                       struct xt_table_info *private = t->private;
 
                        info.valid_hooks = t->valid_hooks;
-                       memcpy(info.hook_entry, t->private->hook_entry,
+                       memcpy(info.hook_entry, private->hook_entry,
                               sizeof(info.hook_entry));
-                       memcpy(info.underflow, t->private->underflow,
+                       memcpy(info.underflow, private->underflow,
                               sizeof(info.underflow));
-                       info.num_entries = t->private->number;
-                       info.size = t->private->size;
+                       info.num_entries = private->number;
+                       info.size = private->size;
                        memcpy(info.name, name, sizeof(info.name));
 
                        if (copy_to_user(user, &info, *len) != 0)
                                ret = -EFAULT;
                        else
                                ret = 0;
-                       up(&ip6t_mutex);
+                       xt_table_unlock(t);
                        module_put(t->me);
                } else
                        ret = t ? PTR_ERR(t) : -ENOENT;
@@ -1475,7 +1233,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        case IP6T_SO_GET_REVISION_MATCH:
        case IP6T_SO_GET_REVISION_TARGET: {
                struct ip6t_get_revision rev;
-               int (*revfn)(const char *, u8, int *);
+               int target;
 
                if (*len != sizeof(rev)) {
                        ret = -EINVAL;
@@ -1487,12 +1245,13 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
                }
 
                if (cmd == IP6T_SO_GET_REVISION_TARGET)
-                       revfn = target_revfn;
+                       target = 1;
                else
-                       revfn = match_revfn;
+                       target = 0;
 
-               try_then_request_module(find_revision(rev.name, rev.revision,
-                                                     revfn, &ret),
+               try_then_request_module(xt_find_revision(AF_INET6, rev.name,
+                                                        rev.revision,
+                                                        target, &ret),
                                        "ip6t_%s", rev.name);
                break;
        }
@@ -1505,61 +1264,16 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
        return ret;
 }
 
-/* Registration hooks for targets. */
-int
-ip6t_register_target(struct ip6t_target *target)
-{
-       int ret;
-
-       ret = down_interruptible(&ip6t_mutex);
-       if (ret != 0)
-               return ret;
-       list_add(&target->list, &ip6t_target);
-       up(&ip6t_mutex);
-       return ret;
-}
-
-void
-ip6t_unregister_target(struct ip6t_target *target)
-{
-       down(&ip6t_mutex);
-       LIST_DELETE(&ip6t_target, target);
-       up(&ip6t_mutex);
-}
-
-int
-ip6t_register_match(struct ip6t_match *match)
-{
-       int ret;
-
-       ret = down_interruptible(&ip6t_mutex);
-       if (ret != 0)
-               return ret;
-
-       list_add(&match->list, &ip6t_match);
-       up(&ip6t_mutex);
-
-       return ret;
-}
-
-void
-ip6t_unregister_match(struct ip6t_match *match)
-{
-       down(&ip6t_mutex);
-       LIST_DELETE(&ip6t_match, match);
-       up(&ip6t_mutex);
-}
-
-int ip6t_register_table(struct ip6t_table *table,
+int ip6t_register_table(struct xt_table *table,
                        const struct ip6t_replace *repl)
 {
        int ret;
-       struct ip6t_table_info *newinfo;
-       static struct ip6t_table_info bootstrap
+       struct xt_table_info *newinfo;
+       static struct xt_table_info bootstrap
                = { 0, 0, 0, { 0 }, { 0 }, { } };
        void *loc_cpu_entry;
 
-       newinfo = alloc_table_info(repl->size);
+       newinfo = xt_alloc_table_info(repl->size);
        if (!newinfo)
                return -ENOMEM;
 
@@ -1573,244 +1287,29 @@ int ip6t_register_table(struct ip6t_table *table,
                              repl->hook_entry,
                              repl->underflow);
        if (ret != 0) {
-               free_table_info(newinfo);
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       ret = down_interruptible(&ip6t_mutex);
-       if (ret != 0) {
-               free_table_info(newinfo);
+       if (xt_register_table(table, &bootstrap, newinfo) != 0) {
+               xt_free_table_info(newinfo);
                return ret;
        }
 
-       /* Don't autoload: we'd eat our tail... */
-       if (list_named_find(&ip6t_tables, table->name)) {
-               ret = -EEXIST;
-               goto free_unlock;
-       }
-
-       /* Simplifies replace_table code. */
-       table->private = &bootstrap;
-       if (!replace_table(table, 0, newinfo, &ret))
-               goto free_unlock;
-
-       duprintf("table->private->number = %u\n",
-                table->private->number);
-
-       /* save number of initial entries */
-       table->private->initial_entries = table->private->number;
-
-       rwlock_init(&table->lock);
-       list_prepend(&ip6t_tables, table);
-
- unlock:
-       up(&ip6t_mutex);
-       return ret;
-
- free_unlock:
-       free_table_info(newinfo);
-       goto unlock;
+       return 0;
 }
 
-void ip6t_unregister_table(struct ip6t_table *table)
+void ip6t_unregister_table(struct xt_table *table)
 {
+       struct xt_table_info *private;
        void *loc_cpu_entry;
 
-       down(&ip6t_mutex);
-       LIST_DELETE(&ip6t_tables, table);
-       up(&ip6t_mutex);
+       private = xt_unregister_table(table);
 
        /* Decrease module usage counts and free resources */
-       loc_cpu_entry = table->private->entries[raw_smp_processor_id()];
-       IP6T_ENTRY_ITERATE(loc_cpu_entry, table->private->size,
-                         cleanup_entry, NULL);
-       free_table_info(table->private);
-}
-
-/* Returns 1 if the port is matched by the range, 0 otherwise */
-static inline int
-port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
-{
-       int ret;
-
-       ret = (port >= min && port <= max) ^ invert;
-       return ret;
-}
-
-static int
-tcp_find_option(u_int8_t option,
-               const struct sk_buff *skb,
-               unsigned int tcpoff,
-               unsigned int optlen,
-               int invert,
-               int *hotdrop)
-{
-       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
-       u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
-       unsigned int i;
-
-       duprintf("tcp_match: finding option\n");
-       if (!optlen)
-               return invert;
-       /* If we don't have the whole header, drop packet. */
-       op = skb_header_pointer(skb, tcpoff + sizeof(struct tcphdr), optlen,
-                               _opt);
-       if (op == NULL) {
-               *hotdrop = 1;
-               return 0;
-       }
-
-       for (i = 0; i < optlen; ) {
-               if (op[i] == option) return !invert;
-               if (op[i] < 2) i++;
-               else i += op[i+1]?:1;
-       }
-
-       return invert;
-}
-
-static int
-tcp_match(const struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         const void *matchinfo,
-         int offset,
-         unsigned int protoff,
-         int *hotdrop)
-{
-       struct tcphdr _tcph, *th;
-       const struct ip6t_tcp *tcpinfo = matchinfo;
-
-       if (offset) {
-               /* To quote Alan:
-
-                  Don't allow a fragment of TCP 8 bytes in. Nobody normal
-                  causes this. Its a cracker trying to break in by doing a
-                  flag overwrite to pass the direction checks.
-               */
-               if (offset == 1) {
-                       duprintf("Dropping evil TCP offset=1 frag.\n");
-                       *hotdrop = 1;
-               }
-               /* Must not be a fragment. */
-               return 0;
-       }
-
-#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
-
-       th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               /* We've been asked to examine this packet, and we
-                  can't.  Hence, no choice but to drop. */
-               duprintf("Dropping evil TCP offset=0 tinygram.\n");
-               *hotdrop = 1;
-               return 0;
-       }
-
-       if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
-                       ntohs(th->source),
-                       !!(tcpinfo->invflags & IP6T_TCP_INV_SRCPT)))
-               return 0;
-       if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
-                       ntohs(th->dest),
-                       !!(tcpinfo->invflags & IP6T_TCP_INV_DSTPT)))
-               return 0;
-       if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
-                     == tcpinfo->flg_cmp,
-                     IP6T_TCP_INV_FLAGS))
-               return 0;
-       if (tcpinfo->option) {
-               if (th->doff * 4 < sizeof(_tcph)) {
-                       *hotdrop = 1;
-                       return 0;
-               }
-               if (!tcp_find_option(tcpinfo->option, skb, protoff,
-                                    th->doff*4 - sizeof(*th),
-                                    tcpinfo->invflags & IP6T_TCP_INV_OPTION,
-                                    hotdrop))
-                       return 0;
-       }
-       return 1;
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-tcp_checkentry(const char *tablename,
-              const struct ip6t_ip6 *ipv6,
-              void *matchinfo,
-              unsigned int matchsize,
-              unsigned int hook_mask)
-{
-       const struct ip6t_tcp *tcpinfo = matchinfo;
-
-       /* Must specify proto == TCP, and no unknown invflags */
-       return ipv6->proto == IPPROTO_TCP
-               && !(ipv6->invflags & IP6T_INV_PROTO)
-               && matchsize == IP6T_ALIGN(sizeof(struct ip6t_tcp))
-               && !(tcpinfo->invflags & ~IP6T_TCP_INV_MASK);
-}
-
-static int
-udp_match(const struct sk_buff *skb,
-         const struct net_device *in,
-         const struct net_device *out,
-         const void *matchinfo,
-         int offset,
-         unsigned int protoff,
-         int *hotdrop)
-{
-       struct udphdr _udph, *uh;
-       const struct ip6t_udp *udpinfo = matchinfo;
-
-       /* Must not be a fragment. */
-       if (offset)
-               return 0;
-
-       uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
-       if (uh == NULL) {
-               /* We've been asked to examine this packet, and we
-                  can't.  Hence, no choice but to drop. */
-               duprintf("Dropping evil UDP tinygram.\n");
-               *hotdrop = 1;
-               return 0;
-       }
-
-       return port_match(udpinfo->spts[0], udpinfo->spts[1],
-                         ntohs(uh->source),
-                         !!(udpinfo->invflags & IP6T_UDP_INV_SRCPT))
-               && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
-                             ntohs(uh->dest),
-                             !!(udpinfo->invflags & IP6T_UDP_INV_DSTPT));
-}
-
-/* Called when user tries to insert an entry of this type. */
-static int
-udp_checkentry(const char *tablename,
-              const struct ip6t_ip6 *ipv6,
-              void *matchinfo,
-              unsigned int matchinfosize,
-              unsigned int hook_mask)
-{
-       const struct ip6t_udp *udpinfo = matchinfo;
-
-       /* Must specify proto == UDP, and no unknown invflags */
-       if (ipv6->proto != IPPROTO_UDP || (ipv6->invflags & IP6T_INV_PROTO)) {
-               duprintf("ip6t_udp: Protocol %u != %u\n", ipv6->proto,
-                        IPPROTO_UDP);
-               return 0;
-       }
-       if (matchinfosize != IP6T_ALIGN(sizeof(struct ip6t_udp))) {
-               duprintf("ip6t_udp: matchsize %u != %u\n",
-                        matchinfosize, IP6T_ALIGN(sizeof(struct ip6t_udp)));
-               return 0;
-       }
-       if (udpinfo->invflags & ~IP6T_UDP_INV_MASK) {
-               duprintf("ip6t_udp: unknown flags %X\n",
-                        udpinfo->invflags);
-               return 0;
-       }
-
-       return 1;
+       loc_cpu_entry = private->entries[raw_smp_processor_id()];
+       IP6T_ENTRY_ITERATE(loc_cpu_entry, private->size, cleanup_entry, NULL);
+       xt_free_table_info(private);
 }
 
 /* Returns 1 if the type and code is matched by the range, 0 otherwise */
@@ -1858,11 +1357,12 @@ icmp6_match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 icmp6_checkentry(const char *tablename,
-          const struct ip6t_ip6 *ipv6,
+          const void *entry,
           void *matchinfo,
           unsigned int matchsize,
           unsigned int hook_mask)
 {
+       const struct ip6t_ip6 *ipv6 = entry;
        const struct ip6t_icmp *icmpinfo = matchinfo;
 
        /* Must specify proto == ICMP, and no unknown invflags */
@@ -1892,164 +1392,42 @@ static struct nf_sockopt_ops ip6t_sockopts = {
        .get            = do_ip6t_get_ctl,
 };
 
-static struct ip6t_match tcp_matchstruct = {
-       .name           = "tcp",
-       .match          = &tcp_match,
-       .checkentry     = &tcp_checkentry,
-};
-
-static struct ip6t_match udp_matchstruct = {
-       .name           = "udp",
-       .match          = &udp_match,
-       .checkentry     = &udp_checkentry,
-};
-
 static struct ip6t_match icmp6_matchstruct = {
        .name           = "icmp6",
        .match          = &icmp6_match,
        .checkentry     = &icmp6_checkentry,
 };
 
-#ifdef CONFIG_PROC_FS
-static inline int print_name(const char *i,
-                            off_t start_offset, char *buffer, int length,
-                            off_t *pos, unsigned int *count)
-{
-       if ((*count)++ >= start_offset) {
-               unsigned int namelen;
-
-               namelen = sprintf(buffer + *pos, "%s\n",
-                                 i + sizeof(struct list_head));
-               if (*pos + namelen > length) {
-                       /* Stop iterating */
-                       return 1;
-               }
-               *pos += namelen;
-       }
-       return 0;
-}
-
-static inline int print_target(const struct ip6t_target *t,
-                               off_t start_offset, char *buffer, int length,
-                               off_t *pos, unsigned int *count)
-{
-       if (t == &ip6t_standard_target || t == &ip6t_error_target)
-               return 0;
-       return print_name((char *)t, start_offset, buffer, length, pos, count);
-}
-
-static int ip6t_get_tables(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&ip6t_tables, print_name, char *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&ip6t_mutex);
-
-       /* `start' hack - see fs/proc/generic.c line ~105 */
-       *start=(char *)((unsigned long)count-offset);
-       return pos;
-}
-
-static int ip6t_get_targets(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&ip6t_target, print_target, struct ip6t_target *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&ip6t_mutex);
-
-       *start = (char *)((unsigned long)count - offset);
-       return pos;
-}
-
-static int ip6t_get_matches(char *buffer, char **start, off_t offset, int length)
-{
-       off_t pos = 0;
-       unsigned int count = 0;
-
-       if (down_interruptible(&ip6t_mutex) != 0)
-               return 0;
-
-       LIST_FIND(&ip6t_match, print_name, char *,
-                 offset, buffer, length, &pos, &count);
-
-       up(&ip6t_mutex);
-
-       *start = (char *)((unsigned long)count - offset);
-       return pos;
-}
-
-static const struct { char *name; get_info_t *get_info; } ip6t_proc_entry[] =
-{ { "ip6_tables_names", ip6t_get_tables },
-  { "ip6_tables_targets", ip6t_get_targets },
-  { "ip6_tables_matches", ip6t_get_matches },
-  { NULL, NULL} };
-#endif /*CONFIG_PROC_FS*/
-
 static int __init init(void)
 {
        int ret;
 
+       xt_proto_init(AF_INET6);
+
        /* No one else will be downing the sem now, so we won't sleep */
-       down(&ip6t_mutex);
-       list_append(&ip6t_target, &ip6t_standard_target);
-       list_append(&ip6t_target, &ip6t_error_target);
-       list_append(&ip6t_match, &tcp_matchstruct);
-       list_append(&ip6t_match, &udp_matchstruct);
-       list_append(&ip6t_match, &icmp6_matchstruct);
-       up(&ip6t_mutex);
+       xt_register_target(AF_INET6, &ip6t_standard_target);
+       xt_register_target(AF_INET6, &ip6t_error_target);
+       xt_register_match(AF_INET6, &icmp6_matchstruct);
 
        /* Register setsockopt */
        ret = nf_register_sockopt(&ip6t_sockopts);
        if (ret < 0) {
                duprintf("Unable to register sockopts.\n");
+               xt_proto_fini(AF_INET6);
                return ret;
        }
 
-#ifdef CONFIG_PROC_FS
-       {
-               struct proc_dir_entry *proc;
-               int i;
-
-               for (i = 0; ip6t_proc_entry[i].name; i++) {
-                       proc = proc_net_create(ip6t_proc_entry[i].name, 0,
-                                              ip6t_proc_entry[i].get_info);
-                       if (!proc) {
-                               while (--i >= 0)
-                                      proc_net_remove(ip6t_proc_entry[i].name);
-                               nf_unregister_sockopt(&ip6t_sockopts);
-                               return -ENOMEM;
-                       }
-                       proc->owner = THIS_MODULE;
-               }
-       }
-#endif
-
-       printk("ip6_tables: (C) 2000-2002 Netfilter core team\n");
+       printk("ip6_tables: (C) 2000-2006 Netfilter Core Team\n");
        return 0;
 }
 
 static void __exit fini(void)
 {
        nf_unregister_sockopt(&ip6t_sockopts);
-#ifdef CONFIG_PROC_FS
-       {
-               int i;
-               for (i = 0; ip6t_proc_entry[i].name; i++)
-                       proc_net_remove(ip6t_proc_entry[i].name);
-       }
-#endif
+       xt_unregister_match(AF_INET6, &icmp6_matchstruct);
+       xt_unregister_target(AF_INET6, &ip6t_error_target);
+       xt_unregister_target(AF_INET6, &ip6t_standard_target);
+       xt_proto_fini(AF_INET6);
 }
 
 /*
@@ -2128,10 +1506,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
 EXPORT_SYMBOL(ip6t_register_table);
 EXPORT_SYMBOL(ip6t_unregister_table);
 EXPORT_SYMBOL(ip6t_do_table);
-EXPORT_SYMBOL(ip6t_register_match);
-EXPORT_SYMBOL(ip6t_unregister_match);
-EXPORT_SYMBOL(ip6t_register_target);
-EXPORT_SYMBOL(ip6t_unregister_target);
 EXPORT_SYMBOL(ip6t_ext_hdr);
 EXPORT_SYMBOL(ipv6_find_hdr);
 EXPORT_SYMBOL(ip6_masked_addrcmp);
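
Module setup for ip6_tables now registers the built-in standard/error targets and the icmp6 match with the shared x_tables core, per address family, instead of maintaining private lists and /proc/net/ip6_tables_* files. A condensed view of the converted life cycle, mirroring the init()/fini() hunks above (error handling omitted):

    static int __init init(void)
    {
            xt_proto_init(AF_INET6);                     /* per-family xt state */
            xt_register_target(AF_INET6, &ip6t_standard_target);
            xt_register_target(AF_INET6, &ip6t_error_target);
            xt_register_match(AF_INET6, &icmp6_matchstruct);
            return nf_register_sockopt(&ip6t_sockopts);  /* user-space ABI      */
    }

    static void __exit fini(void)
    {
            nf_unregister_sockopt(&ip6t_sockopts);
            xt_unregister_match(AF_INET6, &icmp6_matchstruct);
            xt_unregister_target(AF_INET6, &ip6t_error_target);
            xt_unregister_target(AF_INET6, &ip6t_standard_target);
            xt_proto_fini(AF_INET6);
    }
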
index 8f5549b72720ee13e63ded8ab72153e18474321b..306200c3505788fd8675c5c4a8bdd839f0ff6cc0 100644 (file)
@@ -62,7 +62,7 @@ static unsigned int ip6t_hl_target(struct sk_buff **pskb,
 }
 
 static int ip6t_hl_checkentry(const char *tablename,
-               const struct ip6t_entry *e,
+               const void *entry,
                void *targinfo,
                unsigned int targinfosize,
                unsigned int hook_mask)
index ae4653bfd65462fe31340bf6cf46324d8cf93385..77c725832decdbfc28b1108f9912cae8b09965dd 100644 (file)
@@ -63,9 +63,8 @@ static void dump_packet(const struct nf_loginfo *info,
                return;
        }
 
-       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000" */
-       printk("SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ih->saddr));
-       printk("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ih->daddr));
+       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+       printk("SRC=" NIP6_FMT " DST=" NIP6_FMT " ", NIP6(ih->saddr), NIP6(ih->daddr));
 
        /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
        printk("LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
@@ -444,7 +443,7 @@ ip6t_log_target(struct sk_buff **pskb,
 
 
 static int ip6t_log_checkentry(const char *tablename,
-                              const struct ip6t_entry *e,
+                              const void *entry,
                               void *targinfo,
                               unsigned int targinfosize,
                               unsigned int hook_mask)
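
Both LOG hunks (and several hunks further down, in the conntrack, ftp and xfrm6 code) replace hand-rolled "%04x:%04x:..." strings with the NIP6_FMT helper, so the format string and its eight arguments can no longer drift apart. Assuming NIP6_FMT and NIP6() keep their usual definitions from <linux/kernel.h> of this period (eight "%04x" fields fed by the ntohs'd 16-bit words of an in6_addr), a caller reduces to:

#include <linux/kernel.h>
#include <linux/in6.h>

static void log_addrs(const struct in6_addr *saddr, const struct in6_addr *daddr)
{
        /* NIP6(x) expands to eight comma-separated halfwords, so the two
         * macro uses below supply sixteen printk arguments in total. */
        printk(KERN_DEBUG "SRC=" NIP6_FMT " DST=" NIP6_FMT "\n",
               NIP6(*saddr), NIP6(*daddr));
}
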
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
deleted file mode 100644 (file)
index eab8fb8..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* This is a module which is used for setting the NFMARK field of an skb. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/ip.h>
-#include <net/checksum.h>
-
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv6/ip6t_MARK.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       const struct ip6t_mark_target_info *markinfo = targinfo;
-
-       if((*pskb)->nfmark != markinfo->mark)
-               (*pskb)->nfmark = markinfo->mark;
-
-       return IP6T_CONTINUE;
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ip6t_entry *e,
-           void *targinfo,
-           unsigned int targinfosize,
-           unsigned int hook_mask)
-{
-       if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_mark_target_info))) {
-               printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IP6T_ALIGN(sizeof(struct ip6t_mark_target_info)));
-               return 0;
-       }
-
-       if (strcmp(tablename, "mangle") != 0) {
-               printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ip6t_target ip6t_mark_reg = { 
-       .name           = "MARK",
-       .target         = target,
-       .checkentry     = checkentry,
-       .me             = THIS_MODULE
-};
-
-static int __init init(void)
-{
-       printk(KERN_DEBUG "registering ipv6 mark target\n");
-       if (ip6t_register_target(&ip6t_mark_reg))
-               return -EINVAL;
-
-       return 0;
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_target(&ip6t_mark_reg);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_NFQUEUE.c b/net/ipv6/netfilter/ip6t_NFQUEUE.c
deleted file mode 100644 (file)
index c6e3730..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/* ip6tables module for using new netfilter netlink queue
- *
- * (C) 2005 by Harald Welte <laforge@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as 
- * published by the Free Software Foundation.
- * 
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv4/ipt_NFQUEUE.h>
-
-MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
-MODULE_DESCRIPTION("ip6tables NFQUEUE target");
-MODULE_LICENSE("GPL");
-
-static unsigned int
-target(struct sk_buff **pskb,
-       const struct net_device *in,
-       const struct net_device *out,
-       unsigned int hooknum,
-       const void *targinfo,
-       void *userinfo)
-{
-       const struct ipt_NFQ_info *tinfo = targinfo;
-
-       return NF_QUEUE_NR(tinfo->queuenum);
-}
-
-static int
-checkentry(const char *tablename,
-          const struct ip6t_entry *e,
-           void *targinfo,
-           unsigned int targinfosize,
-           unsigned int hook_mask)
-{
-       if (targinfosize != IP6T_ALIGN(sizeof(struct ipt_NFQ_info))) {
-               printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
-                      targinfosize,
-                      IP6T_ALIGN(sizeof(struct ipt_NFQ_info)));
-               return 0;
-       }
-
-       return 1;
-}
-
-static struct ip6t_target ipt_NFQ_reg = {
-       .name           = "NFQUEUE",
-       .target         = target,
-       .checkentry     = checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ip6t_register_target(&ipt_NFQ_reg);
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_target(&ipt_NFQ_reg);
-}
-
-module_init(init);
-module_exit(fini);
index b03e87adca93c149ef2fbf6f5d22677e0c224f65..c745717b4ce2165d0d8b73a415c4cf229b88df00 100644 (file)
@@ -218,12 +218,13 @@ static unsigned int reject6_target(struct sk_buff **pskb,
 }
 
 static int check(const char *tablename,
-                const struct ip6t_entry *e,
+                const void *entry,
                 void *targinfo,
                 unsigned int targinfosize,
                 unsigned int hook_mask)
 {
        const struct ip6t_reject_info *rejinfo = targinfo;
+       const struct ip6t_entry *e = entry;
 
        if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_reject_info))) {
                DEBUGP("ip6t_REJECT: targinfosize %u != 0\n", targinfosize);
index f5c1a7ff4a1f05407ea5cb65f4d5863988f848a6..219a30365dff27dcef63046176e9f8a6ef19d41c 100644 (file)
@@ -98,7 +98,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *entry,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index 48cf5f9efc95c58788044a979db8d7e8466033e7..80fe82669ce2616af5f99e866140dbf50d65a316 100644 (file)
@@ -178,7 +178,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *info,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index e1828f6d0a4061c0ca6e8ae40b084cf9519ef080..724285df87115e5e75e26e99c248ca0cab823dfd 100644 (file)
@@ -76,7 +76,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *ip,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index 616c2cbcd54d297a84e1a933c9b7a28d4c01c006..ddf5f571909c03b27b8038e11af598da276aea6e 100644 (file)
@@ -62,7 +62,7 @@ match(const struct sk_buff *skb,
 
 static int
 ip6t_eui64_checkentry(const char *tablename,
-                  const struct ip6t_ip6 *ip,
+                  const void  *ip,
                   void *matchinfo,
                   unsigned int matchsize,
                   unsigned int hook_mask)
index d1549b268669b7cc990ba322f39ac1726d277db1..a9964b946ed503409eeaa750d044ac82fa654bc3 100644 (file)
@@ -115,7 +115,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *ip,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index e3bc8e2700e77ee5cb03a30955894856f90a3e67..ed8ded18bbd4f5970fadb49f180ef9bd275ea327 100644 (file)
@@ -178,7 +178,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *entry,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index 0beaff5471dd320adbc8be0deb2f43954a0b1ce7..c5d9079f2d9dfc885f764f4984b5e9e8deb8d8b4 100644 (file)
@@ -48,7 +48,7 @@ static int match(const struct sk_buff *skb, const struct net_device *in,
        return 0;
 }
 
-static int checkentry(const char *tablename, const struct ip6t_ip6 *ip,
+static int checkentry(const char *tablename, const void *entry,
                      void *matchinfo, unsigned int matchsize,
                      unsigned int hook_mask)
 {
index 32e67f05845b44353758983ed8c5935e6a028dd0..fda1ceaf5a2976c579ef22e7899791561a465bff 100644 (file)
@@ -124,7 +124,7 @@ ipv6header_match(const struct sk_buff *skb,
 
 static int
 ipv6header_checkentry(const char *tablename,
-                     const struct ip6t_ip6 *ip,
+                     const void *ip,
                      void *matchinfo,
                      unsigned int matchsize,
                      unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_length.c b/net/ipv6/netfilter/ip6t_length.c
deleted file mode 100644 (file)
index e0537d3..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Length Match - IPv6 Port */
-
-/* (C) 1999-2001 James Morris <jmorros@intercode.com.au>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv6/ip6t_length.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-MODULE_DESCRIPTION("IPv6 packet length match");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-       const struct ip6t_length_info *info = matchinfo;
-       u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
-       
-       return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ip6t_ip6 *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_length_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ip6t_match length_match = {
-       .name           = "length",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ip6t_register_match(&length_match);
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_match(&length_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_limit.c b/net/ipv6/netfilter/ip6t_limit.c
deleted file mode 100644 (file)
index fb782f6..0000000
+++ /dev/null
@@ -1,147 +0,0 @@
-/* Kernel module to control the rate
- *
- * 2 September 1999: Changed from the target RATE to the match
- *                   `limit', removed logging.  Did I mention that
- *                   Alexey is a fucking genius?
- *                   Rusty Russell (rusty@rustcorp.com.au).  */
-
-/* (C) 1999 Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
- * (C) 1999 Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-#include <linux/interrupt.h>
-
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_ipv6/ip6t_limit.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
-MODULE_DESCRIPTION("rate limiting within ip6tables");
-
-/* The algorithm used is the Simple Token Bucket Filter (TBF)
- * see net/sched/sch_tbf.c in the linux source tree
- */
-
-static DEFINE_SPINLOCK(limit_lock);
-
-/* Rusty: This is my (non-mathematically-inclined) understanding of
-   this algorithm.  The `average rate' in jiffies becomes your initial
-   amount of credit `credit' and the most credit you can ever have
-   `credit_cap'.  The `peak rate' becomes the cost of passing the
-   test, `cost'.
-
-   `prev' tracks the last packet hit: you gain one credit per jiffy.
-   If you get credit balance more than this, the extra credit is
-   discarded.  Every time the match passes, you lose `cost' credits;
-   if you don't have that many, the test fails.
-
-   See Alexey's formal explanation in net/sched/sch_tbf.c.
-
-   To avoid underflow, we multiply by 128 (ie. you get 128 credits per
-   jiffy).  Hence a cost of 2^32-1, means one pass per 32768 seconds
-   at 1024HZ (or one every 9 hours).  A cost of 1 means 12800 passes
-   per second at 100HZ.  */
-
-#define CREDITS_PER_JIFFY 128
-
-static int
-ip6t_limit_match(const struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               const void *matchinfo,
-               int offset,
-               unsigned int protoff,
-               int *hotdrop)
-{
-       struct ip6t_rateinfo *r = ((struct ip6t_rateinfo *)matchinfo)->master;
-       unsigned long now = jiffies;
-
-       spin_lock_bh(&limit_lock);
-       r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY;
-       if (r->credit > r->credit_cap)
-               r->credit = r->credit_cap;
-
-       if (r->credit >= r->cost) {
-               /* We're not limited. */
-               r->credit -= r->cost;
-               spin_unlock_bh(&limit_lock);
-               return 1;
-       }
-
-               spin_unlock_bh(&limit_lock);
-       return 0;
-}
-
-/* Precision saver. */
-static u_int32_t
-user2credits(u_int32_t user)
-{
-       /* If multiplying would overflow... */
-       if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
-               /* Divide first. */
-               return (user / IP6T_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
-
-       return (user * HZ * CREDITS_PER_JIFFY) / IP6T_LIMIT_SCALE;
-}
-
-static int
-ip6t_limit_checkentry(const char *tablename,
-                    const struct ip6t_ip6 *ip,
-                    void *matchinfo,
-                    unsigned int matchsize,
-                    unsigned int hook_mask)
-{
-       struct ip6t_rateinfo *r = matchinfo;
-
-       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_rateinfo)))
-               return 0;
-
-       /* Check for overflow. */
-       if (r->burst == 0
-           || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
-               printk("Call rusty: overflow in ip6t_limit: %u/%u\n",
-                      r->avg, r->burst);
-               return 0;
-       }
-
-       /* User avg in seconds * IP6T_LIMIT_SCALE: convert to jiffies *
-          128. */
-       r->prev = jiffies;
-       r->credit = user2credits(r->avg * r->burst);     /* Credits full. */
-       r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
-       r->cost = user2credits(r->avg);
-
-       /* For SMP, we only want to use one set of counters. */
-       r->master = r;
-
-       return 1;
-}
-
-static struct ip6t_match ip6t_limit_reg = {
-       .name           = "limit",
-       .match          = ip6t_limit_match,
-       .checkentry     = ip6t_limit_checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       if (ip6t_register_match(&ip6t_limit_reg))
-               return -EINVAL;
-       return 0;
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_match(&ip6t_limit_reg);
-}
-
-module_init(init);
-module_exit(fini);
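
Rusty's comment block in the deleted limit match already works the credit arithmetic out in prose, and the numbers check out: credits accrue at CREDITS_PER_JIFFY (128) per jiffy, so at HZ=100 the bucket refills at 12800 credits per second, and once the burst is spent a rule passes about 12800/cost times per second. A small user-space sketch of the same arithmetic; IP6T_LIMIT_SCALE is assumed to be 10000 here, i.e. avg is expressed in 1/10000ths of a second, mirroring the ipt side:

#include <stdio.h>

#define CREDITS_PER_JIFFY 128
#define HZ                100      /* example clock rate */
#define IP6T_LIMIT_SCALE  10000    /* assumed: avg counts 1/10000ths of a second */

/* Same precision-saving division as user2credits() in the deleted module. */
static unsigned int user2credits(unsigned int user)
{
        if (user > 0xFFFFFFFF / (HZ * CREDITS_PER_JIFFY))
                return (user / IP6T_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
        return (user * HZ * CREDITS_PER_JIFFY) / IP6T_LIMIT_SCALE;
}

int main(void)
{
        unsigned int refill = HZ * CREDITS_PER_JIFFY;   /* 12800 credits/second */
        unsigned int avg    = IP6T_LIMIT_SCALE / 5;     /* "--limit 5/second" -> 2000 */
        unsigned int cost   = user2credits(avg);        /* 2000*100*128/10000 -> 2560 */

        printf("refill rate : %u credits/s\n", refill);
        printf("steady state: ~%u passes/s\n", refill / cost);   /* ~5 */
        return 0;
}
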
diff --git a/net/ipv6/netfilter/ip6t_mac.c b/net/ipv6/netfilter/ip6t_mac.c
deleted file mode 100644 (file)
index c848152..0000000
+++ /dev/null
@@ -1,81 +0,0 @@
-/* Kernel module to match MAC address parameters. */
-
-/* (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/if_ether.h>
-#include <linux/etherdevice.h>
-
-#include <linux/netfilter_ipv6/ip6t_mac.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MAC address matching module for IPv6");
-MODULE_AUTHOR("Netfilter Core Teaam <coreteam@netfilter.org>");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-    const struct ip6t_mac_info *info = matchinfo;
-
-    /* Is mac pointer valid? */
-    return (skb->mac.raw >= skb->head
-           && (skb->mac.raw + ETH_HLEN) <= skb->data
-           /* If so, compare... */
-           && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
-               ^ info->invert));
-}
-
-static int
-ip6t_mac_checkentry(const char *tablename,
-                  const struct ip6t_ip6 *ip,
-                  void *matchinfo,
-                  unsigned int matchsize,
-                  unsigned int hook_mask)
-{
-       if (hook_mask
-           & ~((1 << NF_IP6_PRE_ROUTING) | (1 << NF_IP6_LOCAL_IN)
-               | (1 << NF_IP6_FORWARD))) {
-               printk("ip6t_mac: only valid for PRE_ROUTING, LOCAL_IN or"
-                      " FORWARD\n");
-               return 0;
-       }
-
-       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_mac_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ip6t_match mac_match = {
-       .name           = "mac",
-       .match          = &match,
-       .checkentry     = &ip6t_mac_checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ip6t_register_match(&mac_match);
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_match(&mac_match);
-}
-
-module_init(init);
-module_exit(fini);
diff --git a/net/ipv6/netfilter/ip6t_mark.c b/net/ipv6/netfilter/ip6t_mark.c
deleted file mode 100644 (file)
index affc3de..0000000
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Kernel module to match NFMARK values. */
-
-/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-
-#include <linux/netfilter_ipv6/ip6t_mark.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
-MODULE_DESCRIPTION("ip6tables mark match");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-       const struct ip6t_mark_info *info = matchinfo;
-
-       return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
-}
-
-static int
-checkentry(const char *tablename,
-           const struct ip6t_ip6 *ip,
-           void *matchinfo,
-           unsigned int matchsize,
-           unsigned int hook_mask)
-{
-       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_mark_info)))
-               return 0;
-
-       return 1;
-}
-
-static struct ip6t_match mark_match = {
-       .name           = "mark",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ip6t_register_match(&mark_match);
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_match(&mark_match);
-}
-
-module_init(init);
-module_exit(fini);
index 6e3246153fa37c6cfebf79f77f2d5990b3a3bee7..49f7829dfbc2343db16fd8f343a848cf97d61a72 100644 (file)
@@ -84,11 +84,12 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *info,
           void *matchinfo,
           unsigned int matchsize,
           unsigned int hook_mask)
 {
+       const struct ip6t_ip6 *ip = info;
        const struct ip6t_multiport *multiinfo = matchinfo;
 
        if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_multiport)))
index 4de4cdad4b7d281522aeb34ca813f9db5c4fa7c0..5409b375b5121efbc3eea1ede04c7e80bafc471d 100644 (file)
@@ -53,7 +53,7 @@ match(const struct sk_buff *skb,
 
 static int
 checkentry(const char *tablename,
-           const struct ip6t_ip6 *ip,
+           const void  *ip,
            void *matchinfo,
            unsigned int matchsize,
            unsigned int hook_mask)
diff --git a/net/ipv6/netfilter/ip6t_physdev.c b/net/ipv6/netfilter/ip6t_physdev.c
deleted file mode 100644 (file)
index 71515c8..0000000
+++ /dev/null
@@ -1,135 +0,0 @@
-/* Kernel module to match the bridge port in and
- * out device for IP packets coming into contact with a bridge. */
-
-/* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/module.h>
-#include <linux/skbuff.h>
-#include <linux/netfilter_ipv6/ip6t_physdev.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter_bridge.h>
-#define MATCH   1
-#define NOMATCH 0
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("iptables bridge physical device match module");
-
-static int
-match(const struct sk_buff *skb,
-      const struct net_device *in,
-      const struct net_device *out,
-      const void *matchinfo,
-      int offset,
-      unsigned int protoff,
-      int *hotdrop)
-{
-       int i;
-       static const char nulldevname[IFNAMSIZ];
-       const struct ip6t_physdev_info *info = matchinfo;
-       unsigned int ret;
-       const char *indev, *outdev;
-       struct nf_bridge_info *nf_bridge;
-
-       /* Not a bridged IP packet or no info available yet:
-        * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
-        * the destination device will be a bridge. */
-       if (!(nf_bridge = skb->nf_bridge)) {
-               /* Return MATCH if the invert flags of the used options are on */
-               if ((info->bitmask & IP6T_PHYSDEV_OP_BRIDGED) &&
-                   !(info->invert & IP6T_PHYSDEV_OP_BRIDGED))
-                       return NOMATCH;
-               if ((info->bitmask & IP6T_PHYSDEV_OP_ISIN) &&
-                   !(info->invert & IP6T_PHYSDEV_OP_ISIN))
-                       return NOMATCH;
-               if ((info->bitmask & IP6T_PHYSDEV_OP_ISOUT) &&
-                   !(info->invert & IP6T_PHYSDEV_OP_ISOUT))
-                       return NOMATCH;
-               if ((info->bitmask & IP6T_PHYSDEV_OP_IN) &&
-                   !(info->invert & IP6T_PHYSDEV_OP_IN))
-                       return NOMATCH;
-               if ((info->bitmask & IP6T_PHYSDEV_OP_OUT) &&
-                   !(info->invert & IP6T_PHYSDEV_OP_OUT))
-                       return NOMATCH;
-               return MATCH;
-       }
-
-       /* This only makes sense in the FORWARD and POSTROUTING chains */
-       if ((info->bitmask & IP6T_PHYSDEV_OP_BRIDGED) &&
-           (!!(nf_bridge->mask & BRNF_BRIDGED) ^
-           !(info->invert & IP6T_PHYSDEV_OP_BRIDGED)))
-               return NOMATCH;
-
-       if ((info->bitmask & IP6T_PHYSDEV_OP_ISIN &&
-           (!nf_bridge->physindev ^ !!(info->invert & IP6T_PHYSDEV_OP_ISIN))) ||
-           (info->bitmask & IP6T_PHYSDEV_OP_ISOUT &&
-           (!nf_bridge->physoutdev ^ !!(info->invert & IP6T_PHYSDEV_OP_ISOUT))))
-               return NOMATCH;
-
-       if (!(info->bitmask & IP6T_PHYSDEV_OP_IN))
-               goto match_outdev;
-       indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
-       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
-               ret |= (((const unsigned int *)indev)[i]
-                       ^ ((const unsigned int *)info->physindev)[i])
-                       & ((const unsigned int *)info->in_mask)[i];
-       }
-
-       if ((ret == 0) ^ !(info->invert & IP6T_PHYSDEV_OP_IN))
-               return NOMATCH;
-
-match_outdev:
-       if (!(info->bitmask & IP6T_PHYSDEV_OP_OUT))
-               return MATCH;
-       outdev = nf_bridge->physoutdev ?
-                nf_bridge->physoutdev->name : nulldevname;
-       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
-               ret |= (((const unsigned int *)outdev)[i]
-                       ^ ((const unsigned int *)info->physoutdev)[i])
-                       & ((const unsigned int *)info->out_mask)[i];
-       }
-
-       return (ret != 0) ^ !(info->invert & IP6T_PHYSDEV_OP_OUT);
-}
-
-static int
-checkentry(const char *tablename,
-                      const struct ip6t_ip6 *ip,
-                      void *matchinfo,
-                      unsigned int matchsize,
-                      unsigned int hook_mask)
-{
-       const struct ip6t_physdev_info *info = matchinfo;
-
-       if (matchsize != IP6T_ALIGN(sizeof(struct ip6t_physdev_info)))
-               return 0;
-       if (!(info->bitmask & IP6T_PHYSDEV_OP_MASK) ||
-           info->bitmask & ~IP6T_PHYSDEV_OP_MASK)
-               return 0;
-       return 1;
-}
-
-static struct ip6t_match physdev_match = {
-       .name           = "physdev",
-       .match          = &match,
-       .checkentry     = &checkentry,
-       .me             = THIS_MODULE,
-};
-
-static int __init init(void)
-{
-       return ip6t_register_match(&physdev_match);
-}
-
-static void __exit fini(void)
-{
-       ip6t_unregister_match(&physdev_match);
-}
-
-module_init(init);
-module_exit(fini);
index c1e770e45543b3c749debf6b02c0f4444e0f4323..8465b4375855862f7f196ce260b0a64ef2d61343 100644 (file)
@@ -183,7 +183,7 @@ match(const struct sk_buff *skb,
 /* Called when user tries to insert an entry of this type. */
 static int
 checkentry(const char *tablename,
-          const struct ip6t_ip6 *ip,
+          const void *entry,
           void *matchinfo,
           unsigned int matchinfosize,
           unsigned int hook_mask)
index 4c0028671c20f0987c79747532ef5f4b14d87997..ce4a968e1f7067ec18dd4c69657b0b3feabbcdc9 100644 (file)
@@ -97,6 +97,7 @@ static struct ip6t_table packet_filter = {
        .valid_hooks    = FILTER_VALID_HOOKS,
        .lock           = RW_LOCK_UNLOCKED,
        .me             = THIS_MODULE,
+       .af             = AF_INET6,
 };
 
 /* The work comes in here from netfilter.c. */
index 85c1e6eada1911e8555b604d1dc1ffdcb95a0255..30a4627e000d43c97fcc8cb66395775d2744d495 100644 (file)
@@ -127,6 +127,7 @@ static struct ip6t_table packet_mangler = {
        .valid_hooks    = MANGLE_VALID_HOOKS,
        .lock           = RW_LOCK_UNLOCKED,
        .me             = THIS_MODULE,
+       .af             = AF_INET6,
 };
 
 /* The work comes in here from netfilter.c. */
index c2982efd14afd0fb47d7363c71f06a4d1374e848..db28ba3855e2a46af5de28ec31de58f2a86e3512 100644 (file)
@@ -106,11 +106,12 @@ static struct
        }
 };
 
-static struct ip6t_table packet_raw = { 
+static struct xt_table packet_raw = { 
        .name = "raw", 
        .valid_hooks = RAW_VALID_HOOKS, 
        .lock = RW_LOCK_UNLOCKED, 
-       .me = THIS_MODULE
+       .me = THIS_MODULE,
+       .af = AF_INET6,
 };
 
 /* The work comes in here from netfilter.c. */
index e57d6fc9957aee298a0a984c7ca4201246bb6e77..ac702a29dd160a0cd2bcc774d24aae1e6f0a8f62 100644 (file)
@@ -74,7 +74,7 @@ static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
 static int ipv6_print_tuple(struct seq_file *s,
                            const struct nf_conntrack_tuple *tuple)
 {
-       return seq_printf(s, "src=%x:%x:%x:%x:%x:%x:%x:%x dst=%x:%x:%x:%x:%x:%x:%x:%x ",
+       return seq_printf(s, "src=" NIP6_FMT " dst=" NIP6_FMT " ",
                          NIP6(*((struct in6_addr *)tuple->src.u3.ip6)),
                          NIP6(*((struct in6_addr *)tuple->dst.u3.ip6)));
 }
@@ -584,7 +584,7 @@ MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
 
 static int __init init(void)
 {
-       need_nf_conntrack();
+       need_conntrack();
        return init_or_cleanup(1);
 }
 
@@ -595,9 +595,3 @@ static void __exit fini(void)
 
 module_init(init);
 module_exit(fini);
-
-void need_ip6_conntrack(void)
-{
-}
-
-EXPORT_SYMBOL(need_ip6_conntrack);
index f3e5ffbd592f9ee4c3bd04c661453fe6ccda64d5..84ef9a13108d3190ccae6b2c4174219b3c3d2d61 100644 (file)
@@ -70,8 +70,8 @@ struct nf_ct_frag6_skb_cb
 
 struct nf_ct_frag6_queue
 {
-       struct nf_ct_frag6_queue        *next;
-       struct list_head lru_list;              /* lru list member      */
+       struct hlist_node       list;
+       struct list_head        lru_list;       /* lru list member      */
 
        __u32                   id;             /* fragment id          */
        struct in6_addr         saddr;
@@ -90,14 +90,13 @@ struct nf_ct_frag6_queue
 #define FIRST_IN               2
 #define LAST_IN                        1
        __u16                   nhoffset;
-       struct nf_ct_frag6_queue        **pprev;
 };
 
 /* Hash table. */
 
 #define FRAG6Q_HASHSZ  64
 
-static struct nf_ct_frag6_queue *nf_ct_frag6_hash[FRAG6Q_HASHSZ];
+static struct hlist_head nf_ct_frag6_hash[FRAG6Q_HASHSZ];
 static DEFINE_RWLOCK(nf_ct_frag6_lock);
 static u32 nf_ct_frag6_hash_rnd;
 static LIST_HEAD(nf_ct_frag6_lru_list);
@@ -105,9 +104,7 @@ int nf_ct_frag6_nqueues = 0;
 
 static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
 {
-       if (fq->next)
-               fq->next->pprev = fq->pprev;
-       *fq->pprev = fq->next;
+       hlist_del(&fq->list);
        list_del(&fq->lru_list);
        nf_ct_frag6_nqueues--;
 }
@@ -158,28 +155,18 @@ static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
        get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32));
        for (i = 0; i < FRAG6Q_HASHSZ; i++) {
                struct nf_ct_frag6_queue *q;
+               struct hlist_node *p, *n;
 
-               q = nf_ct_frag6_hash[i];
-               while (q) {
-                       struct nf_ct_frag6_queue *next = q->next;
+               hlist_for_each_entry_safe(q, p, n, &nf_ct_frag6_hash[i], list) {
                        unsigned int hval = ip6qhashfn(q->id,
                                                       &q->saddr,
                                                       &q->daddr);
-
                        if (hval != i) {
-                               /* Unlink. */
-                               if (q->next)
-                                       q->next->pprev = q->pprev;
-                               *q->pprev = q->next;
-
+                               hlist_del(&q->list);
                                /* Relink to new hash chain. */
-                               if ((q->next = nf_ct_frag6_hash[hval]) != NULL)
-                                       q->next->pprev = &q->next;
-                               nf_ct_frag6_hash[hval] = q;
-                               q->pprev = &nf_ct_frag6_hash[hval];
+                               hlist_add_head(&q->list,
+                                              &nf_ct_frag6_hash[hval]);
                        }
-
-                       q = next;
                }
        }
        write_unlock(&nf_ct_frag6_lock);
@@ -314,15 +301,17 @@ out:
 
 /* Creation primitives. */
 
-
 static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
                                          struct nf_ct_frag6_queue *fq_in)
 {
        struct nf_ct_frag6_queue *fq;
+#ifdef CONFIG_SMP
+       struct hlist_node *n;
+#endif
 
        write_lock(&nf_ct_frag6_lock);
 #ifdef CONFIG_SMP
-       for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+       hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
                if (fq->id == fq_in->id && 
                    !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
                    !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
@@ -340,10 +329,7 @@ static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
                atomic_inc(&fq->refcnt);
 
        atomic_inc(&fq->refcnt);
-       if ((fq->next = nf_ct_frag6_hash[hash]) != NULL)
-               fq->next->pprev = &fq->next;
-       nf_ct_frag6_hash[hash] = fq;
-       fq->pprev = &nf_ct_frag6_hash[hash];
+       hlist_add_head(&fq->list, &nf_ct_frag6_hash[hash]);
        INIT_LIST_HEAD(&fq->lru_list);
        list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
        nf_ct_frag6_nqueues++;
@@ -384,10 +370,11 @@ static __inline__ struct nf_ct_frag6_queue *
 fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
 {
        struct nf_ct_frag6_queue *fq;
+       struct hlist_node *n;
        unsigned int hash = ip6qhashfn(id, src, dst);
 
        read_lock(&nf_ct_frag6_lock);
-       for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+       hlist_for_each_entry(fq, n, &nf_ct_frag6_hash[hash], list) {
                if (fq->id == id && 
                    !ipv6_addr_cmp(src, &fq->saddr) &&
                    !ipv6_addr_cmp(dst, &fq->daddr)) {
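
The reassembly changes above are a mechanical conversion from a hand-maintained next/**pprev chain to the generic hlist primitives; that is what lets the secret-rebuild loop shrink to hlist_del() plus hlist_add_head() and the lookups become hlist_for_each_entry(). The pattern in isolation looks like this (a reduced sketch, with the fragment queue boiled down to an id field):

#include <linux/types.h>
#include <linux/list.h>

#define FRAG_HASHSZ 64

struct frag_entry {
        struct hlist_node list;         /* replaces ->next and ->pprev */
        u32 id;
};

static struct hlist_head frag_hash[FRAG_HASHSZ];   /* zero-initialised = empty chains */

static void frag_insert(struct frag_entry *e, unsigned int hash)
{
        hlist_add_head(&e->list, &frag_hash[hash]);
}

static struct frag_entry *frag_find(u32 id, unsigned int hash)
{
        struct frag_entry *e;
        struct hlist_node *n;           /* iterator cursor required by this API */

        hlist_for_each_entry(e, n, &frag_hash[hash], list)
                if (e->id == id)
                        return e;
        return NULL;
}

static void frag_unlink(struct frag_entry *e)
{
        hlist_del(&e->list);            /* no pprev fixup to get wrong */
}
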
index bf0d0abc3871b64cccd967f881febcfd0b298396..a5723024d3b372d8df6503cd05e6cfde488fc79e 100644 (file)
@@ -15,6 +15,7 @@
 #include <linux/pfkeyv2.h>
 #include <linux/ipsec.h>
 #include <net/ipv6.h>
+#include <net/addrconf.h>
 
 static struct xfrm_state_afinfo xfrm6_state_afinfo;
 
@@ -41,6 +42,22 @@ __xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
        memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
        if (ipv6_addr_any((struct in6_addr*)&x->props.saddr))
                memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
+       if (tmpl->mode && ipv6_addr_any((struct in6_addr*)&x->props.saddr)) {
+               struct rt6_info *rt;
+               struct flowi fl_tunnel = {
+                       .nl_u = {
+                               .ip6_u = {
+                                       .daddr = *(struct in6_addr *)daddr,
+                               }
+                       }
+               };
+               if (!xfrm_dst_lookup((struct xfrm_dst **)&rt,
+                                    &fl_tunnel, AF_INET6)) {
+                       ipv6_get_saddr(&rt->u.dst, (struct in6_addr *)daddr,
+                                      (struct in6_addr *)&x->props.saddr);
+                       dst_release(&rt->u.dst);
+               }
+       }
        x->props.mode = tmpl->mode;
        x->props.reqid = tmpl->reqid;
        x->props.family = AF_INET6;
index da09ff258648e0be62dbf09e953c4b836edb9c1e..8cfc58b96fc2553cbb946765a70c8c6932ba0cff 100644 (file)
@@ -259,8 +259,7 @@ try_next_2:;
        spi = 0;
        goto out;
 alloc_spi:
-       X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for "
-                             "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", 
+       X6TPRINTK3(KERN_DEBUG "%s(): allocate new spi for " NIP6_FMT "\n",
                              __FUNCTION__, 
                              NIP6(*(struct in6_addr *)saddr));
        x6spi = kmem_cache_alloc(xfrm6_tunnel_spi_kmem, SLAB_ATOMIC);
@@ -323,9 +322,8 @@ void xfrm6_tunnel_free_spi(xfrm_address_t *saddr)
                                  list_byaddr)
        {
                if (memcmp(&x6spi->addr, saddr, sizeof(x6spi->addr)) == 0) {
-                       X6TPRINTK3(KERN_DEBUG "%s(): x6spi object "
-                                             "for %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
-                                             "found at %p\n",
+                       X6TPRINTK3(KERN_DEBUG "%s(): x6spi object for " NIP6_FMT 
+                                             " found at %p\n",
                                   __FUNCTION__, 
                                   NIP6(*(struct in6_addr *)saddr),
                                   x6spi);
index 7d55f9cbd853670688a58613e7bdbb7a0731c9e0..99c0a0fa4a978744258a221321c1d4fa2f6ef39c 100644 (file)
@@ -103,3 +103,261 @@ config NF_CT_NETLINK
          This option enables support for a netlink-based userspace interface
 
 endmenu
+
+config NETFILTER_XTABLES
+       tristate "Netfilter Xtables support (required for ip_tables)"
+       help
+         This is required if you intend to use any of ip_tables,
+         ip6_tables or arp_tables.
+
+# alphabetically ordered list of targets
+
+config NETFILTER_XT_TARGET_CLASSIFY
+       tristate '"CLASSIFY" target support'
+       depends on NETFILTER_XTABLES
+       help
+         This option adds a `CLASSIFY' target, which enables the user to set
+         the priority of a packet. Some qdiscs can use this value for
+         classification, among these are:
+
+         atm, cbq, dsmark, pfifo_fast, htb, prio
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_TARGET_CONNMARK
+       tristate  '"CONNMARK" target support'
+       depends on NETFILTER_XTABLES
+       depends on IP_NF_MANGLE || IP6_NF_MANGLE
+       depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
+       help
+         This option adds a `CONNMARK' target, which allows one to manipulate
+         the connection mark value.  Similar to the MARK target, but
+         affects the connection mark value rather than the packet mark value.
+       
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  The module will be called
+         xt_CONNMARK.o.  If unsure, say `N'.
+
+config NETFILTER_XT_TARGET_MARK
+       tristate '"MARK" target support'
+       depends on NETFILTER_XTABLES
+       help
+         This option adds a `MARK' target, which allows you to create rules
+         in the `mangle' table which alter the netfilter mark (nfmark) field
+         associated with the packet prior to routing. This can change
+         the routing method (see `Use netfilter MARK value as routing
+         key') and can also be used by other subsystems to change their
+         behavior.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_TARGET_NFQUEUE
+       tristate '"NFQUEUE" target Support'
+       depends on NETFILTER_XTABLES
+       help
+         This Target replaced the old obsolete QUEUE target.
+
+         As opposed to QUEUE, it supports 65535 different queues,
+         not just one.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_TARGET_NOTRACK
+       tristate  '"NOTRACK" target support'
+       depends on NETFILTER_XTABLES
+       depends on IP_NF_RAW || IP6_NF_RAW
+       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       help
+         The NOTRACK target allows rules to specify which packets should
+         *not* enter the conntrack/NAT subsystem, with all the
+         consequences (no ICMP error tracking, no protocol helpers for
+         the selected packets).
+       
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_COMMENT
+       tristate  '"comment" match support'
+       depends on NETFILTER_XTABLES
+       help
+         This option adds a `comment' dummy-match, which allows you to put
+         comments in your iptables ruleset.
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNBYTES
+       tristate  '"connbytes" per-connection counter match support'
+       depends on NETFILTER_XTABLES
+       depends on (IP_NF_CONNTRACK && IP_NF_CT_ACCT) || NF_CT_ACCT
+       help
+         This option adds a `connbytes' match, which allows you to match the
+         number of bytes and/or packets for each direction within a connection.
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNMARK
+       tristate  '"connmark" connection mark match support'
+       depends on NETFILTER_XTABLES
+       depends on (IP_NF_CONNTRACK && IP_NF_CONNTRACK_MARK) || NF_CONNTRACK_MARK
+       help
+         This option adds a `connmark' match, which allows you to match the
+         connection mark value previously set for the session by `CONNMARK'. 
+       
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  The module will be called
+         xt_connmark.o.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_CONNTRACK
+       tristate '"conntrack" connection tracking match support'
+       depends on NETFILTER_XTABLES
+       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       help
+         This is a general conntrack match module, a superset of the state match.
+
+         It allows matching on additional conntrack information, which is
+         useful in complex configurations, such as NAT gateways with multiple
+         internet links or tunnels.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_DCCP
+       tristate  '"DCCP" protocol match support'
+       depends on NETFILTER_XTABLES
+       help
+         With this option enabled, you will be able to use the iptables
+         `dccp' match in order to match on DCCP source/destination ports
+         and DCCP flags.
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_HELPER
+       tristate '"helper" match support'
+       depends on NETFILTER_XTABLES
+       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       help
+         Helper matching allows you to match packets in dynamic connections
+         tracked by a conntrack-helper, ie. ip_conntrack_ftp
+
+         To compile it as a module, choose M here.  If unsure, say Y.
+
+config NETFILTER_XT_MATCH_LENGTH
+       tristate '"length" match support'
+       depends on NETFILTER_XTABLES
+       help
+         This option allows you to match the length of a packet against a
+         specific value or range of values.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_LIMIT
+       tristate '"limit" match support'
+       depends on NETFILTER_XTABLES
+       help
+         limit matching allows you to control the rate at which a rule can be
+         matched: mainly useful in combination with the LOG target ("LOG
+         target support", below) and to avoid some Denial of Service attacks.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_MAC
+       tristate '"mac" address match support'
+       depends on NETFILTER_XTABLES
+       help
+         MAC matching allows you to match packets based on the source
+         Ethernet address of the packet.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_MARK
+       tristate '"mark" match support'
+       depends on NETFILTER_XTABLES
+       help
+         Netfilter mark matching allows you to match packets based on the
+         `nfmark' value in the packet.  This can be set by the MARK target
+         (see below).
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_PHYSDEV
+       tristate '"physdev" match support'
+       depends on NETFILTER_XTABLES && BRIDGE_NETFILTER
+       help
+         Physdev packet matching matches against the physical bridge ports
+         the IP packet arrived on or will leave by.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_PKTTYPE
+       tristate '"pkttype" packet type match support'
+       depends on NETFILTER_XTABLES
+       help
+         Packet type matching allows you to match a packet by
+         its "class", eg. BROADCAST, MULTICAST, ...
+
+         Typical usage:
+         iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_REALM
+       tristate  '"realm" match support'
+       depends on NETFILTER_XTABLES
+       select NET_CLS_ROUTE
+       help
+         This option adds a `realm' match, which allows you to use the realm
+         key from the routing subsystem inside iptables.
+       
+         This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option 
+         in tc world.
+       
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_SCTP
+       tristate  '"sctp" protocol match support'
+       depends on NETFILTER_XTABLES
+       help
+         With this option enabled, you will be able to use the 
+         `sctp' match in order to match on SCTP source/destination ports
+         and SCTP chunk types.
+
+         If you want to compile it as a module, say M here and read
+         <file:Documentation/modules.txt>.  If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_STATE
+       tristate '"state" match support'
+       depends on NETFILTER_XTABLES
+       depends on IP_NF_CONNTRACK || NF_CONNTRACK
+       help
+         Connection state matching allows you to match packets based on their
+         relationship to a tracked connection (ie. previous packets).  This
+         is a powerful tool for packet classification.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_STRING
+       tristate  '"string" match support'
+       depends on NETFILTER_XTABLES
+       select TEXTSEARCH
+       select TEXTSEARCH_KMP
+       select TEXTSEARCH_BM
+       select TEXTSEARCH_FSM
+       help
+         This option adds a `string' match, which allows you to search
+         for patterns in packets.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
+config NETFILTER_XT_MATCH_TCPMSS
+       tristate '"tcpmss" match support'
+       depends on NETFILTER_XTABLES
+       help
+         This option adds a `tcpmss' match, which allows you to examine the
+         MSS value of TCP SYN packets, which controls the maximum packet size
+         for that connection.
+
+         To compile it as a module, choose M here.  If unsure, say N.
+
index cb2183145c3723f290ef99833c2511d21ae5232d..746172ebc91bbce7bf26deee69c00e498a1321ee 100644 (file)
@@ -1,4 +1,5 @@
 netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
+nf_conntrack-objs      := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
 
 obj-$(CONFIG_NETFILTER) = netfilter.o
 
@@ -6,13 +7,43 @@ obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
 obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
 obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
 
-nf_conntrack-objs      := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
-
+# connection tracking
 obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
-obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
 
 # SCTP protocol connection tracking
 obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
 
 # netlink interface for nf_conntrack
 obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
+
+# connection tracking helpers
+obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
+
+# generic X tables 
+obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
+
+# targets
+obj-$(CONFIG_NETFILTER_XT_TARGET_CLASSIFY) += xt_CLASSIFY.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_CONNMARK) += xt_CONNMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_MARK) += xt_MARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
+
+# matches
+obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNBYTES) += xt_connbytes.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNMARK) += xt_connmark.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_LENGTH) += xt_length.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_LIMIT) += xt_limit.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_MAC) += xt_mac.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_MARK) += xt_mark.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_SCTP) += xt_sctp.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_STATE) += xt_state.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_STRING) += xt_string.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
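
Taken together, the new Kconfig entries and these Makefile rules collapse what used to be matching pairs of per-protocol modules (ipt_MARK.o/ip6t_MARK.o, ipt_limit.o/ip6t_limit.o, and so on, several of which are deleted earlier in this diff) into one protocol-independent xt_*.o each. Presumably each of those modules just repeats the registration sketched after the ip6_tables hunk above once per family it supports; roughly (names invented, and a second struct is needed because each xt_match carries its own list linkage):

static struct xt_match bar_match = {
        .name = "bar", .me = THIS_MODULE,   /* .match/.checkentry as before */
};
static struct xt_match bar6_match = {
        .name = "bar", .me = THIS_MODULE,
};

static int __init bar_init(void)
{
        int ret;

        ret = xt_register_match(AF_INET, &bar_match);
        if (ret)
                return ret;
        ret = xt_register_match(AF_INET6, &bar6_match);
        if (ret)
                xt_unregister_match(AF_INET, &bar_match);
        return ret;
}

static void __exit bar_exit(void)
{
        xt_unregister_match(AF_INET6, &bar6_match);
        xt_unregister_match(AF_INET, &bar_match);
}

module_init(bar_init);
module_exit(bar_exit);
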
index d5a6eaf4a1defcd39c1343eaff7da7c587a20205..ab0c920f0d30bbf840d731f8c6baddaab7c3f089 100644 (file)
@@ -545,11 +545,11 @@ static int help(struct sk_buff **pskb,
                    different IP address.  Simply don't record it for
                    NAT. */
                if (cmd.l3num == PF_INET) {
-                       DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n",
+                       DEBUGP("conntrack_ftp: NOT RECORDING: " NIPQUAD_FMT " != " NIPQUAD_FMT "\n",
                               NIPQUAD(cmd.u3.ip),
                               NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
                } else {
-                       DEBUGP("conntrack_ftp: NOT RECORDING: %x:%x:%x:%x:%x:%x:%x:%x != %x:%x:%x:%x:%x:%x:%x:%x\n",
+                       DEBUGP("conntrack_ftp: NOT RECORDING: " NIP6_FMT " != " NIP6_FMT "\n",
                               NIP6(*((struct in6_addr *)cmd.u3.ip6)),
                               NIP6(*((struct in6_addr *)ct->tuplehash[dir]
                                                        .tuple.src.u3.ip6)));
index 3531d142f693fe686923b5c1e3e3b119224644b3..617599aeeead1c94aa7de5bda3fbd226ec5da3ae 100644 (file)
@@ -821,7 +821,7 @@ module_exit(fini);
 
 /* Some modules need us, but don't depend directly on any symbol.
    They should call this. */
-void need_nf_conntrack(void)
+void need_conntrack(void)
 {
 }
 
@@ -841,7 +841,7 @@ EXPORT_SYMBOL(nf_conntrack_protocol_unregister);
 EXPORT_SYMBOL(nf_ct_invert_tuplepr);
 EXPORT_SYMBOL(nf_conntrack_alter_reply);
 EXPORT_SYMBOL(nf_conntrack_destroyed);
-EXPORT_SYMBOL(need_nf_conntrack);
+EXPORT_SYMBOL(need_conntrack);
 EXPORT_SYMBOL(nf_conntrack_helper_register);
 EXPORT_SYMBOL(nf_conntrack_helper_unregister);
 EXPORT_SYMBOL(nf_ct_iterate_cleanup);
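
The rename above (need_nf_conntrack() becoming need_conntrack(), with the empty need_ip6_conntrack() dropped from the IPv6 side earlier in the diff) leaves a single dependency hook. As the comment in the hunk explains, it exists purely so that a module with no direct symbol dependency can still force nf_conntrack to be loaded and held; using it amounts to nothing more than the following (declaration location assumed, module name invented):

#include <linux/module.h>
#include <net/netfilter/nf_conntrack.h>   /* assumed to declare need_conntrack() */

static int __init baz_init(void)
{
        /* Referencing the (empty) exported function is enough to create a
         * symbol dependency on nf_conntrack, so it gets loaded and pinned. */
        need_conntrack();
        return 0;
}

static void __exit baz_exit(void)
{
}

module_init(baz_init);
module_exit(baz_exit);
MODULE_LICENSE("GPL");
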
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
new file mode 100644 (file)
index 0000000..d7817af
--- /dev/null
@@ -0,0 +1,624 @@
+/*
+ * x_tables core - Backend for {ip,ip6,arp}_tables
+ *
+ * Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
+ *
+ * Based on existing ip_tables code which is
+ *   Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
+ *   Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/socket.h>
+#include <linux/net.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_arp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
+
+#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
+
+struct xt_af {
+       struct semaphore mutex;
+       struct list_head match;
+       struct list_head target;
+       struct list_head tables;
+};
+
+static struct xt_af *xt;
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+enum {
+       TABLE,
+       TARGET,
+       MATCH,
+};
+
+/* Registration hooks for targets. */
+int
+xt_register_target(int af, struct xt_target *target)
+{
+       int ret;
+
+       ret = down_interruptible(&xt[af].mutex);
+       if (ret != 0)
+               return ret;
+       list_add(&target->list, &xt[af].target);
+       up(&xt[af].mutex);
+       return ret;
+}
+EXPORT_SYMBOL(xt_register_target);
+
+void
+xt_unregister_target(int af, struct xt_target *target)
+{
+       down(&xt[af].mutex);
+       LIST_DELETE(&xt[af].target, target);
+       up(&xt[af].mutex);
+}
+EXPORT_SYMBOL(xt_unregister_target);
+
+int
+xt_register_match(int af, struct xt_match *match)
+{
+       int ret;
+
+       ret = down_interruptible(&xt[af].mutex);
+       if (ret != 0)
+               return ret;
+
+       list_add(&match->list, &xt[af].match);
+       up(&xt[af].mutex);
+
+       return ret;
+}
+EXPORT_SYMBOL(xt_register_match);
+
+void
+xt_unregister_match(int af, struct xt_match *match)
+{
+       down(&xt[af].mutex);
+       LIST_DELETE(&xt[af].match, match);
+       up(&xt[af].mutex);
+}
+EXPORT_SYMBOL(xt_unregister_match);
+
+
+/*
+ * These are weird, but module loading must not be done with mutex
+ * held (since they will register), and we have to have a single
+ * function to use try_then_request_module().
+ */
+
+/* Find match, grabs ref.  Returns ERR_PTR() on error. */
+struct xt_match *xt_find_match(int af, const char *name, u8 revision)
+{
+       struct xt_match *m;
+       int err = 0;
+
+       if (down_interruptible(&xt[af].mutex) != 0)
+               return ERR_PTR(-EINTR);
+
+       list_for_each_entry(m, &xt[af].match, list) {
+               if (strcmp(m->name, name) == 0) {
+                       if (m->revision == revision) {
+                               if (try_module_get(m->me)) {
+                                       up(&xt[af].mutex);
+                                       return m;
+                               }
+                       } else
+                               err = -EPROTOTYPE; /* Found something. */
+               }
+       }
+       up(&xt[af].mutex);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(xt_find_match);
+
+/* Find target, grabs ref.  Returns ERR_PTR() on error. */
+struct xt_target *xt_find_target(int af, const char *name, u8 revision)
+{
+       struct xt_target *t;
+       int err = 0;
+
+       if (down_interruptible(&xt[af].mutex) != 0)
+               return ERR_PTR(-EINTR);
+
+       list_for_each_entry(t, &xt[af].target, list) {
+               if (strcmp(t->name, name) == 0) {
+                       if (t->revision == revision) {
+                               if (try_module_get(t->me)) {
+                                       up(&xt[af].mutex);
+                                       return t;
+                               }
+                       } else
+                               err = -EPROTOTYPE; /* Found something. */
+               }
+       }
+       up(&xt[af].mutex);
+       return ERR_PTR(err);
+}
+EXPORT_SYMBOL(xt_find_target);
+
+static const char *xt_prefix[NPROTO] = {
+       [AF_INET]       = "ipt_%s",
+       [AF_INET6]      = "ip6t_%s",
+       [NF_ARP]        = "arpt_%s",
+};
+
+struct xt_target *xt_request_find_target(int af, const char *name, u8 revision)
+{
+       struct xt_target *target;
+
+       target = try_then_request_module(xt_find_target(af, name, revision),
+                                        xt_prefix[af], name);
+       if (IS_ERR(target) || !target)
+               return NULL;
+       return target;
+}
+EXPORT_SYMBOL_GPL(xt_request_find_target);
+
+static int match_revfn(int af, const char *name, u8 revision, int *bestp)
+{
+       struct xt_match *m;
+       int have_rev = 0;
+
+       list_for_each_entry(m, &xt[af].match, list) {
+               if (strcmp(m->name, name) == 0) {
+                       if (m->revision > *bestp)
+                               *bestp = m->revision;
+                       if (m->revision == revision)
+                               have_rev = 1;
+               }
+       }
+       return have_rev;
+}
+
+static int target_revfn(int af, const char *name, u8 revision, int *bestp)
+{
+       struct xt_target *t;
+       int have_rev = 0;
+
+       list_for_each_entry(t, &xt[af].target, list) {
+               if (strcmp(t->name, name) == 0) {
+                       if (t->revision > *bestp)
+                               *bestp = t->revision;
+                       if (t->revision == revision)
+                               have_rev = 1;
+               }
+       }
+       return have_rev;
+}
+
+/* Returns true or false (if no such extension at all) */
+int xt_find_revision(int af, const char *name, u8 revision, int target,
+                    int *err)
+{
+       int have_rev, best = -1;
+
+       if (down_interruptible(&xt[af].mutex) != 0) {
+               *err = -EINTR;
+               return 1;
+       }
+       if (target == 1)
+               have_rev = target_revfn(af, name, revision, &best);
+       else
+               have_rev = match_revfn(af, name, revision, &best);
+       up(&xt[af].mutex);
+
+       /* Nothing at all?  Return 0 to try loading module. */
+       if (best == -1) {
+               *err = -ENOENT;
+               return 0;
+       }
+
+       *err = best;
+       if (!have_rev)
+               *err = -EPROTONOSUPPORT;
+       return 1;
+}
+EXPORT_SYMBOL_GPL(xt_find_revision);
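A sketch of the calling convention of xt_find_revision() (illustrative only; the wrapper below is hypothetical and not part of the patch):

/* Sketch only: ask whether revision 'rev' of an IPv4 target 'name'
 * exists.  A zero return means nothing is registered under that name
 * and the caller may try loading a module before asking again; a
 * non-zero return means *err already holds the answer. */
static int example_query_target_revision(const char *name, u8 rev)
{
        int err;

        if (!xt_find_revision(AF_INET, name, rev, 1, &err)) {
                /* nothing at all: request_module() and retry, or give up */
                return err;     /* -ENOENT */
        }
        return err;             /* best revision, -EPROTONOSUPPORT, or -EINTR */
}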
+
+struct xt_table_info *xt_alloc_table_info(unsigned int size)
+{
+       struct xt_table_info *newinfo;
+       int cpu;
+
+       /* Pedantry: prevent them from hitting BUG() in vmalloc.c --RR */
+       if ((SMP_ALIGN(size) >> PAGE_SHIFT) + 2 > num_physpages)
+               return NULL;
+
+       newinfo = kzalloc(sizeof(struct xt_table_info), GFP_KERNEL);
+       if (!newinfo)
+               return NULL;
+
+       newinfo->size = size;
+
+       for_each_cpu(cpu) {
+               if (size <= PAGE_SIZE)
+                       newinfo->entries[cpu] = kmalloc_node(size,
+                                                       GFP_KERNEL,
+                                                       cpu_to_node(cpu));
+               else
+                       newinfo->entries[cpu] = vmalloc_node(size,
+                                                       cpu_to_node(cpu));
+
+               if (newinfo->entries[cpu] == NULL) {
+                       xt_free_table_info(newinfo);
+                       return NULL;
+               }
+       }
+
+       return newinfo;
+}
+EXPORT_SYMBOL(xt_alloc_table_info);
+
+void xt_free_table_info(struct xt_table_info *info)
+{
+       int cpu;
+
+       for_each_cpu(cpu) {
+               if (info->size <= PAGE_SIZE)
+                       kfree(info->entries[cpu]);
+               else
+                       vfree(info->entries[cpu]);
+       }
+       kfree(info);
+}
+EXPORT_SYMBOL(xt_free_table_info);
+
+/* Find table by name, grabs mutex & ref.  Returns ERR_PTR() on error. */
+struct xt_table *xt_find_table_lock(int af, const char *name)
+{
+       struct xt_table *t;
+
+       if (down_interruptible(&xt[af].mutex) != 0)
+               return ERR_PTR(-EINTR);
+
+       list_for_each_entry(t, &xt[af].tables, list)
+               if (strcmp(t->name, name) == 0 && try_module_get(t->me))
+                       return t;
+       up(&xt[af].mutex);
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(xt_find_table_lock);
+
+void xt_table_unlock(struct xt_table *table)
+{
+       up(&xt[table->af].mutex);
+}
+EXPORT_SYMBOL_GPL(xt_table_unlock);
+
+
+struct xt_table_info *
+xt_replace_table(struct xt_table *table,
+             unsigned int num_counters,
+             struct xt_table_info *newinfo,
+             int *error)
+{
+       struct xt_table_info *oldinfo, *private;
+
+       /* Do the substitution. */
+       write_lock_bh(&table->lock);
+       private = table->private;
+       /* Check inside lock: is the old number correct? */
+       if (num_counters != private->number) {
+               duprintf("num_counters != table->private->number (%u/%u)\n",
+                        num_counters, private->number);
+               write_unlock_bh(&table->lock);
+               *error = -EAGAIN;
+               return NULL;
+       }
+       oldinfo = private;
+       table->private = newinfo;
+       newinfo->initial_entries = oldinfo->initial_entries;
+       write_unlock_bh(&table->lock);
+
+       return oldinfo;
+}
+EXPORT_SYMBOL_GPL(xt_replace_table);
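Taken together, the allocation, lookup and replace helpers above are meant to be chained. The following is an editorial sketch of that flow (not part of the patch; the function name is invented, and the entry copying and counter collection of a real ruleset update are omitted):

/* Sketch only: swap in a new rule blob of 'size' bytes for the IPv4
 * table 'name', then free whatever was installed before. */
static int example_replace_table(const char *name, unsigned int size,
                                 unsigned int old_num_entries)
{
        struct xt_table_info *newinfo, *oldinfo;
        struct xt_table *t;
        int ret = 0;

        newinfo = xt_alloc_table_info(size);
        if (!newinfo)
                return -ENOMEM;
        /* ... fill newinfo->entries[] and newinfo->number here ... */

        t = xt_find_table_lock(AF_INET, name);
        if (!t || IS_ERR(t)) {
                xt_free_table_info(newinfo);
                return t ? PTR_ERR(t) : -ENOENT;
        }

        oldinfo = xt_replace_table(t, old_num_entries, newinfo, &ret);
        xt_table_unlock(t);
        module_put(t->me);

        if (!oldinfo) {
                xt_free_table_info(newinfo);
                return ret;     /* typically -EAGAIN */
        }
        xt_free_table_info(oldinfo);
        return 0;
}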
+
+int xt_register_table(struct xt_table *table,
+                     struct xt_table_info *bootstrap,
+                     struct xt_table_info *newinfo)
+{
+       int ret;
+       struct xt_table_info *private;
+
+       ret = down_interruptible(&xt[table->af].mutex);
+       if (ret != 0)
+               return ret;
+
+       /* Don't autoload: we'd eat our tail... */
+       if (list_named_find(&xt[table->af].tables, table->name)) {
+               ret = -EEXIST;
+               goto unlock;
+       }
+
+       /* Simplifies replace_table code. */
+       table->private = bootstrap;
+       if (!xt_replace_table(table, 0, newinfo, &ret))
+               goto unlock;
+
+       private = table->private;
+       duprintf("table->private->number = %u\n", private->number);
+
+       /* save number of initial entries */
+       private->initial_entries = private->number;
+
+       rwlock_init(&table->lock);
+       list_prepend(&xt[table->af].tables, table);
+
+       ret = 0;
+ unlock:
+       up(&xt[table->af].mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(xt_register_table);
+
+void *xt_unregister_table(struct xt_table *table)
+{
+       struct xt_table_info *private;
+
+       down(&xt[table->af].mutex);
+       private = table->private;
+       LIST_DELETE(&xt[table->af].tables, table);
+       up(&xt[table->af].mutex);
+
+       return private;
+}
+EXPORT_SYMBOL_GPL(xt_unregister_table);
+
+#ifdef CONFIG_PROC_FS
+static char *xt_proto_prefix[NPROTO] = {
+       [AF_INET]       = "ip",
+       [AF_INET6]      = "ip6",
+       [NF_ARP]        = "arp",
+};
+
+static struct list_head *xt_get_idx(struct list_head *list, struct seq_file *seq, loff_t pos)
+{
+       struct list_head *head = list->next;
+
+       if (!head || list_empty(list))
+               return NULL;
+
+       while (pos && (head = head->next)) {
+               if (head == list)
+                       return NULL;
+               pos--;
+       }
+       return pos ? NULL : head;
+}
+
+static struct list_head *type2list(u_int16_t af, u_int16_t type)
+{
+       struct list_head *list;
+
+       switch (type) {
+       case TARGET:
+               list = &xt[af].target;
+               break;
+       case MATCH:
+               list = &xt[af].match;
+               break;
+       case TABLE:
+               list = &xt[af].tables;
+               break;
+       default:
+               list = NULL;
+               break;
+       }
+
+       return list;
+}
+
+static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
+{
+       struct proc_dir_entry *pde = (struct proc_dir_entry *) seq->private;
+       u_int16_t af = (unsigned long)pde->data & 0xffff;
+       u_int16_t type = (unsigned long)pde->data >> 16;
+       struct list_head *list;
+
+       if (af >= NPROTO)
+               return NULL;
+
+       list = type2list(af, type);
+       if (!list)
+               return NULL;
+
+       if (down_interruptible(&xt[af].mutex) != 0)
+               return NULL;
+       
+       return xt_get_idx(list, seq, *pos);
+}
+
+static void *xt_tgt_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+       struct proc_dir_entry *pde = seq->private;
+       u_int16_t af = (unsigned long)pde->data & 0xffff;
+       u_int16_t type = (unsigned long)pde->data >> 16;
+       struct list_head *list;
+
+       if (af >= NPROTO)
+               return NULL;
+       
+       list = type2list(af, type);
+       if (!list)
+               return NULL;
+
+       (*pos)++;
+       return xt_get_idx(list, seq, *pos);
+}
+
+static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
+{
+       struct proc_dir_entry *pde = seq->private;
+       u_int16_t af = (unsigned long)pde->data & 0xffff;
+
+       up(&xt[af].mutex);
+}
+
+static int xt_name_seq_show(struct seq_file *seq, void *v)
+{
+       char *name = (char *)v + sizeof(struct list_head);
+
+       if (strlen(name))
+               return seq_printf(seq, "%s\n", name);
+       else
+               return 0;
+}
+
+static struct seq_operations xt_tgt_seq_ops = {
+       .start  = xt_tgt_seq_start,
+       .next   = xt_tgt_seq_next,
+       .stop   = xt_tgt_seq_stop,
+       .show   = xt_name_seq_show,
+};
+
+static int xt_tgt_open(struct inode *inode, struct file *file)
+{
+       int ret;
+
+       ret = seq_open(file, &xt_tgt_seq_ops);
+       if (!ret) {
+               struct seq_file *seq = file->private_data;
+               struct proc_dir_entry *pde = PDE(inode);
+
+               seq->private = pde;
+       }
+
+       return ret;
+}
+
+static struct file_operations xt_file_ops = {
+       .owner   = THIS_MODULE,
+       .open    = xt_tgt_open,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .release = seq_release,
+};
+
+#define FORMAT_TABLES  "_tables_names"
+#define        FORMAT_MATCHES  "_tables_matches"
+#define FORMAT_TARGETS         "_tables_targets"
+
+#endif /* CONFIG_PROC_FS */
+
+int xt_proto_init(int af)
+{
+#ifdef CONFIG_PROC_FS
+       char buf[XT_FUNCTION_MAXNAMELEN];
+       struct proc_dir_entry *proc;
+#endif
+
+       if (af >= NPROTO)
+               return -EINVAL;
+
+
+#ifdef CONFIG_PROC_FS
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_TABLES, sizeof(buf));
+       proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+       if (!proc)
+               goto out;
+       proc->data = (void *) ((unsigned long) af | (TABLE << 16));
+
+
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+       proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+       if (!proc)
+               goto out_remove_tables;
+       proc->data = (void *) ((unsigned long) af | (MATCH << 16));
+
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_TARGETS, sizeof(buf));
+       proc = proc_net_fops_create(buf, 0440, &xt_file_ops);
+       if (!proc)
+               goto out_remove_matches;
+       proc->data = (void *) ((unsigned long) af | (TARGET << 16));
+#endif
+
+       return 0;
+
+#ifdef CONFIG_PROC_FS
+out_remove_matches:
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+       proc_net_remove(buf);
+
+out_remove_tables:
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_TABLES, sizeof(buf));
+       proc_net_remove(buf);
+out:
+       return -1;
+#endif
+}
+EXPORT_SYMBOL_GPL(xt_proto_init);
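In other words, with the prefixes defined above, xt_proto_init(AF_INET) creates /proc/net/ip_tables_names, /proc/net/ip_tables_matches and /proc/net/ip_tables_targets (and analogously ip6_* and arp_* entries for AF_INET6 and NF_ARP), each listing the registered names through the seq_file handlers shown earlier.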
+
+void xt_proto_fini(int af)
+{
+#ifdef CONFIG_PROC_FS
+       char buf[XT_FUNCTION_MAXNAMELEN];
+
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_TABLES, sizeof(buf));
+       proc_net_remove(buf);
+
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_TARGETS, sizeof(buf));
+       proc_net_remove(buf);
+
+       strlcpy(buf, xt_proto_prefix[af], sizeof(buf));
+       strlcat(buf, FORMAT_MATCHES, sizeof(buf));
+       proc_net_remove(buf);
+#endif /*CONFIG_PROC_FS*/
+}
+EXPORT_SYMBOL_GPL(xt_proto_fini);
+
+
+static int __init xt_init(void)
+{
+       int i;
+
+       xt = kmalloc(sizeof(struct xt_af) * NPROTO, GFP_KERNEL);
+       if (!xt)
+               return -ENOMEM;
+
+       for (i = 0; i < NPROTO; i++) {
+               init_MUTEX(&xt[i].mutex);
+               INIT_LIST_HEAD(&xt[i].target);
+               INIT_LIST_HEAD(&xt[i].match);
+               INIT_LIST_HEAD(&xt[i].tables);
+       }
+       return 0;
+}
+
+static void __exit xt_fini(void)
+{
+       kfree(xt);
+}
+
+module_init(xt_init);
+module_exit(xt_fini);
+
diff --git a/net/netfilter/xt_CLASSIFY.c b/net/netfilter/xt_CLASSIFY.c
new file mode 100644 (file)
index 0000000..78ee266
--- /dev/null
@@ -0,0 +1,109 @@
+/*
+ * This is a module which is used for setting the skb->priority field
+ * of an skb for qdisc classification.
+ */
+
+/* (C) 2001-2002 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CLASSIFY.h>
+
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("iptables qdisc classification target module");
+MODULE_ALIAS("ipt_CLASSIFY");
+
+static unsigned int
+target(struct sk_buff **pskb,
+       const struct net_device *in,
+       const struct net_device *out,
+       unsigned int hooknum,
+       const void *targinfo,
+       void *userinfo)
+{
+       const struct xt_classify_target_info *clinfo = targinfo;
+
+       if ((*pskb)->priority != clinfo->priority)
+               (*pskb)->priority = clinfo->priority;
+
+       return XT_CONTINUE;
+}
+
+static int
+checkentry(const char *tablename,
+           const void *e,
+           void *targinfo,
+           unsigned int targinfosize,
+           unsigned int hook_mask)
+{
+       if (targinfosize != XT_ALIGN(sizeof(struct xt_classify_target_info))){
+               printk(KERN_ERR "CLASSIFY: invalid size (%u != %Zu).\n",
+                      targinfosize,
+                      XT_ALIGN(sizeof(struct xt_classify_target_info)));
+               return 0;
+       }
+       
+       if (hook_mask & ~((1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_FORWARD) |
+                         (1 << NF_IP_POST_ROUTING))) {
+               printk(KERN_ERR "CLASSIFY: only valid in LOCAL_OUT, FORWARD "
+                               "and POST_ROUTING.\n");
+               return 0;
+       }
+
+       if (strcmp(tablename, "mangle") != 0) {
+               printk(KERN_ERR "CLASSIFY: can only be called from "
+                               "\"mangle\" table, not \"%s\".\n",
+                               tablename);
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_target classify_reg = { 
+       .name           = "CLASSIFY", 
+       .target         = target,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_target classify6_reg = { 
+       .name           = "CLASSIFY", 
+       .target         = target,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE,
+};
+
+
+static int __init init(void)
+{
+       int ret;
+
+       ret = xt_register_target(AF_INET, &classify_reg);
+       if (ret)
+               return ret;
+
+       ret = xt_register_target(AF_INET6, &classify6_reg);
+       if (ret)
+               xt_unregister_target(AF_INET, &classify_reg);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_target(AF_INET, &classify_reg);
+       xt_unregister_target(AF_INET6, &classify6_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c
new file mode 100644 (file)
index 0000000..22506e3
--- /dev/null
@@ -0,0 +1,141 @@
+/* This kernel module is used to modify the connection mark values, or
+ * to optionally restore the skb nfmark from the connection mark
+ *
+ * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+
+MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
+MODULE_DESCRIPTION("IP tables CONNMARK matching module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_CONNMARK");
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_CONNMARK.h>
+#include <net/netfilter/nf_conntrack_compat.h>
+
+static unsigned int
+target(struct sk_buff **pskb,
+       const struct net_device *in,
+       const struct net_device *out,
+       unsigned int hooknum,
+       const void *targinfo,
+       void *userinfo)
+{
+       const struct xt_connmark_target_info *markinfo = targinfo;
+       u_int32_t diff;
+       u_int32_t nfmark;
+       u_int32_t newmark;
+       u_int32_t ctinfo;
+       u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
+
+       if (ctmark) {
+           switch(markinfo->mode) {
+           case XT_CONNMARK_SET:
+               newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
+               if (newmark != *ctmark)
+                   *ctmark = newmark;
+               break;
+           case XT_CONNMARK_SAVE:
+               newmark = (*ctmark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
+               if (*ctmark != newmark)
+                   *ctmark = newmark;
+               break;
+           case XT_CONNMARK_RESTORE:
+               nfmark = (*pskb)->nfmark;
+               diff = (*ctmark ^ nfmark) & markinfo->mask;
+               if (diff != 0)
+                   (*pskb)->nfmark = nfmark ^ diff;
+               break;
+           }
+       }
+
+       return XT_CONTINUE;
+}
+
+static int
+checkentry(const char *tablename,
+          const void *entry,
+          void *targinfo,
+          unsigned int targinfosize,
+          unsigned int hook_mask)
+{
+       struct xt_connmark_target_info *matchinfo = targinfo;
+       if (targinfosize != XT_ALIGN(sizeof(struct xt_connmark_target_info))) {
+               printk(KERN_WARNING "CONNMARK: targinfosize %u != %Zu\n",
+                      targinfosize,
+                      XT_ALIGN(sizeof(struct xt_connmark_target_info)));
+               return 0;
+       }
+
+       if (matchinfo->mode == XT_CONNMARK_RESTORE) {
+           if (strcmp(tablename, "mangle") != 0) {
+                   printk(KERN_WARNING "CONNMARK: restore can only be called from \"mangle\" table, not \"%s\"\n", tablename);
+                   return 0;
+           }
+       }
+
+       if (matchinfo->mark > 0xffffffff || matchinfo->mask > 0xffffffff) {
+               printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_target connmark_reg = {
+       .name = "CONNMARK",
+       .target = &target,
+       .checkentry = &checkentry,
+       .me = THIS_MODULE
+};
+static struct xt_target connmark6_reg = {
+       .name = "CONNMARK",
+       .target = &target,
+       .checkentry = &checkentry,
+       .me = THIS_MODULE
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       need_conntrack();
+
+       ret = xt_register_target(AF_INET, &connmark_reg);
+       if (ret)
+               return ret;
+
+       ret = xt_register_target(AF_INET6, &connmark6_reg);
+       if (ret)
+               xt_unregister_target(AF_INET, &connmark_reg);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_target(AF_INET, &connmark_reg);
+       xt_unregister_target(AF_INET6, &connmark6_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_MARK.c b/net/netfilter/xt_MARK.c
new file mode 100644 (file)
index 0000000..0c11ee9
--- /dev/null
@@ -0,0 +1,191 @@
+/* This is a module which is used for setting the NFMARK field of an skb. */
+
+/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/checksum.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_MARK.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("ip[6]tables MARK modification module");
+MODULE_ALIAS("ipt_MARK");
+MODULE_ALIAS("ip6t_MARK");
+
+static unsigned int
+target_v0(struct sk_buff **pskb,
+         const struct net_device *in,
+         const struct net_device *out,
+         unsigned int hooknum,
+         const void *targinfo,
+         void *userinfo)
+{
+       const struct xt_mark_target_info *markinfo = targinfo;
+
+       if((*pskb)->nfmark != markinfo->mark)
+               (*pskb)->nfmark = markinfo->mark;
+
+       return XT_CONTINUE;
+}
+
+static unsigned int
+target_v1(struct sk_buff **pskb,
+         const struct net_device *in,
+         const struct net_device *out,
+         unsigned int hooknum,
+         const void *targinfo,
+         void *userinfo)
+{
+       const struct xt_mark_target_info_v1 *markinfo = targinfo;
+       int mark = 0;
+
+       switch (markinfo->mode) {
+       case XT_MARK_SET:
+               mark = markinfo->mark;
+               break;
+               
+       case XT_MARK_AND:
+               mark = (*pskb)->nfmark & markinfo->mark;
+               break;
+               
+       case XT_MARK_OR:
+               mark = (*pskb)->nfmark | markinfo->mark;
+               break;
+       }
+
+       if((*pskb)->nfmark != mark)
+               (*pskb)->nfmark = mark;
+
+       return XT_CONTINUE;
+}
+
+
+static int
+checkentry_v0(const char *tablename,
+             const void *entry,
+             void *targinfo,
+             unsigned int targinfosize,
+             unsigned int hook_mask)
+{
+       struct xt_mark_target_info *markinfo = targinfo;
+
+       if (targinfosize != XT_ALIGN(sizeof(struct xt_mark_target_info))) {
+               printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
+                      targinfosize,
+                      XT_ALIGN(sizeof(struct xt_mark_target_info)));
+               return 0;
+       }
+
+       if (strcmp(tablename, "mangle") != 0) {
+               printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
+               return 0;
+       }
+
+       if (markinfo->mark > 0xffffffff) {
+               printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int
+checkentry_v1(const char *tablename,
+             const void *entry,
+             void *targinfo,
+             unsigned int targinfosize,
+             unsigned int hook_mask)
+{
+       struct xt_mark_target_info_v1 *markinfo = targinfo;
+
+       if (targinfosize != XT_ALIGN(sizeof(struct xt_mark_target_info_v1))){
+               printk(KERN_WARNING "MARK: targinfosize %u != %Zu\n",
+                      targinfosize,
+                      XT_ALIGN(sizeof(struct xt_mark_target_info_v1)));
+               return 0;
+       }
+
+       if (strcmp(tablename, "mangle") != 0) {
+               printk(KERN_WARNING "MARK: can only be called from \"mangle\" table, not \"%s\"\n", tablename);
+               return 0;
+       }
+
+       if (markinfo->mode != XT_MARK_SET
+           && markinfo->mode != XT_MARK_AND
+           && markinfo->mode != XT_MARK_OR) {
+               printk(KERN_WARNING "MARK: unknown mode %u\n",
+                      markinfo->mode);
+               return 0;
+       }
+
+       if (markinfo->mark > 0xffffffff) {
+               printk(KERN_WARNING "MARK: Only supports 32bit wide mark\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_target ipt_mark_reg_v0 = {
+       .name           = "MARK",
+       .target         = target_v0,
+       .checkentry     = checkentry_v0,
+       .me             = THIS_MODULE,
+       .revision       = 0,
+};
+
+static struct xt_target ipt_mark_reg_v1 = {
+       .name           = "MARK",
+       .target         = target_v1,
+       .checkentry     = checkentry_v1,
+       .me             = THIS_MODULE,
+       .revision       = 1,
+};
+
+static struct xt_target ip6t_mark_reg_v0 = {
+       .name           = "MARK",
+       .target         = target_v0,
+       .checkentry     = checkentry_v0,
+       .me             = THIS_MODULE,
+       .revision       = 0,
+};
+
+static int __init init(void)
+{
+       int err;
+
+       err = xt_register_target(AF_INET, &ipt_mark_reg_v0);
+       if (err)
+               return err;
+
+       err = xt_register_target(AF_INET, &ipt_mark_reg_v1);
+       if (err)
+               xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+
+       err = xt_register_target(AF_INET6, &ip6t_mark_reg_v0);
+       if (err) {
+               xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+               xt_unregister_target(AF_INET, &ipt_mark_reg_v1);
+       }
+
+       return err;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_target(AF_INET, &ipt_mark_reg_v0);
+       xt_unregister_target(AF_INET, &ipt_mark_reg_v1);
+       xt_unregister_target(AF_INET6, &ip6t_mark_reg_v0);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_NFQUEUE.c b/net/netfilter/xt_NFQUEUE.c
new file mode 100644 (file)
index 0000000..8b76b6f
--- /dev/null
@@ -0,0 +1,107 @@
+/* iptables module for using new netfilter netlink queue
+ *
+ * (C) 2005 by Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as 
+ * published by the Free Software Foundation.
+ * 
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_arp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_NFQUEUE.h>
+
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("[ip,ip6,arp]_tables NFQUEUE target");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_NFQUEUE");
+MODULE_ALIAS("ip6t_NFQUEUE");
+MODULE_ALIAS("arpt_NFQUEUE");
+
+static unsigned int
+target(struct sk_buff **pskb,
+       const struct net_device *in,
+       const struct net_device *out,
+       unsigned int hooknum,
+       const void *targinfo,
+       void *userinfo)
+{
+       const struct xt_NFQ_info *tinfo = targinfo;
+
+       return NF_QUEUE_NR(tinfo->queuenum);
+}
+
+static int
+checkentry(const char *tablename,
+          const void *entry,
+           void *targinfo,
+           unsigned int targinfosize,
+           unsigned int hook_mask)
+{
+       if (targinfosize != XT_ALIGN(sizeof(struct xt_NFQ_info))) {
+               printk(KERN_WARNING "NFQUEUE: targinfosize %u != %Zu\n",
+                      targinfosize,
+                      XT_ALIGN(sizeof(struct xt_NFQ_info)));
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_target ipt_NFQ_reg = {
+       .name           = "NFQUEUE",
+       .target         = target,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_target ip6t_NFQ_reg = {
+       .name           = "NFQUEUE",
+       .target         = target,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_target arpt_NFQ_reg = {
+       .name           = "NFQUEUE",
+       .target         = target,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_target(AF_INET, &ipt_NFQ_reg);
+       if (ret)
+               return ret;
+       ret = xt_register_target(AF_INET6, &ip6t_NFQ_reg);
+       if (ret)
+               goto out_ip;
+       ret = xt_register_target(NF_ARP, &arpt_NFQ_reg);
+       if (ret)
+               goto out_ip6;
+
+       return ret;
+out_ip6:
+       xt_unregister_target(AF_INET6, &ip6t_NFQ_reg);
+out_ip:
+       xt_unregister_target(AF_INET, &ipt_NFQ_reg);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_target(NF_ARP, &arpt_NFQ_reg);
+       xt_unregister_target(AF_INET6, &ip6t_NFQ_reg);
+       xt_unregister_target(AF_INET, &ipt_NFQ_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_NOTRACK.c b/net/netfilter/xt_NOTRACK.c
new file mode 100644 (file)
index 0000000..24d477a
--- /dev/null
@@ -0,0 +1,92 @@
+/* This is a module which is used for setting up fake conntracks
+ * on packets so that they are not seen by the conntrack/NAT code.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <net/netfilter/nf_conntrack_compat.h>
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_NOTRACK");
+
+static unsigned int
+target(struct sk_buff **pskb,
+       const struct net_device *in,
+       const struct net_device *out,
+       unsigned int hooknum,
+       const void *targinfo,
+       void *userinfo)
+{
+       /* Previously seen (loopback)? Ignore. */
+       if ((*pskb)->nfct != NULL)
+               return XT_CONTINUE;
+
+       /* Attach fake conntrack entry. 
+          If there is a real ct entry corresponding to this packet,
+          it'll hang around until it times out. We don't deal with it
+          for performance reasons. JK */
+       nf_ct_untrack(*pskb);
+       (*pskb)->nfctinfo = IP_CT_NEW;
+       nf_conntrack_get((*pskb)->nfct);
+
+       return XT_CONTINUE;
+}
+
+static int
+checkentry(const char *tablename,
+          const void *entry,
+           void *targinfo,
+           unsigned int targinfosize,
+           unsigned int hook_mask)
+{
+       if (targinfosize != 0) {
+               printk(KERN_WARNING "NOTRACK: targinfosize %u != 0\n",
+                      targinfosize);
+               return 0;
+       }
+
+       if (strcmp(tablename, "raw") != 0) {
+               printk(KERN_WARNING "NOTRACK: can only be called from \"raw\" table, not \"%s\"\n", tablename);
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_target notrack_reg = { 
+       .name = "NOTRACK", 
+       .target = target, 
+       .checkentry = checkentry,
+       .me = THIS_MODULE,
+};
+static struct xt_target notrack6_reg = { 
+       .name = "NOTRACK", 
+       .target = target, 
+       .checkentry = checkentry,
+       .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       ret = xt_register_target(AF_INET, &notrack_reg);
+       if (ret)
+               return ret;
+
+       ret = xt_register_target(AF_INET6, &notrack6_reg);
+       if (ret)
+               xt_unregister_target(AF_INET, &notrack_reg);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_target(AF_INET6, &notrack6_reg);
+       xt_unregister_target(AF_INET, &notrack_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_comment.c b/net/netfilter/xt_comment.c
new file mode 100644 (file)
index 0000000..4ba6fd6
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Implements a dummy match to allow attaching comments to rules
+ *
+ * 2003-05-13 Brad Fisher (brad@info-link.net)
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_comment.h>
+
+MODULE_AUTHOR("Brad Fisher <brad@info-link.net>");
+MODULE_DESCRIPTION("iptables comment match module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_comment");
+MODULE_ALIAS("ip6t_comment");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protooff,
+      int *hotdrop)
+{
+       /* We always match */
+       return 1;
+}
+
+static int
+checkentry(const char *tablename,
+           const void *ip,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+       /* Check the size */
+       if (matchsize != XT_ALIGN(sizeof(struct xt_comment_info)))
+               return 0;
+       return 1;
+}
+
+static struct xt_match comment_match = {
+       .name           = "comment",
+       .match          = match,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE
+};
+
+static struct xt_match comment6_match = {
+       .name           = "comment",
+       .match          = match,
+       .checkentry     = checkentry,
+       .me             = THIS_MODULE
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       ret = xt_register_match(AF_INET, &comment_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &comment6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &comment_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &comment_match);
+       xt_unregister_match(AF_INET6, &comment6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_connbytes.c b/net/netfilter/xt_connbytes.c
new file mode 100644 (file)
index 0000000..150d2a4
--- /dev/null
@@ -0,0 +1,180 @@
+/* Kernel module to match connection tracking byte counter.
+ * GPL (C) 2002 Martin Devera (devik@cdi.cz).
+ *
+ * 2004-07-20 Harald Welte <laforge@netfilter.org>
+ *     - reimplemented to use per-connection accounting counters
+ *     - add functionality to match number of packets
+ *     - add functionality to match average packet size
+ *     - add support to match directions separately
+ * 2005-10-16 Harald Welte <laforge@netfilter.org>
+ *     - Port to x_tables
+ *
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_connbytes.h>
+
+#include <asm/div64.h>
+#include <asm/bitops.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("iptables match for matching number of pkts/bytes per connection");
+MODULE_ALIAS("ipt_connbytes");
+
+/* 64bit divisor, dividend and result. dynamic precision */
+static u_int64_t div64_64(u_int64_t dividend, u_int64_t divisor)
+{
+       u_int32_t d = divisor;
+
+       if (divisor > 0xffffffffULL) {
+               unsigned int shift = fls(divisor >> 32);
+
+               d = divisor >> shift;
+               dividend >>= shift;
+       }
+
+       do_div(dividend, d);
+       return dividend;
+}
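As a quick worked illustration of the "dynamic precision" remark (numbers chosen here, not from the patch): for a dividend of 10^12 and a divisor of 6*10^9, the divisor's upper 32-bit word is 1, so shift = fls(1) = 1; both operands are shifted right once and the 32-bit do_div() computes 5*10^11 / 3*10^9 = 166, which matches the exact 64-bit quotient of 166. Discarding low divisor bits this way can perturb the result slightly, which is the precision trade-off the comment above refers to.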
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_connbytes_info *sinfo = matchinfo;
+       u_int64_t what = 0;     /* initialize to make gcc happy */
+       const struct ip_conntrack_counter *counters;
+
+       if (!(counters = nf_ct_get_counters(skb)))
+               return 0; /* no match */
+
+       switch (sinfo->what) {
+       case XT_CONNBYTES_PKTS:
+               switch (sinfo->direction) {
+               case XT_CONNBYTES_DIR_ORIGINAL:
+                       what = counters[IP_CT_DIR_ORIGINAL].packets;
+                       break;
+               case XT_CONNBYTES_DIR_REPLY:
+                       what = counters[IP_CT_DIR_REPLY].packets;
+                       break;
+               case XT_CONNBYTES_DIR_BOTH:
+                       what = counters[IP_CT_DIR_ORIGINAL].packets;
+                       what += counters[IP_CT_DIR_REPLY].packets;
+                       break;
+               }
+               break;
+       case XT_CONNBYTES_BYTES:
+               switch (sinfo->direction) {
+               case XT_CONNBYTES_DIR_ORIGINAL:
+                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
+                       break;
+               case XT_CONNBYTES_DIR_REPLY:
+                       what = counters[IP_CT_DIR_REPLY].bytes;
+                       break;
+               case XT_CONNBYTES_DIR_BOTH:
+                       what = counters[IP_CT_DIR_ORIGINAL].bytes;
+                       what += counters[IP_CT_DIR_REPLY].bytes;
+                       break;
+               }
+               break;
+       case XT_CONNBYTES_AVGPKT:
+               switch (sinfo->direction) {
+               case XT_CONNBYTES_DIR_ORIGINAL:
+                       what = div64_64(counters[IP_CT_DIR_ORIGINAL].bytes,
+                                       counters[IP_CT_DIR_ORIGINAL].packets);
+                       break;
+               case XT_CONNBYTES_DIR_REPLY:
+                       what = div64_64(counters[IP_CT_DIR_REPLY].bytes,
+                                       counters[IP_CT_DIR_REPLY].packets);
+                       break;
+               case XT_CONNBYTES_DIR_BOTH:
+                       {
+                               u_int64_t bytes;
+                               u_int64_t pkts;
+                               bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
+                                       counters[IP_CT_DIR_REPLY].bytes;
+                               pkts = counters[IP_CT_DIR_ORIGINAL].packets+
+                                       counters[IP_CT_DIR_REPLY].packets;
+
+                               /* FIXME_THEORETICAL: what to do if sum
+                                * overflows ? */
+
+                               what = div64_64(bytes, pkts);
+                       }
+                       break;
+               }
+               break;
+       }
+
+       if (sinfo->count.to)
+               return (what <= sinfo->count.to && what >= sinfo->count.from);
+       else
+               return (what >= sinfo->count.from);
+}
+
+static int check(const char *tablename,
+                const void *ip,
+                void *matchinfo,
+                unsigned int matchsize,
+                unsigned int hook_mask)
+{
+       const struct xt_connbytes_info *sinfo = matchinfo;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_connbytes_info)))
+               return 0;
+
+       if (sinfo->what != XT_CONNBYTES_PKTS &&
+           sinfo->what != XT_CONNBYTES_BYTES &&
+           sinfo->what != XT_CONNBYTES_AVGPKT)
+               return 0;
+
+       if (sinfo->direction != XT_CONNBYTES_DIR_ORIGINAL &&
+           sinfo->direction != XT_CONNBYTES_DIR_REPLY &&
+           sinfo->direction != XT_CONNBYTES_DIR_BOTH)
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match connbytes_match = {
+       .name           = "connbytes",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE
+};
+static struct xt_match connbytes6_match = {
+       .name           = "connbytes",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &connbytes_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &connbytes6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &connbytes_match);
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &connbytes_match);
+       xt_unregister_match(AF_INET6, &connbytes6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c
new file mode 100644 (file)
index 0000000..d06e925
--- /dev/null
@@ -0,0 +1,109 @@
+/* This kernel module matches connection mark values set by the
+ * CONNMARK target
+ *
+ * Copyright (C) 2002,2004 MARA Systems AB <http://www.marasystems.com>
+ * by Henrik Nordstrom <hno@marasystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+MODULE_AUTHOR("Henrik Nordstrom <hno@marasytems.com>");
+MODULE_DESCRIPTION("IP tables connmark match module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_connmark");
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_connmark.h>
+#include <net/netfilter/nf_conntrack_compat.h>
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_connmark_info *info = matchinfo;
+       u_int32_t ctinfo;
+       const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
+       if (!ctmark)
+               return 0;
+
+       return (((*ctmark) & info->mask) == info->mark) ^ info->invert;
+}
+
+static int
+checkentry(const char *tablename,
+          const void *ip,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
+{
+       struct xt_connmark_info *cm = 
+                               (struct xt_connmark_info *)matchinfo;
+       if (matchsize != XT_ALIGN(sizeof(struct xt_connmark_info)))
+               return 0;
+
+       if (cm->mark > 0xffffffff || cm->mask > 0xffffffff) {
+               printk(KERN_WARNING "connmark: only support 32bit mark\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_match connmark_match = {
+       .name = "connmark",
+       .match = &match,
+       .checkentry = &checkentry,
+       .me = THIS_MODULE
+};
+static struct xt_match connmark6_match = {
+       .name = "connmark",
+       .match = &match,
+       .checkentry = &checkentry,
+       .me = THIS_MODULE
+};
+
+
+static int __init init(void)
+{
+       int ret;
+
+       need_conntrack();
+
+       ret = xt_register_match(AF_INET, &connmark_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &connmark6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &connmark_match);
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET6, &connmark6_match);
+       xt_unregister_match(AF_INET, &connmark_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_conntrack.c b/net/netfilter/xt_conntrack.c
new file mode 100644 (file)
index 0000000..ffdebc9
--- /dev/null
@@ -0,0 +1,238 @@
+/* Kernel module to match connection tracking information.
+ * Superset of Rusty's minimalistic state match.
+ *
+ * (C) 2001  Marc Boucher (marc@mbsi.ca).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_conntrack.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("iptables connection tracking match module");
+MODULE_ALIAS("ipt_conntrack");
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_conntrack_info *sinfo = matchinfo;
+       struct ip_conntrack *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned int statebit;
+
+       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
+
+#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
+
+       if (ct == &ip_conntrack_untracked)
+               statebit = XT_CONNTRACK_STATE_UNTRACKED;
+       else if (ct)
+               statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+       else
+               statebit = XT_CONNTRACK_STATE_INVALID;
+       if(sinfo->flags & XT_CONNTRACK_STATE) {
+               if (ct) {
+                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip !=
+                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip)
+                               statebit |= XT_CONNTRACK_STATE_SNAT;
+
+                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip !=
+                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip)
+                               statebit |= XT_CONNTRACK_STATE_DNAT;
+               }
+
+               if (FWINV((statebit & sinfo->statemask) == 0, XT_CONNTRACK_STATE))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_PROTO) {
+               if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, XT_CONNTRACK_PROTO))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_ORIGSRC) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, XT_CONNTRACK_ORIGSRC))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_ORIGDST) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, XT_CONNTRACK_ORIGDST))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_REPLSRC) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, XT_CONNTRACK_REPLSRC))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_REPLDST) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, XT_CONNTRACK_REPLDST))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_STATUS) {
+               if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, XT_CONNTRACK_STATUS))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
+               unsigned long expires;
+
+               if(!ct)
+                       return 0;
+
+               expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
+
+               if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), XT_CONNTRACK_EXPIRES))
+                       return 0;
+       }
+
+       return 1;
+}
+
+#else /* CONFIG_IP_NF_CONNTRACK */
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_conntrack_info *sinfo = matchinfo;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       unsigned int statebit;
+
+       ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+
+#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
+
+       if (ct == &nf_conntrack_untracked)
+               statebit = XT_CONNTRACK_STATE_UNTRACKED;
+       else if (ct)
+               statebit = XT_CONNTRACK_STATE_BIT(ctinfo);
+       else
+               statebit = XT_CONNTRACK_STATE_INVALID;
+       if(sinfo->flags & XT_CONNTRACK_STATE) {
+               if (ct) {
+                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip !=
+                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip)
+                               statebit |= XT_CONNTRACK_STATE_SNAT;
+
+                       if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip !=
+                           ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip)
+                               statebit |= XT_CONNTRACK_STATE_DNAT;
+               }
+
+               if (FWINV((statebit & sinfo->statemask) == 0, XT_CONNTRACK_STATE))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_PROTO) {
+               if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, XT_CONNTRACK_PROTO))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_ORIGSRC) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, XT_CONNTRACK_ORIGSRC))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_ORIGDST) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, XT_CONNTRACK_ORIGDST))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_REPLSRC) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, XT_CONNTRACK_REPLSRC))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_REPLDST) {
+               if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, XT_CONNTRACK_REPLDST))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_STATUS) {
+               if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, XT_CONNTRACK_STATUS))
+                       return 0;
+       }
+
+       if(sinfo->flags & XT_CONNTRACK_EXPIRES) {
+               unsigned long expires;
+
+               if(!ct)
+                       return 0;
+
+               expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
+
+               if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), XT_CONNTRACK_EXPIRES))
+                       return 0;
+       }
+
+       return 1;
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK */
+
+static int check(const char *tablename,
+                const void *ip,
+                void *matchinfo,
+                unsigned int matchsize,
+                unsigned int hook_mask)
+{
+       if (matchsize != XT_ALIGN(sizeof(struct xt_conntrack_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match conntrack_match = {
+       .name           = "conntrack",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       need_conntrack();
+       ret = xt_register_match(AF_INET, &conntrack_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &conntrack_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_dccp.c b/net/netfilter/xt_dccp.c
new file mode 100644 (file)
index 0000000..779f42f
--- /dev/null
@@ -0,0 +1,221 @@
+/*
+ * iptables module for DCCP protocol header matching
+ *
+ * (C) 2005 by Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <net/ip.h>
+#include <linux/dccp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_dccp.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+MODULE_DESCRIPTION("Match for DCCP protocol packets");
+MODULE_ALIAS("ipt_dccp");
+
+#define DCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
+                                 || (!!((invflag) & (option)) ^ (cond)))
+
+static unsigned char *dccp_optbuf;
+static DEFINE_SPINLOCK(dccp_buflock);
+
+static inline int
+dccp_find_option(u_int8_t option,
+                const struct sk_buff *skb,
+                unsigned int protoff,
+                const struct dccp_hdr *dh,
+                int *hotdrop)
+{
+       /* dccph_doff is 8 bits, i.e. the header is at most 255 * 4 bytes */
+       unsigned char *op;
+       unsigned int optoff = __dccp_hdr_len(dh);
+       unsigned int optlen = dh->dccph_doff*4 - __dccp_hdr_len(dh);
+       unsigned int i;
+
+       if (dh->dccph_doff * 4 < __dccp_hdr_len(dh)) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       if (!optlen)
+               return 0;
+
+       spin_lock_bh(&dccp_buflock);
+       op = skb_header_pointer(skb, protoff + optoff, optlen, dccp_optbuf);
+       if (op == NULL) {
+               /* If we don't have the whole header, drop packet. */
+               spin_unlock_bh(&dccp_buflock);
+               *hotdrop = 1;
+               return 0;
+       }
+
+       for (i = 0; i < optlen; ) {
+               if (op[i] == option) {
+                       spin_unlock_bh(&dccp_buflock);
+                       return 1;
+               }
+
+               if (op[i] < 2) 
+                       i++;
+               else 
+                       i += op[i+1]?:1;
+       }
+
+       spin_unlock_bh(&dccp_buflock);
+       return 0;
+}
+
+
+static inline int
+match_types(const struct dccp_hdr *dh, u_int16_t typemask)
+{
+       return (typemask & (1 << dh->dccph_type));
+}
+
+static inline int
+match_option(u_int8_t option, const struct sk_buff *skb, unsigned int protoff,
+            const struct dccp_hdr *dh, int *hotdrop)
+{
+       return dccp_find_option(option, skb, protoff, dh, hotdrop);
+}
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_dccp_info *info = 
+                               (const struct xt_dccp_info *)matchinfo;
+       struct dccp_hdr _dh, *dh;
+
+       if (offset)
+               return 0;
+       
+       dh = skb_header_pointer(skb, protoff, sizeof(_dh), &_dh);
+       if (dh == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       return  DCCHECK(((ntohs(dh->dccph_sport) >= info->spts[0]) 
+                       && (ntohs(dh->dccph_sport) <= info->spts[1])), 
+                       XT_DCCP_SRC_PORTS, info->flags, info->invflags)
+               && DCCHECK(((ntohs(dh->dccph_dport) >= info->dpts[0]) 
+                       && (ntohs(dh->dccph_dport) <= info->dpts[1])), 
+                       XT_DCCP_DEST_PORTS, info->flags, info->invflags)
+               && DCCHECK(match_types(dh, info->typemask),
+                          XT_DCCP_TYPE, info->flags, info->invflags)
+               && DCCHECK(match_option(info->option, skb, protoff, dh,
+                                       hotdrop),
+                          XT_DCCP_OPTION, info->flags, info->invflags);
+}
+
+static int
+checkentry(const char *tablename,
+          const void *inf,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
+{
+       const struct ipt_ip *ip = inf;
+       const struct xt_dccp_info *info;
+
+       info = (const struct xt_dccp_info *)matchinfo;
+
+       return ip->proto == IPPROTO_DCCP
+               && !(ip->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_dccp_info))
+               && !(info->flags & ~XT_DCCP_VALID_FLAGS)
+               && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
+               && !(info->invflags & ~info->flags);
+}
+
+static int
+checkentry6(const char *tablename,
+          const void *inf,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
+{
+       const struct ip6t_ip6 *ip = inf;
+       const struct xt_dccp_info *info;
+
+       info = (const struct xt_dccp_info *)matchinfo;
+
+       return ip->proto == IPPROTO_DCCP
+               && !(ip->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_dccp_info))
+               && !(info->flags & ~XT_DCCP_VALID_FLAGS)
+               && !(info->invflags & ~XT_DCCP_VALID_FLAGS)
+               && !(info->invflags & ~info->flags);
+}
+
+
+static struct xt_match dccp_match = 
+{ 
+       .name           = "dccp",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match dccp6_match = 
+{ 
+       .name           = "dccp",
+       .match          = &match,
+       .checkentry     = &checkentry6,
+       .me             = THIS_MODULE,
+};
+
+
+static int __init init(void)
+{
+       int ret;
+
+       /* doff is 8 bits, so the maximum option size is (4*256).  Don't put
+        * this in BSS since DaveM is worried about locked TLB's for kernel
+        * BSS. */
+       dccp_optbuf = kmalloc(256 * 4, GFP_KERNEL);
+       if (!dccp_optbuf)
+               return -ENOMEM;
+       ret = xt_register_match(AF_INET, &dccp_match);
+       if (ret)
+               goto out_kfree;
+       ret = xt_register_match(AF_INET6, &dccp6_match);
+       if (ret)
+               goto out_unreg;
+
+       return ret;
+
+out_unreg:
+       xt_unregister_match(AF_INET, &dccp_match);
+out_kfree:
+       kfree(dccp_optbuf);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET6, &dccp6_match);
+       xt_unregister_match(AF_INET, &dccp_match);
+       kfree(dccp_optbuf);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_helper.c b/net/netfilter/xt_helper.c
new file mode 100644 (file)
index 0000000..38b6715
--- /dev/null
@@ -0,0 +1,188 @@
+/* iptables module to match on related connections */
+/*
+ * (C) 2001 Martin Josefsson <gandalf@wlug.westbo.se>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *   19 Mar 2002 Harald Welte <laforge@gnumonks.org>:
+ *              - Port to newnat infrastructure
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter.h>
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_core.h>
+#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#endif
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_helper.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Martin Josefsson <gandalf@netfilter.org>");
+MODULE_DESCRIPTION("iptables helper match module");
+MODULE_ALIAS("ipt_helper");
+MODULE_ALIAS("ip6t_helper");
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_helper_info *info = matchinfo;
+       struct ip_conntrack *ct;
+       enum ip_conntrack_info ctinfo;
+       int ret = info->invert;
+       
+       ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
+       if (!ct) {
+               DEBUGP("xt_helper: Eek! invalid conntrack?\n");
+               return ret;
+       }
+
+       if (!ct->master) {
+               DEBUGP("xt_helper: conntrack %p has no master\n", ct);
+               return ret;
+       }
+
+       read_lock_bh(&ip_conntrack_lock);
+       if (!ct->master->helper) {
+               DEBUGP("xt_helper: master ct %p has no helper\n",
+                       ct->master);
+               goto out_unlock;
+       }
+
+       DEBUGP("master's name = %s , info->name = %s\n", 
+               ct->master->helper->name, info->name);
+
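+       /* An empty name from userspace means "any helper": the match then
+        * fires for every connection whose master conntrack has a helper
+        * assigned (modulo the invert flag).  A non-empty name is compared
+        * against the master's helper name, using that helper name's own
+        * length as the comparison length. */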
+       if (info->name[0] == '\0')
+               ret ^= 1;
+       else
+               ret ^= !strncmp(ct->master->helper->name, info->name, 
+                               strlen(ct->master->helper->name));
+out_unlock:
+       read_unlock_bh(&ip_conntrack_lock);
+       return ret;
+}
+
+#else /* CONFIG_IP_NF_CONNTRACK */
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_helper_info *info = matchinfo;
+       struct nf_conn *ct;
+       enum ip_conntrack_info ctinfo;
+       int ret = info->invert;
+       
+       ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+       if (!ct) {
+               DEBUGP("xt_helper: Eek! invalid conntrack?\n");
+               return ret;
+       }
+
+       if (!ct->master) {
+               DEBUGP("xt_helper: conntrack %p has no master\n", ct);
+               return ret;
+       }
+
+       read_lock_bh(&nf_conntrack_lock);
+       if (!ct->master->helper) {
+               DEBUGP("xt_helper: master ct %p has no helper\n",
+                       ct->master);
+               goto out_unlock;
+       }
+
+       DEBUGP("master's name = %s , info->name = %s\n", 
+               ct->master->helper->name, info->name);
+
+       if (info->name[0] == '\0')
+               ret ^= 1;
+       else
+               ret ^= !strncmp(ct->master->helper->name, info->name, 
+                               strlen(ct->master->helper->name));
+out_unlock:
+       read_unlock_bh(&nf_conntrack_lock);
+       return ret;
+}
+#endif
+
+static int check(const char *tablename,
+                const void *inf,
+                void *matchinfo,
+                unsigned int matchsize,
+                unsigned int hook_mask)
+{
+       struct xt_helper_info *info = matchinfo;
+
+       info->name[29] = '\0';
+
+       /* verify size */
+       if (matchsize != XT_ALIGN(sizeof(struct xt_helper_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match helper_match = {
+       .name           = "helper",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE,
+};
+static struct xt_match helper6_match = {
+       .name           = "helper",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       need_conntrack();
+
+       ret = xt_register_match(AF_INET, &helper_match);
+       if (ret < 0)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &helper6_match);
+       if (ret < 0)
+               xt_unregister_match(AF_INET, &helper_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &helper_match);
+       xt_unregister_match(AF_INET6, &helper6_match);
+}
+
+module_init(init);
+module_exit(fini);
+
diff --git a/net/netfilter/xt_length.c b/net/netfilter/xt_length.c
new file mode 100644 (file)
index 0000000..39c8fae
--- /dev/null
@@ -0,0 +1,99 @@
+/* Kernel module to match packet length. */
+/* (C) 1999-2001 James Morris <jmorris@intercode.com.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+
+#include <linux/netfilter/xt_length.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
+MODULE_DESCRIPTION("IP tables packet length matching module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_length");
+MODULE_ALIAS("ip6t_length");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_length_info *info = matchinfo;
+       u_int16_t pktlen = ntohs(skb->nh.iph->tot_len);
+       
+       return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+}
+
+static int
+match6(const struct sk_buff *skb,
+       const struct net_device *in,
+       const struct net_device *out,
+       const void *matchinfo,
+       int offset,
+       unsigned int protoff,
+       int *hotdrop)
+{
+       const struct xt_length_info *info = matchinfo;
+       u_int16_t pktlen = ntohs(skb->nh.ipv6h->payload_len) + sizeof(struct ipv6hdr);
+       
+       return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;
+}
+
+static int
+checkentry(const char *tablename,
+           const void *ip,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+       if (matchsize != XT_ALIGN(sizeof(struct xt_length_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match length_match = {
+       .name           = "length",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match length6_match = {
+       .name           = "length",
+       .match          = &match6,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &length_match);
+       if (ret)
+               return ret;
+       ret = xt_register_match(AF_INET6, &length6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &length_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &length_match);
+       xt_unregister_match(AF_INET6, &length6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c
new file mode 100644 (file)
index 0000000..15e4050
--- /dev/null
@@ -0,0 +1,175 @@
+/* Kernel module to control the rate
+ *
+ * 2 September 1999: Changed from the target RATE to the match
+ *                   `limit', removed logging.  Did I mention that
+ *                   Alexey is a fucking genius?
+ *                   Rusty Russell (rusty@rustcorp.com.au).  */
+
+/* (C) 1999 Jérôme de Vivie <devivie@info.enserb.u-bordeaux.fr>
+ * (C) 1999 Hervé Eychenne <eychenne@info.enserb.u-bordeaux.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_limit.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Herve Eychenne <rv@wallfire.org>");
+MODULE_DESCRIPTION("iptables rate limit match");
+MODULE_ALIAS("ipt_limit");
+MODULE_ALIAS("ip6t_limit");
+
+/* The algorithm used is the Simple Token Bucket Filter (TBF)
+ * see net/sched/sch_tbf.c in the linux source tree
+ */
+
+static DEFINE_SPINLOCK(limit_lock);
+
+/* Rusty: This is my (non-mathematically-inclined) understanding of
+   this algorithm.  The `average rate' in jiffies becomes your initial
+   amount of credit `credit' and the most credit you can ever have
+   `credit_cap'.  The `peak rate' becomes the cost of passing the
+   test, `cost'.
+
+   `prev' tracks the last packet hit: you gain one credit per jiffy.
+   If your credit balance would exceed `credit_cap', the extra credit is
+   discarded.  Every time the match passes, you lose `cost' credits;
+   if you don't have that many, the test fails.
+
+   See Alexey's formal explanation in net/sched/sch_tbf.c.
+
+   To get the maximum range, we multiply by this factor (ie. you get N
+   credits per jiffy).  We want to allow a rate as low as 1 per day
+   (slowest userspace tool allows), which means
+   CREDITS_PER_JIFFY*HZ*60*60*24 < 2^32. ie. */
+#define MAX_CPJ (0xFFFFFFFF / (HZ*60*60*24))
+
+/* Repeated shift and or gives us all 1s, final shift and add 1 gives
+ * us the power of 2 below the theoretical max, so GCC simply does a
+ * shift. */
+#define _POW2_BELOW2(x) ((x)|((x)>>1))
+#define _POW2_BELOW4(x) (_POW2_BELOW2(x)|_POW2_BELOW2((x)>>2))
+#define _POW2_BELOW8(x) (_POW2_BELOW4(x)|_POW2_BELOW4((x)>>4))
+#define _POW2_BELOW16(x) (_POW2_BELOW8(x)|_POW2_BELOW8((x)>>8))
+#define _POW2_BELOW32(x) (_POW2_BELOW16(x)|_POW2_BELOW16((x)>>16))
+#define POW2_BELOW32(x) ((_POW2_BELOW32(x)>>1) + 1)
+
+#define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ)
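+
+/* Worked example, assuming HZ=1000 (illustrative only): MAX_CPJ =
+ * 0xFFFFFFFF / (1000*60*60*24) = 49; POW2_BELOW32(49) first fills in the
+ * bits below the top one (49 -> 63), then the final shift-and-add gives
+ * 32, so CREDITS_PER_JIFFY is the largest power of two not exceeding the
+ * theoretical maximum. */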
+
+static int
+ipt_limit_match(const struct sk_buff *skb,
+               const struct net_device *in,
+               const struct net_device *out,
+               const void *matchinfo,
+               int offset,
+               unsigned int protoff,
+               int *hotdrop)
+{
+       struct xt_rateinfo *r = ((struct xt_rateinfo *)matchinfo)->master;
+       unsigned long now = jiffies;
+
+       spin_lock_bh(&limit_lock);
+       r->credit += (now - xchg(&r->prev, now)) * CREDITS_PER_JIFFY;
+       if (r->credit > r->credit_cap)
+               r->credit = r->credit_cap;
+
+       if (r->credit >= r->cost) {
+               /* We're not limited. */
+               r->credit -= r->cost;
+               spin_unlock_bh(&limit_lock);
+               return 1;
+       }
+
+       spin_unlock_bh(&limit_lock);
+       return 0;
+}
+
+/* Precision saver. */
+static u_int32_t
+user2credits(u_int32_t user)
+{
+       /* If multiplying would overflow... */
+       if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY))
+               /* Divide first. */
+               return (user / XT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
+
+       return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
+}
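+
+/* Example, continuing the HZ=1000 assumption above and taking
+ * XT_LIMIT_SCALE as 10000 (the userspace scale; "--limit 1/second" would
+ * then arrive as avg = 10000): user2credits(10000) = 10000 * 1000 * 32 /
+ * 10000 = 32000, i.e. exactly one second's worth of credit, so such a
+ * rule ends up with cost == HZ * CREDITS_PER_JIFFY. */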
+
+static int
+ipt_limit_checkentry(const char *tablename,
+                    const void *inf,
+                    void *matchinfo,
+                    unsigned int matchsize,
+                    unsigned int hook_mask)
+{
+       struct xt_rateinfo *r = matchinfo;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_rateinfo)))
+               return 0;
+
+       /* Check for overflow. */
+       if (r->burst == 0
+           || user2credits(r->avg * r->burst) < user2credits(r->avg)) {
+               printk("Overflow in xt_limit, try lower: %u/%u\n",
+                      r->avg, r->burst);
+               return 0;
+       }
+
+       /* User avg is the average interval in seconds * XT_LIMIT_SCALE;
+          convert it to credits, ie. jiffies * CREDITS_PER_JIFFY. */
+       r->prev = jiffies;
+       r->credit = user2credits(r->avg * r->burst);     /* Credits full. */
+       r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */
+       r->cost = user2credits(r->avg);
+
+       /* For SMP, we only want to use one set of counters. */
+       r->master = r;
+
+       return 1;
+}
+
+static struct xt_match ipt_limit_reg = {
+       .name           = "limit",
+       .match          = ipt_limit_match,
+       .checkentry     = ipt_limit_checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match limit6_reg = {
+       .name           = "limit",
+       .match          = ipt_limit_match,
+       .checkentry     = ipt_limit_checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       
+       ret = xt_register_match(AF_INET, &ipt_limit_reg);
+       if (ret)
+               return ret;
+       
+       ret = xt_register_match(AF_INET6, &limit6_reg);
+       if (ret)
+               xt_unregister_match(AF_INET, &ipt_limit_reg);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &ipt_limit_reg);
+       xt_unregister_match(AF_INET6, &limit6_reg);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_mac.c b/net/netfilter/xt_mac.c
new file mode 100644 (file)
index 0000000..0461dcb
--- /dev/null
@@ -0,0 +1,100 @@
+/* Kernel module to match MAC address parameters. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/xt_mac.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("iptables mac matching module");
+MODULE_ALIAS("ipt_mac");
+MODULE_ALIAS("ip6t_mac");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_mac_info *info = matchinfo;
+
+       /* Is mac pointer valid? */
+       return (skb->mac.raw >= skb->head
+               && (skb->mac.raw + ETH_HLEN) <= skb->data
+               /* If so, compare... */
+               && ((!compare_ether_addr(eth_hdr(skb)->h_source, info->srcaddr))
+                   ^ info->invert));
+}
+
+static int
+ipt_mac_checkentry(const char *tablename,
+                  const void *inf,
+                  void *matchinfo,
+                  unsigned int matchsize,
+                  unsigned int hook_mask)
+{
+       /* FORWARD isn't always valid, but it's nice to be able to do --RR */
+       if (hook_mask
+           & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN)
+               | (1 << NF_IP_FORWARD))) {
+               printk("xt_mac: only valid for PRE_ROUTING, LOCAL_IN or FORWARD.\n");
+               return 0;
+       }
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_mac_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match mac_match = {
+       .name           = "mac",
+       .match          = &match,
+       .checkentry     = &ipt_mac_checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match mac6_match = {
+       .name           = "mac",
+       .match          = &match,
+       .checkentry     = &ipt_mac_checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &mac_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &mac6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &mac_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &mac_match);
+       xt_unregister_match(AF_INET6, &mac6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_mark.c b/net/netfilter/xt_mark.c
new file mode 100644 (file)
index 0000000..2a0ac62
--- /dev/null
@@ -0,0 +1,91 @@
+/* Kernel module to match NFMARK values. */
+
+/* (C) 1999-2001 Marc Boucher <marc@mbsi.ca>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <linux/netfilter/xt_mark.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("iptables mark matching module");
+MODULE_ALIAS("ipt_mark");
+MODULE_ALIAS("ip6t_mark");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_mark_info *info = matchinfo;
+
+       return ((skb->nfmark & info->mask) == info->mark) ^ info->invert;
+}
+
+static int
+checkentry(const char *tablename,
+           const void *entry,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+       struct xt_mark_info *minfo = (struct xt_mark_info *) matchinfo;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_mark_info)))
+               return 0;
+
+       if (minfo->mark > 0xffffffff || minfo->mask > 0xffffffff) {
+               printk(KERN_WARNING "mark: only supports 32bit mark\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_match mark_match = {
+       .name           = "mark",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_match mark6_match = {
+       .name           = "mark",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &mark_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &mark6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &mark_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &mark_match);
+       xt_unregister_match(AF_INET6, &mark6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
new file mode 100644 (file)
index 0000000..19bb57c
--- /dev/null
@@ -0,0 +1,155 @@
+/* Kernel module to match the bridge port in and
+ * out device for IP packets coming into contact with a bridge. */
+
+/* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/xt_physdev.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter_bridge.h>
+#define MATCH   1
+#define NOMATCH 0
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
+MODULE_DESCRIPTION("iptables bridge physical device match module");
+MODULE_ALIAS("ipt_physdev");
+MODULE_ALIAS("ip6t_physdev");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       int i;
+       static const char nulldevname[IFNAMSIZ];
+       const struct xt_physdev_info *info = matchinfo;
+       unsigned int ret;
+       const char *indev, *outdev;
+       struct nf_bridge_info *nf_bridge;
+
+       /* Not a bridged IP packet or no info available yet:
+        * LOCAL_OUT/mangle and LOCAL_OUT/nat don't know if
+        * the destination device will be a bridge. */
+       if (!(nf_bridge = skb->nf_bridge)) {
+               /* Return MATCH if the invert flags of the used options are on */
+               if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
+                   !(info->invert & XT_PHYSDEV_OP_BRIDGED))
+                       return NOMATCH;
+               if ((info->bitmask & XT_PHYSDEV_OP_ISIN) &&
+                   !(info->invert & XT_PHYSDEV_OP_ISIN))
+                       return NOMATCH;
+               if ((info->bitmask & XT_PHYSDEV_OP_ISOUT) &&
+                   !(info->invert & XT_PHYSDEV_OP_ISOUT))
+                       return NOMATCH;
+               if ((info->bitmask & XT_PHYSDEV_OP_IN) &&
+                   !(info->invert & XT_PHYSDEV_OP_IN))
+                       return NOMATCH;
+               if ((info->bitmask & XT_PHYSDEV_OP_OUT) &&
+                   !(info->invert & XT_PHYSDEV_OP_OUT))
+                       return NOMATCH;
+               return MATCH;
+       }
+
+       /* This only makes sense in the FORWARD and POSTROUTING chains */
+       if ((info->bitmask & XT_PHYSDEV_OP_BRIDGED) &&
+           (!!(nf_bridge->mask & BRNF_BRIDGED) ^
+           !(info->invert & XT_PHYSDEV_OP_BRIDGED)))
+               return NOMATCH;
+
+       if ((info->bitmask & XT_PHYSDEV_OP_ISIN &&
+           (!nf_bridge->physindev ^ !!(info->invert & XT_PHYSDEV_OP_ISIN))) ||
+           (info->bitmask & XT_PHYSDEV_OP_ISOUT &&
+           (!nf_bridge->physoutdev ^ !!(info->invert & XT_PHYSDEV_OP_ISOUT))))
+               return NOMATCH;
+
+       if (!(info->bitmask & XT_PHYSDEV_OP_IN))
+               goto match_outdev;
+       indev = nf_bridge->physindev ? nf_bridge->physindev->name : nulldevname;
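+       /* Word-wise masked comparison: the name is XORed against
+        * info->physindev and masked with info->in_mask, so only the bytes
+        * selected by the mask have to agree.  The mask is presumably how a
+        * trailing wildcard such as "eth+" from userspace is expressed; the
+        * kernel side only ever sees the name/mask pair. */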
+       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
+               ret |= (((const unsigned int *)indev)[i]
+                       ^ ((const unsigned int *)info->physindev)[i])
+                       & ((const unsigned int *)info->in_mask)[i];
+       }
+
+       if ((ret == 0) ^ !(info->invert & XT_PHYSDEV_OP_IN))
+               return NOMATCH;
+
+match_outdev:
+       if (!(info->bitmask & XT_PHYSDEV_OP_OUT))
+               return MATCH;
+       outdev = nf_bridge->physoutdev ?
+                nf_bridge->physoutdev->name : nulldevname;
+       for (i = 0, ret = 0; i < IFNAMSIZ/sizeof(unsigned int); i++) {
+               ret |= (((const unsigned int *)outdev)[i]
+                       ^ ((const unsigned int *)info->physoutdev)[i])
+                       & ((const unsigned int *)info->out_mask)[i];
+       }
+
+       return (ret != 0) ^ !(info->invert & XT_PHYSDEV_OP_OUT);
+}
+
+static int
+checkentry(const char *tablename,
+                      const void *ip,
+                      void *matchinfo,
+                      unsigned int matchsize,
+                      unsigned int hook_mask)
+{
+       const struct xt_physdev_info *info = matchinfo;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_physdev_info)))
+               return 0;
+       if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
+           info->bitmask & ~XT_PHYSDEV_OP_MASK)
+               return 0;
+       return 1;
+}
+
+static struct xt_match physdev_match = {
+       .name           = "physdev",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_match physdev6_match = {
+       .name           = "physdev",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       ret = xt_register_match(AF_INET, &physdev_match);
+       if (ret < 0)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &physdev6_match);
+       if (ret < 0)
+               xt_unregister_match(AF_INET, &physdev_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &physdev_match);
+       xt_unregister_match(AF_INET6, &physdev6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_pkttype.c b/net/netfilter/xt_pkttype.c
new file mode 100644 (file)
index 0000000..ab1b263
--- /dev/null
@@ -0,0 +1,82 @@
+/* (C) 1999-2001 Michal Ludvig <michal@logix.cz>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+
+#include <linux/netfilter/xt_pkttype.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig <michal@logix.cz>");
+MODULE_DESCRIPTION("IP tables match to match on linklayer packet type");
+MODULE_ALIAS("ipt_pkttype");
+MODULE_ALIAS("ip6t_pkttype");
+
+static int match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_pkttype_info *info = matchinfo;
+
+       return (skb->pkt_type == info->pkttype) ^ info->invert;
+}
+
+static int checkentry(const char *tablename,
+                  const void *ip,
+                  void *matchinfo,
+                  unsigned int matchsize,
+                  unsigned int hook_mask)
+{
+       if (matchsize != XT_ALIGN(sizeof(struct xt_pkttype_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match pkttype_match = {
+       .name           = "pkttype",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match pkttype6_match = {
+       .name           = "pkttype",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &pkttype_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &pkttype6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &pkttype_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &pkttype_match);
+       xt_unregister_match(AF_INET6, &pkttype6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_realm.c b/net/netfilter/xt_realm.c
new file mode 100644 (file)
index 0000000..2b7e178
--- /dev/null
@@ -0,0 +1,79 @@
+/* IP tables module for matching the routing realm
+ *
+ * $Id: ipt_realm.c,v 1.3 2004/03/05 13:25:40 laforge Exp $
+ *
+ * (C) 2003 by Sampsa Ranta <sampsa@netsonic.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/route.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter/xt_realm.h>
+#include <linux/netfilter/x_tables.h>
+
+MODULE_AUTHOR("Sampsa Ranta <sampsa@netsonic.fi>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("X_tables realm match");
+MODULE_ALIAS("ipt_realm");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_realm_info *info = matchinfo;
+       struct dst_entry *dst = skb->dst;
+    
+       return (info->id == (dst->tclassid & info->mask)) ^ info->invert;
+}
+
+static int check(const char *tablename,
+                 const void *ip,
+                 void *matchinfo,
+                 unsigned int matchsize,
+                 unsigned int hook_mask)
+{
+       if (hook_mask
+           & ~((1 << NF_IP_POST_ROUTING) | (1 << NF_IP_FORWARD) |
+               (1 << NF_IP_LOCAL_OUT) | (1 << NF_IP_LOCAL_IN))) {
+               printk("xt_realm: only valid for POST_ROUTING, LOCAL_OUT, "
+                      "LOCAL_IN or FORWARD.\n");
+               return 0;
+       }
+       if (matchsize != XT_ALIGN(sizeof(struct xt_realm_info))) {
+               printk("xt_realm: invalid matchsize.\n");
+               return 0;
+       }
+       return 1;
+}
+
+static struct xt_match realm_match = {
+       .name           = "realm",
+       .match          = match, 
+       .checkentry     = check,
+       .me             = THIS_MODULE
+};
+
+static int __init init(void)
+{
+       return xt_register_match(AF_INET, &realm_match);
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &realm_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_sctp.c b/net/netfilter/xt_sctp.c
new file mode 100644 (file)
index 0000000..10fbfc5
--- /dev/null
@@ -0,0 +1,250 @@
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/sctp.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_sctp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kiran Kumar Immidi");
+MODULE_DESCRIPTION("Match for SCTP protocol packets");
+MODULE_ALIAS("ipt_sctp");
+
+#ifdef DEBUG_SCTP
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+#define SCCHECK(cond, option, flag, invflag) (!((flag) & (option)) \
+                                             || (!!((invflag) & (option)) ^ (cond)))
+
+static int
+match_flags(const struct xt_sctp_flag_info *flag_info,
+           const int flag_count,
+           u_int8_t chunktype,
+           u_int8_t chunkflags)
+{
+       int i;
+
+       for (i = 0; i < flag_count; i++) {
+               if (flag_info[i].chunktype == chunktype) {
+                       return (chunkflags & flag_info[i].flag_mask) == flag_info[i].flag;
+               }
+       }
+
+       return 1;
+}
+
+static inline int
+match_packet(const struct sk_buff *skb,
+            unsigned int offset,
+            const u_int32_t *chunkmap,
+            int chunk_match_type,
+            const struct xt_sctp_flag_info *flag_info,
+            const int flag_count,
+            int *hotdrop)
+{
+       u_int32_t chunkmapcopy[256 / sizeof (u_int32_t)];
+       sctp_chunkhdr_t _sch, *sch;
+
+#ifdef DEBUG_SCTP
+       int i = 0;
+#endif
+
+       if (chunk_match_type == SCTP_CHUNK_MATCH_ALL) {
+               SCTP_CHUNKMAP_COPY(chunkmapcopy, chunkmap);
+       }
+
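+       /* Walk every chunk in the packet.  MATCH_ANY succeeds on the first
+        * chunk whose type is in the map and whose flags pass match_flags();
+        * MATCH_ALL clears each such type from the working copy and succeeds
+        * only if the copy ends up empty; MATCH_ONLY fails as soon as a
+        * chunk falls outside the map or its flags do not match. */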
+       do {
+               sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch);
+               if (sch == NULL) {
+                       duprintf("Dropping invalid SCTP packet.\n");
+                       *hotdrop = 1;
+                       return 0;
+               }
+
+               duprintf("Chunk num: %d\toffset: %d\ttype: %d\tlength: %d\tflags: %x\n", 
+                               ++i, offset, sch->type, ntohs(sch->length), sch->flags);
+
+               offset += (ntohs(sch->length) + 3) & ~3;
+
+               duprintf("skb->len: %d\toffset: %d\n", skb->len, offset);
+
+               if (SCTP_CHUNKMAP_IS_SET(chunkmap, sch->type)) {
+                       switch (chunk_match_type) {
+                       case SCTP_CHUNK_MATCH_ANY:
+                               if (match_flags(flag_info, flag_count, 
+                                       sch->type, sch->flags)) {
+                                       return 1;
+                               }
+                               break;
+
+                       case SCTP_CHUNK_MATCH_ALL:
+                               if (match_flags(flag_info, flag_count, 
+                                       sch->type, sch->flags)) {
+                                       SCTP_CHUNKMAP_CLEAR(chunkmapcopy, sch->type);
+                               }
+                               break;
+
+                       case SCTP_CHUNK_MATCH_ONLY:
+                               if (!match_flags(flag_info, flag_count, 
+                                       sch->type, sch->flags)) {
+                                       return 0;
+                               }
+                               break;
+                       }
+               } else {
+                       switch (chunk_match_type) {
+                       case SCTP_CHUNK_MATCH_ONLY:
+                               return 0;
+                       }
+               }
+       } while (offset < skb->len);
+
+       switch (chunk_match_type) {
+       case SCTP_CHUNK_MATCH_ALL:
+               return SCTP_CHUNKMAP_IS_CLEAR(chunkmapcopy);
+       case SCTP_CHUNK_MATCH_ANY:
+               return 0;
+       case SCTP_CHUNK_MATCH_ONLY:
+               return 1;
+       }
+
+       /* This will never be reached, but required to stop compiler whine */
+       return 0;
+}
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_sctp_info *info;
+       sctp_sctphdr_t _sh, *sh;
+
+       info = (const struct xt_sctp_info *)matchinfo;
+
+       if (offset) {
+               duprintf("Dropping non-first fragment.. FIXME\n");
+               return 0;
+       }
+       
+       sh = skb_header_pointer(skb, protoff, sizeof(_sh), &_sh);
+       if (sh == NULL) {
+               duprintf("Dropping evil SCTP offset=0 tinygram.\n");
+               *hotdrop = 1;
+               return 0;
+       }
+       duprintf("spt: %d\tdpt: %d\n", ntohs(sh->source), ntohs(sh->dest));
+
+       return  SCCHECK(((ntohs(sh->source) >= info->spts[0]) 
+                       && (ntohs(sh->source) <= info->spts[1])), 
+                       XT_SCTP_SRC_PORTS, info->flags, info->invflags)
+               && SCCHECK(((ntohs(sh->dest) >= info->dpts[0]) 
+                       && (ntohs(sh->dest) <= info->dpts[1])), 
+                       XT_SCTP_DEST_PORTS, info->flags, info->invflags)
+               && SCCHECK(match_packet(skb, protoff,
+                                       info->chunkmap, info->chunk_match_type,
+                                       info->flag_info, info->flag_count, 
+                                       hotdrop),
+                          XT_SCTP_CHUNK_TYPES, info->flags, info->invflags);
+}
+
+static int
+checkentry(const char *tablename,
+          const void *inf,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
+{
+       const struct xt_sctp_info *info;
+       const struct ipt_ip *ip = inf;
+
+       info = (const struct xt_sctp_info *)matchinfo;
+
+       return ip->proto == IPPROTO_SCTP
+               && !(ip->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_sctp_info))
+               && !(info->flags & ~XT_SCTP_VALID_FLAGS)
+               && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
+               && !(info->invflags & ~info->flags)
+               && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) || 
+                       (info->chunk_match_type &
+                               (SCTP_CHUNK_MATCH_ALL 
+                               | SCTP_CHUNK_MATCH_ANY
+                               | SCTP_CHUNK_MATCH_ONLY)));
+}
+
+static int
+checkentry6(const char *tablename,
+          const void *inf,
+          void *matchinfo,
+          unsigned int matchsize,
+          unsigned int hook_mask)
+{
+       const struct xt_sctp_info *info;
+       const struct ip6t_ip6 *ip = inf;
+
+       info = (const struct xt_sctp_info *)matchinfo;
+
+       return ip->proto == IPPROTO_SCTP
+               && !(ip->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_sctp_info))
+               && !(info->flags & ~XT_SCTP_VALID_FLAGS)
+               && !(info->invflags & ~XT_SCTP_VALID_FLAGS)
+               && !(info->invflags & ~info->flags)
+               && ((!(info->flags & XT_SCTP_CHUNK_TYPES)) || 
+                       (info->chunk_match_type &
+                               (SCTP_CHUNK_MATCH_ALL 
+                               | SCTP_CHUNK_MATCH_ANY
+                               | SCTP_CHUNK_MATCH_ONLY)));
+}
+
+
+static struct xt_match sctp_match = 
+{ 
+       .name = "sctp",
+       .match = &match,
+       .checkentry = &checkentry,
+       .me = THIS_MODULE
+};
+static struct xt_match sctp6_match = 
+{ 
+       .name = "sctp",
+       .match = &match,
+       .checkentry = &checkentry6,
+       .me = THIS_MODULE
+};
+
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &sctp_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &sctp6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &sctp_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET6, &sctp6_match);
+       xt_unregister_match(AF_INET, &sctp_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_state.c b/net/netfilter/xt_state.c
new file mode 100644 (file)
index 0000000..39ce808
--- /dev/null
@@ -0,0 +1,96 @@
+/* Kernel module to match connection tracking information. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/netfilter/nf_conntrack_compat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_state.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+MODULE_DESCRIPTION("ip[6]_tables connection tracking state match module");
+MODULE_ALIAS("ipt_state");
+MODULE_ALIAS("ip6t_state");
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_state_info *sinfo = matchinfo;
+       enum ip_conntrack_info ctinfo;
+       unsigned int statebit;
+
+       if (nf_ct_is_untracked(skb))
+               statebit = XT_STATE_UNTRACKED;
+       else if (!nf_ct_get_ctinfo(skb, &ctinfo))
+               statebit = XT_STATE_INVALID;
+       else
+               statebit = XT_STATE_BIT(ctinfo);
+
+       return (sinfo->statemask & statebit);
+}
+
+static int check(const char *tablename,
+                const void *ip,
+                void *matchinfo,
+                unsigned int matchsize,
+                unsigned int hook_mask)
+{
+       if (matchsize != XT_ALIGN(sizeof(struct xt_state_info)))
+               return 0;
+
+       return 1;
+}
+
+static struct xt_match state_match = {
+       .name           = "state",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_match state6_match = {
+       .name           = "state",
+       .match          = &match,
+       .checkentry     = &check,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       need_conntrack();
+
+       ret = xt_register_match(AF_INET, &state_match);
+       if (ret < 0)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &state6_match);
+       if (ret < 0)
+               xt_unregister_match(AF_INET,&state_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &state_match);
+       xt_unregister_match(AF_INET6, &state6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_string.c b/net/netfilter/xt_string.c
new file mode 100644 (file)
index 0000000..7c7d5c8
--- /dev/null
@@ -0,0 +1,111 @@
+/* String matching match for iptables
+ * 
+ * (C) 2005 Pablo Neira Ayuso <pablo@eurodev.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_string.h>
+#include <linux/textsearch.h>
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@eurodev.net>");
+MODULE_DESCRIPTION("IP tables string match module");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_string");
+MODULE_ALIAS("ip6t_string");
+
+static int match(const struct sk_buff *skb,
+                const struct net_device *in,
+                const struct net_device *out,
+                const void *matchinfo,
+                int offset,
+                unsigned int protoff,
+                int *hotdrop)
+{
+       struct ts_state state;
+       struct xt_string_info *conf = (struct xt_string_info *) matchinfo;
+
+       memset(&state, 0, sizeof(struct ts_state));
+
+       return (skb_find_text((struct sk_buff *)skb, conf->from_offset,
+                            conf->to_offset, conf->config, &state)
+                            != UINT_MAX) ^ conf->invert;
+}
+
+#define STRING_TEXT_PRIV(m) ((struct xt_string_info *) m)
+
+static int checkentry(const char *tablename,
+                     const void *ip,
+                     void *matchinfo,
+                     unsigned int matchsize,
+                     unsigned int hook_mask)
+{
+       struct xt_string_info *conf = matchinfo;
+       struct ts_config *ts_conf;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_string_info)))
+               return 0;
+
+       /* Damn, can't handle this case properly with iptables... */
+       if (conf->from_offset > conf->to_offset)
+               return 0;
+
+       ts_conf = textsearch_prepare(conf->algo, conf->pattern, conf->patlen,
+                                    GFP_KERNEL, TS_AUTOLOAD);
+       if (IS_ERR(ts_conf))
+               return 0;
+
+       conf->config = ts_conf;
+
+       return 1;
+}
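+
+/* checkentry() compiles the pattern once via textsearch_prepare() and
+ * stashes the resulting ts_config in conf->config; destroy() below hands
+ * it back to textsearch_destroy() when the rule is removed. */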
+
+static void destroy(void *matchinfo, unsigned int matchsize)
+{
+       textsearch_destroy(STRING_TEXT_PRIV(matchinfo)->config);
+}
+
+static struct xt_match string_match = {
+       .name           = "string",
+       .match          = match,
+       .checkentry     = checkentry,
+       .destroy        = destroy,
+       .me             = THIS_MODULE
+};
+static struct xt_match string6_match = {
+       .name           = "string",
+       .match          = match,
+       .checkentry     = checkentry,
+       .destroy        = destroy,
+       .me             = THIS_MODULE
+};
+
+static int __init init(void)
+{
+       int ret;
+
+       ret = xt_register_match(AF_INET, &string_match);
+       if (ret)
+               return ret;
+       ret = xt_register_match(AF_INET6, &string6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &string_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET, &string_match);
+       xt_unregister_match(AF_INET6, &string6_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_tcpmss.c b/net/netfilter/xt_tcpmss.c
new file mode 100644 (file)
index 0000000..acf7f53
--- /dev/null
@@ -0,0 +1,172 @@
+/* Kernel module to match TCP MSS values. */
+
+/* Copyright (C) 2000 Marc Boucher <marc@mbsi.ca>
+ * Portions (C) 2005 by Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/tcp.h>
+
+#include <linux/netfilter/xt_tcpmss.h>
+#include <linux/netfilter/x_tables.h>
+
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+#define TH_SYN 0x02
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
+MODULE_DESCRIPTION("iptables TCP MSS match module");
+MODULE_ALIAS("ipt_tcpmss");
+
+/* Returns 1 if the mss option is set and matched by the range, 0 otherwise */
+static inline int
+mssoption_match(u_int16_t min, u_int16_t max,
+               const struct sk_buff *skb,
+               unsigned int protoff,
+               int invert,
+               int *hotdrop)
+{
+       struct tcphdr _tcph, *th;
+       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
+       u8 _opt[15 * 4 - sizeof(_tcph)], *op;
+       unsigned int i, optlen;
+
+       /* If we don't have the whole header, drop packet. */
+       th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+       if (th == NULL)
+               goto dropit;
+
+       /* Malformed. */
+       if (th->doff*4 < sizeof(*th))
+               goto dropit;
+
+       optlen = th->doff*4 - sizeof(*th);
+       if (!optlen)
+               goto out;
+
+       /* Truncated options. */
+       op = skb_header_pointer(skb, protoff + sizeof(*th), optlen, _opt);
+       if (op == NULL)
+               goto dropit;
+
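+       /* TCP options are type/length encoded: op[i] is the kind and
+        * op[i+1] the total option length; kinds 0 (EOL) and 1 (NOP) are
+        * single bytes, hence the op[i] < 2 special case.  MSS is kind 2,
+        * length 4, carrying the 16-bit value in op[i+2]/op[i+3]. */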
+       for (i = 0; i < optlen; ) {
+               if (op[i] == TCPOPT_MSS
+                   && (optlen - i) >= TCPOLEN_MSS
+                   && op[i+1] == TCPOLEN_MSS) {
+                       u_int16_t mssval;
+
+                       mssval = (op[i+2] << 8) | op[i+3];
+                       
+                       return (mssval >= min && mssval <= max) ^ invert;
+               }
+               if (op[i] < 2) i++;
+               else i += op[i+1]?:1;
+       }
+out:
+       return invert;
+
+ dropit:
+       *hotdrop = 1;
+       return 0;
+}
+
+static int
+match(const struct sk_buff *skb,
+      const struct net_device *in,
+      const struct net_device *out,
+      const void *matchinfo,
+      int offset,
+      unsigned int protoff,
+      int *hotdrop)
+{
+       const struct xt_tcpmss_match_info *info = matchinfo;
+
+       return mssoption_match(info->mss_min, info->mss_max, skb, protoff,
+                              info->invert, hotdrop);
+}
+
+static int
+checkentry(const char *tablename,
+           const void *ipinfo,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+       const struct ipt_ip *ip = ipinfo;
+       if (matchsize != XT_ALIGN(sizeof(struct xt_tcpmss_match_info)))
+               return 0;
+
+       /* Must specify -p tcp */
+       if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
+               printk("tcpmss: Only works on TCP packets\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static int
+checkentry6(const char *tablename,
+          const void *ipinfo,
+           void *matchinfo,
+           unsigned int matchsize,
+           unsigned int hook_mask)
+{
+       const struct ip6t_ip6 *ip = ipinfo;
+
+       if (matchsize != XT_ALIGN(sizeof(struct xt_tcpmss_match_info)))
+               return 0;
+
+       /* Must specify -p tcp */
+       if (ip->proto != IPPROTO_TCP || (ip->invflags & XT_INV_PROTO)) {
+               printk("tcpmss: Only works on TCP packets\n");
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_match tcpmss_match = {
+       .name           = "tcpmss",
+       .match          = &match,
+       .checkentry     = &checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_match tcpmss6_match = {
+       .name           = "tcpmss",
+       .match          = &match,
+       .checkentry     = &checkentry6,
+       .me             = THIS_MODULE,
+};
+
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &tcpmss_match);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &tcpmss6_match);
+       if (ret)
+               xt_unregister_match(AF_INET, &tcpmss_match);
+
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET6, &tcpmss6_match);
+       xt_unregister_match(AF_INET, &tcpmss_match);
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/xt_tcpudp.c b/net/netfilter/xt_tcpudp.c
new file mode 100644 (file)
index 0000000..669c811
--- /dev/null
@@ -0,0 +1,334 @@
+#include <linux/types.h>
+#include <linux/module.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+
+MODULE_DESCRIPTION("x_tables match for TCP and UDP, supports IPv4 and IPv6");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xt_tcp");
+MODULE_ALIAS("xt_udp");
+MODULE_ALIAS("ipt_udp");
+MODULE_ALIAS("ipt_tcp");
+MODULE_ALIAS("ip6t_udp");
+MODULE_ALIAS("ip6t_tcp");
+
+#ifdef DEBUG_IP_FIREWALL_USER
+#define duprintf(format, args...) printk(format , ## args)
+#else
+#define duprintf(format, args...)
+#endif
+
+
+/* Returns 1 if the port is matched by the range, 0 otherwise */
+static inline int
+port_match(u_int16_t min, u_int16_t max, u_int16_t port, int invert)
+{
+       int ret;
+
+       ret = (port >= min && port <= max) ^ invert;
+       return ret;
+}
+
+static int
+tcp_find_option(u_int8_t option,
+               const struct sk_buff *skb,
+               unsigned int protoff,
+               unsigned int optlen,
+               int invert,
+               int *hotdrop)
+{
+       /* tcp.doff is only 4 bits, ie. max 15 * 4 bytes */
+       u_int8_t _opt[60 - sizeof(struct tcphdr)], *op;
+       unsigned int i;
+
+       duprintf("tcp_match: finding option\n");
+
+       if (!optlen)
+               return invert;
+
+       /* If we don't have the whole header, drop packet. */
+       op = skb_header_pointer(skb, protoff + sizeof(struct tcphdr),
+                               optlen, _opt);
+       if (op == NULL) {
+               *hotdrop = 1;
+               return 0;
+       }
+
+       for (i = 0; i < optlen; ) {
+               if (op[i] == option) return !invert;
+               if (op[i] < 2) i++;
+               else i += op[i+1]?:1;
+       }
+
+       return invert;
+}
+
+static int
+tcp_match(const struct sk_buff *skb,
+         const struct net_device *in,
+         const struct net_device *out,
+         const void *matchinfo,
+         int offset,
+         unsigned int protoff,
+         int *hotdrop)
+{
+       struct tcphdr _tcph, *th;
+       const struct xt_tcp *tcpinfo = matchinfo;
+
+       if (offset) {
+               /* To quote Alan:
+
+                  Don't allow a fragment of TCP 8 bytes in. Nobody normal
+                  causes this. Its a cracker trying to break in by doing a
+                  flag overwrite to pass the direction checks.
+               */
+               if (offset == 1) {
+                       duprintf("Dropping evil TCP offset=1 frag.\n");
+                       *hotdrop = 1;
+               }
+               /* Must not be a fragment. */
+               return 0;
+       }
+
+#define FWINVTCP(bool,invflg) ((bool) ^ !!(tcpinfo->invflags & invflg))
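+       /* Byte 13 of the TCP header holds the eight flag bits (CWR..FIN),
+        * which is what the ((unsigned char *)th)[13] access below reads;
+        * FWINVTCP applies the inversion requested in tcpinfo->invflags. */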
+
+       th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
+       if (th == NULL) {
+               /* We've been asked to examine this packet, and we
+                  can't.  Hence, no choice but to drop. */
+               duprintf("Dropping evil TCP offset=0 tinygram.\n");
+               *hotdrop = 1;
+               return 0;
+       }
+
+       if (!port_match(tcpinfo->spts[0], tcpinfo->spts[1],
+                       ntohs(th->source),
+                       !!(tcpinfo->invflags & XT_TCP_INV_SRCPT)))
+               return 0;
+       if (!port_match(tcpinfo->dpts[0], tcpinfo->dpts[1],
+                       ntohs(th->dest),
+                       !!(tcpinfo->invflags & XT_TCP_INV_DSTPT)))
+               return 0;
+       if (!FWINVTCP((((unsigned char *)th)[13] & tcpinfo->flg_mask)
+                     == tcpinfo->flg_cmp,
+                     XT_TCP_INV_FLAGS))
+               return 0;
+       if (tcpinfo->option) {
+               if (th->doff * 4 < sizeof(_tcph)) {
+                       *hotdrop = 1;
+                       return 0;
+               }
+               if (!tcp_find_option(tcpinfo->option, skb, protoff,
+                                    th->doff*4 - sizeof(_tcph),
+                                    tcpinfo->invflags & XT_TCP_INV_OPTION,
+                                    hotdrop))
+                       return 0;
+       }
+       return 1;
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+tcp_checkentry(const char *tablename,
+              const void *info,
+              void *matchinfo,
+              unsigned int matchsize,
+              unsigned int hook_mask)
+{
+       const struct ipt_ip *ip = info;
+       const struct xt_tcp *tcpinfo = matchinfo;
+
+       /* Must specify proto == TCP, and no unknown invflags */
+       return ip->proto == IPPROTO_TCP
+               && !(ip->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_tcp))
+               && !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+tcp6_checkentry(const char *tablename,
+              const void *entry,
+              void *matchinfo,
+              unsigned int matchsize,
+              unsigned int hook_mask)
+{
+       const struct ip6t_ip6 *ipv6 = entry;
+       const struct xt_tcp *tcpinfo = matchinfo;
+
+       /* Must specify proto == TCP, and no unknown invflags */
+       return ipv6->proto == IPPROTO_TCP
+               && !(ipv6->invflags & XT_INV_PROTO)
+               && matchsize == XT_ALIGN(sizeof(struct xt_tcp))
+               && !(tcpinfo->invflags & ~XT_TCP_INV_MASK);
+}
+
+
+static int
+udp_match(const struct sk_buff *skb,
+         const struct net_device *in,
+         const struct net_device *out,
+         const void *matchinfo,
+         int offset,
+         unsigned int protoff,
+         int *hotdrop)
+{
+       struct udphdr _udph, *uh;
+       const struct xt_udp *udpinfo = matchinfo;
+
+       /* Must not be a fragment. */
+       if (offset)
+               return 0;
+
+       uh = skb_header_pointer(skb, protoff, sizeof(_udph), &_udph);
+       if (uh == NULL) {
+               /* We've been asked to examine this packet, and we
+                  can't.  Hence, no choice but to drop. */
+               duprintf("Dropping evil UDP tinygram.\n");
+               *hotdrop = 1;
+               return 0;
+       }
+
+       return port_match(udpinfo->spts[0], udpinfo->spts[1],
+                         ntohs(uh->source),
+                         !!(udpinfo->invflags & XT_UDP_INV_SRCPT))
+               && port_match(udpinfo->dpts[0], udpinfo->dpts[1],
+                             ntohs(uh->dest),
+                             !!(udpinfo->invflags & XT_UDP_INV_DSTPT));
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+udp_checkentry(const char *tablename,
+              const void *info,
+              void *matchinfo,
+              unsigned int matchinfosize,
+              unsigned int hook_mask)
+{
+       const struct ipt_ip *ip = info;
+       const struct xt_udp *udpinfo = matchinfo;
+
+       /* Must specify proto == UDP, and no unknown invflags */
+       if (ip->proto != IPPROTO_UDP || (ip->invflags & XT_INV_PROTO)) {
+               duprintf("ipt_udp: Protocol %u != %u\n", ip->proto,
+                        IPPROTO_UDP);
+               return 0;
+       }
+       if (matchinfosize != XT_ALIGN(sizeof(struct xt_udp))) {
+               duprintf("ipt_udp: matchsize %u != %u\n",
+                        matchinfosize, XT_ALIGN(sizeof(struct xt_udp)));
+               return 0;
+       }
+       if (udpinfo->invflags & ~XT_UDP_INV_MASK) {
+               duprintf("ipt_udp: unknown flags %X\n",
+                        udpinfo->invflags);
+               return 0;
+       }
+
+       return 1;
+}
+
+/* Called when user tries to insert an entry of this type. */
+static int
+udp6_checkentry(const char *tablename,
+              const void *entry,
+              void *matchinfo,
+              unsigned int matchinfosize,
+              unsigned int hook_mask)
+{
+       const struct ip6t_ip6 *ipv6 = entry;
+       const struct xt_udp *udpinfo = matchinfo;
+
+       /* Must specify proto == UDP, and no unknown invflags */
+       if (ipv6->proto != IPPROTO_UDP || (ipv6->invflags & XT_INV_PROTO)) {
+               duprintf("ip6t_udp: Protocol %u != %u\n", ipv6->proto,
+                        IPPROTO_UDP);
+               return 0;
+       }
+       if (matchinfosize != XT_ALIGN(sizeof(struct xt_udp))) {
+               duprintf("ip6t_udp: matchsize %u != %u\n",
+                        matchinfosize, XT_ALIGN(sizeof(struct xt_udp)));
+               return 0;
+       }
+       if (udpinfo->invflags & ~XT_UDP_INV_MASK) {
+               duprintf("ip6t_udp: unknown flags %X\n",
+                        udpinfo->invflags);
+               return 0;
+       }
+
+       return 1;
+}
+
+static struct xt_match tcp_matchstruct = {
+       .name           = "tcp",
+       .match          = &tcp_match,
+       .checkentry     = &tcp_checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match tcp6_matchstruct = {
+       .name           = "tcp",
+       .match          = &tcp_match,
+       .checkentry     = &tcp6_checkentry,
+       .me             = THIS_MODULE,
+};
+
+static struct xt_match udp_matchstruct = {
+       .name           = "udp",
+       .match          = &udp_match,
+       .checkentry     = &udp_checkentry,
+       .me             = THIS_MODULE,
+};
+static struct xt_match udp6_matchstruct = {
+       .name           = "udp",
+       .match          = &udp_match,
+       .checkentry     = &udp6_checkentry,
+       .me             = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+       int ret;
+       ret = xt_register_match(AF_INET, &tcp_matchstruct);
+       if (ret)
+               return ret;
+
+       ret = xt_register_match(AF_INET6, &tcp6_matchstruct);
+       if (ret)
+               goto out_unreg_tcp;
+
+       ret = xt_register_match(AF_INET, &udp_matchstruct);
+       if (ret)
+               goto out_unreg_tcp6;
+       
+       ret = xt_register_match(AF_INET6, &udp6_matchstruct);
+       if (ret)
+               goto out_unreg_udp;
+
+       return ret;
+
+out_unreg_udp:
+       xt_unregister_match(AF_INET, &udp_matchstruct);
+out_unreg_tcp6:
+       xt_unregister_match(AF_INET6, &tcp6_matchstruct);
+out_unreg_tcp:
+       xt_unregister_match(AF_INET, &tcp_matchstruct);
+       return ret;
+}
+
+static void __exit fini(void)
+{
+       xt_unregister_match(AF_INET6, &udp6_matchstruct);
+       xt_unregister_match(AF_INET, &udp_matchstruct);
+       xt_unregister_match(AF_INET6, &tcp6_matchstruct);
+       xt_unregister_match(AF_INET, &tcp_matchstruct);
+}
+
+module_init(init);
+module_exit(fini);
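
The option scan in tcp_find_option() above walks the TCP option area as a
type/length/value list: kinds 0 (end of options) and 1 (NOP) occupy a single
byte, and every other kind is followed by a length byte. A minimal user-space
sketch of the same walk, with hypothetical option bytes and local helper
names that are not part of this patch:

#include <stdio.h>

/* Same scan as tcp_find_option(): returns 1 if 'option' is present. */
static int find_option(const unsigned char *op, unsigned int optlen,
                       unsigned char option)
{
        unsigned int i;

        for (i = 0; i < optlen; ) {
                if (op[i] == option)
                        return 1;
                if (op[i] < 2)          /* EOL (0) or NOP (1): one byte */
                        i++;
                else                    /* TLV option: skip by its length byte */
                        i += op[i + 1] ? op[i + 1] : 1;
        }
        return 0;
}

int main(void)
{
        /* MSS(kind 2, len 4), NOP, NOP, Timestamps(kind 8, len 10) */
        const unsigned char opts[] = { 2, 4, 0x05, 0xb4, 1, 1,
                                       8, 10, 0, 0, 0, 1, 0, 0, 0, 0 };

        printf("option 8 present: %d\n", find_option(opts, sizeof(opts), 8));
        printf("option 3 present: %d\n", find_option(opts, sizeof(opts), 3));
        return 0;
}
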
index 3b1378498d50b12647f2ea433a341b16502e1ac9..4ae1538c54a9397505d8ae3e74eb19eba6c50337 100644 (file)
@@ -222,11 +222,6 @@ int genl_register_family(struct genl_family *family)
                goto errout_locked;
        }
 
-       if (!try_module_get(family->owner)) {
-               err = -EBUSY;
-               goto errout_locked;
-       }
-
        if (family->id == GENL_ID_GENERATE) {
                u16 newid = genl_generate_id();
 
@@ -283,7 +278,6 @@ int genl_unregister_family(struct genl_family *family)
                INIT_LIST_HEAD(&family->ops_list);
                genl_unlock();
 
-               module_put(family->owner);
                kfree(family->attrbuf);
                genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
                return 0;
@@ -535,7 +529,6 @@ static struct genl_family genl_ctrl = {
        .name = "nlctrl",
        .version = 0x1,
        .maxattr = CTRL_ATTR_MAX,
-       .owner = THIS_MODULE,
 };
 
 static int __init genl_init(void)
index 8a260d43ceef156d89389aa1dbb388f93c0c7a91..778b1e5a4b50976196677b4b7bb51dd13345c921 100644 (file)
@@ -44,7 +44,7 @@ if NET_SCHED
 
 choice
        prompt "Packet scheduler clock source"
-       default NET_SCH_CLK_JIFFIES
+       default NET_SCH_CLK_GETTIMEOFDAY
        ---help---
          Packet schedulers need a monotonic clock that increments at a static
          rate. The kernel provides several suitable interfaces, each with
index b5001939b74b59822291de1b0f5759164da29f2b..39a22a3ffe78822636cad6306536916c368e5f19 100644 (file)
@@ -62,7 +62,7 @@ ipt_init_target(struct ipt_entry_target *t, char *table, unsigned int hook)
        struct ipt_target *target;
        int ret = 0;
 
-       target = ipt_find_target(t->u.user.name, t->u.user.revision);
+       target = xt_find_target(AF_INET, t->u.user.name, t->u.user.revision);
        if (!target)
                return -ENOENT;
 
index 04c7fab4edc42b49068513a455ca2c747b174f8e..2e266129a764d2f0dc0edfaabdf483155b8cfdb1 100644 (file)
@@ -180,8 +180,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport,
        }
 
        SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, "
-                         "src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
-                         "dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                         "src:" NIP6_FMT " dst:" NIP6_FMT "\n",
                          __FUNCTION__, skb, skb->len,
                          NIP6(fl.fl6_src), NIP6(fl.fl6_dst));
 
@@ -206,13 +205,13 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
                fl.oif = daddr->v6.sin6_scope_id;
        
 
-       SCTP_DEBUG_PRINTK("%s: DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
+       SCTP_DEBUG_PRINTK("%s: DST=" NIP6_FMT " ",
                          __FUNCTION__, NIP6(fl.fl6_dst));
 
        if (saddr) {
                ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr);
                SCTP_DEBUG_PRINTK(
-                       "SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x - ",
+                       "SRC=" NIP6_FMT " - ",
                        NIP6(fl.fl6_src));
        }
 
@@ -221,8 +220,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
                struct rt6_info *rt;
                rt = (struct rt6_info *)dst;
                SCTP_DEBUG_PRINTK(
-                       "rt6_dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x "
-                       "rt6_src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                       "rt6_dst:" NIP6_FMT " rt6_src:" NIP6_FMT "\n",
                        NIP6(rt->rt6i_dst.addr), NIP6(rt->rt6i_src.addr));
        } else {
                SCTP_DEBUG_PRINTK("NO ROUTE\n");
@@ -271,13 +269,12 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
        __u8 bmatchlen;
 
        SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p "
-                         "daddr:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
+                         "daddr:" NIP6_FMT " ",
                          __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr));
 
        if (!asoc) {
                ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr);
-               SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: "
-                                 "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+               SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
                                  NIP6(saddr->v6.sin6_addr));
                return;
        }
@@ -305,13 +302,11 @@ static void sctp_v6_get_saddr(struct sctp_association *asoc,
 
        if (baddr) {
                memcpy(saddr, baddr, sizeof(union sctp_addr));
-               SCTP_DEBUG_PRINTK("saddr: "
-                                 "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+               SCTP_DEBUG_PRINTK("saddr: " NIP6_FMT "\n",
                                  NIP6(saddr->v6.sin6_addr));
        } else {
                printk(KERN_ERR "%s: asoc:%p Could not find a valid source "
-                      "address for the "
-                      "dest:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                      "address for the dest:" NIP6_FMT "\n",
                       __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr));
        }
 
@@ -675,8 +670,7 @@ static int sctp_v6_is_ce(const struct sk_buff *skb)
 /* Dump the v6 addr to the seq file. */
 static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr)
 {
-       seq_printf(seq, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ",
-                  NIP6(addr->v6.sin6_addr));
+       seq_printf(seq, NIP6_FMT " ", NIP6(addr->v6.sin6_addr));
 }
 
 /* Initialize a PF_INET6 socket msg_name. */
index 557a7d90b92a1a9f5c31430d81d2ed3a7222d3b5..477d7f80dba686713ac6b10288f343a581e1ba5b 100644 (file)
@@ -1036,14 +1036,14 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep,
                if (from_addr.sa.sa_family == AF_INET6) {
                        printk(KERN_WARNING
                               "%s association %p could not find address "
-                              "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n",
+                              NIP6_FMT "\n",
                               __FUNCTION__,
                               asoc,
                               NIP6(from_addr.v6.sin6_addr));
                } else {
                        printk(KERN_WARNING
                               "%s association %p could not find address "
-                              "%u.%u.%u.%u\n",
+                              NIPQUAD_FMT "\n",
                               __FUNCTION__,
                               asoc,
                               NIPQUAD(from_addr.v4.sin_addr.s_addr));
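
The SCTP hunks above replace the hand-written "%04x:...:%04x" format strings
with the NIP6_FMT macro while keeping the existing NIP6() argument expansion.
A user-space approximation of what that pairing prints; the EX_NIP6_FMT and
w16() names below are local stand-ins for illustration, not the kernel
definitions:

#include <stdio.h>
#include <arpa/inet.h>

/* Local stand-in for the format string carried by NIP6_FMT. */
#define EX_NIP6_FMT "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"

/* i-th 16-bit word of an IPv6 address, in host byte order. */
static unsigned int w16(const struct in6_addr *a, int i)
{
        return (a->s6_addr[2 * i] << 8) | a->s6_addr[2 * i + 1];
}

int main(void)
{
        struct in6_addr a;

        inet_pton(AF_INET6, "fe80::1", &a);
        printf("src:" EX_NIP6_FMT "\n",
               w16(&a, 0), w16(&a, 1), w16(&a, 2), w16(&a, 3),
               w16(&a, 4), w16(&a, 5), w16(&a, 6), w16(&a, 7));
        return 0;
}
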
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644 (file)
index 0000000..05ab18e
--- /dev/null
@@ -0,0 +1,112 @@
+#
+# TIPC configuration
+#
+
+menu "TIPC Configuration (EXPERIMENTAL)"
+       depends on INET && EXPERIMENTAL
+
+config TIPC
+       tristate "The TIPC Protocol (EXPERIMENTAL)"
+       ---help---
+         TBD.
+
+         This protocol support is also available as a module ( = code which
+         can be inserted in and removed from the running kernel whenever you
+         want). The module will be called tipc. If you want to compile it
+         as a module, say M here and read <file:Documentation/modules.txt>.
+
+         If in doubt, say N.
+
+config TIPC_ADVANCED
+       bool "TIPC: Advanced configuration"
+       depends on TIPC
+       default n
+       help
+         Saying Y here will open some advanced configuration
+         for TIPC. Most users do not need to bother, so if
+         unsure, just say N.
+
+config TIPC_ZONES
+       int "Maximum number of zones in network"
+       depends on TIPC && TIPC_ADVANCED
+       default "3"
+       help
+        Maximum number of zones inside a TIPC network. Maximum supported
+        value is 255 zones, minimum is 1.
+
+        Default is 3 zones in a network; setting this to a higher value
+        allows more zones but might use more memory.
+
+config TIPC_CLUSTERS
+       int "Maximum number of clusters in a zone"
+       depends on TIPC && TIPC_ADVANCED
+       default "1"
+       help
+         ***Only 1 (one cluster in a zone) is supported by current code.
+         Any value set here will be overridden.***
+
+         Maximum number of clusters inside a TIPC zone. Maximum supported
+         value is 4095 clusters, minimum is 1.
+
+         Default is 1; setting this to a smaller value might save some
+         memory, setting it to a higher value allows more clusters and
+         might consume more memory.
+
+config TIPC_NODES
+       int "Maximum number of nodes in cluster"
+       depends on TIPC && TIPC_ADVANCED
+       default "255"
+       help
+         Maximum number of nodes inside a TIPC cluster. Maximum
+         supported value is 2047 nodes, minimum is 8.
+
+         Setting this to a smaller value saves some memory,
+         setting it to a higher value allows more nodes.
+
+config TIPC_SLAVE_NODES
+       int "Maximum number of slave nodes in cluster"
+       depends on TIPC && TIPC_ADVANCED
+       default "0"
+       help
+         ***This capability is not supported by current code.***
+
+         Maximum number of slave nodes inside a TIPC cluster. Maximum
+         supported value is 2047 nodes, minimum is 0.
+
+         Setting this to a smaller value saves some memory,
+         setting it to a higher value allows more nodes.
+
+config TIPC_PORTS
+       int "Maximum number of ports in a node"
+       depends on TIPC && TIPC_ADVANCED
+       default "8191"
+       help
+         Maximum number of ports within a node. Maximum
+         supported value is 64535 ports, minimum is 127.
+
+         Setting this to a smaller value saves some memory,
+         setting it to a higher value allows more ports.
+
+config TIPC_LOG
+       int "Size of log buffer"
+       depends on TIPC && TIPC_ADVANCED
+       default 0
+       help
+         Size (in bytes) of TIPC's internal log buffer, which records the
+         occurrence of significant events.  Maximum supported value
+         is 32768 bytes, minimum is 0.
+
+         There is no need to enable the log buffer unless the node will be
+         managed remotely via TIPC.
+
+config TIPC_DEBUG
+       bool "Enable debugging support"
+       depends on TIPC
+       default n
+       help
+         This will enable debugging of TIPC.
+
+         Only say Y here if you are having trouble with TIPC.  It will
+         enable the display of detailed information about what is going on.
+
+endmenu
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644 (file)
index 0000000..dceb702
--- /dev/null
@@ -0,0 +1,13 @@
+#
+# Makefile for the Linux TIPC layer
+#
+
+obj-$(CONFIG_TIPC) := tipc.o
+
+tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
+          core.o handler.o link.o discover.o msg.o  \
+          name_distr.o  subscr.o name_table.o net.o  \
+          netlink.o node.o node_subscr.o port.o ref.o  \
+          socket.o user_reg.o zone.o dbg.o eth_media.o
+
+# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644 (file)
index 0000000..eca2226
--- /dev/null
@@ -0,0 +1,94 @@
+/*
+ * net/tipc/addr.c: TIPC address utility routines
+ *     
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "addr.h"
+#include "zone.h"
+#include "cluster.h"
+#include "net.h"
+
+u32 tipc_get_addr(void)
+{
+       return tipc_own_addr;
+}
+
+/**
+ * addr_domain_valid - validates a network domain address
+ * 
+ * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>, 
+ * where Z, C, and N are non-zero and do not exceed the configured limits.
+ * 
+ * Returns 1 if domain address is valid, otherwise 0
+ */
+
+int addr_domain_valid(u32 addr)
+{
+       u32 n = tipc_node(addr);
+       u32 c = tipc_cluster(addr);
+       u32 z = tipc_zone(addr);
+       u32 max_nodes = tipc_max_nodes;
+
+       if (is_slave(addr))
+               max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+       if (n > max_nodes)
+               return 0;
+       if (c > tipc_max_clusters)
+               return 0;
+       if (z > tipc_max_zones)
+               return 0;
+
+       if (n && (!z || !c))
+               return 0;
+       if (c && !z)
+               return 0;
+       return 1;
+}
+
+/**
+ * addr_node_valid - validates a proposed network address for this node
+ * 
+ * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed 
+ * the configured limits.
+ * 
+ * Returns 1 if address can be used, otherwise 0
+ */
+
+int addr_node_valid(u32 addr)
+{
+       return (addr_domain_valid(addr) && tipc_node(addr));
+}
+
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644 (file)
index 0000000..02ca717
--- /dev/null
@@ -0,0 +1,128 @@
+/*
+ * net/tipc/addr.h: Include file for TIPC address utility routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ADDR_H
+#define _TIPC_ADDR_H
+
+static inline u32 own_node(void)
+{
+       return tipc_node(tipc_own_addr);
+}
+
+static inline u32 own_cluster(void)
+{
+       return tipc_cluster(tipc_own_addr);
+}
+
+static inline u32 own_zone(void)
+{
+       return tipc_zone(tipc_own_addr);
+}
+
+static inline int in_own_cluster(u32 addr)
+{
+       return !((addr ^ tipc_own_addr) >> 12);
+}
+
+static inline int in_own_zone(u32 addr)
+{
+       return !((addr ^ tipc_own_addr) >> 24);
+}
+
+static inline int is_slave(u32 addr)
+{
+       return addr & 0x800;
+}
+
+static inline int may_route(u32 addr)
+{
+       return(addr ^ tipc_own_addr) >> 11;
+}
+
+static inline int in_scope(u32 domain, u32 addr)
+{
+       if (!domain || (domain == addr))
+               return 1;
+       if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
+               return 1;
+       if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
+               return 1;
+       return 0;
+}
+
+/**
+ * addr_scope - convert message lookup domain to equivalent 2-bit scope value
+ */
+
+static inline int addr_scope(u32 domain)
+{
+       if (likely(!domain))
+               return TIPC_ZONE_SCOPE;
+       if (tipc_node(domain))
+               return TIPC_NODE_SCOPE;
+       if (tipc_cluster(domain))
+               return TIPC_CLUSTER_SCOPE;
+       return TIPC_ZONE_SCOPE;
+}
+
+/**
+ * addr_domain - convert 2-bit scope value to equivalent message lookup domain
+ *  
+ * Needed when address of a named message must be looked up a second time 
+ * after a network hop.
+ */
+
+static inline int addr_domain(int sc)
+{
+       if (likely(sc == TIPC_NODE_SCOPE))
+               return tipc_own_addr;
+       if (sc == TIPC_CLUSTER_SCOPE)
+               return tipc_addr(tipc_zone(tipc_own_addr),
+                                tipc_cluster(tipc_own_addr), 0);
+       return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
+}
+
+static inline char *addr_string_fill(char *string, u32 addr)
+{
+       snprintf(string, 16, "<%u.%u.%u>",
+                tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
+       return string;
+}
+
+int addr_domain_valid(u32);
+int addr_node_valid(u32 addr);
+
+#endif
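
The inline helpers above imply the TIPC address packing of zone in bits
24-31, cluster in bits 12-23, and node in bits 0-11 (see the 0xff000000 and
0xfffff000 masks in in_scope() and the shifts in in_own_cluster() and
in_own_zone()). A self-contained sketch of that layout and of the <Z.C.0>
and <Z.0.0> domain forms accepted by addr_domain_valid(); the example_*
names are local to this sketch, not part of the patch:

#include <stdio.h>

typedef unsigned int u32;

static u32 example_addr(u32 z, u32 c, u32 n)
{
        return (z << 24) | (c << 12) | n;
}

static u32 example_zone(u32 a)    { return a >> 24; }
static u32 example_cluster(u32 a) { return (a >> 12) & 0xfff; }
static u32 example_node(u32 a)    { return a & 0xfff; }

int main(void)
{
        u32 a = example_addr(1, 1, 10);         /* <1.1.10> */

        printf("<%u.%u.%u> = 0x%08x\n",
               example_zone(a), example_cluster(a), example_node(a), a);

        /* <Z.C.0> names a whole cluster, <Z.0.0> a whole zone, <0.0.0>
         * the entire network -- the patterns addr_domain_valid() accepts. */
        printf("cluster domain: 0x%08x\n", a & 0xfffff000u);
        printf("zone domain:    0x%08x\n", a & 0xff000000u);
        return 0;
}
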
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644 (file)
index 0000000..9713d62
--- /dev/null
@@ -0,0 +1,806 @@
+/*
+ * net/tipc/bcast.c: TIPC broadcast code
+ *     
+ * Copyright (c) 2004-2006, Ericsson AB
+ * Copyright (c) 2004, Intel Corporation.
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "msg.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "bcast.h"
+
+
+#define MAX_PKT_DEFAULT_MCAST 1500     /* bcast link max packet size (fixed) */
+
+#define BCLINK_WIN_DEFAULT 20          /* bcast link window size (default) */
+
+#define BCLINK_LOG_BUF_SIZE 0
+
+/**
+ * struct bcbearer_pair - a pair of bearers used by broadcast link
+ * @primary: pointer to primary bearer
+ * @secondary: pointer to secondary bearer
+ * 
+ * Bearers must have same priority and same set of reachable destinations 
+ * to be paired.
+ */
+
+struct bcbearer_pair {
+       struct bearer *primary;
+       struct bearer *secondary;
+};
+
+/**
+ * struct bcbearer - bearer used by broadcast link
+ * @bearer: (non-standard) broadcast bearer structure
+ * @media: (non-standard) broadcast media structure
+ * @bpairs: array of bearer pairs
+ * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
+ */
+
+struct bcbearer {
+       struct bearer bearer;
+       struct media media;
+       struct bcbearer_pair bpairs[MAX_BEARERS];
+       struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
+};
+
+/**
+ * struct bclink - link used for broadcast messages
+ * @link: (non-standard) broadcast link structure
+ * @node: (non-standard) node structure representing b'cast link's peer node
+ * 
+ * Handles sequence numbering, fragmentation, bundling, etc.
+ */
+
+struct bclink {
+       struct link link;
+       struct node node;
+};
+
+
+static struct bcbearer *bcbearer = NULL;
+static struct bclink *bclink = NULL;
+static struct link *bcl = NULL;
+static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
+
+char bc_link_name[] = "multicast-link";
+
+
+static inline u32 buf_seqno(struct sk_buff *buf)
+{
+       return msg_seqno(buf_msg(buf));
+} 
+
+static inline u32 bcbuf_acks(struct sk_buff *buf)
+{
+       return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
+}
+
+static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
+{
+       TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
+}
+
+static inline void bcbuf_decr_acks(struct sk_buff *buf)
+{
+       bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
+}
+
+
+/** 
+ * bclink_set_gap - set gap according to contents of current deferred pkt queue
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static inline void bclink_set_gap(struct node *n_ptr)
+{
+       struct sk_buff *buf = n_ptr->bclink.deferred_head;
+
+       n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
+               mod(n_ptr->bclink.last_in);
+       if (unlikely(buf != NULL))
+               n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
+}
+
+/** 
+ * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
+ * 
+ * This mechanism endeavours to prevent all nodes in network from trying
+ * to ACK or NACK at the same time.
+ * 
+ * Note: TIPC uses a different trigger to distribute ACKs than it does to
+ *       distribute NACKs, but tries to use the same spacing (divide by 16). 
+ */
+
+static inline int bclink_ack_allowed(u32 n)
+{
+       return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
+}
+
+
+/** 
+ * bclink_retransmit_pkt - retransmit broadcast packets
+ * @after: sequence number of last packet to *not* retransmit
+ * @to: sequence number of last packet to retransmit
+ * 
+ * Called with 'node' locked, bc_lock unlocked
+ */
+
+static void bclink_retransmit_pkt(u32 after, u32 to)
+{
+       struct sk_buff *buf;
+
+       spin_lock_bh(&bc_lock);
+       buf = bcl->first_out;
+       while (buf && less_eq(buf_seqno(buf), after)) {
+               buf = buf->next;                
+       }
+       if (buf != NULL)
+               link_retransmit(bcl, buf, mod(to - after));
+       spin_unlock_bh(&bc_lock);              
+}
+
+/** 
+ * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * @n_ptr: node that sent acknowledgement info
+ * @acked: broadcast sequence # that has been acknowledged
+ * 
+ * Node is locked, bc_lock unlocked.
+ */
+
+void bclink_acknowledge(struct node *n_ptr, u32 acked)
+{
+       struct sk_buff *crs;
+       struct sk_buff *next;
+       unsigned int released = 0;
+
+       if (less_eq(acked, n_ptr->bclink.acked))
+               return;
+
+       spin_lock_bh(&bc_lock);
+
+       /* Skip over packets that node has previously acknowledged */
+
+       crs = bcl->first_out;
+       while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
+               crs = crs->next;
+       }
+
+       /* Update packets that node is now acknowledging */
+
+       while (crs && less_eq(buf_seqno(crs), acked)) {
+               next = crs->next;
+               bcbuf_decr_acks(crs);
+               if (bcbuf_acks(crs) == 0) {
+                       bcl->first_out = next;
+                       bcl->out_queue_size--;
+                       buf_discard(crs);
+                       released = 1;
+               }
+               crs = next;
+       }
+       n_ptr->bclink.acked = acked;
+
+       /* Try resolving broadcast link congestion, if necessary */
+
+       if (unlikely(bcl->next_out))
+               link_push_queue(bcl);
+       if (unlikely(released && !list_empty(&bcl->waiting_ports)))
+               link_wakeup_ports(bcl, 0);
+       spin_unlock_bh(&bc_lock);
+}
+
+/** 
+ * bclink_send_ack - unicast an ACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_ack(struct node *n_ptr)
+{
+       struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
+
+       if (l_ptr != NULL)
+               link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+}
+
+/** 
+ * bclink_send_nack- broadcast a NACK msg
+ * 
+ * net_lock and node lock set
+ */
+
+static void bclink_send_nack(struct node *n_ptr)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+
+       if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
+               return;
+
+       buf = buf_acquire(INT_H_SIZE);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
+                        TIPC_OK, INT_H_SIZE, n_ptr->addr);
+               msg_set_mc_netid(msg, tipc_net_id);
+               msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in)); 
+               msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
+               msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
+               msg_set_bcast_tag(msg, tipc_own_tag);
+
+               if (bearer_send(&bcbearer->bearer, buf, 0)) {
+                       bcl->stats.sent_nacks++;
+                       buf_discard(buf);
+               } else {
+                       bearer_schedule(bcl->b_ptr, bcl);
+                       bcl->proto_msg_queue = buf;
+                       bcl->stats.bearer_congs++;
+               }
+
+               /* 
+                * Ensure we don't send another NACK msg to the node
+                * until 16 more deferred messages arrive from it
+                * (i.e. helps prevent all nodes from NACK'ing at same time)
+                */
+               
+               n_ptr->bclink.nack_sync = tipc_own_tag;
+       }
+}
+
+/** 
+ * bclink_check_gap - send a NACK if a sequence gap exists
+ *
+ * net_lock and node lock set
+ */
+
+void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+{
+       if (!n_ptr->bclink.supported ||
+           less_eq(last_sent, mod(n_ptr->bclink.last_in)))
+               return;
+
+       bclink_set_gap(n_ptr);
+       if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
+               n_ptr->bclink.gap_to = last_sent;
+       bclink_send_nack(n_ptr);
+}
+
+/** 
+ * bclink_peek_nack - process a NACK msg meant for another node
+ * 
+ * Only net_lock set.
+ */
+
+void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+{
+       struct node *n_ptr = node_find(dest);
+       u32 my_after, my_to;
+
+       if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+               return;
+       node_lock(n_ptr);
+       /*
+        * Modify gap to suppress unnecessary NACKs from this node
+        */
+       my_after = n_ptr->bclink.gap_after;
+       my_to = n_ptr->bclink.gap_to;
+
+       if (less_eq(gap_after, my_after)) {
+               if (less(my_after, gap_to) && less(gap_to, my_to))
+                       n_ptr->bclink.gap_after = gap_to;
+               else if (less_eq(my_to, gap_to))
+                       n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
+       } else if (less_eq(gap_after, my_to)) {
+               if (less_eq(my_to, gap_to))
+                       n_ptr->bclink.gap_to = gap_after;
+       } else {
+               /* 
+                * Expand gap if missing bufs not in deferred queue:
+                */
+               struct sk_buff *buf = n_ptr->bclink.deferred_head;
+               u32 prev = n_ptr->bclink.gap_to;
+
+               for (; buf; buf = buf->next) {
+                       u32 seqno = buf_seqno(buf);
+
+                       if (mod(seqno - prev) != 1)
+                               buf = NULL;
+                       if (seqno == gap_after)
+                               break;
+                       prev = seqno;
+               }
+               if (buf == NULL)
+                       n_ptr->bclink.gap_to = gap_after;
+       }
+       /*
+        * Some nodes may send a complementary NACK now:
+        */ 
+       if (bclink_ack_allowed(sender_tag + 1)) {
+               if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
+                       bclink_send_nack(n_ptr);
+                       bclink_set_gap(n_ptr);
+               }
+       }
+       node_unlock(n_ptr);
+}
+
+/**
+ * bclink_send_msg - broadcast a packet to all nodes in cluster
+ */
+
+int bclink_send_msg(struct sk_buff *buf)
+{
+       int res;
+
+       spin_lock_bh(&bc_lock);
+
+       res = link_send_buf(bcl, buf);
+       if (unlikely(res == -ELINKCONG))
+               buf_discard(buf);
+       else
+               bcl->stats.sent_info++;
+
+       if (bcl->out_queue_size > bcl->stats.max_queue_sz)
+               bcl->stats.max_queue_sz = bcl->out_queue_size;
+       bcl->stats.queue_sz_counts++;
+       bcl->stats.accu_queue_sz += bcl->out_queue_size;
+
+       spin_unlock_bh(&bc_lock);
+       return res;
+}
+
+/**
+ * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * 
+ * net_lock is read_locked, no other locks set
+ */
+
+void bclink_recv_pkt(struct sk_buff *buf)
+{        
+       struct tipc_msg *msg = buf_msg(buf);
+       struct node* node = node_find(msg_prevnode(msg));
+       u32 next_in;
+       u32 seqno;
+       struct sk_buff *deferred;
+
+       msg_dbg(msg, "<BC<<<");
+
+       if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || 
+                    (msg_mc_netid(msg) != tipc_net_id))) {
+               buf_discard(buf);
+               return;
+       }
+
+       if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
+               msg_dbg(msg, "<BCNACK<<<");
+               if (msg_destnode(msg) == tipc_own_addr) {
+                       node_lock(node);
+                       bclink_acknowledge(node, msg_bcast_ack(msg));
+                       node_unlock(node);
+                       bcl->stats.recv_nacks++;
+                       bclink_retransmit_pkt(msg_bcgap_after(msg),
+                                             msg_bcgap_to(msg));
+               } else {
+                       bclink_peek_nack(msg_destnode(msg),
+                                        msg_bcast_tag(msg),
+                                        msg_bcgap_after(msg),
+                                        msg_bcgap_to(msg));
+               }
+               buf_discard(buf);
+               return;
+       }
+
+       node_lock(node);
+receive:
+       deferred = node->bclink.deferred_head;
+       next_in = mod(node->bclink.last_in + 1);
+       seqno = msg_seqno(msg);
+
+       if (likely(seqno == next_in)) {
+               bcl->stats.recv_info++;
+               node->bclink.last_in++;
+               bclink_set_gap(node);
+               if (unlikely(bclink_ack_allowed(seqno))) {
+                       bclink_send_ack(node);
+                       bcl->stats.sent_acks++;
+               }
+               if (likely(msg_isdata(msg))) {
+                       node_unlock(node);
+                       port_recv_mcast(buf, NULL);
+               } else if (msg_user(msg) == MSG_BUNDLER) {
+                       bcl->stats.recv_bundles++;
+                       bcl->stats.recv_bundled += msg_msgcnt(msg);
+                       node_unlock(node);
+                       link_recv_bundle(buf);
+               } else if (msg_user(msg) == MSG_FRAGMENTER) {
+                       bcl->stats.recv_fragments++;
+                       if (link_recv_fragment(&node->bclink.defragm,
+                                              &buf, &msg))
+                               bcl->stats.recv_fragmented++;
+                       node_unlock(node);
+                       net_route_msg(buf);
+               } else {
+                       node_unlock(node);
+                       net_route_msg(buf);
+               }
+               if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
+                       node_lock(node);
+                       buf = deferred;
+                       msg = buf_msg(buf);
+                       node->bclink.deferred_head = deferred->next;
+                       goto receive;
+               }
+               return;
+       } else if (less(next_in, seqno)) {
+               u32 gap_after = node->bclink.gap_after;
+               u32 gap_to = node->bclink.gap_to;
+
+               if (link_defer_pkt(&node->bclink.deferred_head,
+                                  &node->bclink.deferred_tail,
+                                  buf)) {
+                       node->bclink.nack_sync++;
+                       bcl->stats.deferred_recv++;
+                       if (seqno == mod(gap_after + 1))
+                               node->bclink.gap_after = seqno;
+                       else if (less(gap_after, seqno) && less(seqno, gap_to))
+                               node->bclink.gap_to = seqno;
+               }
+               if (bclink_ack_allowed(node->bclink.nack_sync)) {
+                       if (gap_to != gap_after)
+                               bclink_send_nack(node);
+                       bclink_set_gap(node);
+               }
+       } else {
+               bcl->stats.duplicates++;
+               buf_discard(buf);
+       }
+       node_unlock(node);
+}
+
+u32 bclink_get_last_sent(void)
+{
+       u32 last_sent = mod(bcl->next_out_no - 1);
+
+       if (bcl->next_out)
+               last_sent = mod(buf_seqno(bcl->next_out) - 1);
+       return last_sent;
+}
+
+u32 bclink_acks_missing(struct node *n_ptr)
+{
+       return (n_ptr->bclink.supported &&
+               (bclink_get_last_sent() != n_ptr->bclink.acked));
+}
+
+
+/**
+ * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ * 
+ * Send through as many bearers as necessary to reach all nodes
+ * that support TIPC multicasting.
+ * 
+ * Returns 0 if packet sent successfully, non-zero if not
+ */
+
+int bcbearer_send(struct sk_buff *buf,
+                 struct tipc_bearer *unused1,
+                 struct tipc_media_addr *unused2)
+{
+       static int send_count = 0;
+
+       struct node_map remains;
+       struct node_map remains_new;
+       int bp_index;
+       int swap_time;
+
+       /* Prepare buffer for broadcasting (if first time trying to send it) */
+
+       if (likely(!msg_non_seq(buf_msg(buf)))) {
+               struct tipc_msg *msg;
+
+               assert(cluster_bcast_nodes.count != 0);
+               bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+               msg = buf_msg(buf);
+               msg_set_non_seq(msg);
+               msg_set_mc_netid(msg, tipc_net_id);
+       }
+
+       /* Determine if bearer pairs should be swapped following this attempt */
+
+       if ((swap_time = (++send_count >= 10)))
+               send_count = 0;
+
+       /* Send buffer over bearers until all targets reached */
+       
+       remains = cluster_bcast_nodes;
+
+       for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
+               struct bearer *p = bcbearer->bpairs[bp_index].primary;
+               struct bearer *s = bcbearer->bpairs[bp_index].secondary;
+
+               if (!p)
+                       break;  /* no more bearers to try */
+
+               nmap_diff(&remains, &p->nodes, &remains_new);
+               if (remains_new.count == remains.count)
+                       continue;       /* bearer pair doesn't add anything */
+
+               if (!p->publ.blocked &&
+                   !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
+                       if (swap_time && s && !s->publ.blocked)
+                               goto swap;
+                       else
+                               goto update;
+               }
+
+               if (!s || s->publ.blocked ||
+                   s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
+                       continue;       /* unable to send using bearer pair */
+swap:
+               bcbearer->bpairs[bp_index].primary = s;
+               bcbearer->bpairs[bp_index].secondary = p;
+update:
+               if (remains_new.count == 0)
+                       return TIPC_OK;
+
+               remains = remains_new;
+       }
+       
+       /* Unable to reach all targets */
+
+       bcbearer->bearer.publ.blocked = 1;
+       bcl->stats.bearer_congs++;
+       return ~TIPC_OK;
+}
+
+/**
+ * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ */
+
+void bcbearer_sort(void)
+{
+       struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
+       struct bcbearer_pair *bp_curr;
+       int b_index;
+       int pri;
+
+       spin_lock_bh(&bc_lock);
+
+       /* Group bearers by priority (can assume max of two per priority) */
+
+       memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
+
+       for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
+               struct bearer *b = &bearers[b_index];
+
+               if (!b->active || !b->nodes.count)
+                       continue;
+
+               if (!bp_temp[b->priority].primary)
+                       bp_temp[b->priority].primary = b;
+               else
+                       bp_temp[b->priority].secondary = b;
+       }
+
+       /* Create array of bearer pairs for broadcasting */
+
+       bp_curr = bcbearer->bpairs;
+       memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
+
+       for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
+
+               if (!bp_temp[pri].primary)
+                       continue;
+
+               bp_curr->primary = bp_temp[pri].primary;
+
+               if (bp_temp[pri].secondary) {
+                       if (nmap_equal(&bp_temp[pri].primary->nodes,
+                                      &bp_temp[pri].secondary->nodes)) {
+                               bp_curr->secondary = bp_temp[pri].secondary;
+                       } else {
+                               bp_curr++;
+                               bp_curr->primary = bp_temp[pri].secondary;
+                       }
+               }
+
+               bp_curr++;
+       }
+
+       spin_unlock_bh(&bc_lock);
+}
+
+/**
+ * bcbearer_push - resolve bearer congestion
+ * 
+ * Forces bclink to push out any unsent packets, until all packets are gone
+ * or congestion reoccurs.
+ * No locks set when function called
+ */
+
+void bcbearer_push(void)
+{
+       struct bearer *b_ptr;
+
+       spin_lock_bh(&bc_lock);
+       b_ptr = &bcbearer->bearer;
+       if (b_ptr->publ.blocked) {
+               b_ptr->publ.blocked = 0;
+               bearer_lock_push(b_ptr);
+       }
+       spin_unlock_bh(&bc_lock);
+}
+
+
+int bclink_stats(char *buf, const u32 buf_size)
+{
+       struct print_buf pb;
+
+       if (!bcl)
+               return 0;
+
+       printbuf_init(&pb, buf, buf_size);
+
+       spin_lock_bh(&bc_lock);
+
+       tipc_printf(&pb, "Link <%s>\n"
+                        "  Window:%u packets\n", 
+                   bcl->name, bcl->queue_limit[0]);
+       tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   bcl->stats.recv_info,
+                   bcl->stats.recv_fragments,
+                   bcl->stats.recv_fragmented,
+                   bcl->stats.recv_bundles,
+                   bcl->stats.recv_bundled);
+       tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   bcl->stats.sent_info,
+                   bcl->stats.sent_fragments,
+                   bcl->stats.sent_fragmented, 
+                   bcl->stats.sent_bundles,
+                   bcl->stats.sent_bundled);
+       tipc_printf(&pb, "  RX naks:%u defs:%u dups:%u\n", 
+                   bcl->stats.recv_nacks,
+                   bcl->stats.deferred_recv, 
+                   bcl->stats.duplicates);
+       tipc_printf(&pb, "  TX naks:%u acks:%u dups:%u\n", 
+                   bcl->stats.sent_nacks, 
+                   bcl->stats.sent_acks, 
+                   bcl->stats.retransmitted);
+       tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
+                   bcl->stats.bearer_congs,
+                   bcl->stats.link_congs,
+                   bcl->stats.max_queue_sz,
+                   bcl->stats.queue_sz_counts
+                   ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
+                   : 0);
+
+       spin_unlock_bh(&bc_lock);
+       return printbuf_validate(&pb);
+}
+
+int bclink_reset_stats(void)
+{
+       if (!bcl)
+               return -ENOPROTOOPT;
+
+       spin_lock_bh(&bc_lock);
+       memset(&bcl->stats, 0, sizeof(bcl->stats));
+       spin_unlock_bh(&bc_lock);
+       return TIPC_OK;
+}
+
+int bclink_set_queue_limits(u32 limit)
+{
+       if (!bcl)
+               return -ENOPROTOOPT;
+       if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
+               return -EINVAL;
+
+       spin_lock_bh(&bc_lock);
+       link_set_queue_limits(bcl, limit);
+       spin_unlock_bh(&bc_lock);
+       return TIPC_OK;
+}
+
+int bclink_init(void)
+{
+       bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
+       bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
+       if (!bcbearer || !bclink) {
+ nomem:
+               warn("Memory squeeze; Failed to create multicast link\n");
+               kfree(bcbearer);
+               bcbearer = NULL;
+               kfree(bclink);
+               bclink = NULL;
+               return -ENOMEM;
+       }
+
+       memset(bcbearer, 0, sizeof(struct bcbearer));
+       INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
+       bcbearer->bearer.media = &bcbearer->media;
+       bcbearer->media.send_msg = bcbearer_send;
+       sprintf(bcbearer->media.name, "tipc-multicast");
+
+       bcl = &bclink->link;
+       memset(bclink, 0, sizeof(struct bclink));
+       INIT_LIST_HEAD(&bcl->waiting_ports);
+       bcl->next_out_no = 1;
+       bclink->node.lock =  SPIN_LOCK_UNLOCKED;        
+       bcl->owner = &bclink->node;
+        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
+       link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+       bcl->b_ptr = &bcbearer->bearer;
+       bcl->state = WORKING_WORKING;
+       sprintf(bcl->name, bc_link_name);
+
+       if (BCLINK_LOG_BUF_SIZE) {
+               char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+               if (!pb)
+                       goto nomem;
+               printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+       }
+
+       return TIPC_OK;
+}
+
+void bclink_stop(void)
+{
+       spin_lock_bh(&bc_lock);
+       if (bcbearer) {
+               link_stop(bcl);
+               if (BCLINK_LOG_BUF_SIZE)
+                       kfree(bcl->print_buf.buf);
+               bcl = NULL;
+               kfree(bclink);
+               bclink = NULL;
+               kfree(bcbearer);
+               bcbearer = NULL;
+       }
+       spin_unlock_bh(&bc_lock);
+}
+
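
As the comment above bclink_ack_allowed() notes, nodes stagger their
broadcast ACKs/NACKs by replying only when the packet number modulo the
minimum link window matches their own tag. A small stand-alone illustration
of that spreading; the window size of 16 and the tag values are assumptions
of this sketch, mirroring the "divide by 16" note above:

#include <stdio.h>

#define EXAMPLE_LINK_WIN 16     /* assumed window, per the comment above */

/* Same test as bclink_ack_allowed(), with the tag passed explicitly. */
static int ack_allowed(unsigned int seqno, unsigned int own_tag)
{
        return (seqno % EXAMPLE_LINK_WIN) == own_tag;
}

int main(void)
{
        unsigned int seqno, tag;

        for (tag = 0; tag < 3; tag++) {
                printf("node tag %u acks at:", tag);
                for (seqno = 0; seqno < 48; seqno++)
                        if (ack_allowed(seqno, tag))
                                printf(" %u", seqno);
                printf("\n");
        }
        return 0;
}
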
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644 (file)
index 0000000..5430e52
--- /dev/null
@@ -0,0 +1,223 @@
+/*
+ * net/tipc/bcast.h: Include file for TIPC broadcast code
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BCAST_H
+#define _TIPC_BCAST_H
+
+#define MAX_NODES 4096
+#define WSIZE 32
+
+/**
+ * struct node_map - set of node identifiers
+ * @count: # of nodes in set
+ * @map: bitmap of node identifiers that are in the set
+ */
+
+struct node_map {
+       u32 count;
+       u32 map[MAX_NODES / WSIZE];
+};
+
+
+#define PLSIZE 32
+
+/**
+ * struct port_list - set of node local destination ports
+ * @count: # of ports in set (only valid for first entry in list)
+ * @next: pointer to next entry in list
+ * @ports: array of port references
+ */
+
+struct port_list {
+       int count;
+       struct port_list *next;
+       u32 ports[PLSIZE];
+};
+
+
+struct node;
+
+extern char bc_link_name[];
+
+
+/**
+ * nmap_get - determine if node exists in a node map
+ */
+
+static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       int b = n % WSIZE;
+
+       return nm_ptr->map[w] & (1 << b);
+}
+
+/**
+ * nmap_add - add a node to a node map
+ */
+
+static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       u32 mask = (1 << (n % WSIZE));
+
+       if ((nm_ptr->map[w] & mask) == 0) {
+               nm_ptr->count++;
+               nm_ptr->map[w] |= mask;
+       }
+}
+
+/** 
+ * nmap_remove - remove a node from a node map
+ */
+
+static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+{
+       int n = tipc_node(node);
+       int w = n / WSIZE;
+       u32 mask = (1 << (n % WSIZE));
+
+       if ((nm_ptr->map[w] & mask) != 0) {
+               nm_ptr->map[w] &= ~mask;
+               nm_ptr->count--;
+       }
+}
+
+/**
+ * nmap_equal - test for equality of node maps
+ */
+
+static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+{
+       return !memcmp(nm_a, nm_b, sizeof(*nm_a));
+}
+
+/**
+ * nmap_diff - find differences between node maps
+ * @nm_a: input node map A
+ * @nm_b: input node map B
+ * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
+ */
+
+static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+                            struct node_map *nm_diff)
+{
+       int stop = sizeof(nm_a->map) / sizeof(u32);
+       int w;
+       int b;
+       u32 map;
+
+       memset(nm_diff, 0, sizeof(*nm_diff));
+       for (w = 0; w < stop; w++) {
+               map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
+               nm_diff->map[w] = map;
+               if (map != 0) {
+                       for (b = 0 ; b < WSIZE; b++) {
+                               if (map & (1 << b))
+                                       nm_diff->count++;
+                       }
+               }
+       }
+}
+
+/**
+ * port_list_add - add a port to a port list, ensuring no duplicates
+ */
+
+static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+{
+       struct port_list *item = pl_ptr;
+       int i;
+       int item_sz = PLSIZE;
+       int cnt = pl_ptr->count;
+
+       for (; ; cnt -= item_sz, item = item->next) {
+               if (cnt < PLSIZE)
+                       item_sz = cnt;
+               for (i = 0; i < item_sz; i++)
+                       if (item->ports[i] == port)
+                               return;
+               if (i < PLSIZE) {
+                       item->ports[i] = port;
+                       pl_ptr->count++;
+                       return;
+               }
+               if (!item->next) {
+                       item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
+                       if (!item->next) {
+                               warn("Memory squeeze: multicast destination port list is incomplete\n");
+                               return;
+                       }
+                       item->next->next = NULL;
+               }
+       }
+}
+
+/**
+ * port_list_free - free dynamically created entries in port_list chain
+ * 
+ * Note: First item is on stack, so it doesn't need to be released
+ */
+
+static inline void port_list_free(struct port_list *pl_ptr)
+{
+       struct port_list *item;
+       struct port_list *next;
+
+       for (item = pl_ptr->next; item; item = next) {
+               next = item->next;
+               kfree(item);
+       }
+}
+
+
+int  bclink_init(void);
+void bclink_stop(void);
+void bclink_acknowledge(struct node *n_ptr, u32 acked);
+int  bclink_send_msg(struct sk_buff *buf);
+void bclink_recv_pkt(struct sk_buff *buf);
+u32  bclink_get_last_sent(void);
+u32  bclink_acks_missing(struct node *n_ptr);
+void bclink_check_gap(struct node *n_ptr, u32 seqno);
+int  bclink_stats(char *stats_buf, const u32 buf_size);
+int  bclink_reset_stats(void);
+int  bclink_set_queue_limits(u32 limit);
+void bcbearer_sort(void);
+void bcbearer_push(void);
+
+#endif
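
/*
 * Illustrative sketch (not part of the patch): how the broadcast code is
 * expected to use the node_map and port_list helpers declared above. It
 * assumes tipc_addr() from addr.h for building network addresses; the
 * function name, port numbers and node addresses are placeholders. Note
 * that the first port_list entry lives in the caller's frame, matching the
 * port_list_free() note above.
 */
static void example_bcast_helpers(void)
{
	struct node_map wanted = { 0, {0,} };
	struct node_map reached = { 0, {0,} };
	struct node_map missing;
	struct port_list dports = { 0, NULL, {0,} };

	nmap_add(&wanted, tipc_addr(1, 1, 5));     /* node <1.1.5> wanted    */
	nmap_add(&wanted, tipc_addr(1, 1, 7));     /* node <1.1.7> wanted    */
	nmap_add(&reached, tipc_addr(1, 1, 5));    /* ack seen from <1.1.5>  */

	nmap_diff(&wanted, &reached, &missing);    /* missing = {<1.1.7>}    */

	port_list_add(&dports, 1000);              /* collect local ports    */
	port_list_add(&dports, 1000);              /* duplicates are skipped */
	port_list_free(&dports);                   /* frees chained entries only */
}
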
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644 (file)
index 0000000..3dd19fd
--- /dev/null
@@ -0,0 +1,692 @@
+/*
+ * net/tipc/bearer.c: TIPC bearer code
+ * 
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "link.h"
+#include "port.h"
+#include "discover.h"
+#include "bcast.h"
+
+#define MAX_ADDR_STR 32
+
+static struct media *media_list = 0;
+static u32 media_count = 0;
+
+struct bearer *bearers = 0;
+
+/**
+ * media_name_valid - validate media name
+ * 
+ * Returns 1 if media name is valid, otherwise 0.
+ */
+
+static int media_name_valid(const char *name)
+{
+       u32 len;
+
+       len = strlen(name);
+       if ((len + 1) > TIPC_MAX_MEDIA_NAME)
+               return 0;
+       return (strspn(name, tipc_alphabet) == len);
+}
+
+/**
+ * media_find - locates specified media object by name
+ */
+
+static struct media *media_find(const char *name)
+{
+       struct media *m_ptr;
+       u32 i;
+
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               if (!strcmp(m_ptr->name, name))
+                       return m_ptr;
+       }
+       return 0;
+}
+
+/**
+ * tipc_register_media - register a media type
+ * 
+ * Bearers for this media type must be activated separately at a later stage.
+ */
+
+int  tipc_register_media(u32 media_type,
+                        char *name, 
+                        int (*enable)(struct tipc_bearer *), 
+                        void (*disable)(struct tipc_bearer *), 
+                        int (*send_msg)(struct sk_buff *, 
+                                        struct tipc_bearer *,
+                                        struct tipc_media_addr *), 
+                        char *(*addr2str)(struct tipc_media_addr *a,
+                                          char *str_buf, int str_size),
+                        struct tipc_media_addr *bcast_addr,
+                        const u32 bearer_priority,
+                        const u32 link_tolerance,  /* [ms] */
+                        const u32 send_window_limit)
+{
+       struct media *m_ptr;
+       u32 media_id;
+       u32 i;
+       int res = -EINVAL;
+
+       write_lock_bh(&net_lock);
+       if (!media_list)
+               goto exit;
+
+       if (!media_name_valid(name)) {
+               warn("Media registration error: illegal name <%s>\n", name);
+               goto exit;
+       }
+       if (!bcast_addr) {
+               warn("Media registration error: no broadcast address supplied\n");
+               goto exit;
+       }
+       if (bearer_priority >= TIPC_NUM_LINK_PRI) {
+               warn("Media registration error: priority %u\n", bearer_priority);
+               goto exit;
+       }
+       if ((link_tolerance < TIPC_MIN_LINK_TOL) || 
+           (link_tolerance > TIPC_MAX_LINK_TOL)) {
+               warn("Media registration error: tolerance %u\n", link_tolerance);
+               goto exit;
+       }
+
+       media_id = media_count++;
+       if (media_id >= MAX_MEDIA) {
+               warn("Attempt to register more than %u media\n", MAX_MEDIA);
+               media_count--;
+               goto exit;
+       }
+       for (i = 0; i < media_id; i++) {
+               if (media_list[i].type_id == media_type) {
+                       warn("Attempt to register second media with type %u\n", 
+                            media_type);
+                       media_count--;
+                       goto exit;
+               }
+               if (!strcmp(name, media_list[i].name)) {
+                       warn("Attempt to re-register media name <%s>\n", name);
+                       media_count--;
+                       goto exit;
+               }
+       }
+
+       m_ptr = &media_list[media_id];
+       m_ptr->type_id = media_type;
+       m_ptr->send_msg = send_msg;
+       m_ptr->enable_bearer = enable;
+       m_ptr->disable_bearer = disable;
+       m_ptr->addr2str = addr2str;
+       memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
+       m_ptr->bcast = 1;
+       strcpy(m_ptr->name, name);
+       m_ptr->priority = bearer_priority;
+       m_ptr->tolerance = link_tolerance;
+       m_ptr->window = send_window_limit;
+       dbg("Media <%s> registered\n", name);
+       res = 0;
+exit:
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+/**
+ * media_addr_printf - record media address in print buffer
+ */
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+{
+       struct media *m_ptr;
+       u32 media_type;
+       u32 i;
+
+       media_type = ntohl(a->type);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               if (m_ptr->type_id == media_type)
+                       break;
+       }
+
+       if ((i < media_count) && (m_ptr->addr2str != NULL)) {
+               char addr_str[MAX_ADDR_STR];
+
+               tipc_printf(pb, "%s(%s) ", m_ptr->name, 
+                           m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
+       } else {
+               unchar *addr = (unchar *)&a->dev_addr;
+
+               tipc_printf(pb, "UNKNOWN(%u):", media_type);
+               for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
+                       tipc_printf(pb, "%02x ", addr[i]);
+               }
+       }
+}
+
+/**
+ * media_get_names - record names of registered media in buffer
+ */
+
+struct sk_buff *media_get_names(void)
+{
+       struct sk_buff *buf;
+       struct media *m_ptr;
+       int i;
+
+       buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+       if (!buf)
+               return NULL;
+
+       read_lock_bh(&net_lock);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name, 
+                              strlen(m_ptr->name) + 1);
+       }
+       read_unlock_bh(&net_lock);
+       return buf;
+}
+
+/**
+ * bearer_name_validate - validate & (optionally) deconstruct bearer name
+ * @name: ptr to bearer name string
+ * @name_parts: ptr to area for bearer name components (or NULL if not needed)
+ * 
+ * Returns 1 if bearer name is valid, otherwise 0.
+ */
+
+static int bearer_name_validate(const char *name, 
+                               struct bearer_name *name_parts)
+{
+       char name_copy[TIPC_MAX_BEARER_NAME];
+       char *media_name;
+       char *if_name;
+       u32 media_len;
+       u32 if_len;
+
+       /* copy bearer name & ensure length is OK */
+
+       name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
+       /* need above in case non-Posix strncpy() doesn't pad with nulls */
+       strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
+       if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
+               return 0;
+
+       /* ensure all component parts of bearer name are present */
+
+       media_name = name_copy;
+       if ((if_name = strchr(media_name, ':')) == NULL)
+               return 0;
+       *(if_name++) = 0;
+       media_len = if_name - media_name;
+       if_len = strlen(if_name) + 1;
+
+       /* validate component parts of bearer name */
+
+       if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) || 
+           (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) || 
+           (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
+           (strspn(if_name, tipc_alphabet) != (if_len - 1)))
+               return 0;
+
+       /* return bearer name components, if necessary */
+
+       if (name_parts) {
+               strcpy(name_parts->media_name, media_name);
+               strcpy(name_parts->if_name, if_name);
+       }
+       return 1;
+}
+
+/**
+ * bearer_find - locates bearer object with matching bearer name
+ */
+
+static struct bearer *bearer_find(const char *name)
+{
+       struct bearer *b_ptr;
+       u32 i;
+
+       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+               if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
+                       return b_ptr;
+       }
+       return 0;
+}
+
+/**
+ * bearer_find_interface - locates bearer object with matching interface name
+ */
+
+struct bearer *bearer_find_interface(const char *if_name)
+{
+       struct bearer *b_ptr;
+       char *b_if_name;
+       u32 i;
+
+       for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+               if (!b_ptr->active)
+                       continue;
+               b_if_name = strchr(b_ptr->publ.name, ':') + 1;
+               if (!strcmp(b_if_name, if_name))
+                       return b_ptr;
+       }
+       return 0;
+}
+
+/**
+ * bearer_get_names - record names of bearers in buffer
+ */
+
+struct sk_buff *bearer_get_names(void)
+{
+       struct sk_buff *buf;
+       struct media *m_ptr;
+       struct bearer *b_ptr;
+       int i, j;
+
+       buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+       if (!buf)
+               return NULL;
+
+       read_lock_bh(&net_lock);
+       for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
+               for (j = 0; j < MAX_BEARERS; j++) {
+                       b_ptr = &bearers[j];
+                       if (b_ptr->active && (b_ptr->media == m_ptr)) {
+                               cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME, 
+                                              b_ptr->publ.name, 
+                                              strlen(b_ptr->publ.name) + 1);
+                       }
+               }
+       }
+       read_unlock_bh(&net_lock);
+       return buf;
+}
+
+void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+{
+       nmap_add(&b_ptr->nodes, dest);
+       disc_update_link_req(b_ptr->link_req);
+       bcbearer_sort();
+}
+
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+{
+       nmap_remove(&b_ptr->nodes, dest);
+       disc_update_link_req(b_ptr->link_req);
+       bcbearer_sort();
+}
+
+/*
+ * bearer_push(): Resolve bearer congestion. Force the waiting
+ * links to push out their unsent packets, one packet per link
+ * per iteration, until all packets are gone or congestion reoccurs.
+ * 'net_lock' is read_locked when this function is called
+ * bearer.lock must be taken before calling
+ * Returns binary true (1) or false (0)
+ */
+static int bearer_push(struct bearer *b_ptr)
+{
+       u32 res = TIPC_OK;
+       struct link *ln, *tln;
+
+       if (b_ptr->publ.blocked)
+               return 0;
+
+       while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
+               list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
+                       res = link_push_packet(ln);
+                       if (res == PUSH_FAILED)
+                               break;
+                       if (res == PUSH_FINISHED)
+                               list_move_tail(&ln->link_list, &b_ptr->links);
+               }
+       }
+       return list_empty(&b_ptr->cong_links);
+}
+
+void bearer_lock_push(struct bearer *b_ptr)
+{
+       int res;
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       res = bearer_push(b_ptr);
+       spin_unlock_bh(&b_ptr->publ.lock);
+       if (res)
+               bcbearer_push();
+}
+
+
+/*
+ * Re-enable sending of new requests after bearer congestion or blocking:
+ * See bearer_send().   
+ */
+void tipc_continue(struct tipc_bearer *tb_ptr)
+{
+       struct bearer *b_ptr = (struct bearer *)tb_ptr;
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->continue_count++;
+       if (!list_empty(&b_ptr->cong_links))
+               k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+       b_ptr->publ.blocked = 0;
+       spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'tipc_continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here
+ * bearer.lock is busy
+ */
+
+static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+{
+       list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
+}
+
+/*
+ * Schedule link for sending of messages after the bearer 
+ * has been deblocked by 'tipc_continue()'. This method is called 
+ * when somebody tries to send a message via this link while 
+ * the bearer is congested. 'net_lock' is in read_lock here,
+ * bearer.lock is free
+ */
+
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+{
+       spin_lock_bh(&b_ptr->publ.lock);
+       bearer_schedule_unlocked(b_ptr, l_ptr);
+       spin_unlock_bh(&b_ptr->publ.lock);
+}
+
+
+/*
+ * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * and if there is, try to resolve it before returning.
+ * 'net_lock' is read_locked when this function is called
+ */
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+{
+       int res = 1;
+
+       if (list_empty(&b_ptr->cong_links))
+               return 1;
+       spin_lock_bh(&b_ptr->publ.lock);
+       if (!bearer_push(b_ptr)) {
+               bearer_schedule_unlocked(b_ptr, l_ptr);
+               res = 0;
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       return res;
+}
+
+
+/**
+ * tipc_enable_bearer - enable bearer with the given name
+ */              
+
+int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
+{
+       struct bearer *b_ptr;
+       struct media *m_ptr;
+       struct bearer_name b_name;
+       char addr_string[16];
+       u32 bearer_id;
+       u32 with_this_prio;
+       u32 i;
+       int res = -EINVAL;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+       if (!bearer_name_validate(name, &b_name) ||
+           !addr_domain_valid(bcast_scope) ||
+           !in_scope(bcast_scope, tipc_own_addr) ||
+           (priority > TIPC_NUM_LINK_PRI))
+               return -EINVAL;
+
+       write_lock_bh(&net_lock);
+       if (!bearers)
+               goto failed;
+
+       m_ptr = media_find(b_name.media_name);
+       if (!m_ptr) {
+               warn("No media <%s>\n", b_name.media_name);
+               goto failed;
+       }
+       if (priority == TIPC_NUM_LINK_PRI)
+               priority = m_ptr->priority;
+
+restart:
+       bearer_id = MAX_BEARERS;
+       with_this_prio = 1;
+       for (i = MAX_BEARERS; i-- != 0; ) {
+               if (!bearers[i].active) {
+                       bearer_id = i;
+                       continue;
+               }
+               if (!strcmp(name, bearers[i].publ.name)) {
+                       warn("Bearer <%s> already enabled\n", name);
+                       goto failed;
+               }
+               if ((bearers[i].priority == priority) &&
+                   (++with_this_prio > 2)) {
+                       if (priority-- == 0) {
+                               warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
+                                    name, priority + 1, priority);
+                               goto failed;
+                       }
+                       warn("Third bearer <%s> with priority %u, lowering to %u\n",
+                            name, priority + 1, priority);
+                       goto restart;
+               }
+       }
+       if (bearer_id >= MAX_BEARERS) {
+               warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
+               goto failed;
+       }
+
+       b_ptr = &bearers[bearer_id];
+       memset(b_ptr, 0, sizeof(struct bearer));
+
+       strcpy(b_ptr->publ.name, name);
+       res = m_ptr->enable_bearer(&b_ptr->publ);
+       if (res) {
+               warn("Failed to enable bearer <%s>\n", name);
+               goto failed;
+       }
+
+       b_ptr->identity = bearer_id;
+       b_ptr->media = m_ptr;
+       b_ptr->net_plane = bearer_id + 'A';
+       b_ptr->active = 1;
+       b_ptr->detect_scope = bcast_scope;
+       b_ptr->priority = priority;
+       INIT_LIST_HEAD(&b_ptr->cong_links);
+       INIT_LIST_HEAD(&b_ptr->links);
+       if (m_ptr->bcast) {
+               b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+                                                    bcast_scope, 2);
+       }
+       b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
+       write_unlock_bh(&net_lock);
+       info("Enabled bearer <%s>, discovery domain %s\n",
+            name, addr_string_fill(addr_string, bcast_scope));
+       return 0;
+failed:
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+/**
+ * tipc_block_bearer(): Block the bearer with the given name,
+ *                      and reset all its links
+ */
+
+int tipc_block_bearer(const char *name)
+{
+       struct bearer *b_ptr = 0;
+       struct link *l_ptr;
+       struct link *temp_l_ptr;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+
+       read_lock_bh(&net_lock);
+       b_ptr = bearer_find(name);
+       if (!b_ptr) {
+               warn("Attempt to block unknown bearer <%s>\n", name);
+               read_unlock_bh(&net_lock);
+               return -EINVAL;
+       }
+
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->publ.blocked = 1;
+       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+               struct node *n_ptr = l_ptr->owner;
+
+               spin_lock_bh(&n_ptr->lock);
+               link_reset(l_ptr);
+               spin_unlock_bh(&n_ptr->lock);
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       read_unlock_bh(&net_lock);
+       info("Blocked bearer <%s>\n", name);
+       return TIPC_OK;
+}
+
+/**
+ * bearer_disable - disable specified bearer and delete its links
+ * 
+ * Note: This routine assumes caller holds net_lock.
+ */
+
+static int bearer_disable(const char *name)
+{
+       struct bearer *b_ptr;
+       struct link *l_ptr;
+       struct link *temp_l_ptr;
+
+       if (tipc_mode != TIPC_NET_MODE)
+               return -ENOPROTOOPT;
+
+       b_ptr = bearer_find(name);
+       if (!b_ptr) {
+               warn("Attempt to disable unknown bearer <%s>\n", name);
+               return -EINVAL;
+       }
+
+       disc_stop_link_req(b_ptr->link_req);
+       spin_lock_bh(&b_ptr->publ.lock);
+       b_ptr->link_req = NULL;
+       b_ptr->publ.blocked = 1;
+       if (b_ptr->media->disable_bearer) {
+               spin_unlock_bh(&b_ptr->publ.lock);
+               write_unlock_bh(&net_lock);
+               b_ptr->media->disable_bearer(&b_ptr->publ);
+               write_lock_bh(&net_lock);
+               spin_lock_bh(&b_ptr->publ.lock);
+       }
+       list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
+               link_delete(l_ptr);
+       }
+       spin_unlock_bh(&b_ptr->publ.lock);
+       info("Disabled bearer <%s>\n", name);
+       memset(b_ptr, 0, sizeof(struct bearer));
+       return TIPC_OK;
+}
+
+int tipc_disable_bearer(const char *name)
+{
+       int res;
+
+       write_lock_bh(&net_lock);
+       res = bearer_disable(name);
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+
+
+int bearer_init(void)
+{
+       int res;
+
+       write_lock_bh(&net_lock);
+       bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+       media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
+       if (bearers && media_list) {
+               memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+               memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
+               res = TIPC_OK;
+       } else {
+               kfree(bearers);
+               kfree(media_list);
+               bearers = 0;
+               media_list = 0;
+               res = -ENOMEM;
+       }
+       write_unlock_bh(&net_lock);
+       return res;
+}
+
+void bearer_stop(void)
+{
+       u32 i;
+
+       if (!bearers)
+               return;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (bearers[i].active)
+                       bearers[i].publ.blocked = 1;
+       }
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (bearers[i].active)
+                       bearer_disable(bearers[i].publ.name);
+       }
+       kfree(bearers);
+       kfree(media_list);
+       bearers = 0;
+       media_list = 0;
+       media_count = 0;
+}
+
+
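
/*
 * Illustrative sketch (not part of the patch): how a media driver (e.g. an
 * Ethernet bearer) is expected to hook into tipc_register_media() above, and
 * how a bearer on that media would later be enabled by its "media:interface"
 * name via tipc_enable_bearer(). All names, the media type id and the tuning
 * values below are placeholders, not values defined by TIPC.
 */
static struct tipc_media_addr example_bcast_media_addr;

static int example_enable(struct tipc_bearer *tb_ptr)
{
	return 0;			/* bring the device into use */
}

static void example_disable(struct tipc_bearer *tb_ptr)
{
}

static int example_send(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
			struct tipc_media_addr *dest)
{
	return 0;			/* 0 = accepted for transmission */
}

static char *example_addr2str(struct tipc_media_addr *a,
			      char *str_buf, int str_size)
{
	str_buf[0] = '\0';
	return str_buf;
}

static int example_media_start(void)
{
	int res;

	res = tipc_register_media(0x80 /* placeholder type id */, "exmedia",
				  example_enable, example_disable,
				  example_send, example_addr2str,
				  &example_bcast_media_addr,
				  10  /* default link priority */,
				  400 /* link tolerance [ms]   */,
				  50  /* send window [packets] */);
	if (res)
		return res;

	/* A bearer on this media would later be enabled by name, e.g.
	 * tipc_enable_bearer("exmedia:eth0", <scope>, <priority>). */
	return 0;
}
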
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644 (file)
index 0000000..21e63d3
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * net/tipc/bearer.h: Include file for TIPC bearer code
+ * 
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_BEARER_H
+#define _TIPC_BEARER_H
+
+#include <net/tipc/tipc_bearer.h>
+#include "bcast.h"
+
+#define MAX_BEARERS 8
+#define MAX_MEDIA 4
+
+
+/**
+ * struct media - TIPC media information available to internal users
+ * @send_msg: routine which handles buffer transmission
+ * @enable_bearer: routine which enables a bearer
+ * @disable_bearer: routine which disables a bearer
+ * @addr2str: routine which converts bearer's address to string form
+ * @bcast_addr: media address used in broadcasting
+ * @bcast: non-zero if media supports broadcasting [currently mandatory]
+ * @priority: default link (and bearer) priority
+ * @tolerance: default time (in ms) before declaring link failure
+ * @window: default window (in packets) before declaring link congestion
+ * @type_id: TIPC media identifier [defined in tipc_bearer.h]
+ * @name: media name
+ */
+struct media {
+       int (*send_msg)(struct sk_buff *buf, 
+                       struct tipc_bearer *b_ptr,
+                       struct tipc_media_addr *dest);
+       int (*enable_bearer)(struct tipc_bearer *b_ptr);
+       void (*disable_bearer)(struct tipc_bearer *b_ptr);
+       char *(*addr2str)(struct tipc_media_addr *a, 
+                         char *str_buf, int str_size);
+       struct tipc_media_addr bcast_addr;
+       int bcast;
+       u32 priority;
+       u32 tolerance;
+       u32 window;
+       u32 type_id;
+       char name[TIPC_MAX_MEDIA_NAME];
+};
+
+/**
+ * struct bearer - TIPC bearer information available to internal users
+ * @publ: bearer information available to privileged users
+ * @media: ptr to media structure associated with bearer
+ * @priority: default link priority for bearer
+ * @detect_scope: network address mask used during automatic link creation
+ * @identity: array index of this bearer within TIPC bearer array
+ * @link_req: ptr to (optional) structure making periodic link setup requests
+ * @links: list of non-congested links associated with bearer
+ * @cong_links: list of congested links associated with bearer
+ * @continue_count: # of times bearer has resumed after congestion or blocking
+ * @active: non-zero if bearer structure represents an enabled bearer
+ * @net_plane: network plane ('A' through 'H') currently associated with bearer
+ * @nodes: indicates which nodes in cluster can be reached through bearer
+ */
+struct bearer {
+       struct tipc_bearer publ;
+       struct media *media;
+       u32 priority;
+       u32 detect_scope;
+       u32 identity;
+       struct link_req *link_req;
+       struct list_head links;
+       struct list_head cong_links;
+       u32 continue_count;
+       int active;
+       char net_plane;
+       struct node_map nodes;
+};
+
+struct bearer_name {
+       char media_name[TIPC_MAX_MEDIA_NAME];
+       char if_name[TIPC_MAX_IF_NAME];
+};
+
+struct link;
+
+extern struct bearer *bearers;
+
+void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *media_get_names(void);
+
+struct sk_buff *bearer_get_names(void);
+void bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *bearer_find_interface(const char *if_name);
+int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int bearer_init(void);
+void bearer_stop(void);
+int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
+                    struct tipc_media_addr *dest);
+void bearer_lock_push(struct bearer *b_ptr);
+
+
+/**
+ * bearer_send - sends buffer to destination over bearer
+ * 
+ * Returns true (1) if successful, or false (0) if unable to send
+ * 
+ * IMPORTANT:
+ * The media send routine must not alter the buffer being passed in
+ * as it may be needed for later retransmission!
+ * 
+ * If the media send routine returns a non-zero value (indicating that 
+ * it was unable to send the buffer), it must:
+ *   1) mark the bearer as blocked,
+ *   2) call tipc_continue() once the bearer is able to send again.
+ * Media types that are unable to meet these two criteria must ensure their
+ * send routine always returns success -- even if the buffer was not sent --
+ * and let TIPC's link code deal with the undelivered message. 
+ */
+
+static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+                             struct tipc_media_addr *dest)
+{
+       return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
+}
+
+/**
+ * bearer_congested - determines if bearer is currently congested
+ */
+
+static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+{
+       if (unlikely(b_ptr->publ.blocked))
+               return 1;
+       if (likely(list_empty(&b_ptr->cong_links)))
+               return 0;
+       return !bearer_resolve_congestion(b_ptr, l_ptr);
+}
+
+#endif
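
/*
 * Illustrative sketch (not part of the patch): the transmit path expected of
 * the link layer, following the contract documented above bearer_send().
 * The function name is hypothetical; the real logic lives in link.c.
 */
static int example_bearer_xmit(struct bearer *b_ptr, struct link *l_ptr,
			       struct sk_buff *buf,
			       struct tipc_media_addr *dest)
{
	/* Blocked or congested: when congestion could not be cleared,
	 * bearer_resolve_congestion() has already queued l_ptr on
	 * b_ptr->cong_links, so it is pushed again after tipc_continue(). */
	if (bearer_congested(b_ptr, l_ptr))
		return 0;

	/* Returns 1 on success; 0 means the media send routine failed, in
	 * which case it must mark the bearer blocked and call
	 * tipc_continue() later, per the contract above. */
	return bearer_send(b_ptr, buf, dest);
}
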
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644 (file)
index 0000000..f0f7bac
--- /dev/null
@@ -0,0 +1,576 @@
+/*
+ * net/tipc/cluster.c: TIPC cluster management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "node.h"
+#include "net.h"
+#include "msg.h"
+#include "bearer.h"
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                      u32 lower, u32 upper);
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+
+struct node **local_nodes = 0;
+struct node_map cluster_bcast_nodes = {0,{0,}};
+u32 highest_allowed_slave = 0;
+
+struct cluster *cluster_create(u32 addr)
+{
+       struct _zone *z_ptr;
+       struct cluster *c_ptr;
+       int max_nodes; 
+       int alloc;
+
+       c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
+       if (c_ptr == NULL)
+               return 0;
+       memset(c_ptr, 0, sizeof(*c_ptr));
+
+       c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+       if (in_own_cluster(addr))
+               max_nodes = LOWEST_SLAVE + tipc_max_slaves;
+       else
+               max_nodes = tipc_max_nodes + 1;
+       alloc = sizeof(void *) * (max_nodes + 1);
+       c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
+       if (c_ptr->nodes == NULL) {
+               kfree(c_ptr);
+               return 0;
+       }
+       memset(c_ptr->nodes, 0, alloc);  
+       if (in_own_cluster(addr))
+               local_nodes = c_ptr->nodes;
+       c_ptr->highest_slave = LOWEST_SLAVE - 1;
+       c_ptr->highest_node = 0;
+       
+       z_ptr = zone_find(tipc_zone(addr));
+       if (z_ptr == NULL) {
+               z_ptr = zone_create(addr);
+       }
+       if (z_ptr != NULL) {
+               zone_attach_cluster(z_ptr, c_ptr);
+               c_ptr->owner = z_ptr;
+       }
+       else {
+               kfree(c_ptr);
+               c_ptr = 0;
+       }
+
+       return c_ptr;
+}
+
+void cluster_delete(struct cluster *c_ptr)
+{
+       u32 n_num;
+
+       if (!c_ptr)
+               return;
+       for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
+               node_delete(c_ptr->nodes[n_num]);
+       }
+       for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
+               node_delete(c_ptr->nodes[n_num]);
+       }
+       kfree(c_ptr->nodes);
+       kfree(c_ptr);
+}
+
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+{
+       struct node *n_ptr;
+       u32 n_num = tipc_node(addr) + 1;
+
+       if (!c_ptr)
+               return addr;
+       for (; n_num <= c_ptr->highest_node; n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr))
+                       return n_ptr->addr;
+       }
+       for (n_num = 1; n_num < tipc_node(addr); n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr))
+                       return n_ptr->addr;
+       }
+       return 0;
+}
+
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+{
+       u32 n_num = tipc_node(n_ptr->addr);
+       u32 max_n_num = tipc_max_nodes;
+
+       if (in_own_cluster(n_ptr->addr))
+               max_n_num = highest_allowed_slave;
+       assert(n_num > 0);
+       assert(n_num <= max_n_num);
+       assert(c_ptr->nodes[n_num] == 0);
+       c_ptr->nodes[n_num] = n_ptr;
+       if (n_num > c_ptr->highest_node)
+               c_ptr->highest_node = n_num;
+}
+
+/**
+ * cluster_select_router - select router to a cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
+{
+       u32 n_num;
+       u32 ulim = c_ptr->highest_node;
+       u32 mask;
+       u32 tstart;
+
+       assert(!in_own_cluster(c_ptr->addr));
+       if (!ulim)
+               return 0;
+
+       /* Start entry must be random */
+       mask = tipc_max_nodes;
+       while (mask > ulim)
+               mask >>= 1;
+       tstart = ref & mask;
+       n_num = tstart;
+
+       /* Lookup upwards with wrap-around */
+       do {
+               if (node_is_up(c_ptr->nodes[n_num]))
+                       break;
+       } while (++n_num <= ulim);
+       if (n_num > ulim) {
+               n_num = 1;
+               do {
+                       if (node_is_up(c_ptr->nodes[n_num]))
+                               break;
+               } while (++n_num < tstart);
+               if (n_num == tstart)
+                       return 0;
+       }
+       assert(n_num <= ulim);
+       return node_select_router(c_ptr->nodes[n_num], ref);
+}
+
+/**
+ * cluster_select_node - select destination node within a remote cluster
+ * 
+ * Uses deterministic and fair algorithm.
+ */
+
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
+{
+       u32 n_num;
+       u32 mask = tipc_max_nodes;
+       u32 start_entry;
+
+       assert(!in_own_cluster(c_ptr->addr));
+       if (!c_ptr->highest_node)
+               return 0;
+
+       /* Start entry must be random */
+       while (mask > c_ptr->highest_node) {
+               mask >>= 1;
+       }
+       start_entry = (selector & mask) ? selector & mask : 1u;
+       assert(start_entry <= c_ptr->highest_node);
+
+       /* Lookup upwards with wrap-around */
+       for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
+               if (node_has_active_links(c_ptr->nodes[n_num]))
+                       return c_ptr->nodes[n_num];
+       }
+       for (n_num = 1; n_num < start_entry; n_num++) {
+               if (node_has_active_links(c_ptr->nodes[n_num]))
+                       return c_ptr->nodes[n_num];
+       }
+       return 0;
+}
+
+/*
+ *    Routing table management: See description in node.c
+ */
+
+struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
+{
+       u32 size = INT_H_SIZE + data_size;
+       struct sk_buff *buf = buf_acquire(size);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               memset((char *)msg, 0, size);
+               msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
+       }
+       return buf;
+}
+
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
+                            u32 lower, u32 upper)
+{
+       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, dest);
+               msg_set_type(msg, ROUTE_ADDITION);
+               cluster_multicast(c_ptr, buf, lower, upper);
+       } else {
+               warn("Memory squeeze: broadcast of new route failed\n");
+       }
+}
+
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
+                             u32 lower, u32 upper)
+{
+       struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, dest);
+               msg_set_type(msg, ROUTE_REMOVAL);
+               cluster_multicast(c_ptr, buf, lower, upper);
+       } else {
+               warn("Memory squeeze: broadcast of lost route failed\n");
+       }
+}
+
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_slave;
+       u32 n_num;
+       int send = 0;
+
+       assert(!is_slave(dest));
+       assert(in_own_cluster(dest));
+       assert(in_own_cluster(c_ptr->addr));
+       if (highest <= LOWEST_SLAVE)
+               return;
+       buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
+                                         c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, SLAVE_ROUTING_TABLE);
+               for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of lost route failed\n");
+       }
+}
+
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_node;
+       u32 n_num;
+       int send = 0;
+
+       if (in_own_cluster(c_ptr->addr))
+               return;
+       assert(!is_slave(dest));
+       assert(in_own_cluster(dest));
+       highest = c_ptr->highest_node;
+       buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, EXT_ROUTING_TABLE);
+               for (n_num = 1; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of external route failed\n");
+       }
+}
+
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       u32 highest = c_ptr->highest_node;
+       u32 n_num;
+       int send = 0;
+
+       assert(is_slave(dest));
+       assert(in_own_cluster(c_ptr->addr));
+       buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_set_remote_node(msg, c_ptr->addr);
+               msg_set_type(msg, LOCAL_ROUTING_TABLE);
+               for (n_num = 1; n_num <= highest; n_num++) {
+                       if (c_ptr->nodes[n_num] && 
+                           node_has_active_links(c_ptr->nodes[n_num])) {
+                               send = 1;
+                               msg_set_dataoctet(msg, n_num);
+                       }
+               }
+               if (send)
+                       link_send(buf, dest, dest);
+               else
+                       buf_discard(buf);
+       } else {
+               warn("Memory squeeze: broadcast of local route failed\n");
+       }
+}
+
+void cluster_recv_routing_table(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+       unchar *node_table;
+       u32 table_size;
+       u32 router;
+       u32 rem_node = msg_remote_node(msg);
+       u32 z_num;
+       u32 c_num;
+       u32 n_num;
+
+       c_ptr = cluster_find(rem_node);
+       if (!c_ptr) {
+               c_ptr = cluster_create(rem_node);
+               if (!c_ptr) {
+                       buf_discard(buf);
+                       return;
+               }
+       }
+
+       node_table = buf->data + msg_hdr_sz(msg);
+       table_size = msg_size(msg) - msg_hdr_sz(msg);
+       router = msg_prevnode(msg);
+       z_num = tipc_zone(rem_node);
+       c_num = tipc_cluster(rem_node);
+
+       switch (msg_type(msg)) {
+       case LOCAL_ROUTING_TABLE:
+               assert(is_slave(tipc_own_addr));
+       case EXT_ROUTING_TABLE:
+               for (n_num = 1; n_num < table_size; n_num++) {
+                       if (node_table[n_num]) {
+                               u32 addr = tipc_addr(z_num, c_num, n_num);
+                               n_ptr = c_ptr->nodes[n_num];
+                               if (!n_ptr) {
+                                       n_ptr = node_create(addr);
+                               }
+                               if (n_ptr)
+                                       node_add_router(n_ptr, router);
+                       }
+               }
+               break;
+       case SLAVE_ROUTING_TABLE:
+               assert(!is_slave(tipc_own_addr));
+               assert(in_own_cluster(c_ptr->addr));
+               for (n_num = 1; n_num < table_size; n_num++) {
+                       if (node_table[n_num]) {
+                               u32 slave_num = n_num + LOWEST_SLAVE;
+                               u32 addr = tipc_addr(z_num, c_num, slave_num);
+                               n_ptr = c_ptr->nodes[slave_num];
+                               if (!n_ptr) {
+                                       n_ptr = node_create(addr);
+                               }
+                               if (n_ptr)
+                                       node_add_router(n_ptr, router);
+                       }
+               }
+               break;
+       case ROUTE_ADDITION:
+               if (!is_slave(tipc_own_addr)) {
+                       assert(!in_own_cluster(c_ptr->addr)
+                              || is_slave(rem_node));
+               } else {
+                       assert(in_own_cluster(c_ptr->addr)
+                              && !is_slave(rem_node));
+               }
+               n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+               if (!n_ptr)
+                       n_ptr = node_create(rem_node);
+               if (n_ptr)
+                       node_add_router(n_ptr, router);
+               break;
+       case ROUTE_REMOVAL:
+               if (!is_slave(tipc_own_addr)) {
+                       assert(!in_own_cluster(c_ptr->addr)
+                              || is_slave(rem_node));
+               } else {
+                       assert(in_own_cluster(c_ptr->addr)
+                              && !is_slave(rem_node));
+               }
+               n_ptr = c_ptr->nodes[tipc_node(rem_node)];
+               if (n_ptr)
+                       node_remove_router(n_ptr, router);
+               break;
+       default:
+               assert(!"Illegal routing manager message received\n");
+       }
+       buf_discard(buf);
+}
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
+{
+       u32 start_entry;
+       u32 tstop;
+       u32 n_num;
+
+       if (is_slave(router))
+               return; /* Slave nodes can not be routers */
+
+       if (in_own_cluster(c_ptr->addr)) {
+               start_entry = LOWEST_SLAVE;
+               tstop = c_ptr->highest_slave;
+       } else {
+               start_entry = 1;
+               tstop = c_ptr->highest_node;
+       }
+
+       for (n_num = start_entry; n_num <= tstop; n_num++) {
+               if (c_ptr->nodes[n_num]) {
+                       node_remove_router(c_ptr->nodes[n_num], router);
+               }
+       }
+}
+
+/**
+ * cluster_multicast - multicast message to local nodes 
+ */
+
+void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 
+                      u32 lower, u32 upper)
+{
+       struct sk_buff *buf_copy;
+       struct node *n_ptr;
+       u32 n_num;
+       u32 tstop;
+
+       assert(lower <= upper);
+       assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
+              ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
+       assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
+              ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
+       assert(in_own_cluster(c_ptr->addr));
+
+       tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
+       if (tstop > upper)
+               tstop = upper;
+       for (n_num = lower; n_num <= tstop; n_num++) {
+               n_ptr = c_ptr->nodes[n_num];
+               if (n_ptr && node_has_active_links(n_ptr)) {
+                       buf_copy = skb_copy(buf, GFP_ATOMIC);
+                       if (buf_copy == NULL)
+                               break;
+                       msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
+                       link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+               }
+       }
+       buf_discard(buf);
+}
+
+/**
+ * cluster_broadcast - broadcast message to all nodes within cluster
+ */
+
+void cluster_broadcast(struct sk_buff *buf)
+{
+       struct sk_buff *buf_copy;
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+       u32 n_num;
+       u32 tstart;
+       u32 tstop;
+       u32 node_type;
+
+       if (tipc_mode == TIPC_NET_MODE) {
+               c_ptr = cluster_find(tipc_own_addr);
+               assert(in_own_cluster(c_ptr->addr));    /* For now */
+
+               /* Send to standard nodes, then repeat loop sending to slaves */
+               tstart = 1;
+               tstop = c_ptr->highest_node;
+               for (node_type = 1; node_type <= 2; node_type++) {
+                       for (n_num = tstart; n_num <= tstop; n_num++) {
+                               n_ptr = c_ptr->nodes[n_num];
+                               if (n_ptr && node_has_active_links(n_ptr)) {
+                                       buf_copy = skb_copy(buf, GFP_ATOMIC);
+                                       if (buf_copy == NULL)
+                                               goto exit;
+                                       msg_set_destnode(buf_msg(buf_copy), 
+                                                        n_ptr->addr);
+                                       link_send(buf_copy, n_ptr->addr, 
+                                                 n_ptr->addr);
+                               }
+                       }
+                       tstart = LOWEST_SLAVE;
+                       tstop = c_ptr->highest_slave;
+               }
+       }
+exit:
+       buf_discard(buf);
+}
+
+int cluster_init(void)
+{
+       highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
+       return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
+}
+
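
/*
 * Illustrative sketch (not part of the patch): picking a destination node in
 * a remote cluster for a given message selector, along the lines of what the
 * routing code in node.c/net.c is expected to do with cluster_select_node().
 * The function name is hypothetical.
 */
static u32 example_select_remote_node(u32 remote_cluster_addr, u32 selector)
{
	struct cluster *c_ptr = cluster_find(remote_cluster_addr);
	struct node *n_ptr;

	if (!c_ptr)
		return 0;
	n_ptr = cluster_select_node(c_ptr, selector);	/* fair, deterministic */
	return n_ptr ? n_ptr->addr : 0;
}
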
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644 (file)
index 0000000..1ffb095
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * net/tipc/cluster.h: Include file for TIPC cluster management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CLUSTER_H
+#define _TIPC_CLUSTER_H
+
+#include "addr.h"
+#include "zone.h"
+
+#define LOWEST_SLAVE  2048u
+
+/**
+ * struct cluster - TIPC cluster structure
+ * @addr: network address of cluster
+ * @owner: pointer to zone that cluster belongs to
+ * @nodes: array of pointers to all nodes within cluster
+ * @highest_node: id of highest numbered node within cluster
+ * @highest_slave: id of highest numbered slave node within cluster (secondary node support)
+ */
+struct cluster {
+       u32 addr;
+       struct _zone *owner;
+       struct node **nodes;
+       u32 highest_node;
+       u32 highest_slave;
+};
+
+
+extern struct node **local_nodes;
+extern u32 highest_allowed_slave;
+extern struct node_map cluster_bcast_nodes;
+
+void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
+void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
+struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
+u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
+void cluster_recv_routing_table(struct sk_buff *buf);
+struct cluster *cluster_create(u32 addr);
+void cluster_delete(struct cluster *c_ptr);
+void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
+void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
+void cluster_broadcast(struct sk_buff *buf);
+int cluster_init(void);
+u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
+void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
+void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
+
+static inline struct cluster *cluster_find(u32 addr)
+{
+       struct _zone *z_ptr = zone_find(addr);
+
+       if (z_ptr)
+               return z_ptr->clusters[1];
+       return 0;
+}
+
+#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644 (file)
index 0000000..8ddef4f
--- /dev/null
@@ -0,0 +1,718 @@
+/*
+ * net/tipc/config.c: TIPC configuration management code
+ * 
+ * Copyright (c) 2002-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "bearer.h"
+#include "port.h"
+#include "link.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "node.h"
+#include "config.h"
+#include "discover.h"
+
+struct subscr_data {
+       char usr_handle[8];
+       u32 domain;
+       u32 port_ref;
+       struct list_head subd_list;
+};
+
+struct manager {
+       u32 user_ref;
+       u32 port_ref;
+       u32 subscr_ref;
+       u32 link_subscriptions;
+       struct list_head link_subscribers;
+};
+
+static struct manager mng = { 0 };
+
+static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
+
+static const void *req_tlv_area;       /* request message TLV area */
+static int req_tlv_space;              /* request message TLV area size */
+static int rep_headroom;               /* reply message headroom to use */
+
+
+void cfg_link_event(u32 addr, char *name, int up)
+{
+       /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
+}
+
+
+struct sk_buff *cfg_reply_alloc(int payload_size)
+{
+       struct sk_buff *buf;
+
+       buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
+       if (buf)
+               skb_reserve(buf, rep_headroom);
+       return buf;
+}
+
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                  void *tlv_data, int tlv_data_size)
+{
+       struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
+       int new_tlv_space = TLV_SPACE(tlv_data_size);
+
+       if (skb_tailroom(buf) < new_tlv_space) {
+               dbg("cfg_append_tlv unable to append TLV\n");
+               return 0;
+       }
+       skb_put(buf, new_tlv_space);
+       tlv->tlv_type = htons(tlv_type);
+       tlv->tlv_len  = htons(TLV_LENGTH(tlv_data_size));
+       if (tlv_data_size && tlv_data)
+               memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
+       return 1;
+}
+
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
+{
+       struct sk_buff *buf;
+       u32 value_net;
+
+       buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
+       if (buf) {
+               value_net = htonl(value);
+               cfg_append_tlv(buf, tlv_type, &value_net, 
+                              sizeof(value_net));
+       }
+       return buf;
+}
+
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
+{
+       struct sk_buff *buf;
+       int string_len = strlen(string) + 1;
+
+       buf = cfg_reply_alloc(TLV_SPACE(string_len));
+       if (buf)
+               cfg_append_tlv(buf, tlv_type, string, string_len);
+       return buf;
+}
+
+
+
+
+#if 0
+
+/* Now-obsolete code for handling commands that are not yet implemented the new way */
+
+int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
+                char *data,
+                u32 sz,
+                u32 *ret_size,
+                struct tipc_portid *orig)
+{
+       int rv = -EINVAL;
+       u32 cmd = msg->cmd;
+
+       *ret_size = 0;
+       switch (cmd) {
+       case TIPC_REMOVE_LINK:
+       case TIPC_CMD_BLOCK_LINK:
+       case TIPC_CMD_UNBLOCK_LINK:
+               if (!cfg_check_connection(orig))
+                       rv = link_control(msg->argv.link_name, msg->cmd, 0);
+               break;
+       case TIPC_ESTABLISH:
+               {
+                       int connected;
+
+                       tipc_isconnected(mng.conn_port_ref, &connected);
+                       if (connected || !orig) {
+                               rv = TIPC_FAILURE;
+                               break;
+                       }
+                       rv = tipc_connect2port(mng.conn_port_ref, orig);
+                       if (rv == TIPC_OK)
+                               orig = 0;
+                       break;
+               }
+       case TIPC_GET_PEER_ADDRESS:
+               *ret_size = link_peer_addr(msg->argv.link_name, data, sz);
+               break;
+       case TIPC_GET_ROUTES:
+               rv = TIPC_OK;
+               break;
+       default: {}
+       }
+       if (*ret_size)
+               rv = TIPC_OK;
+       return rv;
+}
+
+static void cfg_cmd_event(struct tipc_cmd_msg *msg,
+                         char *data,
+                         u32 sz,        
+                         struct tipc_portid const *orig)
+{
+       int rv = -EINVAL;
+       struct tipc_cmd_result_msg rmsg;
+       struct iovec msg_sect[2];
+       int *arg;
+
+       msg->cmd = ntohl(msg->cmd);
+
+       cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect, 
+                           data, 0);
+       if (ntohl(msg->magic) != TIPC_MAGIC)
+               goto exit;
+
+       switch (msg->cmd) {
+       case TIPC_CREATE_LINK:
+               if (!cfg_check_connection(orig))
+                       rv = disc_create_link(&msg->argv.create_link);
+               break;
+       case TIPC_LINK_SUBSCRIBE:
+               {
+                       struct subscr_data *sub;
+
+                       if (mng.link_subscriptions > 64)
+                               break;
+                       sub = (struct subscr_data *)kmalloc(sizeof(*sub),
+                                                           GFP_ATOMIC);
+                       if (sub == NULL) {
+                               warn("Memory squeeze; dropped remote link subscription\n");
+                               break;
+                       }
+                       INIT_LIST_HEAD(&sub->subd_list);
+                       tipc_createport(mng.user_ref,
+                                       (void *)sub,
+                                       TIPC_HIGH_IMPORTANCE,
+                                       0,
+                                       0,
+                                       (tipc_conn_shutdown_event)cfg_linksubscr_cancel,
+                                       0,
+                                       0,
+                                       (tipc_conn_msg_event)cfg_linksubscr_cancel,
+                                       0,
+                                       &sub->port_ref);
+                       if (!sub->port_ref) {
+                               kfree(sub);
+                               break;
+                       }
+                       memcpy(sub->usr_handle,msg->usr_handle,
+                              sizeof(sub->usr_handle));
+                       sub->domain = msg->argv.domain;
+                       list_add_tail(&sub->subd_list, &mng.link_subscribers);
+                       tipc_connect2port(sub->port_ref, orig);
+                       rmsg.retval = TIPC_OK;
+                       tipc_send(sub->port_ref, 2u, msg_sect);
+                       mng.link_subscriptions++;
+                       return;
+               }
+       default:
+               rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
+       }
+       exit:
+       rmsg.result_len = htonl(msg_sect[1].iov_len);
+       rmsg.retval = htonl(rv);
+       cfg_respond(msg_sect, 2u, orig);
+}
+#endif
+
+static struct sk_buff *cfg_enable_bearer(void)
+{
+       struct tipc_bearer_config *args;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
+       if (tipc_enable_bearer(args->name,
+                              ntohl(args->detect_scope),
+                              ntohl(args->priority)))
+               return cfg_reply_error_string("unable to enable bearer");
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_disable_bearer(void)
+{
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
+               return cfg_reply_error_string("unable to disable bearer");
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_own_addr(void)
+{
+       u32 addr;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       addr = *(u32 *)TLV_DATA(req_tlv_area);
+       addr = ntohl(addr);
+       if (addr == tipc_own_addr)
+               return cfg_reply_none();
+       if (!addr_node_valid(addr))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (node address)");
+       if (tipc_own_addr)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change node address once assigned)");
+
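+       /* config_lock is dropped while networking is restarted with the new address */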
+       spin_unlock_bh(&config_lock);
+       stop_net();
+       tipc_own_addr = addr;
+       start_net();
+       spin_lock_bh(&config_lock);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_remote_mng(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       tipc_remote_management = (value != 0);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_publications(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max publications must be 1-65535)");
+       tipc_max_publications = value;
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_subscriptions(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max subscriptions must be 1-65535)");
+       tipc_max_subscriptions = value;
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_ports(void)
+{
+       int orig_mode;
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 127, 65535))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max ports must be 127-65535)");
+
+       if (value == tipc_max_ports)
+               return cfg_reply_none();
+
+       if (atomic_read(&tipc_user_count) > 2)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change max ports while TIPC users exist)");
+
+       spin_unlock_bh(&config_lock);
+       orig_mode = tipc_get_mode();
+       if (orig_mode == TIPC_NET_MODE)
+               stop_net();
+       stop_core();
+       tipc_max_ports = value;
+       start_core();
+       if (orig_mode == TIPC_NET_MODE)
+               start_net();
+       spin_lock_bh(&config_lock);
+       return cfg_reply_none();
+}
+
+static struct sk_buff *set_net_max(int value, int *parameter)
+{
+       int orig_mode;
+
+       if (value != *parameter) {
+               orig_mode = tipc_get_mode();
+               if (orig_mode == TIPC_NET_MODE)
+                       stop_net();
+               *parameter = value;
+               if (orig_mode == TIPC_NET_MODE)
+                       start_net();
+       }
+
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_zones(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 255))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max zones must be 1-255)");
+       return set_net_max(value, &tipc_max_zones);
+}
+
+static struct sk_buff *cfg_set_max_clusters(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != 1)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (max clusters fixed at 1)");
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_max_nodes(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 8, 2047))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (max nodes must be 8-2047)");
+       return set_net_max(value, &tipc_max_nodes);
+}
+
+static struct sk_buff *cfg_set_max_slaves(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != 0)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (max secondary nodes fixed at 0)");
+       return cfg_reply_none();
+}
+
+static struct sk_buff *cfg_set_netid(void)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 1, 9999))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (network id must be 1-9999)");
+
+       if (tipc_own_addr)
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change network id once part of network)");
+       
+       return set_net_max(value, &tipc_net_id);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
+                          int request_space, int reply_headroom)
+{
+       struct sk_buff *rep_tlv_buf;
+
+       spin_lock_bh(&config_lock);
+
+       /* Save request and reply details in a well-known location */
+
+       req_tlv_area = request_area;
+       req_tlv_space = request_space;
+       rep_headroom = reply_headroom;
+
+       /* Check command authorization */
+
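+       /*
+        * Commands >= 0x8000 may only be issued by the local node; other
+        * remote requests require remote management to be enabled, and
+        * commands >= 0x4000 must additionally come from the zone master.
+        */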
+       if (likely(orig_node == tipc_own_addr)) {
+               /* command is permitted */
+       } else if (cmd >= 0x8000) {
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                                    " (cannot be done remotely)");
+               goto exit;
+       } else if (!tipc_remote_management) {
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
+               goto exit;
+       }
+       else if (cmd >= 0x4000) {
+               u32 domain = 0;
+
+               if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
+                   (domain != orig_node)) {
+                       rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
+                       goto exit;
+               }
+       }
+
+       /* Call appropriate processing routine */
+
+       switch (cmd) {
+       case TIPC_CMD_NOOP:
+               rep_tlv_buf = cfg_reply_none();
+               break;
+       case TIPC_CMD_GET_NODES:
+               rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_GET_LINKS:
+               rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_SHOW_LINK_STATS:
+               rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_RESET_LINK_STATS:
+               rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_SHOW_NAME_TABLE:
+               rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_GET_BEARER_NAMES:
+               rep_tlv_buf = bearer_get_names();
+               break;
+       case TIPC_CMD_GET_MEDIA_NAMES:
+               rep_tlv_buf = media_get_names();
+               break;
+       case TIPC_CMD_SHOW_PORTS:
+               rep_tlv_buf = port_get_ports();
+               break;
+#if 0
+       case TIPC_CMD_SHOW_PORT_STATS:
+               rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_RESET_PORT_STATS:
+               rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
+               break;
+#endif
+       case TIPC_CMD_SET_LOG_SIZE:
+               rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
+               break;
+       case TIPC_CMD_DUMP_LOG:
+               rep_tlv_buf = log_dump();
+               break;
+       case TIPC_CMD_SET_LINK_TOL:
+       case TIPC_CMD_SET_LINK_PRI:
+       case TIPC_CMD_SET_LINK_WINDOW:
+               rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
+               break;
+       case TIPC_CMD_ENABLE_BEARER:
+               rep_tlv_buf = cfg_enable_bearer();
+               break;
+       case TIPC_CMD_DISABLE_BEARER:
+               rep_tlv_buf = cfg_disable_bearer();
+               break;
+       case TIPC_CMD_SET_NODE_ADDR:
+               rep_tlv_buf = cfg_set_own_addr();
+               break;
+       case TIPC_CMD_SET_REMOTE_MNG:
+               rep_tlv_buf = cfg_set_remote_mng();
+               break;
+       case TIPC_CMD_SET_MAX_PORTS:
+               rep_tlv_buf = cfg_set_max_ports();
+               break;
+       case TIPC_CMD_SET_MAX_PUBL:
+               rep_tlv_buf = cfg_set_max_publications();
+               break;
+       case TIPC_CMD_SET_MAX_SUBSCR:
+               rep_tlv_buf = cfg_set_max_subscriptions();
+               break;
+       case TIPC_CMD_SET_MAX_ZONES:
+               rep_tlv_buf = cfg_set_max_zones();
+               break;
+       case TIPC_CMD_SET_MAX_CLUSTERS:
+               rep_tlv_buf = cfg_set_max_clusters();
+               break;
+       case TIPC_CMD_SET_MAX_NODES:
+               rep_tlv_buf = cfg_set_max_nodes();
+               break;
+       case TIPC_CMD_SET_MAX_SLAVES:
+               rep_tlv_buf = cfg_set_max_slaves();
+               break;
+       case TIPC_CMD_SET_NETID:
+               rep_tlv_buf = cfg_set_netid();
+               break;
+       case TIPC_CMD_GET_REMOTE_MNG:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
+               break;
+       case TIPC_CMD_GET_MAX_PORTS:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
+               break;
+       case TIPC_CMD_GET_MAX_PUBL:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
+               break;
+       case TIPC_CMD_GET_MAX_SUBSCR:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
+               break;
+       case TIPC_CMD_GET_MAX_ZONES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
+               break;
+       case TIPC_CMD_GET_MAX_CLUSTERS:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
+               break;
+       case TIPC_CMD_GET_MAX_NODES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
+               break;
+       case TIPC_CMD_GET_MAX_SLAVES:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
+               break;
+       case TIPC_CMD_GET_NETID:
+               rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
+               break;
+       default:
+               rep_tlv_buf = NULL;
+               break;
+       }
+
+       /* Return reply buffer */
+exit:
+       spin_unlock_bh(&config_lock);
+       return rep_tlv_buf;
+}
+
+static void cfg_named_msg_event(void *userdata,
+                               u32 port_ref,
+                               struct sk_buff **buf,
+                               const unchar *msg,
+                               u32 size,
+                               u32 importance, 
+                               struct tipc_portid const *orig,
+                               struct tipc_name_seq const *dest)
+{
+       struct tipc_cfg_msg_hdr *req_hdr;
+       struct tipc_cfg_msg_hdr *rep_hdr;
+       struct sk_buff *rep_buf;
+
+       /* Validate configuration message header (ignore invalid message) */
+
+       req_hdr = (struct tipc_cfg_msg_hdr *)msg;
+       if ((size < sizeof(*req_hdr)) ||
+           (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
+           (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
+               warn("discarded invalid configuration message\n");
+               return;
+       }
+
+       /* Generate reply for request (if none can be generated, return the request itself) */
+
+       rep_buf = cfg_do_cmd(orig->node,
+                            ntohs(req_hdr->tcm_type), 
+                            msg + sizeof(*req_hdr),
+                            size - sizeof(*req_hdr),
+                            BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
+       if (rep_buf) {
+               skb_push(rep_buf, sizeof(*rep_hdr));
+               rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
+               memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
+               rep_hdr->tcm_len = htonl(rep_buf->len);
+               rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
+       } else {
+               rep_buf = *buf;
+               *buf = NULL;
+       }
+
+       /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
+       tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
+}
+
+int cfg_init(void)
+{
+       struct tipc_name_seq seq;
+       int res;
+
+       memset(&mng, 0, sizeof(mng));
+       INIT_LIST_HEAD(&mng.link_subscribers);
+
+       res = tipc_attach(&mng.user_ref, 0, 0);
+       if (res)
+               goto failed;
+
+       res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
+                             NULL, NULL, NULL,
+                             NULL, cfg_named_msg_event, NULL,
+                             NULL, &mng.port_ref);
+       if (res)
+               goto failed;
+
+       seq.type = TIPC_CFG_SRV;
+       seq.lower = seq.upper = tipc_own_addr;
+       res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
+       if (res)
+               goto failed;
+
+       return 0;
+
+failed:
+       err("Unable to create configuration service\n");
+       tipc_detach(mng.user_ref);
+       mng.user_ref = 0;
+       return res;
+}
+
+void cfg_stop(void)
+{
+       if (mng.user_ref) {
+               tipc_detach(mng.user_ref);
+               mng.user_ref = 0;
+       }
+}
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644 (file)
index 0000000..646377d
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * net/tipc/config.h: Include file for TIPC configuration service code
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CONFIG_H
+#define _TIPC_CONFIG_H
+
+/* ---------------------------------------------------------------------- */
+
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include "link.h"
+
+struct sk_buff *cfg_reply_alloc(int payload_size);
+int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 
+                  void *tlv_data, int tlv_data_size);
+struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
+struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
+
+static inline struct sk_buff *cfg_reply_none(void)
+{
+       return cfg_reply_alloc(0);
+}
+
+static inline struct sk_buff *cfg_reply_unsigned(u32 value)
+{
+       return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
+}
+
+static inline struct sk_buff *cfg_reply_error_string(char *string)
+{
+       return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
+}
+
+static inline struct sk_buff *cfg_reply_ultra_string(char *string)
+{
+       return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
+}
+
+struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 
+                          const void *req_tlv_area, int req_tlv_space, 
+                          int headroom);
+
+void cfg_link_event(u32 addr, char *name, int up);
+int  cfg_init(void);
+void cfg_stop(void);
+
+#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644 (file)
index 0000000..e83ac06
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * net/tipc/core.c: TIPC module code
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/version.h>
+#include <linux/random.h>
+
+#include "core.h"
+#include "dbg.h"
+#include "ref.h"
+#include "net.h"
+#include "user_reg.h"
+#include "name_table.h"
+#include "subscr.h"
+#include "config.h"
+
+int  eth_media_start(void);
+void eth_media_stop(void);
+int  handler_start(void);
+void handler_stop(void);
+int  socket_init(void);
+void socket_stop(void);
+int  netlink_start(void);
+void netlink_stop(void);
+
+#define MOD_NAME "tipc_start: "
+
+#ifndef CONFIG_TIPC_ZONES
+#define CONFIG_TIPC_ZONES 3
+#endif
+
+#ifndef CONFIG_TIPC_CLUSTERS
+#define CONFIG_TIPC_CLUSTERS 1
+#endif
+
+#ifndef CONFIG_TIPC_NODES
+#define CONFIG_TIPC_NODES 255
+#endif
+
+#ifndef CONFIG_TIPC_SLAVE_NODES
+#define CONFIG_TIPC_SLAVE_NODES 0
+#endif
+
+#ifndef CONFIG_TIPC_PORTS
+#define CONFIG_TIPC_PORTS 8191
+#endif
+
+#ifndef CONFIG_TIPC_LOG
+#define CONFIG_TIPC_LOG 0
+#endif
+
+/* global variables used by multiple sub-systems within TIPC */
+
+int tipc_mode = TIPC_NOT_RUNNING;
+int tipc_random;
+atomic_t tipc_user_count = ATOMIC_INIT(0);
+
+const char tipc_alphabet[] = 
+       "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
+
+/* configurable TIPC parameters */
+
+u32 tipc_own_addr;
+int tipc_max_zones;
+int tipc_max_clusters;
+int tipc_max_nodes;
+int tipc_max_slaves;
+int tipc_max_ports;
+int tipc_max_subscriptions;
+int tipc_max_publications;
+int tipc_net_id;
+int tipc_remote_management;
+
+
+int tipc_get_mode(void)
+{
+       return tipc_mode;
+}
+
+/**
+ * stop_net - shut down TIPC networking sub-systems
+ */
+
+void stop_net(void)
+{
+       eth_media_stop();
+       tipc_stop_net();
+}
+
+/**
+ * start_net - start TIPC networking sub-systems
+ */
+
+int start_net(void)
+{
+       int res;
+
+       if ((res = tipc_start_net()) ||
+           (res = eth_media_start())) {
+               stop_net();
+       }
+       return res;
+}
+
+/**
+ * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
+ */
+
+void stop_core(void)
+{
+       if (tipc_mode != TIPC_NODE_MODE)
+               return;
+
+       tipc_mode = TIPC_NOT_RUNNING;
+
+       netlink_stop();
+       handler_stop();
+       cfg_stop();
+       subscr_stop();
+       reg_stop();
+       nametbl_stop();
+       ref_table_stop();
+       socket_stop();
+}
+
+/**
+ * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
+ */
+
+int start_core(void)
+{
+       int res;
+
+       if (tipc_mode != TIPC_NOT_RUNNING)
+               return -ENOPROTOOPT;
+
+       get_random_bytes(&tipc_random, sizeof(tipc_random));
+       tipc_mode = TIPC_NODE_MODE;
+
+       if ((res = handler_start()) || 
+           (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
+                                 tipc_random)) ||
+           (res = reg_start()) ||
+           (res = nametbl_init()) ||
+            (res = k_signal((Handler)subscr_start, 0)) ||
+           (res = k_signal((Handler)cfg_init, 0)) || 
+           (res = netlink_start()) ||
+           (res = socket_init())) {
+               stop_core();
+       }
+       return res;
+}
+
+
+static int __init tipc_init(void)
+{
+       int res;
+
+       log_reinit(CONFIG_TIPC_LOG);
+       info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
+
+       tipc_own_addr = 0;
+       tipc_remote_management = 1;
+       tipc_max_publications = 10000;
+       tipc_max_subscriptions = 2000;
+       tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
+       tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
+       tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
+       tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
+       tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
+       tipc_net_id = 4711;
+
+       if ((res = start_core()))
+               err("Unable to start in single node mode\n");
+       else    
+               info("Started in single node mode\n");
+        return res;
+}
+
+static void __exit tipc_exit(void)
+{
+       stop_net();
+       stop_core();
+       info("Deactivated\n");
+       log_stop();
+}
+
+module_init(tipc_init);
+module_exit(tipc_exit);
+
+MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/* Native TIPC API for kernel-space applications (see tipc.h) */
+
+EXPORT_SYMBOL(tipc_attach);
+EXPORT_SYMBOL(tipc_detach);
+EXPORT_SYMBOL(tipc_get_addr);
+EXPORT_SYMBOL(tipc_get_mode);
+EXPORT_SYMBOL(tipc_createport);
+EXPORT_SYMBOL(tipc_deleteport);
+EXPORT_SYMBOL(tipc_ownidentity);
+EXPORT_SYMBOL(tipc_portimportance);
+EXPORT_SYMBOL(tipc_set_portimportance);
+EXPORT_SYMBOL(tipc_portunreliable);
+EXPORT_SYMBOL(tipc_set_portunreliable);
+EXPORT_SYMBOL(tipc_portunreturnable);
+EXPORT_SYMBOL(tipc_set_portunreturnable);
+EXPORT_SYMBOL(tipc_publish);
+EXPORT_SYMBOL(tipc_withdraw);
+EXPORT_SYMBOL(tipc_connect2port);
+EXPORT_SYMBOL(tipc_disconnect);
+EXPORT_SYMBOL(tipc_shutdown);
+EXPORT_SYMBOL(tipc_isconnected);
+EXPORT_SYMBOL(tipc_peer);
+EXPORT_SYMBOL(tipc_ref_valid);
+EXPORT_SYMBOL(tipc_send);
+EXPORT_SYMBOL(tipc_send_buf);
+EXPORT_SYMBOL(tipc_send2name);
+EXPORT_SYMBOL(tipc_forward2name);
+EXPORT_SYMBOL(tipc_send_buf2name);
+EXPORT_SYMBOL(tipc_forward_buf2name);
+EXPORT_SYMBOL(tipc_send2port);
+EXPORT_SYMBOL(tipc_forward2port);
+EXPORT_SYMBOL(tipc_send_buf2port);
+EXPORT_SYMBOL(tipc_forward_buf2port);
+EXPORT_SYMBOL(tipc_multicast);
+/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
+EXPORT_SYMBOL(tipc_ispublished);
+EXPORT_SYMBOL(tipc_available_nodes);
+
+/* TIPC API for external bearers (see tipc_bearer.h) */
+
+EXPORT_SYMBOL(tipc_block_bearer);
+EXPORT_SYMBOL(tipc_continue); 
+EXPORT_SYMBOL(tipc_disable_bearer);
+EXPORT_SYMBOL(tipc_enable_bearer);
+EXPORT_SYMBOL(tipc_recv_msg);
+EXPORT_SYMBOL(tipc_register_media); 
+
+/* TIPC API for external APIs (see tipc_port.h) */
+
+EXPORT_SYMBOL(tipc_createport_raw);
+EXPORT_SYMBOL(tipc_set_msg_option);
+EXPORT_SYMBOL(tipc_reject_msg);
+EXPORT_SYMBOL(tipc_send_buf_fast);
+EXPORT_SYMBOL(tipc_acknowledge);
+EXPORT_SYMBOL(tipc_get_port);
+EXPORT_SYMBOL(tipc_get_handle);
+
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644 (file)
index 0000000..b69b60b
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * net/tipc/core.h: Include file for TIPC global declarations
+ * 
+ * Copyright (c) 2005-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_CORE_H
+#define _TIPC_CORE_H
+
+#include <net/tipc/tipc.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <asm/uaccess.h>
+#include <linux/interrupt.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>  
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+/*
+ * TIPC debugging code
+ */
+
+#define assert(i)  BUG_ON(!(i))
+
+struct tipc_msg;
+extern struct print_buf *CONS, *LOG;
+extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
+void msg_print(struct print_buf*,struct tipc_msg *,const char*);
+void tipc_printf(struct print_buf *, const char *fmt, ...);
+void tipc_dump(struct print_buf*,const char *fmt, ...);
+
+#ifdef CONFIG_TIPC_DEBUG
+
+/*
+ * TIPC debug support included:
+ * - system messages are printed to TIPC_OUTPUT print buffer
+ * - debug messages are printed to DBG_OUTPUT print buffer
+ */
+
+#define err(fmt, arg...)  tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
+#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
+#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
+
+#define dbg(fmt, arg...)  do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
+#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
+#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
+
+
+/*     
+ * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
+ * while DBG_OUTPUT is the null print buffer.  These defaults can be changed
+ * here, or on a per .c file basis, by redefining these symbols.  The following
+ * print buffer options are available:
+ *
+ * NULL                        : Output to null print buffer (i.e. print nowhere)
+ * CONS                        : Output to system console
+ * LOG                 : Output to TIPC log buffer 
+ * &buf                : Output to user-defined buffer (struct print_buf *)
+ * TEE(&buf_a,&buf_b)  : Output to two print buffers (e.g. TEE(CONS,LOG))
+ */
+
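+/*
+ * Illustrative (hypothetical) per-file override: a .c file wanting its
+ * debug output on both the console and the log could do
+ *
+ *   #define DBG_OUTPUT TEE(CONS, LOG)
+ *
+ * before including this header.
+ */
+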
+#ifndef TIPC_OUTPUT
+#define TIPC_OUTPUT TEE(CONS,LOG)
+#endif
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+#else
+
+#ifndef DBG_OUTPUT
+#define DBG_OUTPUT NULL
+#endif
+
+/*
+ * TIPC debug support not included:
+ * - system messages are printed to system console
+ * - debug messages are not printed
+ */
+
+#define err(fmt, arg...)  printk(KERN_ERR "TIPC: " fmt , ## arg)
+#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
+#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
+
+#define dbg(fmt, arg...) do {} while (0)
+#define msg_dbg(msg,txt) do {} while (0)
+#define dump(fmt,arg...) do {} while (0)
+
+#endif                   
+
+
+/* 
+ * TIPC-specific error codes
+ */
+
+#define ELINKCONG EAGAIN       /* link congestion <=> resource unavailable */
+
+/*
+ * Global configuration variables
+ */
+
+extern u32 tipc_own_addr;
+extern int tipc_max_zones;
+extern int tipc_max_clusters;
+extern int tipc_max_nodes;
+extern int tipc_max_slaves;
+extern int tipc_max_ports;
+extern int tipc_max_subscriptions;
+extern int tipc_max_publications;
+extern int tipc_net_id;
+extern int tipc_remote_management;
+
+/*
+ * Other global variables
+ */
+
+extern int tipc_mode;
+extern int tipc_random;
+extern const char tipc_alphabet[];
+extern atomic_t tipc_user_count;
+
+
+/*
+ * Routines available to privileged subsystems
+ */
+
+extern int  start_core(void);
+extern void stop_core(void);
+extern int  start_net(void);
+extern void stop_net(void);
+
+static inline int delimit(int val, int min, int max)
+{
+       if (val > max)
+               return max;
+       if (val < min)
+               return min;
+       return val;
+}
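+/* e.g. delimit(100000, 127, 65535) == 65535; delimit(8191, 127, 65535) == 8191 */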
+
+
+/*
+ * TIPC timer and signal code
+ */
+
+typedef void (*Handler) (unsigned long);
+
+u32 k_signal(Handler routine, unsigned long argument);
+
+/**
+ * k_init_timer - initialize a timer
+ * @timer: pointer to timer structure
+ * @routine: pointer to routine to invoke when timer expires
+ * @argument: value to pass to routine when timer expires
+ * 
+ * Timer must be initialized before use (and terminated when no longer needed).
+ */
+
+static inline void k_init_timer(struct timer_list *timer, Handler routine, 
+                               unsigned long argument)
+{
+       dbg("initializing timer %p\n", timer);
+       init_timer(timer);
+       timer->function = routine;
+       timer->data = argument;
+}
+
+/**
+ * k_start_timer - start a timer
+ * @timer: pointer to timer structure
+ * @msec: time to delay (in ms)
+ * 
+ * Schedules a previously initialized timer for later execution.
+ * If timer is already running, the new timeout overrides the previous request.
+ * 
+ * To ensure the timer doesn't expire before the specified delay elapses,
+ * the delay is rounded up when converting to jiffies, and an additional
+ * jiffy is added to account for the fact that the starting time may fall
+ * in the middle of the current jiffy.
+ */
+
+static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
+{
+       dbg("starting timer %p for %u\n", timer, msec);
+       mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
+}
+
+/**
+ * k_cancel_timer - cancel a timer
+ * @timer: pointer to timer structure
+ * 
+ * Cancels a previously initialized timer.  
+ * Can be called safely even if the timer is already inactive.
+ * 
+ * WARNING: Must not be called when holding locks required by the timer's
+ *          timeout routine, otherwise deadlock can occur on SMP systems!
+ */
+
+static inline void k_cancel_timer(struct timer_list *timer)
+{
+       dbg("cancelling timer %p\n", timer);
+       del_timer_sync(timer);
+}
+
+/**
+ * k_term_timer - terminate a timer
+ * @timer: pointer to timer structure
+ * 
+ * Prevents further use of a previously initialized timer.
+ * 
+ * WARNING: Caller must ensure timer isn't currently running.
+ * 
+ * (Do not "enhance" this routine to automatically cancel an active timer,
+ * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
+ */
+
+static inline void k_term_timer(struct timer_list *timer)
+{
+       dbg("terminating timer %p\n", timer);
+}
+
+
+/*
+ * TIPC message buffer code
+ *
+ * TIPC message buffer headroom leaves room for a 14-byte Ethernet header,
+ * while ensuring the TIPC header is word aligned for quicker access
+ */
+
+#define BUF_HEADROOM 16u 
+
+struct tipc_skb_cb {
+       void *handle;
+};
+
+#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
+
+
+static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
+{
+       return (struct tipc_msg *)skb->data;
+}
+
+/**
+ * buf_acquire - creates a TIPC message buffer
+ * @size: message size (including TIPC header)
+ *
+ * Returns a new buffer (or NULL if allocation fails).  Space is reserved
+ * for a data link header.
+ */
+
+static inline struct sk_buff *buf_acquire(u32 size)
+{
+       struct sk_buff *skb;
+       unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
+
+       skb = alloc_skb(buf_size, GFP_ATOMIC);
+       if (skb) {
+               skb_reserve(skb, BUF_HEADROOM);
+               skb_put(skb, size);
+               skb->next = NULL;
+       }
+       return skb;
+}
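+
+/*
+ * For example, buf_acquire(22) requests a 40-byte data area
+ * ((16 + 22 + 3) & ~3), reserves the 16-byte headroom, and exposes
+ * 22 bytes of message data via skb_put().
+ */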
+
+/**
+ * buf_discard - frees a TIPC message buffer
+ * @skb: message buffer
+ *
+ * Frees a message buffer.  If passed NULL, just returns.
+ */
+
+static inline void buf_discard(struct sk_buff *skb)
+{
+       if (likely(skb != NULL))
+               kfree_skb(skb);
+}
+
+#endif                 
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644 (file)
index 0000000..7ed60a1
--- /dev/null
@@ -0,0 +1,395 @@
+/*
+ * net/tipc/dbg.c: TIPC print buffer routines for debugging
+ * 
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+
+#define MAX_STRING 512
+
+static char print_string[MAX_STRING];
+static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
+
+static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
+struct print_buf *CONS = &cons_buf;
+
+static struct print_buf log_buf = { NULL, 0, NULL, NULL };
+struct print_buf *LOG = &log_buf;
+
+
+#define FORMAT(PTR,LEN,FMT) \
+{\
+       va_list args;\
+       va_start(args, FMT);\
+       LEN = vsprintf(PTR, FMT, args);\
+       va_end(args);\
+       *(PTR + LEN) = '\0';\
+}
+
+/*
+ * Locking policy when using print buffers.
+ *
+ * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
+ *    simultaneous use of the print buffer(s) being manipulated.
+ * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
+ *    'print_string' and to protect its print buffer(s).
+ * 3) TEE() uses 'print_lock' to protect its print buffer(s).
+ * 4) Routines of the form log_XXX() use 'print_lock' to protect LOG.
+ */
+
+/**
+ * printbuf_init - initialize print buffer to empty
+ */
+
+void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
+{
+       if (!pb || !raw || (sz < (MAX_STRING + 1)))
+               return;
+
+       pb->crs = pb->buf = raw;
+       pb->size = sz;
+       pb->next = 0;
+       pb->buf[0] = 0;
+       pb->buf[sz-1] = ~0;
+}
+
+/**
+ * printbuf_reset - reinitialize print buffer to empty state
+ */
+
+void printbuf_reset(struct print_buf *pb)
+{
+       if (pb && pb->buf)
+               printbuf_init(pb, pb->buf, pb->size);
+}
+
+/**
+ * printbuf_empty - test if print buffer is in empty state
+ */
+
+int printbuf_empty(struct print_buf *pb)
+{
+       return (!pb || !pb->buf || (pb->crs == pb->buf));
+}
+
+/**
+ * printbuf_validate - check for print buffer overflow
+ * 
+ * Verifies that a print buffer has captured all data written to it. 
+ * If data has been lost, linearize the buffer and prepend an error message.
+ * 
+ * Returns length of print buffer data string (including trailing NULL)
+ */
+
+int printbuf_validate(struct print_buf *pb)
+{
+        char *err = "             *** PRINT BUFFER WRAPPED AROUND ***\n";
+        char *cp_buf;
+        struct print_buf cb;
+
+       if (!pb || !pb->buf)
+               return 0;
+
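+       /* the final buffer byte is '\0' only if tipc_printf() has wrapped past the end */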
+       if (pb->buf[pb->size - 1] == '\0') {
+                cp_buf = kmalloc(pb->size, GFP_ATOMIC);
+                if (cp_buf != NULL){
+                        printbuf_init(&cb, cp_buf, pb->size);
+                        printbuf_move(&cb, pb);
+                        printbuf_move(pb, &cb);
+                        kfree(cp_buf);
+                        memcpy(pb->buf, err, strlen(err));
+                } else {
+                        printbuf_reset(pb);
+                        tipc_printf(pb, err);
+                }
+       }
+       return (pb->crs - pb->buf + 1);
+}
+
+/**
+ * printbuf_move - move print buffer contents to another print buffer
+ * 
+ * Current contents of destination print buffer (if any) are discarded.
+ * Source print buffer becomes empty if a successful move occurs.
+ */
+
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
+{
+       int len;
+
+       /* Handle the cases where contents can't be moved */
+
+       if (!pb_to || !pb_to->buf)
+               return;
+
+       if (!pb_from || !pb_from->buf) {
+               printbuf_reset(pb_to);
+               return;
+       }
+
+       if (pb_to->size < pb_from->size) {
+               printbuf_reset(pb_to);
+               tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
+               return;
+       }
+
+       /* Copy data from char after cursor to end (if used) */
+       len = pb_from->buf + pb_from->size - pb_from->crs - 2;
+       if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
+               strcpy(pb_to->buf, pb_from->crs + 1);
+               pb_to->crs = pb_to->buf + len;
+       } else
+               pb_to->crs = pb_to->buf;
+
+       /* Copy data from start to cursor (always) */
+       len = pb_from->crs - pb_from->buf;
+       strcpy(pb_to->crs, pb_from->buf);
+       pb_to->crs += len;
+
+       printbuf_reset(pb_from);
+}
+
+/**
+ * tipc_printf - append formatted output to print buffer chain
+ */
+
+void tipc_printf(struct print_buf *pb, const char *fmt, ...)
+{
+       int chars_to_add;
+       int chars_left;
+       char save_char;
+       struct print_buf *pb_next;
+
+       spin_lock_bh(&print_lock);
+       FORMAT(print_string, chars_to_add, fmt);
+       if (chars_to_add >= MAX_STRING)
+               strcpy(print_string, "*** STRING TOO LONG ***");
+
+       while (pb) {
+               if (pb == CONS)
+                       printk(print_string);
+               else if (pb->buf) {
+                       chars_left = pb->buf + pb->size - pb->crs - 1;
+                       if (chars_to_add <= chars_left) {
+                               strcpy(pb->crs, print_string);
+                               pb->crs += chars_to_add;
+                       } else {
+                               strcpy(pb->buf, print_string + chars_left);
+                                save_char = print_string[chars_left];
+                                print_string[chars_left] = 0;
+                                strcpy(pb->crs, print_string);
+                                print_string[chars_left] = save_char;
+                                pb->crs = pb->buf + chars_to_add - chars_left;
+                        }
+                }
+               pb_next = pb->next;
+               pb->next = 0;
+               pb = pb_next;
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * TEE - perform next output operation on both print buffers  
+ */
+
+struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
+{
+       struct print_buf *pb = b0;
+
+       if (!b0 || (b0 == b1))
+               return b1;
+       if (!b1)
+               return b0;
+
+       spin_lock_bh(&print_lock);
+       while (pb->next) {
+               if ((pb->next == b1) || (pb->next == b0))
+                       pb->next = pb->next->next;
+               else
+                       pb = pb->next;
+       }
+       pb->next = b1;
+       spin_unlock_bh(&print_lock);
+       return b0;
+}
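+
+/*
+ * Illustrative use, sending one message to both the console and the log:
+ *
+ *   tipc_printf(TEE(CONS, LOG), "resetting link\n");
+ */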
+
+/**
+ * print_to_console - write string of bytes to console in multiple chunks
+ */
+
+static void print_to_console(char *crs, int len)
+{
+       int rest = len;
+
+       while (rest > 0) {
+               int sz = rest < MAX_STRING ? rest : MAX_STRING;
+               char c = crs[sz];
+
+               crs[sz] = 0;
+               printk((const char *)crs);
+               crs[sz] = c;
+               rest -= sz;
+               crs += sz;
+       }
+}
+
+/**
+ * printbuf_dump - write print buffer contents to console
+ */
+
+static void printbuf_dump(struct print_buf *pb)
+{
+       int len;
+
+       /* Dump print buffer from char after cursor to end (if used) */
+       len = pb->buf + pb->size - pb->crs - 2;
+       if ((pb->buf[pb->size - 1] == 0) && (len > 0))
+               print_to_console(pb->crs + 1, len);
+
+       /* Dump print buffer from start to cursor (always) */
+       len = pb->crs - pb->buf;
+       print_to_console(pb->buf, len);
+}
+
+/**
+ * tipc_dump - dump non-console print buffer(s) to console
+ */
+
+void tipc_dump(struct print_buf *pb, const char *fmt, ...)
+{
+       int len;
+
+       spin_lock_bh(&print_lock);
+       FORMAT(CONS->buf, len, fmt);
+       printk(CONS->buf);
+
+       for (; pb; pb = pb->next) {
+               if (pb == CONS)
+                       continue;
+               printk("\n---- Start of dump, %s log ----\n\n",
+                      (pb == LOG) ? "global" : "local");
+               printbuf_dump(pb);
+               printbuf_reset(pb);
+               printk("\n-------- End of dump --------\n");
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_stop - free up TIPC log print buffer 
+ */
+
+void log_stop(void)
+{
+       spin_lock_bh(&print_lock);
+       if (LOG->buf) {
+               kfree(LOG->buf);
+               LOG->buf = NULL;
+       }
+       spin_unlock_bh(&print_lock);
+}
+
+/**
+ * log_reinit - set TIPC log print buffer to specified size
+ */
+
+void log_reinit(int log_size)
+{
+       log_stop();
+
+       if (log_size) {
+               if (log_size <= MAX_STRING)
+                       log_size = MAX_STRING + 1;
+               spin_lock_bh(&print_lock);
+               printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
+               spin_unlock_bh(&print_lock);
+       }
+}
+
+/**
+ * log_resize - reconfigure size of TIPC log buffer
+ */
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
+{
+       u32 value;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       value = *(u32 *)TLV_DATA(req_tlv_area);
+       value = ntohl(value);
+       if (value != delimit(value, 0, 32768))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (log size must be 0-32768)");
+       log_reinit(value);
+       return cfg_reply_none();
+}
+
+/**
+ * log_dump - capture TIPC log buffer contents in configuration message
+ */
+
+struct sk_buff *log_dump(void)
+{
+       struct sk_buff *reply;
+
+       spin_lock_bh(&print_lock);
+       if (!LOG->buf)
+               reply = cfg_reply_ultra_string("log not activated\n");
+       else if (printbuf_empty(LOG))
+               reply = cfg_reply_ultra_string("log is empty\n");
+       else {
+               struct tlv_desc *rep_tlv;
+               struct print_buf pb;
+               int str_len;
+
+               str_len = min(LOG->size, 32768u);
+               reply = cfg_reply_alloc(TLV_SPACE(str_len));
+               if (reply) {
+                       rep_tlv = (struct tlv_desc *)reply->data;
+                       printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
+                       printbuf_move(&pb, LOG);
+                       str_len = strlen(TLV_DATA(rep_tlv)) + 1;
+                       skb_put(reply, TLV_SPACE(str_len));
+                       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+               }
+       }
+       spin_unlock_bh(&print_lock);
+       return reply;
+}
+
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644 (file)
index 0000000..c6b2a64
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * net/tipc/dbg.h: Include file for TIPC print buffer routines
+ * 
+ * Copyright (c) 1997-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DBG_H
+#define _TIPC_DBG_H
+
+struct print_buf {
+       char *buf;
+       u32 size;
+       char *crs;
+       struct print_buf *next;
+};
+
+void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
+void printbuf_reset(struct print_buf *pb);
+int  printbuf_empty(struct print_buf *pb);
+int  printbuf_validate(struct print_buf *pb);
+void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
+
+void log_reinit(int log_size);
+void log_stop(void);
+
+struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *log_dump(void);
+
+#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644 (file)
index 0000000..b106ef1
--- /dev/null
@@ -0,0 +1,318 @@
+/*
+ * net/tipc/discover.c
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "zone.h"
+#include "discover.h"
+#include "port.h"
+#include "name_table.h"
+
+#define TIPC_LINK_REQ_INIT     125     /* min delay during bearer start up */
+#define TIPC_LINK_REQ_FAST     2000    /* normal delay if bearer has no links */
+#define TIPC_LINK_REQ_SLOW     600000  /* normal delay if bearer has links */
+
+#if 0
+#define  GET_NODE_INFO         300
+#define  GET_NODE_INFO_RESULT  301
+#define  FORWARD_LINK_PROBE    302
+#define  LINK_REQUEST_REJECTED 303
+#define  LINK_REQUEST_ACCEPTED 304
+#define  DROP_LINK_REQUEST     305
+#define  CHECK_LINK_COUNT      306
+#endif
+
+/*
+ * TODO: Most of the inter-cluster setup code should be
+ * rewritten and made conformant with the specification.
+ */
+
+
+/**
+ * struct link_req - information about an ongoing link setup request
+ * @bearer: bearer issuing requests
+ * @dest: destination address for request messages
+ * @buf: request message to be (repeatedly) sent
+ * @timer: timer governing period between requests
+ * @timer_intv: current interval between requests (in ms)
+ */
+struct link_req {
+       struct bearer *bearer;
+       struct tipc_media_addr dest;
+       struct sk_buff *buf;
+       struct timer_list timer;
+       unsigned int timer_intv;
+};
+
+
+#if 0
+int disc_create_link(const struct tipc_link_create *argv) 
+{
+       /* 
+        * Code for inter cluster link setup here 
+        */
+       return TIPC_OK;
+}
+#endif
+
+/*
+ * disc_link_event - handle a link up/down event
+ */
+
+void disc_link_event(u32 addr, char *name, int up) 
+{
+       if (in_own_cluster(addr))
+               return;
+       /* 
+        * Code for inter cluster link setup here 
+        */
+}
+
+/** 
+ * disc_init_msg - initialize a link setup message
+ * @type: message type (request or response)
+ * @req_links: number of links associated with message
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @b_ptr: ptr to bearer issuing message
+ */
+
+struct sk_buff *disc_init_msg(u32 type,
+                             u32 req_links,
+                             u32 dest_domain,
+                             struct bearer *b_ptr)
+{
+       struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
+       struct tipc_msg *msg;
+
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
+                        dest_domain);
+               msg_set_non_seq(msg);
+               msg_set_req_links(msg, req_links);
+               msg_set_dest_domain(msg, dest_domain);
+               msg_set_bc_netid(msg, tipc_net_id);
+               msg_set_media_addr(msg, &b_ptr->publ.addr);
+       }
+       return buf;
+}
+
+/**
+ * disc_recv_msg - handle incoming link setup message (request or response)
+ * @buf: buffer containing message
+ */
+
+void disc_recv_msg(struct sk_buff *buf)
+{
+       struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
+       struct link *link;
+       struct tipc_media_addr media_addr;
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 dest = msg_dest_domain(msg);
+       u32 orig = msg_prevnode(msg);
+       u32 net_id = msg_bc_netid(msg);
+       u32 type = msg_type(msg);
+
+       msg_get_media_addr(msg, &media_addr);
+       msg_dbg(msg, "RECV:");
+       buf_discard(buf);
+
+       if (net_id != tipc_net_id)
+               return;
+       if (!addr_domain_valid(dest))
+               return;
+       if (!addr_node_valid(orig))
+               return;
+       if (orig == tipc_own_addr)
+               return;
+       if (!in_scope(dest, tipc_own_addr))
+               return;
+       if (is_slave(tipc_own_addr) && is_slave(orig))
+               return;
+       if (is_slave(orig) && !in_own_cluster(orig))
+               return;
+       if (in_own_cluster(orig)) {
+               /* Always accept link here */
+               struct sk_buff *rbuf;
+               struct tipc_media_addr *addr;
+               struct node *n_ptr = node_find(orig);
+               int link_up;
+               dbg(" in own cluster\n");
+               if (n_ptr == NULL) {
+                       n_ptr = node_create(orig);
+               }
+               if (n_ptr == NULL) {
+                       warn("Memory squeeze; Failed to create node\n");
+                       return;
+               }
+               spin_lock_bh(&n_ptr->lock);
+               link = n_ptr->links[b_ptr->identity];
+               if (!link) {
+                       dbg("creating link\n");
+                       link = link_create(b_ptr, orig, &media_addr);
+                       if (!link) {
+                               spin_unlock_bh(&n_ptr->lock);                
+                               return;
+                       }
+               }
+               addr = &link->media_addr;
+               if (memcmp(addr, &media_addr, sizeof(*addr))) {
+                       char addr_string[16];
+
+                       warn("New bearer address for %s\n", 
+                            addr_string_fill(addr_string, orig));
+                       memcpy(addr, &media_addr, sizeof(*addr));
+                       link_reset(link);     
+               }
+               link_up = link_is_up(link);
+               spin_unlock_bh(&n_ptr->lock);                
+               if ((type == DSC_RESP_MSG) || link_up)
+                       return;
+               rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
+               if (rbuf != NULL) {
+                       msg_dbg(buf_msg(rbuf),"SEND:");
+                       b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
+                       buf_discard(rbuf);
+               }
+       }
+}
+
+/**
+ * disc_stop_link_req - stop sending periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_stop_link_req(struct link_req *req) 
+{
+       if (!req)
+               return;
+               
+       k_cancel_timer(&req->timer);
+       k_term_timer(&req->timer);
+       buf_discard(req->buf);
+       kfree(req);
+} 
+
+/**
+ * disc_update_link_req - update frequency of periodic link setup requests
+ * @req: ptr to link request structure
+ */
+
+void disc_update_link_req(struct link_req *req) 
+{
+       if (!req)
+               return;
+
+       if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
+               if (!req->bearer->nodes.count) {
+                       req->timer_intv = TIPC_LINK_REQ_FAST;
+                       k_start_timer(&req->timer, req->timer_intv);
+               }
+       } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
+               if (req->bearer->nodes.count) {
+                       req->timer_intv = TIPC_LINK_REQ_SLOW;
+                       k_start_timer(&req->timer, req->timer_intv);
+               }
+       } else {
+               /* leave timer "as is" if we haven't yet reached a "normal" rate */
+       }
+} 
+
+/**
+ * disc_timeout - send a periodic link setup request
+ * @req: ptr to link request structure
+ * 
+ * Called whenever a link setup request timer associated with a bearer expires.
+ */
+
+static void disc_timeout(struct link_req *req) 
+{
+       spin_lock_bh(&req->bearer->publ.lock);
+
+       req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
+
+       if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
+           (req->timer_intv == TIPC_LINK_REQ_FAST)) {
+               /* leave timer interval "as is" if already at a "normal" rate */
+       } else {
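+               /*
+                * Keep doubling the interval (125 -> 250 -> 500 -> 1000 ->
+                * 2000 ms when starting from TIPC_LINK_REQ_INIT), capping at
+                * TIPC_LINK_REQ_SLOW; once TIPC_LINK_REQ_FAST is reached and
+                * the bearer has nodes, drop straight to the slow rate.
+                */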
+               req->timer_intv *= 2;
+               if (req->timer_intv > TIPC_LINK_REQ_SLOW)
+                       req->timer_intv = TIPC_LINK_REQ_SLOW;
+               if ((req->timer_intv == TIPC_LINK_REQ_FAST) && 
+                   (req->bearer->nodes.count))
+                       req->timer_intv = TIPC_LINK_REQ_SLOW;
+       }
+       k_start_timer(&req->timer, req->timer_intv);
+
+       spin_unlock_bh(&req->bearer->publ.lock);
+}
+
+/**
+ * disc_init_link_req - start sending periodic link setup requests
+ * @b_ptr: ptr to bearer issuing requests
+ * @dest: destination address for request messages
+ * @dest_domain: network domain of node(s) which should respond to message
+ * @req_links: max number of desired links
+ * 
+ * Returns pointer to link request structure, or NULL if unable to create.
+ */
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+                                   const struct tipc_media_addr *dest,
+                                   u32 dest_domain,
+                                   u32 req_links) 
+{
+       struct link_req *req;
+
+       req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
+       if (!req)
+               return NULL;
+
+       req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
+       if (!req->buf) {
+               kfree(req);
+               return NULL;
+       }
+
+       memcpy(&req->dest, dest, sizeof(*dest));
+       req->bearer = b_ptr;
+       req->timer_intv = TIPC_LINK_REQ_INIT;
+       k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
+       k_start_timer(&req->timer, req->timer_intv);
+       return req;
+} 
+
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644 (file)
index 0000000..2a6114d
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * net/tipc/discover.h
+ *
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_DISCOVER_H
+#define _TIPC_DISCOVER_H
+
+#include <linux/tipc.h>
+
+struct link_req;
+
+struct link_req *disc_init_link_req(struct bearer *b_ptr, 
+                                   const struct tipc_media_addr *dest,
+                                   u32 dest_domain,
+                                   u32 req_links);
+void disc_update_link_req(struct link_req *req);
+void disc_stop_link_req(struct link_req *req);
+
+void disc_recv_msg(struct sk_buff *buf);
+
+void disc_link_event(u32 addr, char *name, int up);
+#if 0
+int  disc_create_link(const struct tipc_link_create *argv);
+#endif
+
+#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644 (file)
index 0000000..34d0462
--- /dev/null
@@ -0,0 +1,299 @@
+/*
+ * net/tipc/eth_media.c: Ethernet bearer support for TIPC
+ * 
+ * Copyright (c) 2001-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <net/tipc/tipc.h>
+#include <net/tipc/tipc_bearer.h>
+#include <net/tipc/tipc_msg.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+
+#define MAX_ETH_BEARERS                2
+#define TIPC_PROTOCOL          0x88ca
+#define ETH_LINK_PRIORITY      10
+#define ETH_LINK_TOLERANCE     TIPC_DEF_LINK_TOL
+
+
+/**
+ * struct eth_bearer - Ethernet bearer data structure
+ * @bearer: ptr to associated "generic" bearer structure
+ * @dev: ptr to associated Ethernet network device
+ * @tipc_packet_type: used in binding TIPC to Ethernet driver
+ */
+struct eth_bearer {
+       struct tipc_bearer *bearer;
+       struct net_device *dev;
+       struct packet_type tipc_packet_type;
+};
+
+static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
+static int eth_started = 0;
+static struct notifier_block notifier;
+
+/**
+ * send_msg - send a TIPC message out over an Ethernet interface 
+ */
+
+static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr, 
+                   struct tipc_media_addr *dest)
+{
+       struct sk_buff *clone;
+       struct net_device *dev;
+
+       clone = skb_clone(buf, GFP_ATOMIC);
+       if (clone) {
+               clone->nh.raw = clone->data;
+               dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
+               clone->dev = dev;
+               dev->hard_header(clone, dev, TIPC_PROTOCOL, 
+                                &dest->dev_addr.eth_addr,
+                                dev->dev_addr, clone->len);
+               dev_queue_xmit(clone);
+       }
+       return TIPC_OK;
+}
+
+/**
+ * recv_msg - handle incoming TIPC message from an Ethernet interface
+ * 
+ * Routine truncates any Ethernet padding/CRC appended to the message,
+ * and ensures message size matches actual length
+ */
+
+static int recv_msg(struct sk_buff *buf, struct net_device *dev, 
+                   struct packet_type *pt, struct net_device *orig_dev)
+{
+       struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
+       u32 size;
+
+       if (likely(eb_ptr->bearer)) {
+               size = msg_size((struct tipc_msg *)buf->data);
+               skb_trim(buf, size);
+               if (likely(buf->len == size)) {
+                       buf->next = NULL;
+                       tipc_recv_msg(buf, eb_ptr->bearer);
+               } else {
+                       kfree_skb(buf);
+               }
+       } else {
+               kfree_skb(buf);
+       }
+       return TIPC_OK;
+}
+
+/**
+ * enable_bearer - attach TIPC bearer to an Ethernet interface 
+ */
+
+static int enable_bearer(struct tipc_bearer *tb_ptr)
+{
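+       /* bearer name has the form "<media>:<device>" (e.g. "eth:eth0"),
+        * so the device name is the part following the ':'
+        */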
+       struct net_device *dev = dev_base;
+       struct eth_bearer *eb_ptr = &eth_bearers[0];
+       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+       char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
+
+       /* Find device with specified name */
+
+       while (dev && dev->name &&
+              (memcmp(dev->name, driver_name, strlen(dev->name)))) {
+               dev = dev->next;
+       }
+       if (!dev)
+               return -ENODEV;
+
+       /* Find Ethernet bearer for device (or create one) */
+
+       for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
+       if (eb_ptr == stop)
+               return -EDQUOT;
+       if (!eb_ptr->dev) {
+               eb_ptr->dev = dev;
+               eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
+               eb_ptr->tipc_packet_type.dev = dev;
+               eb_ptr->tipc_packet_type.func = recv_msg;
+               eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
+               INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
+               dev_hold(dev);
+               dev_add_pack(&eb_ptr->tipc_packet_type);
+       }
+
+       /* Associate TIPC bearer with Ethernet bearer */
+
+       eb_ptr->bearer = tb_ptr;
+       tb_ptr->usr_handle = (void *)eb_ptr;
+       tb_ptr->mtu = dev->mtu;
+       tb_ptr->blocked = 0; 
+       tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
+       memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
+       return 0;
+}
+
+/**
+ * disable_bearer - detach TIPC bearer from an Ethernet interface 
+ *
+ * We really should do dev_remove_pack() here, but this function cannot be
+ * called at tasklet level.  Instead, eth_bearer->bearer is used as a flag to
+ * throw away incoming buffers, and dev_remove_pack() is postponed until
+ * eth_media_stop() at module exit.
+ */
+
+static void disable_bearer(struct tipc_bearer *tb_ptr)
+{
+       ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = NULL;
+}
+
+/**
+ * recv_notification - handle device updates from OS
+ *
+ * Change the state of the Ethernet bearer (if any) associated with the 
+ * specified device.
+ */
+
+static int recv_notification(struct notifier_block *nb, unsigned long evt, 
+                            void *dv)
+{
+       struct net_device *dev = (struct net_device *)dv;
+       struct eth_bearer *eb_ptr = &eth_bearers[0];
+       struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
+
+       while ((eb_ptr->dev != dev)) {
+               if (++eb_ptr == stop)
+                       return NOTIFY_DONE;     /* couldn't find device */
+       }
+       if (!eb_ptr->bearer)
+               return NOTIFY_DONE;             /* bearer had been disabled */
+
+        eb_ptr->bearer->mtu = dev->mtu;
+
+       switch (evt) {
+       case NETDEV_CHANGE:
+               if (netif_carrier_ok(dev))
+                       tipc_continue(eb_ptr->bearer);
+               else
+                       tipc_block_bearer(eb_ptr->bearer->name);
+               break;
+       case NETDEV_UP:
+               tipc_continue(eb_ptr->bearer);
+               break;
+       case NETDEV_DOWN:
+               tipc_block_bearer(eb_ptr->bearer->name);
+               break;
+       case NETDEV_CHANGEMTU:
+        case NETDEV_CHANGEADDR:
+               tipc_block_bearer(eb_ptr->bearer->name);
+                tipc_continue(eb_ptr->bearer);
+               break;
+       case NETDEV_UNREGISTER:
+        case NETDEV_CHANGENAME:
+               tipc_disable_bearer(eb_ptr->bearer->name);
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+/**
+ * eth_addr2str - convert Ethernet address to string
+ */
+
+static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
+{                       
+       unchar *addr = (unchar *)&a->dev_addr;
+
+       if (str_size < 18)
+               *str_buf = '\0';
+       else
+               sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+                       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
+       return str_buf;
+}
+
+/**
+ * eth_media_start - activate Ethernet bearer support
+ *
+ * Register Ethernet media type with TIPC bearer code.  Also register
+ * with OS for notifications about device state changes.
+ */
+
+int eth_media_start(void)
+{                       
+       struct tipc_media_addr bcast_addr;
+       int res;
+
+       if (eth_started)
+               return -EINVAL;
+
+       memset(&bcast_addr, 0xff, sizeof(bcast_addr));
+       memset(eth_bearers, 0, sizeof(eth_bearers));
+
+       res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
+                                 enable_bearer, disable_bearer, send_msg, 
+                                 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY, 
+                                 ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
+       if (res)
+               return res;
+
+       notifier.notifier_call = &recv_notification;
+       notifier.priority = 0;
+       res = register_netdevice_notifier(&notifier);
+       if (!res)
+               eth_started = 1;
+       return res;
+}
+
+/**
+ * eth_media_stop - deactivate Ethernet bearer support
+ */
+
+void eth_media_stop(void)
+{
+       int i;
+
+       if (!eth_started)
+               return;
+
+       unregister_netdevice_notifier(&notifier);
+       for (i = 0; i < MAX_ETH_BEARERS ; i++) {
+               if (eth_bearers[i].bearer) {
+                       eth_bearers[i].bearer->blocked = 1;
+                       eth_bearers[i].bearer = NULL;
+               }
+               if (eth_bearers[i].dev) {
+                       dev_remove_pack(&eth_bearers[i].tipc_packet_type);
+                       dev_put(eth_bearers[i].dev);
+               }
+       }
+       memset(&eth_bearers, 0, sizeof(eth_bearers));
+       eth_started = 0;
+}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644 (file)
index 0000000..f320010
--- /dev/null
@@ -0,0 +1,132 @@
+/*
+ * net/tipc/handler.c: TIPC signal handling
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+
+struct queue_item {
+       struct list_head next_signal;
+       void (*handler) (unsigned long);
+       unsigned long data;
+};
+
+static kmem_cache_t *tipc_queue_item_cache;
+static struct list_head signal_queue_head;
+static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
+static int handler_enabled = 0;
+
+static void process_signal_queue(unsigned long dummy);
+
+static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
+
+
+unsigned int k_signal(Handler routine, unsigned long argument)
+{
+       struct queue_item *item;
+
+       if (!handler_enabled) {
+               err("Signal request ignored by handler\n");
+               return -ENOPROTOOPT;
+       }
+
+       spin_lock_bh(&qitem_lock);
+       item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
+       if (!item) {
+               err("Signal queue out of memory\n");
+               spin_unlock_bh(&qitem_lock);
+               return -ENOMEM;
+       }
+       item->handler = routine;
+       item->data = argument;
+       list_add_tail(&item->next_signal, &signal_queue_head);
+       spin_unlock_bh(&qitem_lock);
+       tasklet_schedule(&tipc_tasklet);
+       return 0;
+}
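+
+/*
+ * Illustrative usage of k_signal() (as done for link startup later in
+ * this patch):
+ *
+ *     k_signal((Handler)link_start, (unsigned long)l_ptr);
+ *
+ * defers link_start(l_ptr) to tasklet context rather than running it in
+ * the caller's context.
+ */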
+
+static void process_signal_queue(unsigned long dummy)
+{
+       struct queue_item *__volatile__ item;
+       struct list_head *l, *n;
+
+       spin_lock_bh(&qitem_lock);
+       list_for_each_safe(l, n, &signal_queue_head) {
+               item = list_entry(l, struct queue_item, next_signal);
+               list_del(&item->next_signal);
+               spin_unlock_bh(&qitem_lock);
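+               /* qitem_lock is not held across the handler call, so the
+                * handler itself may safely call k_signal()
+                */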
+               item->handler(item->data);
+               spin_lock_bh(&qitem_lock);
+               kmem_cache_free(tipc_queue_item_cache, item);
+       }
+       spin_unlock_bh(&qitem_lock);
+}
+
+int handler_start(void)
+{
+       tipc_queue_item_cache = 
+               kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
+                                 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+       if (!tipc_queue_item_cache)
+               return -ENOMEM;
+
+       INIT_LIST_HEAD(&signal_queue_head);
+       tasklet_enable(&tipc_tasklet);
+       handler_enabled = 1;
+       return 0;
+}
+
+void handler_stop(void)
+{
+       struct list_head *l, *n;
+       struct queue_item *item; 
+
+       if (!handler_enabled)
+               return;
+
+       handler_enabled = 0;
+       tasklet_disable(&tipc_tasklet);
+       tasklet_kill(&tipc_tasklet);
+
+       spin_lock_bh(&qitem_lock);
+       list_for_each_safe(l, n, &signal_queue_head) {
+               item = list_entry(l, struct queue_item, next_signal);
+               list_del(&item->next_signal);
+               kmem_cache_free(tipc_queue_item_cache, item);
+       }
+       spin_unlock_bh(&qitem_lock);
+
+       kmem_cache_destroy(tipc_queue_item_cache);
+}
+
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644 (file)
index 0000000..7265f4b
--- /dev/null
@@ -0,0 +1,3167 @@
+/*
+ * net/tipc/link.c: TIPC link code
+ * 
+ * Copyright (c) 1996-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "link.h"
+#include "net.h"
+#include "node.h"
+#include "port.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "name_distr.h"
+#include "bearer.h"
+#include "name_table.h"
+#include "discover.h"
+#include "config.h"
+#include "bcast.h"
+
+
+/* 
+ * Limit for deferred reception queue: 
+ */
+
+#define DEF_QUEUE_LIMIT 256u
+
+/* 
+ * Link state events: 
+ */
+
+#define  STARTING_EVT    856384768     /* link processing trigger */
+#define  TRAFFIC_MSG_EVT 560815u       /* rx'd ??? */
+#define  TIMEOUT_EVT     560817u       /* link timer expired */
+
+/*
+ * The following two 'message types' are really just implementation
+ * data conveniently stored in the message header.
+ * They must not be considered part of the protocol.
+ */
+#define OPEN_MSG   0
+#define CLOSED_MSG 1
+
+/* 
+ * State value stored in 'exp_msg_count'
+ */
+
+#define START_CHANGEOVER 100000u
+
+/**
+ * struct link_name - deconstructed link name
+ * @addr_local: network address of node at this end
+ * @if_local: name of interface at this end
+ * @addr_peer: network address of node at far end
+ * @if_peer: name of interface at far end
+ */
+
+struct link_name {
+       u32 addr_local;
+       char if_local[TIPC_MAX_IF_NAME];
+       u32 addr_peer;
+       char if_peer[TIPC_MAX_IF_NAME];
+};
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+/** 
+ * struct link_event - link up/down event notification
+ */
+
+struct link_event {
+       u32 addr;
+       int up;
+       void (*fcn)(u32, char *, int);
+       char name[TIPC_MAX_LINK_NAME];
+};
+
+#endif
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr,
+                                      struct sk_buff *buf);
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
+static int  link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
+static int  link_send_sections_long(struct port *sender,
+                                   struct iovec const *msg_sect,
+                                   u32 num_sect, u32 destnode);
+static void link_check_defragm_bufs(struct link *l_ptr);
+static void link_state_event(struct link *l_ptr, u32 event);
+static void link_reset_statistics(struct link *l_ptr);
+static void link_print(struct link *l_ptr, struct print_buf *buf, 
+                      const char *str);
+
+/*
+ * Debugging code used by link routines only
+ *
+ * When debugging link problems on a system that has multiple links,
+ * the standard TIPC debugging routines may not be useful since they
+ * allow the output from multiple links to be intermixed.  For this reason
+ * routines of the form "dbg_link_XXX()" have been created that will capture
+ * debug info into a link's personal print buffer, which can then be dumped
+ * into the TIPC system log (LOG) upon request.
+ *
+ * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
+ * of the print buffer used by each link.  If LINK_LOG_BUF_SIZE is set to 0,
+ * the dbg_link_XXX() routines simply send their output to the standard 
+ * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
+ * when there is only a single link in the system being debugged.
+ *
+ * Notes:
+ * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
+ * - "l_ptr" must be valid when using dbg_link_XXX() macros  
+ */
+
+#define LINK_LOG_BUF_SIZE 0
+
+#define dbg_link(fmt, arg...)  do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
+#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
+#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
+#define dbg_link_dump() do { \
+       if (LINK_LOG_BUF_SIZE) { \
+               tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
+               printbuf_move(LOG, &l_ptr->print_buf); \
+       } \
+} while (0)
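+
+/*
+ * Illustrative usage (assuming LINK_LOG_BUF_SIZE is set non-zero, e.g. 1000):
+ *
+ *     dbg_link("resetting <%s>\n", l_ptr->name);
+ *     dbg_link_state("link state before reset");
+ *     ...
+ *     dbg_link_dump();        (dumps this link's history into LOG)
+ */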
+
+static inline void dbg_print_link(struct link *l_ptr, const char *str)
+{
+       if (DBG_OUTPUT)
+               link_print(l_ptr, DBG_OUTPUT, str);
+}
+
+static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
+{
+       if (DBG_OUTPUT) {
+               struct sk_buff *buf = root_buf;
+
+               while (buf) {
+                       msg_dbg(buf_msg(buf), "In chain: ");
+                       buf = buf->next;
+               }
+       }
+}
+
+/*
+ *  Simple inlined link routines
+ */
+
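+/* round a value up to the next multiple of 4 (e.g. align(5) == 8) */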
+static inline unsigned int align(unsigned int i)
+{
+       return (i + 3) & ~3u;
+}
+
+static inline int link_working_working(struct link *l_ptr)
+{
+       return (l_ptr->state == WORKING_WORKING);
+}
+
+static inline int link_working_unknown(struct link *l_ptr)
+{
+       return (l_ptr->state == WORKING_UNKNOWN);
+}
+
+static inline int link_reset_unknown(struct link *l_ptr)
+{
+       return (l_ptr->state == RESET_UNKNOWN);
+}
+
+static inline int link_reset_reset(struct link *l_ptr)
+{
+       return (l_ptr->state == RESET_RESET);
+}
+
+static inline int link_blocked(struct link *l_ptr)
+{
+       return (l_ptr->exp_msg_count || l_ptr->blocked);
+}
+
+static inline int link_congested(struct link *l_ptr)
+{
+       return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
+}
+
+static inline u32 link_max_pkt(struct link *l_ptr)
+{
+       return l_ptr->max_pkt;
+}
+
+static inline void link_init_max_pkt(struct link *l_ptr)
+{
+       u32 max_pkt;
+       
+       max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
+       if (max_pkt > MAX_MSG_SIZE)
+               max_pkt = MAX_MSG_SIZE;
+
+        l_ptr->max_pkt_target = max_pkt;
+       if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
+               l_ptr->max_pkt = l_ptr->max_pkt_target;
+       else 
+               l_ptr->max_pkt = MAX_PKT_DEFAULT;
+
+        l_ptr->max_pkt_probes = 0;
+}
+
+static inline u32 link_next_sent(struct link *l_ptr)
+{
+       if (l_ptr->next_out)
+               return msg_seqno(buf_msg(l_ptr->next_out));
+       return mod(l_ptr->next_out_no);
+}
+
+static inline u32 link_last_sent(struct link *l_ptr)
+{
+       return mod(link_next_sent(l_ptr) - 1);
+}
+
+/*
+ *  Simple non-inlined link routines (i.e. referenced outside this file)
+ */
+
+int link_is_up(struct link *l_ptr)
+{
+       if (!l_ptr)
+               return 0;
+       return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
+}
+
+int link_is_active(struct link *l_ptr)
+{
+       return ((l_ptr->owner->active_links[0] == l_ptr) ||
+               (l_ptr->owner->active_links[1] == l_ptr));
+}
+
+/**
+ * link_name_validate - validate & (optionally) deconstruct link name
+ * @name: ptr to link name string
+ * @name_parts: ptr to area for link name components (or NULL if not needed)
+ * 
+ * Returns 1 if link name is valid, otherwise 0.
+ */
+
+static int link_name_validate(const char *name, struct link_name *name_parts)
+{
+       char name_copy[TIPC_MAX_LINK_NAME];
+       char *addr_local;
+       char *if_local;
+       char *addr_peer;
+       char *if_peer;
+       char dummy;
+       u32 z_local, c_local, n_local;
+       u32 z_peer, c_peer, n_peer;
+       u32 if_local_len;
+       u32 if_peer_len;
+
+       /* copy link name & ensure length is OK */
+
+       name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
+       /* need above in case non-Posix strncpy() doesn't pad with nulls */
+       strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
+       if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
+               return 0;
+
+       /* ensure all component parts of link name are present */
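+       /* (e.g. a valid link name: "1.1.1:eth0-1.1.2:eth0") */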
+
+       addr_local = name_copy;
+       if ((if_local = strchr(addr_local, ':')) == NULL)
+               return 0;
+       *(if_local++) = 0;
+       if ((addr_peer = strchr(if_local, '-')) == NULL)
+               return 0;
+       *(addr_peer++) = 0;
+       if_local_len = addr_peer - if_local;
+       if ((if_peer = strchr(addr_peer, ':')) == NULL)
+               return 0;
+       *(if_peer++) = 0;
+       if_peer_len = strlen(if_peer) + 1;
+
+       /* validate component parts of link name */
+
+       if ((sscanf(addr_local, "%u.%u.%u%c",
+                   &z_local, &c_local, &n_local, &dummy) != 3) ||
+           (sscanf(addr_peer, "%u.%u.%u%c",
+                   &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
+           (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
+           (z_peer  > 255) || (c_peer  > 4095) || (n_peer  > 4095) ||
+           (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) || 
+           (if_peer_len  <= 1) || (if_peer_len  > TIPC_MAX_IF_NAME) || 
+           (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
+           (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
+               return 0;
+
+       /* return link name components, if necessary */
+
+       if (name_parts) {
+               name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
+               strcpy(name_parts->if_local, if_local);
+               name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
+               strcpy(name_parts->if_peer, if_peer);
+       }
+       return 1;
+}
+
+/**
+ * link_timeout - handle expiration of link timer
+ * @l_ptr: pointer to link
+ * 
+ * This routine must not grab "net_lock" to avoid a potential deadlock conflict
+ * with link_delete().  (There is no risk that the node will be deleted by
+ * another thread because link_delete() always cancels the link timer before
+ * node_delete() is called.)
+ */
+
+static void link_timeout(struct link *l_ptr)
+{
+       node_lock(l_ptr->owner);
+
+       /* update counters used in statistical profiling of send traffic */
+
+       l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+       l_ptr->stats.queue_sz_counts++;
+
+       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
+
+       if (l_ptr->first_out) {
+               struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+               u32 length = msg_size(msg);
+
+               if ((msg_user(msg) == MSG_FRAGMENTER)
+                   && (msg_type(msg) == FIRST_FRAGMENT)) {
+                       length = msg_size(msg_get_wrapped(msg));
+               }
+               if (length) {
+                       l_ptr->stats.msg_lengths_total += length;
+                       l_ptr->stats.msg_length_counts++;
+                       if (length <= 64)
+                               l_ptr->stats.msg_length_profile[0]++;
+                       else if (length <= 256)
+                               l_ptr->stats.msg_length_profile[1]++;
+                       else if (length <= 1024)
+                               l_ptr->stats.msg_length_profile[2]++;
+                       else if (length <= 4096)
+                               l_ptr->stats.msg_length_profile[3]++;
+                       else if (length <= 16384)
+                               l_ptr->stats.msg_length_profile[4]++;
+                       else if (length <= 32768)
+                               l_ptr->stats.msg_length_profile[5]++;
+                       else
+                               l_ptr->stats.msg_length_profile[6]++;
+               }
+       }
+
+       /* do all other link processing performed on a periodic basis */
+
+       link_check_defragm_bufs(l_ptr);
+
+       link_state_event(l_ptr, TIMEOUT_EVT);
+
+       if (l_ptr->next_out)
+               link_push_queue(l_ptr);
+
+       node_unlock(l_ptr->owner);
+}
+
+static inline void link_set_timer(struct link *l_ptr, u32 time)
+{
+       k_start_timer(&l_ptr->timer, time);
+}
+
+/**
+ * link_create - create a new link
+ * @b_ptr: pointer to associated bearer
+ * @peer: network address of node at other end of link
+ * @media_addr: media address to use when sending messages over link
+ * 
+ * Returns pointer to link.
+ */
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+                        const struct tipc_media_addr *media_addr)
+{
+       struct link *l_ptr;
+       struct tipc_msg *msg;
+       char *if_name;
+
+       l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
+       if (!l_ptr) {
+               warn("Memory squeeze; Failed to create link\n");
+               return NULL;
+       }
+       memset(l_ptr, 0, sizeof(*l_ptr));
+
+       l_ptr->addr = peer;
+       if_name = strchr(b_ptr->publ.name, ':') + 1;
+       sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
+               tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+               tipc_node(tipc_own_addr), 
+               if_name,
+               tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
+               /* note: peer i/f is appended to link name by reset/activate */
+       memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
+       k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
+       list_add_tail(&l_ptr->link_list, &b_ptr->links);
+       l_ptr->checkpoint = 1;
+       l_ptr->b_ptr = b_ptr;
+       link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
+       l_ptr->state = RESET_UNKNOWN;
+
+       l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
+       msg = l_ptr->pmsg;
+       msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+       msg_set_size(msg, sizeof(l_ptr->proto_msg));
+       msg_set_session(msg, tipc_random);
+       msg_set_bearer_id(msg, b_ptr->identity);
+       strcpy((char *)msg_data(msg), if_name);
+
+       l_ptr->priority = b_ptr->priority;
+       link_set_queue_limits(l_ptr, b_ptr->media->window);
+
+       link_init_max_pkt(l_ptr);
+
+       l_ptr->next_out_no = 1;
+       INIT_LIST_HEAD(&l_ptr->waiting_ports);
+
+       link_reset_statistics(l_ptr);
+
+       l_ptr->owner = node_attach_link(l_ptr);
+       if (!l_ptr->owner) {
+               kfree(l_ptr);
+               return NULL;
+       }
+
+       if (LINK_LOG_BUF_SIZE) {
+               char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
+
+               if (!pb) {
+                       kfree(l_ptr);
+                       warn("Memory squeeze; Failed to create link\n");
+                       return NULL;
+               }
+               printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
+       }
+
+       k_signal((Handler)link_start, (unsigned long)l_ptr);
+
+       dbg("link_create(): tolerance = %u, cont intv = %u, abort_limit = %u\n",
+           l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
+       
+       return l_ptr;
+}
+
+/** 
+ * link_delete - delete a link
+ * @l_ptr: pointer to link
+ * 
+ * Note: 'net_lock' is write_locked, bearer is locked.
+ * This routine must not grab the node lock until after link timer cancellation
+ * to avoid a potential deadlock situation.  
+ */
+
+void link_delete(struct link *l_ptr)
+{
+       if (!l_ptr) {
+               err("Attempt to delete non-existent link\n");
+               return;
+       }
+
+       dbg("link_delete()\n");
+
+       k_cancel_timer(&l_ptr->timer);
+       
+       node_lock(l_ptr->owner);
+       link_reset(l_ptr);
+       node_detach_link(l_ptr->owner, l_ptr);
+       link_stop(l_ptr);
+       list_del_init(&l_ptr->link_list);
+       if (LINK_LOG_BUF_SIZE)
+               kfree(l_ptr->print_buf.buf);
+       node_unlock(l_ptr->owner);
+       k_term_timer(&l_ptr->timer);
+       kfree(l_ptr);
+}
+
+void link_start(struct link *l_ptr)
+{
+       dbg("link_start %p\n", l_ptr);
+       link_state_event(l_ptr, STARTING_EVT);
+}
+
+/**
+ * link_schedule_port - schedule port for deferred sending 
+ * @l_ptr: pointer to link
+ * @origport: reference to sending port
+ * @sz: amount of data to be sent
+ * 
+ * Schedules port for renewed sending of messages after link congestion 
+ * has abated.
+ */
+
+static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
+{
+       struct port *p_ptr;
+
+       spin_lock_bh(&port_list_lock);
+       p_ptr = port_lock(origport);
+       if (p_ptr) {
+               if (!p_ptr->wakeup)
+                       goto exit;
+               if (!list_empty(&p_ptr->wait_list))
+                       goto exit;
+               p_ptr->congested_link = l_ptr;
+               p_ptr->publ.congested = 1;
+               p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
+               list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
+               l_ptr->stats.link_congs++;
+exit:
+               port_unlock(p_ptr);
+       }
+       spin_unlock_bh(&port_list_lock);
+       return -ELINKCONG;
+}
+
+void link_wakeup_ports(struct link *l_ptr, int all)
+{
+       struct port *p_ptr;
+       struct port *temp_p_ptr;
+       int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
+
+       if (all)
+               win = 100000;
+       if (win <= 0)
+               return;
+       if (!spin_trylock_bh(&port_list_lock))
+               return;
+       if (link_congested(l_ptr))
+               goto exit;
+       list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports, 
+                                wait_list) {
+               if (win <= 0)
+                       break;
+               list_del_init(&p_ptr->wait_list);
+               p_ptr->congested_link = NULL;
+               assert(p_ptr->wakeup);
+               spin_lock_bh(p_ptr->publ.lock);
+               p_ptr->publ.congested = 0;
+               p_ptr->wakeup(&p_ptr->publ);
+               win -= p_ptr->waiting_pkts;
+               spin_unlock_bh(p_ptr->publ.lock);
+       }
+
+exit:
+       spin_unlock_bh(&port_list_lock);
+}
+
+/** 
+ * link_release_outqueue - purge link's outbound message queue
+ * @l_ptr: pointer to link
+ */
+
+static void link_release_outqueue(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->first_out;
+       struct sk_buff *next;
+
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       l_ptr->first_out = NULL;
+       l_ptr->out_queue_size = 0;
+}
+
+/**
+ * link_reset_fragments - purge link's inbound message fragments queue
+ * @l_ptr: pointer to link
+ */
+
+void link_reset_fragments(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->defragm_buf;
+       struct sk_buff *next;
+
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       l_ptr->defragm_buf = NULL;
+}
+
+/** 
+ * link_stop - purge all inbound and outbound messages associated with link
+ * @l_ptr: pointer to link
+ */
+
+void link_stop(struct link *l_ptr)
+{
+       struct sk_buff *buf;
+       struct sk_buff *next;
+
+       buf = l_ptr->oldest_deferred_in;
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+
+       buf = l_ptr->first_out;
+       while (buf) {
+               next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+
+       link_reset_fragments(l_ptr);
+
+       buf_discard(l_ptr->proto_msg_queue);
+       l_ptr->proto_msg_queue = NULL;
+}
+
+#if 0
+
+/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
+
+static void link_recv_event(struct link_event *ev)
+{
+       ev->fcn(ev->addr, ev->name, ev->up);
+       kfree(ev);
+}
+
+static void link_send_event(void (*fcn)(u32 a, char *n, int up),
+                           struct link *l_ptr, int up)
+{
+       struct link_event *ev;
+       
+       ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
+       if (!ev) {
+               warn("Link event allocation failure\n");
+               return;
+       }
+       ev->addr = l_ptr->addr;
+       ev->up = up;
+       ev->fcn = fcn;
+       memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
+       k_signal((Handler)link_recv_event, (unsigned long)ev);
+}
+
+#else
+
+#define link_send_event(fcn, l_ptr, up) do { } while (0)
+
+#endif
+
+void link_reset(struct link *l_ptr)
+{
+       struct sk_buff *buf;
+       u32 prev_state = l_ptr->state;
+       u32 checkpoint = l_ptr->next_in_no;
+       
+       msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
+
+        /* Link is down, accept any session: */
+       l_ptr->peer_session = 0;
+
+        /* Prepare for max packet size negotiation */
+       link_init_max_pkt(l_ptr);
+       
+       l_ptr->state = RESET_UNKNOWN;
+       dbg_link_state("Resetting Link\n");
+
+       if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
+               return;
+
+       node_link_down(l_ptr->owner, l_ptr);
+       bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
+#if 0
+       tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
+       dbg_link_dump();
+#endif
+       if (node_has_active_links(l_ptr->owner) &&
+           l_ptr->owner->permit_changeover) {
+               l_ptr->reset_checkpoint = checkpoint;
+               l_ptr->exp_msg_count = START_CHANGEOVER;
+       }
+
+       /* Clean up all queues: */
+
+       link_release_outqueue(l_ptr);
+       buf_discard(l_ptr->proto_msg_queue);
+       l_ptr->proto_msg_queue = NULL;
+       buf = l_ptr->oldest_deferred_in;
+       while (buf) {
+               struct sk_buff *next = buf->next;
+               buf_discard(buf);
+               buf = next;
+       }
+       if (!list_empty(&l_ptr->waiting_ports))
+               link_wakeup_ports(l_ptr, 1);
+
+       l_ptr->retransm_queue_head = 0;
+       l_ptr->retransm_queue_size = 0;
+       l_ptr->last_out = NULL;
+       l_ptr->first_out = NULL;
+       l_ptr->next_out = NULL;
+       l_ptr->unacked_window = 0;
+       l_ptr->checkpoint = 1;
+       l_ptr->next_out_no = 1;
+       l_ptr->deferred_inqueue_sz = 0;
+       l_ptr->oldest_deferred_in = NULL;
+       l_ptr->newest_deferred_in = NULL;
+       l_ptr->fsm_msg_cnt = 0;
+       l_ptr->stale_count = 0;
+       link_reset_statistics(l_ptr);
+
+       link_send_event(cfg_link_event, l_ptr, 0);
+       if (!in_own_cluster(l_ptr->addr))
+               link_send_event(disc_link_event, l_ptr, 0);
+}
+
+
+static void link_activate(struct link *l_ptr)
+{
+       l_ptr->next_in_no = 1;
+       node_link_up(l_ptr->owner, l_ptr);
+       bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
+       link_send_event(cfg_link_event, l_ptr, 1);
+       if (!in_own_cluster(l_ptr->addr))
+               link_send_event(disc_link_event, l_ptr, 1);
+}
+
+/**
+ * link_state_event - link finite state machine
+ * @l_ptr: pointer to link
+ * @event: state machine event to process
+ */
+
+static void link_state_event(struct link *l_ptr, unsigned event)
+{
+       struct link *other; 
+       u32 cont_intv = l_ptr->continuity_interval;
+
+       if (!l_ptr->started && (event != STARTING_EVT))
+               return;         /* Not yet. */
+
+       if (link_blocked(l_ptr)) {
+               if (event == TIMEOUT_EVT) {
+                       link_set_timer(l_ptr, cont_intv);
+               }
+               return;   /* Changeover going on */
+       }
+       dbg_link("STATE_EV: <%s> ", l_ptr->name);
+
+       switch (l_ptr->state) {
+       case WORKING_WORKING:
+               dbg_link("WW/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
+                       /* fall through */
+               case ACTIVATE_MSG:
+                       dbg_link("ACT\n");
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM ");
+                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
+                               l_ptr->checkpoint = l_ptr->next_in_no;
+                               if (bclink_acks_missing(l_ptr->owner)) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                           0, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                           1, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               }
+                               link_set_timer(l_ptr, cont_intv);
+                               break;
+                       }
+                       dbg_link(" -> WU\n");
+                       l_ptr->state = WORKING_UNKNOWN;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv / 4);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES -> RR\n");
+                       link_reset(l_ptr);
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               default:
+                       err("Unknown link event %u in WW state\n", event);
+               }
+               break;
+       case WORKING_UNKNOWN:
+               dbg_link("WU/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
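+                       /* fall through */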
+               case ACTIVATE_MSG:
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES -> RR\n");
+                       link_reset(l_ptr);
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM ");
+                       if (l_ptr->next_in_no != l_ptr->checkpoint) {
+                               dbg_link("-> WW\n");
+                               l_ptr->state = WORKING_WORKING;
+                               l_ptr->fsm_msg_cnt = 0;
+                               l_ptr->checkpoint = l_ptr->next_in_no;
+                               if (bclink_acks_missing(l_ptr->owner)) {
+                                       link_send_proto_msg(l_ptr, STATE_MSG,
+                                                           0, 0, 0, 0, 0);
+                                       l_ptr->fsm_msg_cnt++;
+                               }
+                               link_set_timer(l_ptr, cont_intv);
+                       } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+                               dbg_link("Probing %u/%u, timer = %u ms\n",
+                                        l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
+                                        cont_intv / 4);
+                               link_send_proto_msg(l_ptr, STATE_MSG, 
+                                                   1, 0, 0, 0, 0);
+                               l_ptr->fsm_msg_cnt++;
+                               link_set_timer(l_ptr, cont_intv / 4);
+                       } else {        /* Link has failed */
+                               dbg_link("-> RU (%u probes unanswered)\n",
+                                        l_ptr->fsm_msg_cnt);
+                               link_reset(l_ptr);
+                               l_ptr->state = RESET_UNKNOWN;
+                               l_ptr->fsm_msg_cnt = 0;
+                               link_send_proto_msg(l_ptr, RESET_MSG,
+                                                   0, 0, 0, 0, 0);
+                               l_ptr->fsm_msg_cnt++;
+                               link_set_timer(l_ptr, cont_intv);
+                       }
+                       break;
+               default:
+                       err("Unknown link event %u in WU state\n", event);
+               }
+               break;
+       case RESET_UNKNOWN:
+               dbg_link("RU/");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-\n");
+                       break;
+               case ACTIVATE_MSG:
+                       other = l_ptr->owner->active_links[0];
+                       if (other && link_working_unknown(other)) {
+                               dbg_link("ACT\n");
+                               break;
+                       }
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_activate(l_ptr);
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES -> RR\n");
+                       l_ptr->state = RESET_RESET;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case STARTING_EVT:
+                       dbg_link("START-");
+                       l_ptr->started = 1;
+                       /* fall through */
+               case TIMEOUT_EVT:
+                       dbg_link("TIM \n");
+                       link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               default:
+                       err("Unknown link event %u in RU state\n", event);
+               }
+               break;
+       case RESET_RESET:
+               dbg_link("RR/ ");
+               switch (event) {
+               case TRAFFIC_MSG_EVT:
+                       dbg_link("TRF-");
+                       /* fall through */
+               case ACTIVATE_MSG:
+                       other = l_ptr->owner->active_links[0];
+                       if (other && link_working_unknown(other)) {
+                               dbg_link("ACT\n");
+                               break;
+                       }
+                       dbg_link("ACT -> WW\n");
+                       l_ptr->state = WORKING_WORKING;
+                       l_ptr->fsm_msg_cnt = 0;
+                       link_activate(l_ptr);
+                       link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       break;
+               case RESET_MSG:
+                       dbg_link("RES\n");
+                       break;
+               case TIMEOUT_EVT:
+                       dbg_link("TIM\n");
+                       link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+                       l_ptr->fsm_msg_cnt++;
+                       link_set_timer(l_ptr, cont_intv);
+                       dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
+                       break;
+               default:
+                       err("Unknown link event %u in RR state\n", event);
+               }
+               break;
+       default:
+               err("Unknown link state %u/%u\n", l_ptr->state, event);
+       }
+}
+
+/*
+ * link_bundle_buf(): Append contents of a buffer to
+ * the tail of an existing one. 
+ */
+
+static int link_bundle_buf(struct link *l_ptr,
+                          struct sk_buff *bundler, 
+                          struct sk_buff *buf)
+{
+       struct tipc_msg *bundler_msg = buf_msg(bundler);
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 size = msg_size(msg);
+       u32 to_pos = align(msg_size(bundler_msg));
+       u32 rest = link_max_pkt(l_ptr) - to_pos;
+
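+       /* Bundle only into an open bundler message with enough space left */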
+       if (msg_user(bundler_msg) != MSG_BUNDLER)
+               return 0;
+       if (msg_type(bundler_msg) != OPEN_MSG)
+               return 0;
+       if (rest < align(size))
+               return 0;
+
+       skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
+       memcpy(bundler->data + to_pos, buf->data, size);
+       msg_set_size(bundler_msg, to_pos + size);
+       msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
+       dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
+           msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
+       msg_dbg(msg, "PACKD:");
+       buf_discard(buf);
+       l_ptr->stats.sent_bundled++;
+       return 1;
+}
+
+static inline void link_add_to_outqueue(struct link *l_ptr, 
+                                       struct sk_buff *buf, 
+                                       struct tipc_msg *msg)
+{
+       u32 ack = mod(l_ptr->next_in_no - 1);
+       u32 seqno = mod(l_ptr->next_out_no++);
+
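+       /* Word 2: acked seqno in upper 16 bits, outgoing seqno in lower 16 bits */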
+       msg_set_word(msg, 2, ((ack << 16) | seqno));
+       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
+       buf->next = NULL;
+       if (l_ptr->first_out) {
+               l_ptr->last_out->next = buf;
+               l_ptr->last_out = buf;
+       } else
+               l_ptr->first_out = l_ptr->last_out = buf;
+       l_ptr->out_queue_size++;
+}
+
+/* 
+ * link_send_buf() is the 'full path' for messages, called from 
+ * inside TIPC when the 'fast path' in tipc_send_buf
+ * has failed, and from link_send()
+ */
+
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 size = msg_size(msg);
+       u32 dsz = msg_data_sz(msg);
+       u32 queue_size = l_ptr->out_queue_size;
+       u32 imp = msg_tot_importance(msg);
+       u32 queue_limit = l_ptr->queue_limit[imp];
+       u32 max_packet = link_max_pkt(l_ptr);
+
+       msg_set_prevnode(msg, tipc_own_addr);   /* If routed message */
+
+       /* Match msg importance against queue limits: */
+
+       if (unlikely(queue_size >= queue_limit)) {
+               if (imp <= TIPC_CRITICAL_IMPORTANCE) {
+                       return link_schedule_port(l_ptr, msg_origport(msg),
+                                                 size);
+               }
+               msg_dbg(msg, "TIPC: Congestion, throwing away\n");
+               buf_discard(buf);
+               if (imp > CONN_MANAGER) {
+                       warn("Resetting <%s>, send queue full\n", l_ptr->name);
+                       link_reset(l_ptr);
+               }
+               return dsz;
+       }
+
+       /* Fragmentation needed ? */
+
+       if (size > max_packet)
+               return link_send_long_buf(l_ptr, buf);
+
+       /* Packet can be queued or sent: */
+
+       if (queue_size > l_ptr->stats.max_queue_sz)
+               l_ptr->stats.max_queue_sz = queue_size;
+
+       if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 
+                  !link_congested(l_ptr))) {
+               link_add_to_outqueue(l_ptr, buf, msg);
+
+               if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
+                       l_ptr->unacked_window = 0;
+               } else {
+                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       l_ptr->stats.bearer_congs++;
+                       l_ptr->next_out = buf;
+               }
+               return dsz;
+       }
+       /* Congestion: can message be bundled ?: */
+
+       if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
+           (msg_user(msg) != MSG_FRAGMENTER)) {
+
+               /* Try adding message to an existing bundle */
+
+               if (l_ptr->next_out && 
+                   link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
+                       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+                       return dsz;
+               }
+
+               /* Try creating a new bundle */
+
+               if (size <= max_packet * 2 / 3) {
+                       struct sk_buff *bundler = buf_acquire(max_packet);
+                       struct tipc_msg bundler_hdr;
+
+                       if (bundler) {
+                               msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
+                                        TIPC_OK, INT_H_SIZE, l_ptr->addr);
+                               memcpy(bundler->data, (unchar *)&bundler_hdr, 
+                                      INT_H_SIZE);
+                               skb_trim(bundler, INT_H_SIZE);
+                               link_bundle_buf(l_ptr, bundler, buf);
+                               buf = bundler;
+                               msg = buf_msg(buf);
+                               l_ptr->stats.sent_bundles++;
+                       }
+               }
+       }
+       if (!l_ptr->next_out)
+               l_ptr->next_out = buf;
+       link_add_to_outqueue(l_ptr, buf, msg);
+       bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+       return dsz;
+}
+
+/* 
+ * link_send(): same as link_send_buf(), but the link to use has
+ * not been selected yet, and the owner node is not locked.
+ * Called by TIPC internal users, e.g. the name distributor
+ */
+
+int link_send(struct sk_buff *buf, u32 dest, u32 selector)
+{
+       struct link *l_ptr;
+       struct node *n_ptr;
+       int res = -ELINKCONG;
+
+       read_lock_bh(&net_lock);
+       n_ptr = node_select(dest, selector);
+       if (n_ptr) {
+               node_lock(n_ptr);
+               l_ptr = n_ptr->active_links[selector & 1];
+               dbg("link_send: found link %p for dest %x\n", l_ptr, dest);
+               if (l_ptr) {
+                       res = link_send_buf(l_ptr, buf);
+               }
+               node_unlock(n_ptr);
+       } else {
+               dbg("Attempt to send msg to unknown node:\n");
+               msg_dbg(buf_msg(buf),">>>");
+               buf_discard(buf);
+       }
+       read_unlock_bh(&net_lock);
+       return res;
+}
+
+/* 
+ * link_send_buf_fast: Entry for data messages where the 
+ * destination link is known and the header is complete,
+ * including total message length. Very time critical.
+ * Link is locked. Returns user data length.
+ */
+
+static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
+                                    u32 *used_max_pkt)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       int res = msg_data_sz(msg);
+
+       if (likely(!link_congested(l_ptr))) {
+               if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
+                       if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+                               link_add_to_outqueue(l_ptr, buf, msg);
+                               if (likely(bearer_send(l_ptr->b_ptr, buf,
+                                                      &l_ptr->media_addr))) {
+                                       l_ptr->unacked_window = 0;
+                                       msg_dbg(msg,"SENT_FAST:");
+                                       return res;
+                               }
+                               dbg("failed sent fast...\n");
+                               bearer_schedule(l_ptr->b_ptr, l_ptr);
+                               l_ptr->stats.bearer_congs++;
+                               l_ptr->next_out = buf;
+                               return res;
+                       }
+               }
+               else
+                       *used_max_pkt = link_max_pkt(l_ptr);
+       }
+       return link_send_buf(l_ptr, buf);  /* All other cases */
+}
+
+/* 
+ * tipc_send_buf_fast: Entry for data messages where the 
+ * destination node is known and the header is complete,
+ * including total message length.
+ * Returns user data length.
+ */
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
+{
+       struct link *l_ptr;
+       struct node *n_ptr;
+       int res;
+       u32 selector = msg_origport(buf_msg(buf)) & 1;
+       u32 dummy;
+
+       if (destnode == tipc_own_addr)
+               return port_recv_msg(buf);
+
+       read_lock_bh(&net_lock);
+       n_ptr = node_select(destnode, selector);
+       if (likely(n_ptr)) {
+               node_lock(n_ptr);
+               l_ptr = n_ptr->active_links[selector];
+               dbg("send_fast: buf %p selected %p, destnode = %x\n",
+                   buf, l_ptr, destnode);
+               if (likely(l_ptr)) {
+                       res = link_send_buf_fast(l_ptr, buf, &dummy);
+                       node_unlock(n_ptr);
+                       read_unlock_bh(&net_lock);
+                       return res;
+               }
+               node_unlock(n_ptr);
+       }
+       read_unlock_bh(&net_lock);
+       res = msg_data_sz(buf_msg(buf));
+       tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+       return res;
+}
+
+
+/* 
+ * link_send_sections_fast: Entry for messages where the 
+ * destination processor is known and the header is complete,
+ * except for total message length. 
+ * Returns user data length or errno.
+ */
+int link_send_sections_fast(struct port *sender, 
+                           struct iovec const *msg_sect,
+                           const u32 num_sect, 
+                           u32 destaddr)
+{
+       struct tipc_msg *hdr = &sender->publ.phdr;
+       struct link *l_ptr;
+       struct sk_buff *buf;
+       struct node *node;
+       int res;
+       u32 selector = msg_origport(hdr) & 1;
+
+       assert(destaddr != tipc_own_addr);
+
+again:
+       /*
+        * Try building message using port's max_pkt hint.
+        * (Must not hold any locks while building message.)
+        */
+
+       res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
+                       !sender->user_port, &buf);
+
+       read_lock_bh(&net_lock);
+       node = node_select(destaddr, selector);
+       if (likely(node)) {
+               node_lock(node);
+               l_ptr = node->active_links[selector];
+               if (likely(l_ptr)) {
+                       if (likely(buf)) {
+                               res = link_send_buf_fast(l_ptr, buf,
+                                                        &sender->max_pkt);
+                               if (unlikely(res < 0))
+                                       buf_discard(buf);
+exit:
+                               node_unlock(node);
+                               read_unlock_bh(&net_lock);
+                               return res;
+                       }
+
+                       /* Exit if build request was invalid */
+
+                       if (unlikely(res < 0))
+                               goto exit;
+
+                       /* Exit if link (or bearer) is congested */
+
+                       if (link_congested(l_ptr) || 
+                           !list_empty(&l_ptr->b_ptr->cong_links)) {
+                               res = link_schedule_port(l_ptr,
+                                                        sender->publ.ref, res);
+                               goto exit;
+                       }
+
+                       /* 
+                        * Message size exceeds max_pkt hint; update hint,
+                        * then re-try fast path or fragment the message
+                        */
+
+                       sender->max_pkt = link_max_pkt(l_ptr);
+                       node_unlock(node);
+                       read_unlock_bh(&net_lock);
+
+
+                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
+                               goto again;
+
+                       return link_send_sections_long(sender, msg_sect,
+                                                      num_sect, destaddr);
+               }
+               node_unlock(node);
+       }
+       read_unlock_bh(&net_lock);
+
+       /* Couldn't find a link to the destination node */
+
+       if (buf)
+               return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
+       if (res >= 0)
+               return port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                           TIPC_ERR_NO_NODE);
+       return res;
+}
+
+/* 
+ * link_send_sections_long(): Entry for long messages where the 
+ * destination node is known and the header is complete,
+ * including total message length.
+ * Link and bearer congestion status have been checked to be ok,
+ * and are ignored if they change.
+ *
+ * Note that fragments do not use the full link MTU so that they won't have
+ * to undergo refragmentation if link changeover causes them to be sent
+ * over another link with an additional tunnel header added as prefix.
+ * (Refragmentation will still occur if the other link has a smaller MTU.)
+ *
+ * Returns user data length or errno.
+ */
+static int link_send_sections_long(struct port *sender,
+                                  struct iovec const *msg_sect,
+                                  u32 num_sect,
+                                  u32 destaddr)
+{
+       struct link *l_ptr;
+       struct node *node;
+       struct tipc_msg *hdr = &sender->publ.phdr;
+       u32 dsz = msg_data_sz(hdr);
+       u32 max_pkt,fragm_sz,rest;
+       struct tipc_msg fragm_hdr;
+       struct sk_buff *buf,*buf_chain,*prev;
+       u32 fragm_crs,fragm_rest,hsz,sect_rest;
+       const unchar *sect_crs;
+       int curr_sect;
+       u32 fragm_no;
+
+again:
+       fragm_no = 1;
+       max_pkt = sender->max_pkt - INT_H_SIZE;  
+               /* leave room for tunnel header in case of link changeover */
+       fragm_sz = max_pkt - INT_H_SIZE; 
+               /* leave room for fragmentation header in each fragment */
+       rest = dsz;
+       fragm_crs = 0;
+       fragm_rest = 0;
+       sect_rest = 0;
+       sect_crs = 0;
+       curr_sect = -1;
+
+       /* Prepare reusable fragment header: */
+
+       msg_dbg(hdr, ">FRAGMENTING>");
+       msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+                TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
+       msg_set_link_selector(&fragm_hdr, sender->publ.ref);
+       msg_set_size(&fragm_hdr, max_pkt);
+       msg_set_fragm_no(&fragm_hdr, 1);
+
+       /* Prepare header of first fragment: */
+
+       buf_chain = buf = buf_acquire(max_pkt);
+       if (!buf)
+               return -ENOMEM;
+       buf->next = NULL;
+       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+       hsz = msg_hdr_sz(hdr);
+       memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
+       msg_dbg(buf_msg(buf), ">BUILD>");
+
+       /* Chop up message: */
+
+       fragm_crs = INT_H_SIZE + hsz;
+       fragm_rest = fragm_sz - hsz;
+
+       do {            /* For all sections */
+               u32 sz;
+
+               if (!sect_rest) {
+                       sect_rest = msg_sect[++curr_sect].iov_len;
+                       sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
+               }
+
+               if (sect_rest < fragm_rest)
+                       sz = sect_rest;
+               else
+                       sz = fragm_rest;
+
+               if (likely(!sender->user_port)) {
+                       if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
+error:
+                               for (; buf_chain; buf_chain = buf) {
+                                       buf = buf_chain->next;
+                                       buf_discard(buf_chain);
+                               }
+                               return -EFAULT;
+                       }
+               } else
+                       memcpy(buf->data + fragm_crs, sect_crs, sz);
+
+               sect_crs += sz;
+               sect_rest -= sz;
+               fragm_crs += sz;
+               fragm_rest -= sz;
+               rest -= sz;
+
+               if (!fragm_rest && rest) {
+
+                       /* Initiate new fragment: */
+                       if (rest <= fragm_sz) {
+                               fragm_sz = rest;
+                               msg_set_type(&fragm_hdr,LAST_FRAGMENT);
+                       } else {
+                               msg_set_type(&fragm_hdr, FRAGMENT);
+                       }
+                       msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+                       msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+                       prev = buf;
+                       buf = buf_acquire(fragm_sz + INT_H_SIZE);
+                       if (!buf)
+                               goto error;
+
+                       buf->next = NULL;                                
+                       prev->next = buf;
+                       memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+                       fragm_crs = INT_H_SIZE;
+                       fragm_rest = fragm_sz;
+                       msg_dbg(buf_msg(buf),"  >BUILD>");
+               }
+       }
+       while (rest > 0);
+
+       /* 
+        * Now we have a buffer chain. Select a link and check
+        * that packet size is still OK
+        */
+       node = node_select(destaddr, sender->publ.ref & 1);
+       if (likely(node)) {
+               node_lock(node);
+               l_ptr = node->active_links[sender->publ.ref & 1];
+               if (!l_ptr) {
+                       node_unlock(node);
+                       goto reject;
+               }
+               if (link_max_pkt(l_ptr) < max_pkt) {
+                       sender->max_pkt = link_max_pkt(l_ptr);
+                       node_unlock(node);
+                       for (; buf_chain; buf_chain = buf) {
+                               buf = buf_chain->next;
+                               buf_discard(buf_chain);
+                       }
+                       goto again;
+               }
+       } else {
+reject:
+               for (; buf_chain; buf_chain = buf) {
+                       buf = buf_chain->next;
+                       buf_discard(buf_chain);
+               }
+               return port_reject_sections(sender, hdr, msg_sect, num_sect,
+                                           TIPC_ERR_NO_NODE);
+       }
+
+       /* Append whole chain to send queue: */
+
+       buf = buf_chain;
+       l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
+       if (!l_ptr->next_out)
+               l_ptr->next_out = buf_chain;
+       l_ptr->stats.sent_fragmented++;
+       while (buf) {
+               struct sk_buff *next = buf->next;
+               struct tipc_msg *msg = buf_msg(buf);
+
+               l_ptr->stats.sent_fragments++;
+               msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
+               link_add_to_outqueue(l_ptr, buf, msg);
+               msg_dbg(msg, ">ADD>");
+               buf = next;
+       }
+
+       /* Send it, if possible: */
+
+       link_push_queue(l_ptr);
+       node_unlock(node);
+       return dsz;
+}
+
+/* 
+ * link_push_packet: Push one unsent packet to the media
+ */
+u32 link_push_packet(struct link *l_ptr)
+{
+       struct sk_buff *buf = l_ptr->first_out;
+       u32 r_q_size = l_ptr->retransm_queue_size;
+       u32 r_q_head = l_ptr->retransm_queue_head;
+
+       /* Step to position where retransmission failed, if any,    */
+       /* consider that buffers may have been released in meantime */
+
+       if (r_q_size && buf) {
+               u32 last = lesser(mod(r_q_head + r_q_size), 
+                                 link_last_sent(l_ptr));
+               u32 first = msg_seqno(buf_msg(buf));
+
+               while (buf && less(first, r_q_head)) {
+                       first = mod(first + 1);
+                       buf = buf->next;
+               }
+               l_ptr->retransm_queue_head = r_q_head = first;
+               l_ptr->retransm_queue_size = r_q_size = mod(last - first);
+       }
+
+       /* Continue retransmission now, if there is anything: */
+
+       if (r_q_size && buf && !skb_cloned(buf)) {
+               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       msg_dbg(buf_msg(buf), ">DEF-RETR>");
+                       l_ptr->retransm_queue_head = mod(++r_q_head);
+                       l_ptr->retransm_queue_size = --r_q_size;
+                       l_ptr->stats.retransmitted++;
+                       return TIPC_OK;
+               } else {
+                       l_ptr->stats.bearer_congs++;
+                       msg_dbg(buf_msg(buf), "|>DEF-RETR>");
+                       return PUSH_FAILED;
+               }
+       }
+
+       /* Send deferred protocol message, if any: */
+
+       buf = l_ptr->proto_msg_queue;
+       if (buf) {
+               msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                       msg_dbg(buf_msg(buf), ">DEF-PROT>");
+                       l_ptr->unacked_window = 0;
+                       buf_discard(buf);
+                       l_ptr->proto_msg_queue = NULL;
+                       return TIPC_OK;
+               } else {
+                       msg_dbg(buf_msg(buf), "|>DEF-PROT>");
+                       l_ptr->stats.bearer_congs++;
+                       return PUSH_FAILED;
+               }
+       }
+
+       /* Send one deferred data message, if send window not full: */
+
+       buf = l_ptr->next_out;
+       if (buf) {
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 next = msg_seqno(msg);
+               u32 first = msg_seqno(buf_msg(l_ptr->first_out));
+
+               if (mod(next - first) < l_ptr->queue_limit[0]) {
+                       msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+                       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+                       if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                               if (msg_user(msg) == MSG_BUNDLER)
+                                       msg_set_type(msg, CLOSED_MSG);
+                               msg_dbg(msg, ">PUSH-DATA>");
+                               l_ptr->next_out = buf->next;
+                               return TIPC_OK;
+                       } else {
+                               msg_dbg(msg, "|PUSH-DATA|");
+                               l_ptr->stats.bearer_congs++;
+                               return PUSH_FAILED;
+                       }
+               }
+       }
+       return PUSH_FINISHED;
+}
+
+/*
+ * link_push_queue(): push out the unsent messages of a link where
+ *                    congestion has abated. Node is locked.
+ */
+void link_push_queue(struct link *l_ptr)
+{
+       u32 res;
+
+       if (bearer_congested(l_ptr->b_ptr, l_ptr))
+               return;
+
+       do {
+               res = link_push_packet(l_ptr);
+       }
+       while (res == TIPC_OK);
+       if (res == PUSH_FAILED)
+               bearer_schedule(l_ptr->b_ptr, l_ptr);
+}
+
+void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 
+                    u32 retransmits)
+{
+       struct tipc_msg *msg;
+
+       dbg("Retransmitting %u in link %p\n", retransmits, l_ptr);
+
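+       /* Defer retransmission if the bearer is currently congested */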
+       if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
+               msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
+               dbg_print_link(l_ptr, "   ");
+               l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+               l_ptr->retransm_queue_size = retransmits;
+               return;
+       }
+       while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
+               msg = buf_msg(buf);
+               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+               msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+               if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+                        /* Catch if retransmissions fail repeatedly: */
+                        if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+                                if (++l_ptr->stale_count > 100) {
+                                        msg_print(CONS, buf_msg(buf), ">RETR>");
+                                        info("...Retransmitted %u times\n",
+                                            l_ptr->stale_count);
+                                        link_print(l_ptr, CONS, "Resetting Link\n");
+                                        link_reset(l_ptr);
+                                        break;
+                                }
+                        } else {
+                                l_ptr->stale_count = 0;
+                        }
+                        l_ptr->last_retransmitted = msg_seqno(msg);
+
+                       msg_dbg(buf_msg(buf), ">RETR>");
+                       buf = buf->next;
+                       retransmits--;
+                       l_ptr->stats.retransmitted++;
+               } else {
+                       bearer_schedule(l_ptr->b_ptr, l_ptr);
+                       l_ptr->stats.bearer_congs++;
+                       l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
+                       l_ptr->retransm_queue_size = retransmits;
+                       return;
+               }
+       }
+       l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
+}
+
+/* 
+ * link_recv_non_seq: Receive packets which are outside
+ *                    the link sequence flow
+ */
+
+static void link_recv_non_seq(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+
+       if (msg_user(msg) ==  LINK_CONFIG)
+               disc_recv_msg(buf);
+       else
+               bclink_recv_pkt(buf);
+}
+
+/** 
+ * link_insert_deferred_queue - insert deferred messages back into receive chain
+ */
+
+static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr, 
+                                                 struct sk_buff *buf)
+{
+       u32 seq_no;
+
+       if (l_ptr->oldest_deferred_in == NULL)
+               return buf;
+
+       seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
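+       /* Splice deferred queue into receive chain if its first packet */
+       /* has now come into sequence */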
+       if (seq_no == mod(l_ptr->next_in_no)) {
+               l_ptr->newest_deferred_in->next = buf;
+               buf = l_ptr->oldest_deferred_in;
+               l_ptr->oldest_deferred_in = NULL;
+               l_ptr->deferred_inqueue_sz = 0;
+       }
+       return buf;
+}
+
+void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
+{
+       read_lock_bh(&net_lock);
+       while (head) {
+               struct bearer *b_ptr;
+               struct node *n_ptr;
+               struct link *l_ptr;
+               struct sk_buff *crs;
+               struct sk_buff *buf = head;
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 seq_no = msg_seqno(msg);
+               u32 ackd = msg_ack(msg);
+               u32 released = 0;
+               int type;
+
+               b_ptr = (struct bearer *)tb_ptr;
+               TIPC_SKB_CB(buf)->handle = b_ptr;
+
+               head = head->next;
+               if (unlikely(msg_version(msg) != TIPC_VERSION))
+                       goto cont;
+#if 0
+               if (msg_user(msg) != LINK_PROTOCOL)
+#endif
+                       msg_dbg(msg,"<REC<");
+
+               if (unlikely(msg_non_seq(msg))) {
+                       link_recv_non_seq(buf);
+                       continue;
+               }
+               n_ptr = node_find(msg_prevnode(msg));
+               if (unlikely(!n_ptr))
+                       goto cont;
+
+               node_lock(n_ptr);
+               l_ptr = n_ptr->links[b_ptr->identity];
+               if (unlikely(!l_ptr)) {
+                       node_unlock(n_ptr);
+                       goto cont;
+               }
+               /* 
+                * Release acked messages 
+                */
+               if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
+                       if (node_is_up(n_ptr) && n_ptr->bclink.supported)
+                               bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
+               }
+
+               crs = l_ptr->first_out;
+               while ((crs != l_ptr->next_out) && 
+                      less_eq(msg_seqno(buf_msg(crs)), ackd)) {
+                       struct sk_buff *next = crs->next;
+
+                       buf_discard(crs);
+                       crs = next;
+                       released++;
+               }
+               if (released) {
+                       l_ptr->first_out = crs;
+                       l_ptr->out_queue_size -= released;
+               }
+               if (unlikely(l_ptr->next_out))
+                       link_push_queue(l_ptr);
+               if (unlikely(!list_empty(&l_ptr->waiting_ports)))
+                       link_wakeup_ports(l_ptr, 0);
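+               /* Ack received packets via a STATE message if window is full */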
+               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+                       l_ptr->stats.sent_acks++;
+                       link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+               }
+
+protocol_check:
+               if (likely(link_working_working(l_ptr))) {
+                       if (likely(seq_no == mod(l_ptr->next_in_no))) {
+                               l_ptr->next_in_no++;
+                               if (unlikely(l_ptr->oldest_deferred_in))
+                                       head = link_insert_deferred_queue(l_ptr,
+                                                                         head);
+                               if (likely(msg_is_dest(msg, tipc_own_addr))) {
+deliver:
+                                       if (likely(msg_isdata(msg))) {
+                                               node_unlock(n_ptr);
+                                               port_recv_msg(buf);
+                                               continue;
+                                       }
+                                       switch (msg_user(msg)) {
+                                       case MSG_BUNDLER:
+                                               l_ptr->stats.recv_bundles++;
+                                               l_ptr->stats.recv_bundled += 
+                                                       msg_msgcnt(msg);
+                                               node_unlock(n_ptr);
+                                               link_recv_bundle(buf);
+                                               continue;
+                                       case ROUTE_DISTRIBUTOR:
+                                               node_unlock(n_ptr);
+                                               cluster_recv_routing_table(buf);
+                                               continue;
+                                       case NAME_DISTRIBUTOR:
+                                               node_unlock(n_ptr);
+                                               named_recv(buf);
+                                               continue;
+                                       case CONN_MANAGER:
+                                               node_unlock(n_ptr);
+                                               port_recv_proto_msg(buf);
+                                               continue;
+                                       case MSG_FRAGMENTER:
+                                               l_ptr->stats.recv_fragments++;
+                                               if (link_recv_fragment(
+                                                       &l_ptr->defragm_buf, 
+                                                       &buf, &msg)) {
+                                                       l_ptr->stats.recv_fragmented++;
+                                                       goto deliver;
+                                               }
+                                               break;
+                                       case CHANGEOVER_PROTOCOL:
+                                               type = msg_type(msg);
+                                               if (link_recv_changeover_msg(
+                                                       &l_ptr, &buf)) {
+                                                       msg = buf_msg(buf);
+                                                       seq_no = msg_seqno(msg);
+                                                       TIPC_SKB_CB(buf)->handle 
+                                                               = b_ptr;
+                                                       if (type == ORIGINAL_MSG)
+                                                               goto deliver;
+                                                       goto protocol_check;
+                                               }
+                                               break;
+                                       }
+                               }
+                               node_unlock(n_ptr);
+                               net_route_msg(buf);
+                               continue;
+                       }
+                       link_handle_out_of_seq_msg(l_ptr, buf);
+                       head = link_insert_deferred_queue(l_ptr, head);
+                       node_unlock(n_ptr);
+                       continue;
+               }
+
+               if (msg_user(msg) == LINK_PROTOCOL) {
+                       link_recv_proto_msg(l_ptr, buf);
+                       head = link_insert_deferred_queue(l_ptr, head);
+                       node_unlock(n_ptr);
+                       continue;
+               }
+               msg_dbg(msg,"NSEQ<REC<");
+               link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+
+               if (link_working_working(l_ptr)) {
+                       /* Re-insert in front of queue */
+                       msg_dbg(msg,"RECV-REINS:");
+                       buf->next = head;
+                       head = buf;
+                       node_unlock(n_ptr);
+                       continue;
+               }
+               node_unlock(n_ptr);
+cont:
+               buf_discard(buf);
+       }
+       read_unlock_bh(&net_lock);
+}
+
+/* 
+ * link_defer_pkt(): Sort a received out-of-sequence packet
+ *                   into the deferred reception queue.
+ * Returns the increase of the queue length, i.e. 0 or 1
+ */
+
+u32 link_defer_pkt(struct sk_buff **head,
+                  struct sk_buff **tail,
+                  struct sk_buff *buf)
+{
+       struct sk_buff *prev = NULL;
+       struct sk_buff *crs = *head;
+       u32 seq_no = msg_seqno(buf_msg(buf));
+
+       buf->next = NULL;
+
+       /* Empty queue ? */
+       if (*head == NULL) {
+               *head = *tail = buf;
+               return 1;
+       }
+
+       /* Last ? */
+       if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
+               (*tail)->next = buf;
+               *tail = buf;
+               return 1;
+       }
+
+       /* Scan through queue and sort it in */
+       do {
+               struct tipc_msg *msg = buf_msg(crs);
+
+               if (less(seq_no, msg_seqno(msg))) {
+                       buf->next = crs;
+                       if (prev)
+                               prev->next = buf;
+                       else
+                               *head = buf;   
+                       return 1;
+               }
+               if (seq_no == msg_seqno(msg)) {
+                       break;
+               }
+               prev = crs;
+               crs = crs->next;
+       }
+       while (crs);
+
+       /* Message is a duplicate of an existing message */
+
+       buf_discard(buf);
+       return 0;
+}
+
+/** 
+ * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
+ */
+
+static void link_handle_out_of_seq_msg(struct link *l_ptr, 
+                                      struct sk_buff *buf)
+{
+       u32 seq_no = msg_seqno(buf_msg(buf));
+
+       if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
+               link_recv_proto_msg(l_ptr, buf);
+               return;
+       }
+
+       dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n", 
+           seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
+
+       /* Record OOS packet arrival (force mismatch on next timeout) */
+
+       l_ptr->checkpoint--;
+
+       /* 
+        * Discard packet if a duplicate; otherwise add it to deferred queue
+        * and notify peer of gap as per protocol specification
+        */
+
+       if (less(seq_no, mod(l_ptr->next_in_no))) {
+               l_ptr->stats.duplicates++;
+               buf_discard(buf);
+               return;
+       }
+
+       if (link_defer_pkt(&l_ptr->oldest_deferred_in,
+                          &l_ptr->newest_deferred_in, buf)) {
+               l_ptr->deferred_inqueue_sz++;
+               l_ptr->stats.deferred_recv++;
+               if ((l_ptr->deferred_inqueue_sz % 16) == 1)
+                       link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+       } else
+               l_ptr->stats.duplicates++;
+}
+
+/*
+ * Send protocol message to the other endpoint.
+ */
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
+                        u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+{
+       struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = l_ptr->pmsg;
+        u32 msg_size = sizeof(l_ptr->proto_msg);
+
+       if (link_blocked(l_ptr))
+               return;
+       msg_set_type(msg, msg_typ);
+       msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
+       msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 
+       msg_set_last_bcast(msg, bclink_get_last_sent());
+
+       if (msg_typ == STATE_MSG) {
+               u32 next_sent = mod(l_ptr->next_out_no);
+
+               if (!link_is_up(l_ptr))
+                       return;
+               if (l_ptr->next_out)
+                       next_sent = msg_seqno(buf_msg(l_ptr->next_out));
+               msg_set_next_sent(msg, next_sent);
+               if (l_ptr->oldest_deferred_in) {
+                       u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+                       gap = mod(rec - mod(l_ptr->next_in_no));
+               }
+               msg_set_seq_gap(msg, gap);
+               if (gap)
+                       l_ptr->stats.sent_nacks++;
+               msg_set_link_tolerance(msg, tolerance);
+               msg_set_linkprio(msg, priority);
+               msg_set_max_pkt(msg, ack_mtu);
+               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+               msg_set_probe(msg, probe_msg != 0);
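+               /* Probe for a larger packet size: pad probe toward max_pkt_target, */
+               /* and lower the target after 10 probes without success */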
+               if (probe_msg) { 
+                       u32 mtu = l_ptr->max_pkt;
+
+                        if ((mtu < l_ptr->max_pkt_target) &&
+                           link_working_working(l_ptr) &&
+                           l_ptr->fsm_msg_cnt) {
+                               msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+                                if (l_ptr->max_pkt_probes == 10) {
+                                        l_ptr->max_pkt_target = (msg_size - 4);
+                                        l_ptr->max_pkt_probes = 0;
+                                       msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
+                                }
+                               l_ptr->max_pkt_probes++;
+                        }
+
+                       l_ptr->stats.sent_probes++;
+                }
+               l_ptr->stats.sent_states++;
+       } else {                /* RESET_MSG or ACTIVATE_MSG */
+               msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+               msg_set_seq_gap(msg, 0);
+               msg_set_next_sent(msg, 1);
+               msg_set_link_tolerance(msg, l_ptr->tolerance);
+               msg_set_linkprio(msg, l_ptr->priority);
+               msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+       }
+
+       if (node_has_redundant_links(l_ptr->owner)) {
+               msg_set_redundant_link(msg);
+       } else {
+               msg_clear_redundant_link(msg);
+       }
+       msg_set_linkprio(msg, l_ptr->priority);
+
+       /* Ensure sequence number will never fall inside the receive window: */
+
+       msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+
+       /* Congestion? */
+
+       if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
+               if (!l_ptr->proto_msg_queue) {
+                       l_ptr->proto_msg_queue =
+                               buf_acquire(sizeof(l_ptr->proto_msg));
+               }
+               buf = l_ptr->proto_msg_queue;
+               if (!buf)
+                       return;
+               memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+               return;
+       }
+       msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
+
+       /* Message can be sent */
+
+       msg_dbg(msg, ">>");
+
+       buf = buf_acquire(msg_size);
+       if (!buf)
+               return;
+
+       memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
+        msg_set_size(buf_msg(buf), msg_size);
+
+       if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
+               l_ptr->unacked_window = 0;
+               buf_discard(buf);
+               return;
+       }
+
+       /* New congestion */
+       bearer_schedule(l_ptr->b_ptr, l_ptr);
+       l_ptr->proto_msg_queue = buf;
+       l_ptr->stats.bearer_congs++;
+}
+
+/*
+ * Receive protocol message :
+ * Note that network plane id propagates through the network, and may
+ * change at any time. The node with the lowest address rules.
+ */
+
+static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
+{
+       u32 rec_gap = 0;
+       u32 max_pkt_info;
+        u32 max_pkt_ack;
+       u32 msg_tol;
+       struct tipc_msg *msg = buf_msg(buf);
+
+       dbg("AT(%u):", jiffies_to_msecs(jiffies));
+       msg_dbg(msg, "<<");
+       if (link_blocked(l_ptr))
+               goto exit;
+
+       /* record unnumbered packet arrival (force mismatch on next timeout) */
+
+       l_ptr->checkpoint--;
+
+       if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
+               if (tipc_own_addr > msg_prevnode(msg))
+                       l_ptr->b_ptr->net_plane = msg_net_plane(msg);
+
+       l_ptr->owner->permit_changeover = msg_redundant_link(msg);
+
+       switch (msg_type(msg)) {
+       
+       case RESET_MSG:
+               if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
+                       if (msg_session(msg) == l_ptr->peer_session) {
+                               dbg("Duplicate RESET: %u<->%u\n",
+                                   msg_session(msg), l_ptr->peer_session);
+                               break; /* duplicate: ignore */
+                       }
+               }
+               /* fall thru' */
+       case ACTIVATE_MSG:
+               /* Update link settings according to the other endpoint's values */
+
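+               /* Fill in the final part of the link name from the message data */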
+               strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
+
+               if ((msg_tol = msg_link_tolerance(msg)) &&
+                   (msg_tol > l_ptr->tolerance))
+                       link_set_supervision_props(l_ptr, msg_tol);
+
+               if (msg_linkprio(msg) > l_ptr->priority)
+                       l_ptr->priority = msg_linkprio(msg);
+
+               max_pkt_info = msg_max_pkt(msg);
+                if (max_pkt_info) {
+                       if (max_pkt_info < l_ptr->max_pkt_target)
+                               l_ptr->max_pkt_target = max_pkt_info;
+                       if (l_ptr->max_pkt > l_ptr->max_pkt_target)
+                               l_ptr->max_pkt = l_ptr->max_pkt_target;
+               } else {
+                        l_ptr->max_pkt = l_ptr->max_pkt_target;
+               }
+               l_ptr->owner->bclink.supported = (max_pkt_info != 0);
+
+               link_state_event(l_ptr, msg_type(msg));
+
+               l_ptr->peer_session = msg_session(msg);
+               l_ptr->peer_bearer_id = msg_bearer_id(msg);
+
+               /* Synchronize broadcast sequence numbers */
+               if (!node_has_redundant_links(l_ptr->owner)) {
+                       l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
+               }
+               break;
+       case STATE_MSG:
+
+               if ((msg_tol = msg_link_tolerance(msg)))
+                       link_set_supervision_props(l_ptr, msg_tol);
+               
+               if (msg_linkprio(msg) && 
+                   (msg_linkprio(msg) != l_ptr->priority)) {
+                       warn("Changing prio <%s>: %u->%u\n",
+                            l_ptr->name, l_ptr->priority, msg_linkprio(msg));
+                       l_ptr->priority = msg_linkprio(msg);
+                       link_reset(l_ptr); /* Enforce change to take effect */
+                       break;
+               }
+               link_state_event(l_ptr, TRAFFIC_MSG_EVT);
+               l_ptr->stats.recv_states++;
+               if (link_reset_unknown(l_ptr))
+                       break;
+
+               if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
+                       rec_gap = mod(msg_next_sent(msg) - 
+                                     mod(l_ptr->next_in_no));
+               }
+
+               max_pkt_ack = msg_max_pkt(msg);
+                if (max_pkt_ack > l_ptr->max_pkt) {
+                        dbg("Link <%s> updated MTU %u -> %u\n",
+                            l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
+                        l_ptr->max_pkt = max_pkt_ack;
+                        l_ptr->max_pkt_probes = 0;
+                }
+
+               max_pkt_ack = 0;
+                if (msg_probe(msg)) {
+                       l_ptr->stats.recv_probes++;
+                        if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
+                                max_pkt_ack = msg_size(msg);
+                        }
+                }
+
+               /* Protocol message before retransmits, reduce loss risk */
+
+               bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
+
+               if (rec_gap || (msg_probe(msg))) {
+                       link_send_proto_msg(l_ptr, STATE_MSG,
+                                           0, rec_gap, 0, 0, max_pkt_ack);
+               }
+               if (msg_seq_gap(msg)) {
+                       msg_dbg(msg, "With Gap:");
+                       l_ptr->stats.recv_nacks++;
+                       link_retransmit(l_ptr, l_ptr->first_out,
+                                       msg_seq_gap(msg));
+               }
+               break;
+       default:
+               msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
+       }
+exit:
+       buf_discard(buf);
+}
+
+
+/*
+ * link_tunnel(): Send one message via a link belonging to 
+ * another bearer. Owner node is locked.
+ */
+void link_tunnel(struct link *l_ptr, 
+           struct tipc_msg *tunnel_hdr, 
+           struct tipc_msg  *msg,
+           u32 selector)
+{
+       struct link *tunnel;
+       struct sk_buff *buf;
+       u32 length = msg_size(msg);
+
+       tunnel = l_ptr->owner->active_links[selector & 1];
+       if (!link_is_up(tunnel))
+               return;
+       msg_set_size(tunnel_hdr, length + INT_H_SIZE);
+       buf = buf_acquire(length + INT_H_SIZE);
+       if (!buf)
+               return;
+       memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
+       memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
+       dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
+       msg_dbg(buf_msg(buf), ">SEND>");
+       assert(tunnel);
+       link_send_buf(tunnel, buf);
+}
+
+
+
+/*
+ * changeover(): Send whole message queue via the remaining link
+ *               Owner node is locked.
+ */
+
+void link_changeover(struct link *l_ptr)
+{
+       u32 msgcount = l_ptr->out_queue_size;
+       struct sk_buff *crs = l_ptr->first_out;
+       struct link *tunnel = l_ptr->owner->active_links[0];
+       int split_bundles = node_has_redundant_links(l_ptr->owner);
+       struct tipc_msg tunnel_hdr;
+
+       if (!tunnel)
+               return;
+
+       if (!l_ptr->owner->permit_changeover)
+               return;
+
+       msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+                ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+       msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+       msg_set_msgcnt(&tunnel_hdr, msgcount);
+       if (!l_ptr->first_out) {
+               struct sk_buff *buf;
+
+               assert(!msgcount);
+               buf = buf_acquire(INT_H_SIZE);
+               if (buf) {
+                       memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+                       msg_set_size(&tunnel_hdr, INT_H_SIZE);
+                       dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+                           tunnel->b_ptr->net_plane);
+                       msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
+                       link_send_buf(tunnel, buf);
+               } else {
+                       warn("Memory squeeze; link changeover failed\n");
+               }
+               return;
+       }
+       while (crs) {
+               struct tipc_msg *msg = buf_msg(crs);
+
+               if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
+                       u32 msgcount = msg_msgcnt(msg);
+                       struct tipc_msg *m = msg_get_wrapped(msg);
+                       unchar* pos = (unchar*)m;
+
+                       while (msgcount--) {
+                               msg_set_seqno(m,msg_seqno(msg));
+                               link_tunnel(l_ptr, &tunnel_hdr, m,
+                                           msg_link_selector(m));
+                               pos += align(msg_size(m));
+                               m = (struct tipc_msg *)pos;
+                       }
+               } else {
+                       link_tunnel(l_ptr, &tunnel_hdr, msg,
+                                   msg_link_selector(msg));
+               }
+               crs = crs->next;
+       }
+}
+
+void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
+{
+       struct sk_buff *iter;
+       struct tipc_msg tunnel_hdr;
+
+       msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
+                DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
+       msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
+       msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
+       iter = l_ptr->first_out;
+       while (iter) {
+               struct sk_buff *outbuf;
+               struct tipc_msg *msg = buf_msg(iter);
+               u32 length = msg_size(msg);
+
+               if (msg_user(msg) == MSG_BUNDLER)
+                       msg_set_type(msg, CLOSED_MSG);
+               msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
+               msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 
+               msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
+               outbuf = buf_acquire(length + INT_H_SIZE);
+               if (outbuf == NULL) {
+                       warn("Memory squeeze; buffer duplication failed\n");
+                       return;
+               }
+               memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
+               memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
+               dbg("%c->%c:", l_ptr->b_ptr->net_plane,
+                   tunnel->b_ptr->net_plane);
+               msg_dbg(buf_msg(outbuf), ">SEND>");
+               link_send_buf(tunnel, outbuf);
+               if (!link_is_up(l_ptr))
+                       return;
+               iter = iter->next;
+       }
+}
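+
+/*
+ * Note: link_changeover() tunnels the send queue as ORIGINAL_MSG packets
+ * once a link has failed, while link_send_duplicate() sends DUPLICATE_MSG
+ * copies over a second link; the receiving side discards duplicates whose
+ * originals have already arrived (see link_recv_changeover_msg()).
+ */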
+
+
+
+/**
+ * buf_extract - extracts embedded TIPC message from another message
+ * @skb: encapsulating message buffer
+ * @from_pos: offset to extract from
+ *
+ * Returns a new message buffer containing an embedded message.  The 
+ * encapsulating message itself is left unchanged.
+ */
+
+static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
+{
+       struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
+       u32 size = msg_size(msg);
+       struct sk_buff *eb;
+
+       eb = buf_acquire(size);
+       if (eb)
+               memcpy(eb->data, (unchar *)msg, size);
+       return eb;
+}
+
+/* 
+ *  link_recv_changeover_msg(): Receive tunneled packet sent
+ *  via other link. Node is locked. Return extracted buffer.
+ */
+
+static int link_recv_changeover_msg(struct link **l_ptr,
+                                   struct sk_buff **buf)
+{
+       struct sk_buff *tunnel_buf = *buf;
+       struct link *dest_link;
+       struct tipc_msg *msg;
+       struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
+       u32 msg_typ = msg_type(tunnel_msg);
+       u32 msg_count = msg_msgcnt(tunnel_msg);
+
+       dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
+       assert(dest_link != *l_ptr);
+       if (!dest_link) {
+               msg_dbg(tunnel_msg, "NOLINK/<REC<");
+               goto exit;
+       }
+       dbg("%c<-%c:", dest_link->b_ptr->net_plane,
+           (*l_ptr)->b_ptr->net_plane);
+       *l_ptr = dest_link;
+       msg = msg_get_wrapped(tunnel_msg);
+
+       if (msg_typ == DUPLICATE_MSG) {
+               if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
+                       msg_dbg(tunnel_msg, "DROP/<REC<");
+                       goto exit;
+               }
+               *buf = buf_extract(tunnel_buf,INT_H_SIZE);
+               if (*buf == NULL) {
+                       warn("Memory squeeze; failed to extract msg\n");
+                       goto exit;
+               }
+               msg_dbg(tunnel_msg, "TNL<REC<");
+               buf_discard(tunnel_buf);
+               return 1;
+       }
+
+       /* First original message? */
+
+       if (link_is_up(dest_link)) {
+               msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
+               link_reset(dest_link);
+               dest_link->exp_msg_count = msg_count;
+               if (!msg_count)
+                       goto exit;
+       } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
+               msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
+               dest_link->exp_msg_count = msg_count;
+               if (!msg_count)
+                       goto exit;
+       }
+
+       /* Receive original message */
+
+       if (dest_link->exp_msg_count == 0) {
+               msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
+               dbg_print_link(dest_link, "LINK:");
+               goto exit;
+       }
+       dest_link->exp_msg_count--;
+       if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+               msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
+               goto exit;
+       } else {
+               *buf = buf_extract(tunnel_buf, INT_H_SIZE);
+               if (*buf != NULL) {
+                       msg_dbg(tunnel_msg, "TNL<REC<");
+                       buf_discard(tunnel_buf);
+                       return 1;
+               } else {
+                       warn("Memory squeeze; dropped incoming msg\n");
+               }
+       }
+exit:
+       *buf = 0;
+       buf_discard(tunnel_buf);
+       return 0;
+}
+
+/*
+ *  Bundler functionality:
+ */
+void link_recv_bundle(struct sk_buff *buf)
+{
+       u32 msgcount = msg_msgcnt(buf_msg(buf));
+       u32 pos = INT_H_SIZE;
+       struct sk_buff *obuf;
+
+       msg_dbg(buf_msg(buf), "<BNDL<: ");
+       while (msgcount--) {
+               obuf = buf_extract(buf, pos);
+               if (obuf == NULL) {
+                       char addr_string[16];
+
+                       warn("Buffer allocation failure;\n");
+                       warn("  incoming message(s) from %s lost\n",
+                            addr_string_fill(addr_string, 
+                                             msg_orignode(buf_msg(buf))));
+                       return;
+               };
+               pos += align(msg_size(buf_msg(obuf)));
+               msg_dbg(buf_msg(obuf), "     /");
+               net_route_msg(obuf);
+       }
+       buf_discard(buf);
+}
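+
+/*
+ * Note: a bundle consists of an INT_H_SIZE bundle header followed by
+ * msg_msgcnt() messages packed back to back, each padded to an align()
+ * boundary, which is how the loop above steps from one message to the next.
+ */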
+
+/*
+ *  Fragmentation/defragmentation:
+ */
+
+
+/* 
+ * link_send_long_buf: Entry for buffers needing fragmentation.
+ * The buffer is complete, including total message length.
+ * Returns user data length.
+ */
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
+{
+       struct tipc_msg *inmsg = buf_msg(buf);
+       struct tipc_msg fragm_hdr;
+       u32 insize = msg_size(inmsg);
+       u32 dsz = msg_data_sz(inmsg);
+       unchar *crs = buf->data;
+       u32 rest = insize;
+       u32 pack_sz = link_max_pkt(l_ptr);
+       u32 fragm_sz = pack_sz - INT_H_SIZE;
+       u32 fragm_no = 1;
+       u32 destaddr = msg_destnode(inmsg);
+
+       if (msg_short(inmsg))
+               destaddr = l_ptr->addr;
+
+       if (msg_routed(inmsg))
+               msg_set_prevnode(inmsg, tipc_own_addr);
+
+       /* Prepare reusable fragment header: */
+
+       msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+                TIPC_OK, INT_H_SIZE, destaddr);
+       msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
+       msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
+       msg_set_fragm_no(&fragm_hdr, fragm_no);
+       l_ptr->stats.sent_fragmented++;
+
+       /* Chop up message: */
+
+       while (rest > 0) {
+               struct sk_buff *fragm;
+
+               if (rest <= fragm_sz) {
+                       fragm_sz = rest;
+                       msg_set_type(&fragm_hdr, LAST_FRAGMENT);
+               }
+               fragm = buf_acquire(fragm_sz + INT_H_SIZE);
+               if (fragm == NULL) {
+                       warn("Memory squeeze; failed to fragment msg\n");
+                       dsz = -ENOMEM;
+                       goto exit;
+               }
+               msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
+               memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
+               memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
+
+               /*  Send queued messages first, if any: */
+
+               l_ptr->stats.sent_fragments++;
+               link_send_buf(l_ptr, fragm);
+               if (!link_is_up(l_ptr))
+                       return dsz;
+               msg_set_fragm_no(&fragm_hdr, ++fragm_no);
+               rest -= fragm_sz;
+               crs += fragm_sz;
+               msg_set_type(&fragm_hdr, FRAGMENT);
+       }
+exit:
+       buf_discard(buf);
+       return dsz;
+}
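+
+/*
+ * Example: with link_max_pkt() of 1500 octets, each fragment above carries
+ * 1500 - INT_H_SIZE octets of the original buffer and the final
+ * LAST_FRAGMENT packet carries whatever remains.
+ */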
+
+/* 
+ * A pending message being re-assembled must store certain values 
+ * to handle subsequent fragments correctly. The following functions
+ * help store these values in unused, available fields in the
+ * pending message. This makes dynamic memory allocation unnecessary.
+ */
+
+static inline u32 get_long_msg_seqno(struct sk_buff *buf)
+{
+       return msg_seqno(buf_msg(buf));
+}
+
+static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
+{
+       msg_set_seqno(buf_msg(buf), seqno);
+}
+
+static inline u32 get_fragm_size(struct sk_buff *buf)
+{
+       return msg_ack(buf_msg(buf));
+}
+
+static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
+{
+       msg_set_ack(buf_msg(buf), sz);
+}
+
+static inline u32 get_expected_frags(struct sk_buff *buf)
+{
+       return msg_bcast_ack(buf_msg(buf));
+}
+
+static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
+{
+       msg_set_bcast_ack(buf_msg(buf), exp);
+}
+
+static inline u32 get_timer_cnt(struct sk_buff *buf)
+{
+       return msg_reroute_cnt(buf_msg(buf));
+}
+
+static inline void incr_timer_cnt(struct sk_buff *buf)
+{
+       msg_incr_reroute_cnt(buf_msg(buf));
+}
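+
+/*
+ * Field reuse by the helpers above:
+ *   seqno       - long message sequence number
+ *   ack         - fragment size
+ *   bcast_ack   - number of fragments still expected
+ *   reroute_cnt - defragmentation timer count
+ */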
+
+/* 
+ * link_recv_fragment(): Called with node lock on. Returns 
+ * the reassembled buffer if message is complete.
+ */
+int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 
+                      struct tipc_msg **m)
+{
+       struct sk_buff *prev = 0;
+       struct sk_buff *fbuf = *fb;
+       struct tipc_msg *fragm = buf_msg(fbuf);
+       struct sk_buff *pbuf = *pending;
+       u32 long_msg_seq_no = msg_long_msgno(fragm);
+
+       *fb = 0;
+       msg_dbg(fragm,"FRG<REC<");
+
+       /* Is there an incomplete message waiting for this fragment? */
+
+       while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
+                       || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
+               prev = pbuf;
+               pbuf = pbuf->next;
+       }
+
+       if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
+               struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
+               u32 msg_sz = msg_size(imsg);
+               u32 fragm_sz = msg_data_sz(fragm);
+               u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
+               u32 max =  TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
+               if (msg_type(imsg) == TIPC_MCAST_MSG)
+                       max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
+               if (msg_size(imsg) > max) {
+                       msg_dbg(fragm,"<REC<Oversized: ");
+                       buf_discard(fbuf);
+                       return 0;
+               }
+               pbuf = buf_acquire(msg_size(imsg));
+               if (pbuf != NULL) {
+                       pbuf->next = *pending;
+                       *pending = pbuf;
+                       memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
+
+                       /*  Prepare buffer for subsequent fragments. */
+
+                       set_long_msg_seqno(pbuf, long_msg_seq_no); 
+                       set_fragm_size(pbuf,fragm_sz); 
+                       set_expected_frags(pbuf,exp_fragm_cnt - 1); 
+               } else {
+                       warn("Memory squeeze; got no defragmenting buffer\n");
+               }
+               buf_discard(fbuf);
+               return 0;
+       } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
+               u32 dsz = msg_data_sz(fragm);
+               u32 fsz = get_fragm_size(pbuf);
+               u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
+               u32 exp_frags = get_expected_frags(pbuf) - 1;
+               memcpy(pbuf->data + crs, msg_data(fragm), dsz);
+               buf_discard(fbuf);
+
+               /* Is message complete? */
+
+               if (exp_frags == 0) {
+                       if (prev)
+                               prev->next = pbuf->next;
+                       else
+                               *pending = pbuf->next;
+                       msg_reset_reroute_cnt(buf_msg(pbuf));
+                       *fb = pbuf;
+                       *m = buf_msg(pbuf);
+                       return 1;
+               }
+               set_expected_frags(pbuf,exp_frags);     
+               return 0;
+       }
+       dbg(" Discarding orphan fragment %x\n",fbuf);
+       msg_dbg(fragm,"ORPHAN:");
+       dbg("Pending long buffers:\n");
+       dbg_print_buf_chain(*pending);
+       buf_discard(fbuf);
+       return 0;
+}
+
+/**
+ * link_check_defragm_bufs - flush stale incoming message fragments
+ * @l_ptr: pointer to link
+ */
+
+static void link_check_defragm_bufs(struct link *l_ptr)
+{
+       struct sk_buff *prev = 0;
+       struct sk_buff *next = 0;
+       struct sk_buff *buf = l_ptr->defragm_buf;
+
+       if (!buf)
+               return;
+       if (!link_working_working(l_ptr))
+               return;
+       while (buf) {
+               u32 cnt = get_timer_cnt(buf);
+
+               next = buf->next;
+               if (cnt < 4) {
+                       incr_timer_cnt(buf);
+                       prev = buf;
+               } else {
+                       dbg(" Discarding incomplete long buffer\n");
+                       msg_dbg(buf_msg(buf), "LONG:");
+                       dbg_print_link(l_ptr, "curr:");
+                       dbg("Pending long buffers:\n");
+                       dbg_print_buf_chain(l_ptr->defragm_buf);
+                       if (prev)
+                               prev->next = buf->next;
+                       else
+                               l_ptr->defragm_buf = buf->next;
+                       buf_discard(buf);
+               }
+               buf = next;
+       }
+}
+
+
+
+static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
+{
+       l_ptr->tolerance = tolerance;
+       l_ptr->continuity_interval =
+               ((tolerance / 4) > 500) ? 500 : tolerance / 4;
+       l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
+}
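+
+/*
+ * Example for link_set_supervision_props(): a tolerance of 1500 ms gives a
+ * continuity interval of 375 ms (tolerance / 4, capped at 500 ms) and an
+ * abort limit of 1500 / (375 / 4) = 16 unacknowledged probes.
+ */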
+
+
+void link_set_queue_limits(struct link *l_ptr, u32 window)
+{
+       /* Data messages from this node, including FIRST_FRAGM */
+       l_ptr->queue_limit[DATA_LOW] = window;
+       l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
+       l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
+       l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
+       /* Transiting data messages, including FIRST_FRAGM */
+       l_ptr->queue_limit[DATA_LOW + 4] = 300;
+       l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
+       l_ptr->queue_limit[DATA_HIGH + 4] = 900;
+       l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
+       l_ptr->queue_limit[CONN_MANAGER] = 1200;
+       l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
+       l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
+       l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
+       /* FRAGMENT and LAST_FRAGMENT packets */
+       l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
+}
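+
+/*
+ * Example for link_set_queue_limits(): a window of 50 packets gives limits
+ * of 50 (DATA_LOW), 64 (DATA_MEDIUM), 80 (DATA_HIGH) and 96 (DATA_CRITICAL),
+ * since window / 3 truncates to 16.
+ */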
+
+/**
+ * link_find_link - locate link by name
+ * @name: ptr to link name string
+ * @node: ptr to area to be filled with ptr to associated node
+ * 
+ * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
+ * this also prevents link deletion.
+ * 
+ * Returns pointer to link (or 0 if invalid link name).
+ */
+
+static struct link *link_find_link(const char *name, struct node **node)
+{
+       struct link_name link_name_parts;
+       struct bearer *b_ptr;
+       struct link *l_ptr; 
+
+       if (!link_name_validate(name, &link_name_parts))
+               return 0;
+
+       b_ptr = bearer_find_interface(link_name_parts.if_local);
+       if (!b_ptr)
+               return 0;
+
+       *node = node_find(link_name_parts.addr_peer); 
+       if (!*node)
+               return 0;
+
+       l_ptr = (*node)->links[b_ptr->identity];
+       if (!l_ptr || strcmp(l_ptr->name, name))
+               return 0;
+
+       return l_ptr;
+}
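+
+/*
+ * Note: link_name_validate() above splits the full link name into its
+ * components; a complete name identifies both endpoints (local interface
+ * and peer node address are the parts used here).
+ */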
+
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 
+                               u16 cmd)
+{
+       struct tipc_link_config *args;
+        u32 new_value;
+       struct link *l_ptr;
+       struct node *node;
+        int res;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
+       new_value = ntohl(args->value);
+
+       if (!strcmp(args->name, bc_link_name)) {
+               if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
+                   (bclink_set_queue_limits(new_value) == 0))
+                       return cfg_reply_none();
+               return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
+                                             " (cannot change setting on broadcast link)");
+       }
+
+       read_lock_bh(&net_lock);
+       l_ptr = link_find_link(args->name, &node); 
+       if (!l_ptr) {
+               read_unlock_bh(&net_lock);
+               return cfg_reply_error_string("link not found");
+       }
+
+       node_lock(node);
+       res = -EINVAL;
+       switch (cmd) {
+       case TIPC_CMD_SET_LINK_TOL: 
+               if ((new_value >= TIPC_MIN_LINK_TOL) && 
+                   (new_value <= TIPC_MAX_LINK_TOL)) {
+                       link_set_supervision_props(l_ptr, new_value);
+                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                           0, 0, new_value, 0, 0);
+                       res = TIPC_OK;
+               }
+               break;
+       case TIPC_CMD_SET_LINK_PRI: 
+               if (new_value < TIPC_NUM_LINK_PRI) {
+                       l_ptr->priority = new_value;
+                       link_send_proto_msg(l_ptr, STATE_MSG, 
+                                           0, 0, 0, new_value, 0);
+                       res = TIPC_OK;
+               }
+               break;
+       case TIPC_CMD_SET_LINK_WINDOW: 
+               if ((new_value >= TIPC_MIN_LINK_WIN) && 
+                   (new_value <= TIPC_MAX_LINK_WIN)) {
+                       link_set_queue_limits(l_ptr, new_value);
+                       res = TIPC_OK;
+               }
+               break;
+       }
+       node_unlock(node);
+
+       read_unlock_bh(&net_lock);
+       if (res)
+               return cfg_reply_error_string("cannot change link setting");
+
+       return cfg_reply_none();
+}
+
+/**
+ * link_reset_statistics - reset link statistics
+ * @l_ptr: pointer to link
+ */
+
+static void link_reset_statistics(struct link *l_ptr)
+{
+       memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
+       l_ptr->stats.sent_info = l_ptr->next_out_no;
+       l_ptr->stats.recv_info = l_ptr->next_in_no;
+}
+
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
+{
+       char *link_name;
+       struct link *l_ptr; 
+       struct node *node;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       link_name = (char *)TLV_DATA(req_tlv_area);
+       if (!strcmp(link_name, bc_link_name)) {
+               if (bclink_reset_stats())
+                       return cfg_reply_error_string("link not found");
+               return cfg_reply_none();
+       }
+
+       read_lock_bh(&net_lock);
+       l_ptr = link_find_link(link_name, &node); 
+       if (!l_ptr) {
+               read_unlock_bh(&net_lock);
+               return cfg_reply_error_string("link not found");
+       }
+
+       node_lock(node);
+       link_reset_statistics(l_ptr);
+       node_unlock(node);
+       read_unlock_bh(&net_lock);
+       return cfg_reply_none();
+}
+
+/**
+ * percent - convert count to a percentage of total (rounding up or down)
+ */
+
+static u32 percent(u32 count, u32 total)
+{
+       return (count * 100 + (total / 2)) / total;
+}
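+
+/*
+ * Example: percent(1, 3) = (100 + 1) / 3 = 33 and percent(2, 3) = 67,
+ * i.e. the result is rounded to the nearest whole percent.
+ */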
+
+/**
+ * link_stats - print link statistics
+ * @name: link name
+ * @buf: print buffer area
+ * @buf_size: size of print buffer area
+ * 
+ * Returns length of print buffer data string (or 0 if error)
+ */
+
+static int link_stats(const char *name, char *buf, const u32 buf_size)
+{
+       struct print_buf pb;
+       struct link *l_ptr; 
+       struct node *node;
+       char *status;
+       u32 profile_total = 0;
+
+       if (!strcmp(name, bc_link_name))
+               return bclink_stats(buf, buf_size);
+
+       printbuf_init(&pb, buf, buf_size);
+
+       read_lock_bh(&net_lock);
+       l_ptr = link_find_link(name, &node); 
+       if (!l_ptr) {
+               read_unlock_bh(&net_lock);
+               return 0;
+       }
+       node_lock(node);
+
+       if (link_is_active(l_ptr))
+               status = "ACTIVE";
+       else if (link_is_up(l_ptr))
+               status = "STANDBY";
+       else
+               status = "DEFUNCT";
+       tipc_printf(&pb, "Link <%s>\n"
+                        "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
+                        "  Window:%u packets\n", 
+                   l_ptr->name, status, link_max_pkt(l_ptr), 
+                   l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
+       tipc_printf(&pb, "  RX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   l_ptr->next_in_no - l_ptr->stats.recv_info,
+                   l_ptr->stats.recv_fragments,
+                   l_ptr->stats.recv_fragmented,
+                   l_ptr->stats.recv_bundles,
+                   l_ptr->stats.recv_bundled);
+       tipc_printf(&pb, "  TX packets:%u fragments:%u/%u bundles:%u/%u\n", 
+                   l_ptr->next_out_no - l_ptr->stats.sent_info,
+                   l_ptr->stats.sent_fragments,
+                   l_ptr->stats.sent_fragmented, 
+                   l_ptr->stats.sent_bundles,
+                   l_ptr->stats.sent_bundled);
+       profile_total = l_ptr->stats.msg_length_counts;
+       if (!profile_total)
+               profile_total = 1;
+       tipc_printf(&pb, "  TX profile sample:%u packets  average:%u octets\n"
+                        "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
+                        "-16384:%u%% -32768:%u%% -66000:%u%%\n",
+                   l_ptr->stats.msg_length_counts,
+                   l_ptr->stats.msg_lengths_total / profile_total,
+                   percent(l_ptr->stats.msg_length_profile[0], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[1], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[2], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[3], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[4], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[5], profile_total),
+                   percent(l_ptr->stats.msg_length_profile[6], profile_total));
+       tipc_printf(&pb, "  RX states:%u probes:%u naks:%u defs:%u dups:%u\n", 
+                   l_ptr->stats.recv_states,
+                   l_ptr->stats.recv_probes,
+                   l_ptr->stats.recv_nacks,
+                   l_ptr->stats.deferred_recv, 
+                   l_ptr->stats.duplicates);
+       tipc_printf(&pb, "  TX states:%u probes:%u naks:%u acks:%u dups:%u\n", 
+                   l_ptr->stats.sent_states, 
+                   l_ptr->stats.sent_probes, 
+                   l_ptr->stats.sent_nacks, 
+                   l_ptr->stats.sent_acks, 
+                   l_ptr->stats.retransmitted);
+       tipc_printf(&pb, "  Congestion bearer:%u link:%u  Send queue max:%u avg:%u\n",
+                   l_ptr->stats.bearer_congs,
+                   l_ptr->stats.link_congs, 
+                   l_ptr->stats.max_queue_sz,
+                   l_ptr->stats.queue_sz_counts
+                   ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
+                   : 0);
+
+       node_unlock(node);
+       read_unlock_bh(&net_lock);
+       return printbuf_validate(&pb);
+}
+
+#define MAX_LINK_STATS_INFO 2000
+
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+       struct sk_buff *buf;
+       struct tlv_desc *rep_tlv;
+       int str_len;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
+       if (!buf)
+               return NULL;
+
+       rep_tlv = (struct tlv_desc *)buf->data;
+
+       str_len = link_stats((char *)TLV_DATA(req_tlv_area),
+                            (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
+       if (!str_len) {
+               buf_discard(buf);
+               return cfg_reply_error_string("link not found");
+       }
+
+       skb_put(buf, TLV_SPACE(str_len));
+       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+       return buf;
+}
+
+#if 0
+int link_control(const char *name, u32 op, u32 val)
+{
+       int res = -EINVAL;
+       struct link *l_ptr;
+       u32 bearer_id;
+       struct node * node;
+       u32 a;
+
+       a = link_name2addr(name, &bearer_id);
+       read_lock_bh(&net_lock);
+       node = node_find(a);
+       if (node) {
+               node_lock(node);
+               l_ptr = node->links[bearer_id];
+               if (l_ptr) {
+                       if (op == TIPC_REMOVE_LINK) {
+                               struct bearer *b_ptr = l_ptr->b_ptr;
+                               spin_lock_bh(&b_ptr->publ.lock);
+                               link_delete(l_ptr);
+                               spin_unlock_bh(&b_ptr->publ.lock);
+                       }
+                       if (op == TIPC_CMD_BLOCK_LINK) {
+                               link_reset(l_ptr);
+                               l_ptr->blocked = 1;
+                       }
+                       if (op == TIPC_CMD_UNBLOCK_LINK) {
+                               l_ptr->blocked = 0;
+                       }
+                       res = TIPC_OK;
+               }
+               node_unlock(node);
+       }
+       read_unlock_bh(&net_lock);
+       return res;
+}
+#endif
+
+/**
+ * link_get_max_pkt - get maximum packet size to use when sending to destination
+ * @dest: network address of destination node
+ * @selector: used to select from set of active links
+ * 
+ * If no active link can be found, uses default maximum packet size.
+ */
+
+u32 link_get_max_pkt(u32 dest, u32 selector)
+{
+       struct node *n_ptr;
+       struct link *l_ptr;
+       u32 res = MAX_PKT_DEFAULT;
+       
+       if (dest == tipc_own_addr)
+               return MAX_MSG_SIZE;
+
+       read_lock_bh(&net_lock);        
+       n_ptr = node_select(dest, selector);
+       if (n_ptr) {
+               node_lock(n_ptr);
+               l_ptr = n_ptr->active_links[selector & 1];
+               if (l_ptr)
+                       res = link_max_pkt(l_ptr);
+               node_unlock(n_ptr);
+       }
+       read_unlock_bh(&net_lock);       
+       return res;
+}
+
+#if 0
+static void link_dump_rec_queue(struct link *l_ptr)
+{
+       struct sk_buff *crs;
+
+       if (!l_ptr->oldest_deferred_in) {
+               info("Reception queue empty\n");
+               return;
+       }
+       info("Contents of Reception queue:\n");
+       crs = l_ptr->oldest_deferred_in;
+       while (crs) {
+               if (crs->data == (void *)0x0000a3a3) {
+                       info("buffer %x invalid\n", crs);
+                       return;
+               }
+               msg_dbg(buf_msg(crs), "In rec queue: \n");
+               crs = crs->next;
+       }
+}
+#endif
+
+static void link_dump_send_queue(struct link *l_ptr)
+{
+       if (l_ptr->next_out) {
+               info("\nContents of unsent queue:\n");
+               dbg_print_buf_chain(l_ptr->next_out);
+       }
+       if (l_ptr->first_out) {
+               info("\nContents of send queue:\n");
+               dbg_print_buf_chain(l_ptr->first_out);
+       } else {
+               info("Empty send queue\n");
+       }
+}
+
+static void link_print(struct link *l_ptr, struct print_buf *buf,
+                      const char *str)
+{
+       tipc_printf(buf, str);
+       if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
+               return;
+       tipc_printf(buf, "Link %x<%s>:",
+                   l_ptr->addr, l_ptr->b_ptr->publ.name);
+       tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
+       tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
+       tipc_printf(buf, "SQUE");
+       if (l_ptr->first_out) {
+               tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
+               if (l_ptr->next_out)
+                       tipc_printf(buf, "%u..",
+                                   msg_seqno(buf_msg(l_ptr->next_out)));
+               tipc_printf(buf, "%u]",
+                           msg_seqno(buf_msg(l_ptr->last_out)));
+               if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) - 
+                        msg_seqno(buf_msg(l_ptr->first_out))) 
+                    != (l_ptr->out_queue_size - 1))
+                   || (l_ptr->last_out->next != 0)) {
+                       tipc_printf(buf, "\nSend queue inconsistency\n");
+                       tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
+                       tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
+                       tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
+                       link_dump_send_queue(l_ptr);
+               }
+       } else
+               tipc_printf(buf, "[]");
+       tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
+       if (l_ptr->oldest_deferred_in) {
+               u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
+               u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
+               tipc_printf(buf, ":RQUE[%u..%u]", o, n);
+               if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
+                       tipc_printf(buf, ":RQSIZ(%u)",
+                                   l_ptr->deferred_inqueue_sz);
+               }
+       }
+       if (link_working_unknown(l_ptr))
+               tipc_printf(buf, ":WU");
+       if (link_reset_reset(l_ptr))
+               tipc_printf(buf, ":RR");
+       if (link_reset_unknown(l_ptr))
+               tipc_printf(buf, ":RU");
+       if (link_working_working(l_ptr))
+               tipc_printf(buf, ":WW");
+       tipc_printf(buf, "\n");
+}
+
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644 (file)
index 0000000..c2553f0
--- /dev/null
@@ -0,0 +1,296 @@
+/*
+ * net/tipc/link.h: Include file for TIPC link code
+ * 
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_LINK_H
+#define _TIPC_LINK_H
+
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+#include "node.h"
+
+#define PUSH_FAILED   1
+#define PUSH_FINISHED 2
+
+/* 
+ * Link states 
+ */
+
+#define WORKING_WORKING 560810u
+#define WORKING_UNKNOWN 560811u
+#define RESET_UNKNOWN   560812u
+#define RESET_RESET     560813u
+
+/* 
+ * Starting value for maximum packet size negotiation on unicast links
+ * (unless bearer MTU is less)
+ */
+
+#define MAX_PKT_DEFAULT 1500
+
+/**
+ * struct link - TIPC link data structure
+ * @addr: network address of link's peer node
+ * @name: link name character string
+ * @media_addr: media address to use when sending messages over link
+ * @timer: link timer
+ * @owner: pointer to peer node
+ * @link_list: adjacent links in bearer's list of links
+ * @started: indicates if link has been started
+ * @checkpoint: reference point for triggering link continuity checking
+ * @peer_session: link session # being used by peer end of link
+ * @peer_bearer_id: bearer id used by link's peer endpoint
+ * @b_ptr: pointer to bearer used by link
+ * @tolerance: minimum link continuity loss needed to reset link [in ms] 
+ * @continuity_interval: link continuity testing interval [in ms]
+ * @abort_limit: # of unacknowledged continuity probes needed to reset link
+ * @state: current state of link FSM
+ * @blocked: indicates if link has been administratively blocked
+ * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
+ * @proto_msg: template for control messages generated by link
+ * @pmsg: convenience pointer to "proto_msg" field
+ * @priority: current link priority
+ * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @exp_msg_count: # of tunnelled messages expected during link changeover
+ * @reset_checkpoint: seq # of last acknowledged message at time of link reset
+ * @max_pkt: current maximum packet size for this link
+ * @max_pkt_target: desired maximum packet size for this link
+ * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
+ * @out_queue_size: # of messages in outbound message queue
+ * @first_out: ptr to first outbound message in queue
+ * @last_out: ptr to last outbound message in queue
+ * @next_out_no: next sequence number to use for outbound messages
+ * @last_retransmitted: sequence number of most recently retransmitted message
+ * @stale_count: # of identical retransmit requests made by peer
+ * @next_in_no: next sequence number to expect for inbound messages
+ * @deferred_inqueue_sz: # of messages in inbound message queue
+ * @oldest_deferred_in: ptr to first inbound message in queue
+ * @newest_deferred_in: ptr to last inbound message in queue
+ * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
+ * @proto_msg_queue: ptr to (single) outbound control message
+ * @retransm_queue_size: number of messages to retransmit
+ * @retransm_queue_head: sequence number of first message to retransmit
+ * @next_out: ptr to first unsent outbound message in queue
+ * @waiting_ports: linked list of ports waiting for link congestion to abate
+ * @long_msg_seq_no: next identifier to use for outbound fragmented messages
+ * @defragm_buf: list of partially reassembled inbound message fragments
+ * @stats: collects statistics regarding link activity
+ * @print_buf: print buffer used to log link activity
+ */
+struct link {
+       u32 addr;
+       char name[TIPC_MAX_LINK_NAME];
+       struct tipc_media_addr media_addr;
+       struct timer_list timer;
+       struct node *owner;
+       struct list_head link_list;
+
+       /* Management and link supervision data */
+       int started;
+       u32 checkpoint;
+       u32 peer_session;
+       u32 peer_bearer_id;
+       struct bearer *b_ptr;
+       u32 tolerance;
+       u32 continuity_interval;
+       u32 abort_limit;
+       int state;
+       int blocked;
+       u32 fsm_msg_cnt;
+       struct {
+               unchar hdr[INT_H_SIZE];
+               unchar body[TIPC_MAX_IF_NAME];
+       } proto_msg;
+       struct tipc_msg *pmsg;
+       u32 priority;
+       u32 queue_limit[15];    /* queue_limit[0]==window limit */
+
+       /* Changeover */
+       u32 exp_msg_count;
+       u32 reset_checkpoint;
+
+        /* Max packet negotiation */
+        u32 max_pkt;
+        u32 max_pkt_target;
+        u32 max_pkt_probes;
+
+       /* Sending */
+       u32 out_queue_size;
+       struct sk_buff *first_out;
+       struct sk_buff *last_out;
+       u32 next_out_no;
+        u32 last_retransmitted;
+        u32 stale_count;
+
+       /* Reception */
+       u32 next_in_no;
+       u32 deferred_inqueue_sz;
+       struct sk_buff *oldest_deferred_in;
+       struct sk_buff *newest_deferred_in;
+       u32 unacked_window;
+
+       /* Congestion handling */
+       struct sk_buff *proto_msg_queue;
+       u32 retransm_queue_size;
+       u32 retransm_queue_head;
+       struct sk_buff *next_out;
+       struct list_head waiting_ports;
+
+       /* Fragmentation/defragmentation */
+       u32 long_msg_seq_no;
+       struct sk_buff *defragm_buf;
+
+        /* Statistics */
+       struct {
+               u32 sent_info;          /* used in counting # sent packets */
+               u32 recv_info;          /* used in counting # recv'd packets */
+               u32 sent_states;
+               u32 recv_states;
+               u32 sent_probes;
+               u32 recv_probes;
+               u32 sent_nacks;
+               u32 recv_nacks;
+               u32 sent_acks;
+               u32 sent_bundled;
+               u32 sent_bundles;
+               u32 recv_bundled;
+               u32 recv_bundles;
+               u32 retransmitted;
+               u32 sent_fragmented;
+               u32 sent_fragments;
+               u32 recv_fragmented;
+               u32 recv_fragments;
+               u32 link_congs;         /* # port sends blocked by congestion */
+               u32 bearer_congs;
+               u32 deferred_recv;
+               u32 duplicates;
+
+               /* for statistical profiling of send queue size */
+
+               u32 max_queue_sz;
+               u32 accu_queue_sz;
+               u32 queue_sz_counts;
+
+               /* for statistical profiling of message lengths */
+
+               u32 msg_length_counts;
+               u32 msg_lengths_total;
+               u32 msg_length_profile[7];
+#if 0
+               u32 sent_tunneled;
+               u32 recv_tunneled;
+#endif
+       } stats;
+
+       struct print_buf print_buf;
+};
+
+struct port;
+
+struct link *link_create(struct bearer *b_ptr, const u32 peer,
+                        const struct tipc_media_addr *media_addr);
+void link_delete(struct link *l_ptr);
+void link_changeover(struct link *l_ptr);
+void link_send_duplicate(struct link *l_ptr, struct link *dest);
+void link_reset_fragments(struct link *l_ptr);
+int link_is_up(struct link *l_ptr);
+int link_is_active(struct link *l_ptr);
+void link_start(struct link *l_ptr);
+u32 link_push_packet(struct link *l_ptr);
+void link_stop(struct link *l_ptr);
+struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
+struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
+void link_reset(struct link *l_ptr);
+int link_send(struct sk_buff *buf, u32 dest, u32 selector);
+int link_send_buf(struct link *l_ptr, struct sk_buff *buf);
+u32 link_get_max_pkt(u32 dest,u32 selector);
+int link_send_sections_fast(struct port* sender, 
+                           struct iovec const *msg_sect,
+                           const u32 num_sect, 
+                           u32 destnode);
+
+int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
+void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
+                struct tipc_msg *msg, u32 selector);
+void link_recv_bundle(struct sk_buff *buf);
+int  link_recv_fragment(struct sk_buff **pending,
+                       struct sk_buff **fb,
+                       struct tipc_msg **msg);
+void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 
+                        u32 tolerance, u32 priority, u32 acked_mtu);
+void link_push_queue(struct link *l_ptr);
+u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
+                  struct sk_buff *buf);
+void link_wakeup_ports(struct link *l_ptr, int all);
+void link_set_queue_limits(struct link *l_ptr, u32 window);
+void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
+
+/*
+ * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
+ */
+
+static inline u32 mod(u32 x)
+{
+       return x & 0xffffu;
+}
+
+static inline int between(u32 lower, u32 upper, u32 n)
+{
+       if ((lower < n) && (n < upper))
+               return 1;
+       if ((upper < lower) && ((n > lower) || (n < upper)))
+               return 1;
+       return 0;
+}
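+
+/*
+ * Wrap-around examples: mod(0x10005) = 5, and between(65530, 10, 3) = 1,
+ * because sequence numbers wrap at 2**16.
+ */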
+
+static inline int less_eq(u32 left, u32 right)
+{
+       return (mod(right - left) < 32768u);
+}
+
+static inline int less(u32 left, u32 right)
+{
+       return (less_eq(left, right) && (mod(right) != mod(left)));
+}
+
+static inline u32 lesser(u32 left, u32 right)
+{
+       return less_eq(left, right) ? left : right;
+}
+
+#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644 (file)
index 0000000..03dbc55
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * net/tipc/msg.c: TIPC message header routines
+ *     
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "addr.h"
+#include "dbg.h"
+#include "msg.h"
+#include "bearer.h"
+
+
+void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+       memcpy(&((int *)m)[5], a, sizeof(*a));
+}
+
+void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
+{
+       memcpy(a, &((int*)m)[5], sizeof(*a));
+}
+
+
+void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
+{
+       u32 usr = msg_user(msg);
+       tipc_printf(buf, str);
+
+       switch (usr) {
+       case MSG_BUNDLER:
+               tipc_printf(buf, "BNDL::");
+               tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
+               break;
+       case BCAST_PROTOCOL:
+               tipc_printf(buf, "BCASTP::");
+               break;
+       case MSG_FRAGMENTER:
+               tipc_printf(buf, "FRAGM::");
+               switch (msg_type(msg)) {
+               case FIRST_FRAGMENT:
+                       tipc_printf(buf, "FIRST:");
+                       break;
+               case FRAGMENT:
+                       tipc_printf(buf, "BODY:");
+                       break;
+               case LAST_FRAGMENT:
+                       tipc_printf(buf, "LAST:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+
+               }
+               tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
+                           msg_fragm_no(msg));
+               break;
+       case DATA_LOW:
+       case DATA_MEDIUM:
+       case DATA_HIGH:
+       case DATA_CRITICAL:
+               tipc_printf(buf, "DAT%u:", msg_user(msg));
+               if (msg_short(msg)) {
+                       tipc_printf(buf, "CON:");
+                       break;
+               }
+               switch (msg_type(msg)) {
+               case TIPC_CONN_MSG:
+                       tipc_printf(buf, "CON:");
+                       break;
+               case TIPC_MCAST_MSG:
+                       tipc_printf(buf, "MCST:");
+                       break;
+               case TIPC_NAMED_MSG:
+                       tipc_printf(buf, "NAM:");
+                       break;
+               case TIPC_DIRECT_MSG:
+                       tipc_printf(buf, "DIR:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
+               }
+               if (msg_routed(msg) && !msg_non_seq(msg))
+                       tipc_printf(buf, "ROUT:");
+               if (msg_reroute_cnt(msg))
+                       tipc_printf(buf, "REROUTED(%u):",
+                                   msg_reroute_cnt(msg));
+               break;
+       case NAME_DISTRIBUTOR:
+               tipc_printf(buf, "NMD::");
+               switch (msg_type(msg)) {
+               case PUBLICATION:
+                       tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20);  /* Items */
+                       break;
+               case WITHDRAWAL:
+                       tipc_printf(buf, "WDRW:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
+               }
+               if (msg_routed(msg))
+                       tipc_printf(buf, "ROUT:");
+               if (msg_reroute_cnt(msg))
+                       tipc_printf(buf, "REROUTED(%u):",
+                                   msg_reroute_cnt(msg));
+               break;
+       case CONN_MANAGER:
+               tipc_printf(buf, "CONN_MNG:");
+               switch (msg_type(msg)) {
+               case CONN_PROBE:
+                       tipc_printf(buf, "PROBE:");
+                       break;
+               case CONN_PROBE_REPLY:
+                       tipc_printf(buf, "PROBE_REPLY:");
+                       break;
+               case CONN_ACK:
+                       tipc_printf(buf, "CONN_ACK:");
+                       tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+               }
+               if (msg_routed(msg))
+                       tipc_printf(buf, "ROUT:");
+               if (msg_reroute_cnt(msg))
+                       tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
+               break;
+       case LINK_PROTOCOL:
+               tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
+               switch (msg_type(msg)) {
+               case STATE_MSG:
+                       tipc_printf(buf, "STATE:");
+                       tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
+                       tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
+                       tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
+                       tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
+                       break;
+               case RESET_MSG:
+                       tipc_printf(buf, "RESET:");
+                       if (msg_size(msg) != msg_hdr_sz(msg))
+                               tipc_printf(buf, "BEAR:%s:",msg_data(msg));
+                       break;
+               case ACTIVATE_MSG:
+                       tipc_printf(buf, "ACTIVATE:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+               }
+               tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
+               tipc_printf(buf, "SESS(%u):",msg_session(msg));
+               break;
+       case CHANGEOVER_PROTOCOL:
+               tipc_printf(buf, "TUNL:");
+               switch (msg_type(msg)) {
+               case DUPLICATE_MSG:
+                       tipc_printf(buf, "DUPL:");
+                       break;
+               case ORIGINAL_MSG:
+                       tipc_printf(buf, "ORIG:");
+                       tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+               }
+               break;
+       case ROUTE_DISTRIBUTOR:
+               tipc_printf(buf, "ROUTING_MNG:");
+               switch (msg_type(msg)) {
+               case EXT_ROUTING_TABLE:
+                       tipc_printf(buf, "EXT_TBL:");
+                       tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+                       break;
+               case LOCAL_ROUTING_TABLE:
+                       tipc_printf(buf, "LOCAL_TBL:");
+                       tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+                       break;
+               case SLAVE_ROUTING_TABLE:
+                       tipc_printf(buf, "DP_TBL:");
+                       tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+                       break;
+               case ROUTE_ADDITION:
+                       tipc_printf(buf, "ADD:");
+                       tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+                       break;
+               case ROUTE_REMOVAL:
+                       tipc_printf(buf, "REMOVE:");
+                       tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
+               }
+               break;
+       case LINK_CONFIG:
+               tipc_printf(buf, "CFG:");
+               switch (msg_type(msg)) {
+               case DSC_REQ_MSG:
+                       tipc_printf(buf, "DSC_REQ:");
+                       break;
+               case DSC_RESP_MSG:
+                       tipc_printf(buf, "DSC_RESP:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
+                       break;
+               }
+               break;
+       default:
+               tipc_printf(buf, "UNKNOWN USER:");
+       }
+
+       switch (usr) {
+       case CONN_MANAGER:
+       case NAME_DISTRIBUTOR:
+       case DATA_LOW:
+       case DATA_MEDIUM:
+       case DATA_HIGH:
+       case DATA_CRITICAL:
+               if (msg_short(msg))
+                       break;  /* No error */
+               switch (msg_errcode(msg)) {
+               case TIPC_OK:
+                       break;
+               case TIPC_ERR_NO_NAME:
+                       tipc_printf(buf, "NO_NAME:");
+                       break;
+               case TIPC_ERR_NO_PORT:
+                       tipc_printf(buf, "NO_PORT:");
+                       break;
+               case TIPC_ERR_NO_NODE:
+                       tipc_printf(buf, "NO_PROC:");
+                       break;
+               case TIPC_ERR_OVERLOAD:
+                       tipc_printf(buf, "OVERLOAD:");
+                       break;
+               case TIPC_CONN_SHUTDOWN:
+                       tipc_printf(buf, "SHUTDOWN:");
+                       break;
+               default:
+                       tipc_printf(buf, "UNKNOWN ERROR(%x):",
+                                   msg_errcode(msg));
+               }
+       default:{}
+       }
+
+       tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
+       tipc_printf(buf, "SZ(%u):", msg_size(msg));
+       tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
+
+       if (msg_non_seq(msg))
+               tipc_printf(buf, "NOSEQ:");
+       else {
+               tipc_printf(buf, "ACK(%u):", msg_ack(msg));
+       }
+       tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
+       tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
+
+       if (msg_isdata(msg)) {
+               if (msg_named(msg)) {
+                       tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
+                       tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
+               }
+       }
+
+       if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
+           (usr != MSG_BUNDLER)) {
+               if (!msg_short(msg)) {
+                       tipc_printf(buf, ":ORIG(%x:%u):",
+                                   msg_orignode(msg), msg_origport(msg));
+                       tipc_printf(buf, ":DEST(%x:%u):",
+                                   msg_destnode(msg), msg_destport(msg));
+               } else {
+                       tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
+                       tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
+               }
+               if (msg_routed(msg) && !msg_non_seq(msg))
+                       tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
+       }
+       if (msg_user(msg) == NAME_DISTRIBUTOR) {
+               tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
+               tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
+               if (msg_routed(msg)) {
+                       tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
+               }
+       }
+
+       if (msg_user(msg) ==  LINK_CONFIG) {
+               u32* raw = (u32*)msg;
+               struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
+               tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
+               tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
+               tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
+               media_addr_printf(buf, orig);
+       }
+       if (msg_user(msg) == BCAST_PROTOCOL) {
+               tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
+               tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
+       }
+       tipc_printf(buf, "\n");
+       if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
+               msg_print(buf,msg_get_wrapped(msg),"      /");
+       }
+       if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
+               msg_print(buf,msg_get_wrapped(msg),"      /");
+       }
+}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644 (file)
index 0000000..662c818
--- /dev/null
@@ -0,0 +1,818 @@
+/*
+ * net/tipc/msg.h: Include file for TIPC message header routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_MSG_H
+#define _TIPC_MSG_H
+
+#include <net/tipc/tipc_msg.h>
+
+#define TIPC_VERSION              2
+#define DATA_LOW                  TIPC_LOW_IMPORTANCE
+#define DATA_MEDIUM               TIPC_MEDIUM_IMPORTANCE
+#define DATA_HIGH                 TIPC_HIGH_IMPORTANCE
+#define DATA_CRITICAL             TIPC_CRITICAL_IMPORTANCE
+#define SHORT_H_SIZE              24   /* Connected, in cluster */
+#define DIR_MSG_H_SIZE            32   /* Directly addressed messages */
+#define CONN_MSG_H_SIZE           36   /* Routed connected msgs */
+#define LONG_H_SIZE               40   /* Named Messages */
+#define MCAST_H_SIZE              44   /* Multicast messages */
+#define MAX_H_SIZE                60   /* Inclusive full options */
+#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
+#define LINK_CONFIG               13
+
+
+/*
+               TIPC user data message header format, version 2
+               
+       - Fundamental definitions available to privileged TIPC users
+         are located in tipc_msg.h.
+       - Remaining definitions available to TIPC internal users appear below. 
+*/
+
+
+static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
+{
+       m->hdr[w] = htonl(val);
+}
+
+static inline void msg_set_bits(struct tipc_msg *m, u32 w,
+                               u32 pos, u32 mask, u32 val)
+{
+       u32 word = msg_word(m,w) & ~(mask << pos);
+       msg_set_word(m, w, (word |= (val << pos)));
+}
+
+/* 
+ * Word 0
+ */
+
+static inline u32 msg_version(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 29, 7);
+}
+
+static inline void msg_set_version(struct tipc_msg *m) 
+{
+       msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
+}
+
+static inline u32 msg_user(struct tipc_msg *m)
+{
+       return msg_bits(m, 0, 25, 0xf);
+}
+
+static inline u32 msg_isdata(struct tipc_msg *m)
+{
+       return (msg_user(m) <= DATA_CRITICAL);
+}
+
+static inline void msg_set_user(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 0, 25, 0xf, n);
+}
+
+static inline void msg_set_importance(struct tipc_msg *m, u32 i) 
+{
+       msg_set_user(m, i);
+}
+
+static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n) 
+{
+       msg_set_bits(m, 0, 21, 0xf, n>>2);
+}
+
+static inline int msg_non_seq(struct tipc_msg *m) 
+{
+       return msg_bits(m, 0, 20, 1);
+}
+
+static inline void msg_set_non_seq(struct tipc_msg *m) 
+{
+       msg_set_bits(m, 0, 20, 1, 1);
+}
+
+static inline int msg_dest_droppable(struct tipc_msg *m) 
+{
+       return msg_bits(m, 0, 19, 1);
+}
+
+static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d) 
+{
+       msg_set_bits(m, 0, 19, 1, d);
+}
+
+static inline int msg_src_droppable(struct tipc_msg *m) 
+{
+       return msg_bits(m, 0, 18, 1);
+}
+
+static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d) 
+{
+       msg_set_bits(m, 0, 18, 1, d);
+}
+
+static inline void msg_set_size(struct tipc_msg *m, u32 sz)
+{
+       m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
+}
+
+
+/* 
+ * Word 1
+ */
+
+static inline void msg_set_type(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 1, 29, 0x7, n);
+}
+
+static inline void msg_set_errcode(struct tipc_msg *m, u32 err) 
+{
+       msg_set_bits(m, 1, 25, 0xf, err);
+}
+
+static inline u32 msg_reroute_cnt(struct tipc_msg *m) 
+{
+       return msg_bits(m, 1, 21, 0xf);
+}
+
+static inline void msg_incr_reroute_cnt(struct tipc_msg *m) 
+{
+       msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
+}
+
+static inline void msg_reset_reroute_cnt(struct tipc_msg *m) 
+{
+       msg_set_bits(m, 1, 21, 0xf, 0);
+}
+
+static inline u32 msg_lookup_scope(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 19, 0x3);
+}
+
+static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 1, 19, 0x3, n);
+}
+
+static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz) 
+{
+       u32 hsz = msg_hdr_sz(m);
+       char *to = (char *)&m->hdr[hsz/4];
+
+       if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
+               return;
+       msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
+       msg_set_hdr_sz(m, hsz + sz);
+       memcpy(to, opt, sz);
+}
+
+static inline u32 msg_bcast_ack(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 0, 0xffff);
+}
+
+static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 1, 0, 0xffff, n);
+}
+
+
+/* 
+ * Word 2
+ */
+
+static inline u32 msg_ack(struct tipc_msg *m)
+{
+       return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_ack(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_seqno(struct tipc_msg *m)
+{
+       return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_seqno(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/* 
+ * Words 3-10
+ */
+
+
+static inline void msg_set_prevnode(struct tipc_msg *m, u32 a) 
+{
+       msg_set_word(m, 3, a);
+}
+
+static inline void msg_set_origport(struct tipc_msg *m, u32 p) 
+{
+       msg_set_word(m, 4, p);
+}
+
+static inline void msg_set_destport(struct tipc_msg *m, u32 p) 
+{
+       msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p) 
+{
+       msg_set_word(m, 5, p);
+}
+
+static inline void msg_set_orignode(struct tipc_msg *m, u32 a) 
+{
+       msg_set_word(m, 6, a);
+}
+
+static inline void msg_set_destnode(struct tipc_msg *m, u32 a) 
+{
+       msg_set_word(m, 7, a);
+}
+
+static inline int msg_is_dest(struct tipc_msg *m, u32 d) 
+{
+       return(msg_short(m) || (msg_destnode(m) == d));
+}
+
+static inline u32 msg_routed(struct tipc_msg *m)
+{
+       if (likely(msg_short(m)))
+               return 0;
+       return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
+}
+
+static inline void msg_set_nametype(struct tipc_msg *m, u32 n) 
+{
+       msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_transp_seqno(struct tipc_msg *m)
+{
+       return msg_word(m, 8);
+}
+
+static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
+{
+       msg_set_word(m, 8, n);
+}
+
+static inline u32 msg_timestamp(struct tipc_msg *m)
+{
+       return msg_word(m, 8);
+}
+
+static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
+{
+       msg_set_word(m, 8, n);
+}
+
+static inline void msg_set_namelower(struct tipc_msg *m, u32 n) 
+{
+       msg_set_word(m, 9, n);
+}
+
+static inline void msg_set_nameinst(struct tipc_msg *m, u32 n) 
+{
+       msg_set_namelower(m, n);
+}
+
+static inline void msg_set_nameupper(struct tipc_msg *m, u32 n) 
+{
+       msg_set_word(m, 10, n);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+       return (struct tipc_msg *)msg_data(m);
+}
+
+static inline void msg_expand(struct tipc_msg *m, u32 destnode) 
+{
+       if (!msg_short(m))
+               return;
+       msg_set_hdr_sz(m, LONG_H_SIZE);
+       msg_set_orignode(m, msg_prevnode(m));
+       msg_set_destnode(m, destnode);
+       memset(&m->hdr[8], 0, 12);
+}
+
+
+
+/*
+               TIPC internal message header format, version 2
+
+       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w0:|vers |msg usr|hdr sz |n|resrv|            packet size          |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w1:|m typ|rsv=0|   sequence gap    |       broadcast ack no        |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w2:| link level ack no/bc_gap_from |     seq no / bcast_gap_to     |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w3:|                       previous node                           |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w4:|  next sent broadcast/fragm no | next sent pkt/ fragm msg no   |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w5:|          session no           |rsv=0|r|berid|link prio|netpl|p|
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w6:|                      originating node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w7:|                      destination node                         |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w8:|                   transport sequence number                   |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   w9:|   msg count / bcast tag       |       link tolerance          |
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+      \                                                               \
+      /                     User Specific Data                        /
+      \                                                               \
+      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+      NB: CONN_MANAGER uses the data message format. LINK_CONFIG has its own format.
+*/   
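   A minimal sketch (not part of the patch itself) of how the accessors earlier in
   this header map onto the word-0 layout above; demo_show_word0() is an invented
   name, and the usual kernel headers are assumed for ntohl() and printk():

       static void demo_show_word0(u32 w0_net)
       {
               u32 w0 = ntohl(w0_net);          /* header words travel in network byte order */

               printk(KERN_DEBUG "vers=%u user=%u hdr_sz=%u non_seq=%u size=%u\n",
                      (w0 >> 29) & 0x7,         /* "vers" field                        */
                      (w0 >> 25) & 0xf,         /* "msg usr" field                     */
                      ((w0 >> 21) & 0xf) << 2,  /* "hdr sz" is stored in 4-octet units */
                      (w0 >> 20) & 0x1,         /* "n" (non-sequenced) bit             */
                      w0 & 0x1ffff);            /* "packet size" in octets             */
       }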
+
+/* 
+ * Internal users
+ */
+
+#define  BCAST_PROTOCOL       5
+#define  MSG_BUNDLER          6
+#define  LINK_PROTOCOL        7
+#define  CONN_MANAGER         8
+#define  ROUTE_DISTRIBUTOR    9
+#define  CHANGEOVER_PROTOCOL  10
+#define  NAME_DISTRIBUTOR     11
+#define  MSG_FRAGMENTER       12
+#define  LINK_CONFIG          13
+#define  INT_H_SIZE           40
+#define  DSC_H_SIZE           40
+
+/* 
+ *  Connection management protocol messages
+ */
+
+#define CONN_PROBE        0
+#define CONN_PROBE_REPLY  1
+#define CONN_ACK          2
+
+/* 
+ * Name distributor messages
+ */
+
+#define PUBLICATION       0
+#define WITHDRAWAL        1
+
+
+/* 
+ * Word 1
+ */
+
+static inline u32 msg_seq_gap(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 16, 0xff);
+}
+
+static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 1, 16, 0xff, n);
+}
+
+static inline u32 msg_req_links(struct tipc_msg *m)
+{
+       return msg_bits(m, 1, 16, 0xfff);
+}
+
+static inline void msg_set_req_links(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 1, 16, 0xfff, n);
+}
+
+
+/* 
+ * Word 2
+ */
+
+static inline u32 msg_dest_domain(struct tipc_msg *m)
+{
+       return msg_word(m, 2);
+}
+
+static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n) 
+{
+       msg_set_word(m, 2, n);
+}
+
+static inline u32 msg_bcgap_after(struct tipc_msg *m)
+{
+       return msg_bits(m, 2, 16, 0xffff);
+}
+
+static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 2, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcgap_to(struct tipc_msg *m)
+{
+       return msg_bits(m, 2, 0, 0xffff);
+}
+
+static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 2, 0, 0xffff, n);
+}
+
+
+/* 
+ * Word 4
+ */
+
+static inline u32 msg_last_bcast(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_fragm_no(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 16, 0xffff);
+}
+
+static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 4, 16, 0xffff, n);
+}
+
+
+static inline u32 msg_next_sent(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+
+static inline u32 msg_long_msgno(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 0, 0xffff);
+}
+
+static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 4, 0, 0xffff, n);
+}
+
+static inline u32 msg_bc_netid(struct tipc_msg *m)
+{
+       return msg_word(m, 4);
+}
+
+static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
+{
+       msg_set_word(m, 4, id);
+}
+
+static inline u32 msg_link_selector(struct tipc_msg *m)
+{
+       return msg_bits(m, 4, 0, 1);
+}
+
+static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 4, 0, 1, (n & 1));
+}
+
+/* 
+ * Word 5
+ */
+
+static inline u32 msg_session(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 16, 0xffff);
+}
+
+static inline void msg_set_session(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 5, 16, 0xffff, n);
+}
+
+static inline u32 msg_probe(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 0, 1);
+}
+
+static inline void msg_set_probe(struct tipc_msg *m, u32 val)
+{
+       msg_set_bits(m, 5, 0, 1, (val & 1));
+}
+
+static inline char msg_net_plane(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 1, 7) + 'A';
+}
+
+static inline void msg_set_net_plane(struct tipc_msg *m, char n)
+{
+       msg_set_bits(m, 5, 1, 7, (n - 'A'));
+}
+
+static inline u32 msg_linkprio(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 4, 0x1f);
+}
+
+static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 5, 4, 0x1f, n);
+}
+
+static inline u32 msg_bearer_id(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 9, 0x7);
+}
+
+static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 5, 9, 0x7, n);
+}
+
+static inline u32 msg_redundant_link(struct tipc_msg *m)
+{
+       return msg_bits(m, 5, 12, 0x1);
+}
+
+static inline void msg_set_redundant_link(struct tipc_msg *m)
+{
+       msg_set_bits(m, 5, 12, 0x1, 1);
+}
+
+static inline void msg_clear_redundant_link(struct tipc_msg *m)
+{
+       msg_set_bits(m, 5, 12, 0x1, 0);
+}
+
+
+/* 
+ * Word 9
+ */
+
+static inline u32 msg_msgcnt(struct tipc_msg *m)
+{
+       return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_bcast_tag(struct tipc_msg *m)
+{
+       return msg_bits(m, 9, 16, 0xffff);
+}
+
+static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 9, 16, 0xffff, n);
+}
+
+static inline u32 msg_max_pkt(struct tipc_msg *m) 
+{
+       return (msg_bits(m, 9, 16, 0xffff) * 4);
+}
+
+static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n) 
+{
+       msg_set_bits(m, 9, 16, 0xffff, (n / 4));
+}
+
+static inline u32 msg_link_tolerance(struct tipc_msg *m)
+{
+       return msg_bits(m, 9, 0, 0xffff);
+}
+
+static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
+{
+       msg_set_bits(m, 9, 0, 0xffff, n);
+}
+
+/* 
+ * Routing table message data
+ */
+
+
+static inline u32 msg_remote_node(struct tipc_msg *m)
+{
+       return msg_word(m, msg_hdr_sz(m)/4);
+}
+
+static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
+{
+       msg_set_word(m, msg_hdr_sz(m)/4, a);
+}
+
+static inline int msg_dataoctet(struct tipc_msg *m, u32 pos)
+{
+       return(msg_data(m)[pos + 4] != 0);
+}
+
+static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
+{
+       msg_data(m)[pos + 4] = 1;
+}
+
+/* 
+ * Segmentation message types
+ */
+
+#define FIRST_FRAGMENT     0
+#define FRAGMENT           1
+#define LAST_FRAGMENT      2
+
+/* 
+ * Link management protocol message types
+ */
+
+#define STATE_MSG       0
+#define RESET_MSG       1
+#define ACTIVATE_MSG    2
+
+/* 
+ * Changeover tunnel message types
+ */
+#define DUPLICATE_MSG    0
+#define ORIGINAL_MSG     1
+
+/* 
+ * Routing table message types
+ */
+#define EXT_ROUTING_TABLE    0
+#define LOCAL_ROUTING_TABLE  1
+#define SLAVE_ROUTING_TABLE  2
+#define ROUTE_ADDITION       3
+#define ROUTE_REMOVAL        4
+
+/* 
+ * Config protocol message types
+ */
+
+#define DSC_REQ_MSG          0
+#define DSC_RESP_MSG         1
+
+static inline u32 msg_tot_importance(struct tipc_msg *m)
+{
+       if (likely(msg_isdata(m))) {
+               if (likely(msg_orignode(m) == tipc_own_addr))
+                       return msg_importance(m);
+               return msg_importance(m) + 4;
+       }
+       if ((msg_user(m) == MSG_FRAGMENTER)  &&
+           (msg_type(m) == FIRST_FRAGMENT))
+               return msg_importance(msg_get_wrapped(m));
+       return msg_importance(m);
+}
+
+
+static inline void msg_init(struct tipc_msg *m, u32 user, u32 type, 
+                           u32 err, u32 hsize, u32 destnode)
+{
+       memset(m, 0, hsize);
+       msg_set_version(m);
+       msg_set_user(m, user);
+       msg_set_hdr_sz(m, hsize);
+       msg_set_size(m, hsize);
+       msg_set_prevnode(m, tipc_own_addr);
+       msg_set_type(m, type);
+       msg_set_errcode(m, err);
+       if (!msg_short(m)) {
+               msg_set_orignode(m, tipc_own_addr);
+               msg_set_destnode(m, destnode);
+       }
+}
+
+/** 
+ * msg_calc_data_size - determine total data size for message
+ */
+
+static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
+{
+       int dsz = 0;
+       int i;
+
+       for (i = 0; i < num_sect; i++)
+               dsz += msg_sect[i].iov_len;
+       return dsz;
+}
+
+/** 
+ * msg_build - create message using specified header and data
+ * 
+ * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
+ * 
+ * Returns message data size or errno
+ */
+
+static inline int msg_build(struct tipc_msg *hdr, 
+                           struct iovec const *msg_sect, u32 num_sect,
+                           int max_size, int usrmem, struct sk_buff** buf)
+{
+       int dsz, sz, hsz, pos, res, cnt;
+
+       dsz = msg_calc_data_size(msg_sect, num_sect);
+       if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
+               *buf = NULL;
+               return -EINVAL;
+       }
+
+       pos = hsz = msg_hdr_sz(hdr);
+       sz = hsz + dsz;
+       msg_set_size(hdr, sz);
+       if (unlikely(sz > max_size)) {
+               *buf = NULL;
+               return dsz;
+       }
+
+       *buf = buf_acquire(sz);
+       if (!(*buf))
+               return -ENOMEM;
+       memcpy((*buf)->data, (unchar *)hdr, hsz);
+       for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
+               if (likely(usrmem))
+                       res = !copy_from_user((*buf)->data + pos, 
+                                             msg_sect[cnt].iov_base, 
+                                             msg_sect[cnt].iov_len);
+               else
+                       memcpy((*buf)->data + pos, msg_sect[cnt].iov_base, 
+                              msg_sect[cnt].iov_len);
+               pos += msg_sect[cnt].iov_len;
+       }
+       if (likely(res))
+               return dsz;
+
+       buf_discard(*buf);
+       *buf = NULL;
+       return -EFAULT;
+}
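   Paired with msg_init() above, a minimal sketch of building a buffer from one
   kernel-memory section; demo_build_msg() is an invented name, the
   NAME_DISTRIBUTOR/PUBLICATION header is only a concrete choice for the example,
   and <linux/uio.h> is assumed for struct iovec:

       static struct sk_buff *demo_build_msg(void *data, u32 len, u32 destnode)
       {
               struct tipc_msg hdr;
               struct iovec sect = { .iov_base = data, .iov_len = len };
               struct sk_buff *buf;
               int res;

               /* long header, no error code; user/type chosen only for the example */
               msg_init(&hdr, NAME_DISTRIBUTOR, PUBLICATION, TIPC_OK,
                        LONG_H_SIZE, destnode);

               /* usrmem == 0: the section is kernel memory, so memcpy() is used */
               res = msg_build(&hdr, &sect, 1, MAX_MSG_SIZE, 0, &buf);
               if (res < 0 || !buf)    /* error, or message larger than max_size */
                       return NULL;
               return buf;
       }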
+
+
+struct tipc_media_addr;
+
+extern void msg_set_media_addr(struct tipc_msg *m,
+                              struct tipc_media_addr *a);
+
+extern void msg_get_media_addr(struct tipc_msg *m,
+                              struct tipc_media_addr *a);
+
+
+#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644 (file)
index 0000000..41cbaf1
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * net/tipc/name_distr.c: TIPC name distribution code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "cluster.h"
+#include "dbg.h"
+#include "link.h"
+#include "msg.h"
+#include "name_distr.h"
+
+#undef  DBG_OUTPUT
+#define DBG_OUTPUT NULL
+
+#define ITEM_SIZE sizeof(struct distr_item)
+
+/**
+ * struct distr_item - publication info distributed to other nodes
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @ref: publishing port reference
+ * @key: publication key
+ * 
+ * ===> All fields are stored in network byte order. <===
+ * 
+ * First 3 fields identify (name or) name sequence being published.
+ * Reference field uniquely identifies port that published name sequence.
+ * Key field uniquely identifies publication, in the event a port has
+ * multiple publications of the same name sequence.
+ * 
+ * Note: There is no field that identifies the publishing node because it is 
+ * the same for all items contained within a publication message.
+ */
+
+struct distr_item {
+       u32 type;
+       u32 lower;
+       u32 upper;
+       u32 ref;
+       u32 key;
+};
+
+/**
+ * List of externally visible publications by this node --
+ * that is, all publications with a scope wider than TIPC_NODE_SCOPE.
+ */
+
+static LIST_HEAD(publ_root);
+static u32 publ_cnt = 0;               
+
+/**
+ * publ_to_item - add publication info to a publication message
+ */
+
+static void publ_to_item(struct distr_item *i, struct publication *p)
+{
+       i->type = htonl(p->type);
+       i->lower = htonl(p->lower);
+       i->upper = htonl(p->upper);
+       i->ref = htonl(p->ref);
+       i->key = htonl(p->key);
+       dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
+}
+
+/**
+ * named_prepare_buf - allocate & initialize a publication message
+ */
+
+static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
+{
+       struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);  
+       struct tipc_msg *msg;
+
+       if (buf != NULL) {
+               msg = buf_msg(buf);
+               msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK, 
+                        LONG_H_SIZE, dest);
+               msg_set_size(msg, LONG_H_SIZE + size);
+       }
+       return buf;
+}
+
+/**
+ * named_publish - tell other nodes about a new publication by this node
+ */
+
+void named_publish(struct publication *publ)
+{
+       struct sk_buff *buf;
+       struct distr_item *item;
+
+       list_add(&publ->local_list, &publ_root);
+       publ_cnt++;
+
+       buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
+       if (!buf) {
+               warn("Memory squeeze; failed to distribute publication\n");
+               return;
+       }
+
+       item = (struct distr_item *)msg_data(buf_msg(buf));
+       publ_to_item(item, publ);
+       dbg("named_withdraw: broadcasting publish msg\n");
+       cluster_broadcast(buf);
+}
+
+/**
+ * named_withdraw - tell other nodes about a withdrawn publication by this node
+ */
+
+void named_withdraw(struct publication *publ)
+{
+       struct sk_buff *buf;
+       struct distr_item *item;
+
+       list_del(&publ->local_list);
+       publ_cnt--;
+
+       buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
+       if (!buf) {
+               warn("Memory squeeze; failed to distribute withdrawal\n");
+               return;
+       }
+
+       item = (struct distr_item *)msg_data(buf_msg(buf));
+       publ_to_item(item, publ);
+       dbg("named_withdraw: broadcasting withdraw msg\n");
+       cluster_broadcast(buf);
+}
+
+/**
+ * named_node_up - tell specified node about all publications by this node
+ */
+
+void named_node_up(unsigned long node)
+{
+       struct publication *publ;
+       struct distr_item *item = 0;
+       struct sk_buff *buf = 0;
+       u32 left = 0;
+       u32 rest;
+       u32 max_item_buf;
+
+       assert(in_own_cluster(node));
+       read_lock_bh(&nametbl_lock); 
+       max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
+       max_item_buf *= ITEM_SIZE;
+       rest = publ_cnt * ITEM_SIZE;
+
+       list_for_each_entry(publ, &publ_root, local_list) {
+               if (!buf) {
+                       left = (rest <= max_item_buf) ? rest : max_item_buf;
+                       rest -= left;
+                       buf = named_prepare_buf(PUBLICATION, left, node);       
+                       if (buf == NULL) {
+                               warn("Memory Squeeze; could not send publication\n");
+                               goto exit;
+                       }
+                       item = (struct distr_item *)msg_data(buf_msg(buf));
+               }
+               publ_to_item(item, publ);
+               item++;
+               left -= ITEM_SIZE;
+               if (!left) {
+                       msg_set_link_selector(buf_msg(buf), node);
+                       dbg("named_node_up: sending publish msg to "
+                           "<%u.%u.%u>\n", tipc_zone(node), 
+                           tipc_cluster(node), tipc_node(node));
+                       link_send(buf, node, node);
+                       buf = 0;
+               }
+       }
+exit:
+       read_unlock_bh(&nametbl_lock); 
+}
+
+/**
+ * node_is_down - remove publication associated with a failed node
+ * 
+ * Invoked for each publication issued by a newly failed node.  
+ * Removes publication structure from name table & deletes it.
+ * In rare cases the link may have come back up again when this
+ * function is called, and we have two items representing the same
+ * publication. Nudge this item's key to distinguish it from the other.
+ * (Note: Publication's node subscription is already unsubscribed.)
+ */
+
+static void node_is_down(struct publication *publ)
+{
+       struct publication *p;
+        write_lock_bh(&nametbl_lock);
+       dbg("node_is_down: withdrawing %u, %u, %u\n", 
+           publ->type, publ->lower, publ->upper);
+        publ->key += 1222345;
+       p = nametbl_remove_publ(publ->type, publ->lower, 
+                               publ->node, publ->ref, publ->key);
+        assert(p == publ);
+       write_unlock_bh(&nametbl_lock);
+       if (publ)
+               kfree(publ);
+}
+
+/**
+ * named_recv - process name table update message sent by another node
+ */
+
+void named_recv(struct sk_buff *buf)
+{
+       struct publication *publ;
+       struct tipc_msg *msg = buf_msg(buf);
+       struct distr_item *item = (struct distr_item *)msg_data(msg);
+       u32 count = msg_data_sz(msg) / ITEM_SIZE;
+
+       write_lock_bh(&nametbl_lock); 
+       while (count--) {
+               if (msg_type(msg) == PUBLICATION) {
+                       dbg("named_recv: got publication for %u, %u, %u\n", 
+                           ntohl(item->type), ntohl(item->lower),
+                           ntohl(item->upper));
+                       publ = nametbl_insert_publ(ntohl(item->type), 
+                                                  ntohl(item->lower),
+                                                  ntohl(item->upper),
+                                                  TIPC_CLUSTER_SCOPE,
+                                                  msg_orignode(msg), 
+                                                  ntohl(item->ref),
+                                                  ntohl(item->key));
+                       if (publ) {
+                               nodesub_subscribe(&publ->subscr, 
+                                                 msg_orignode(msg), 
+                                                 publ,
+                                                 (net_ev_handler)node_is_down);
+                       }
+               } else if (msg_type(msg) == WITHDRAWAL) {
+                       dbg("named_recv: got withdrawl for %u, %u, %u\n", 
+                           ntohl(item->type), ntohl(item->lower),
+                           ntohl(item->upper));
+                       publ = nametbl_remove_publ(ntohl(item->type),
+                                                  ntohl(item->lower),
+                                                  msg_orignode(msg),
+                                                  ntohl(item->ref),
+                                                  ntohl(item->key));
+
+                       if (publ) {
+                               nodesub_unsubscribe(&publ->subscr);
+                               kfree(publ);
+                       }
+               } else {
+                       warn("named_recv: unknown msg\n");
+               }
+               item++;
+       }
+       write_unlock_bh(&nametbl_lock); 
+       buf_discard(buf);
+}
+
+/**
+ * named_reinit - re-initialize local publication list
+ * 
+ * This routine is called whenever TIPC networking is (re)enabled.
+ * All existing publications by this node that have "cluster" or "zone" scope
+ * are updated to reflect the node's current network address.
+ * (If the node's address is unchanged, the update loop terminates immediately.)
+ */
+
+void named_reinit(void)
+{
+       struct publication *publ;
+
+       write_lock_bh(&nametbl_lock); 
+       list_for_each_entry(publ, &publ_root, local_list) {
+               if (publ->node == tipc_own_addr)
+                       break;
+               publ->node = tipc_own_addr;
+       }
+       write_unlock_bh(&nametbl_lock); 
+}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644 (file)
index 0000000..a04bdea
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * net/tipc/name_distr.h: Include file for TIPC name distribution code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_DISTR_H
+#define _TIPC_NAME_DISTR_H
+
+#include "name_table.h"
+
+void named_publish(struct publication *publ);
+void named_withdraw(struct publication *publ);
+void named_node_up(unsigned long node);
+void named_recv(struct sk_buff *buf);
+void named_reinit(void);
+
+#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644 (file)
index 0000000..972c83e
--- /dev/null
@@ -0,0 +1,1079 @@
+/*
+ * net/tipc/name_table.c: TIPC name table code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "subscr.h"
+#include "port.h"
+#include "cluster.h"
+#include "bcast.h"
+
+int tipc_nametbl_size = 1024;          /* must be a power of 2 */
+
+/**
+ * struct sub_seq - container for all published instances of a name sequence
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @node_list: circular list of matching publications with >= node scope
+ * @cluster_list: circular list of matching publications with >= cluster scope
+ * @zone_list: circular list of matching publications with >= zone scope
+ */
+
+struct sub_seq {
+       u32 lower;
+       u32 upper;
+       struct publication *node_list;
+       struct publication *cluster_list;
+       struct publication *zone_list;
+};
+
+/** 
+ * struct name_seq - container for all published instances of a name type
+ * @type: 32 bit 'type' value for name sequence
+ * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
+ *        sub-sequences are sorted in ascending order
+ * @alloc: number of sub-sequences currently in array
+ * @first_free: upper bound of highest sub-sequence + 1
+ * @ns_list: links to adjacent name sequences in hash chain
+ * @subscriptions: list of subscriptions for this 'type'
+ * @lock: spinlock controlling access to name sequence structure
+ */
+
+struct name_seq {
+       u32 type;
+       struct sub_seq *sseqs;
+       u32 alloc;
+       u32 first_free;
+       struct hlist_node ns_list;
+       struct list_head subscriptions;
+       spinlock_t lock;
+};
+
+/**
+ * struct name_table - table containing all existing port name publications
+ * @types: pointer to fixed-sized array of name sequence lists, 
+ *         accessed via hashing on 'type'; name sequence lists are *not* sorted
+ * @local_publ_count: number of publications issued by this node
+ */
+
+struct name_table {
+       struct hlist_head *types;
+       u32 local_publ_count;
+};
+
+struct name_table table = { NULL };
+static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
+rwlock_t nametbl_lock = RW_LOCK_UNLOCKED;
+
+
+static inline int hash(int x)
+{
+       return(x & (tipc_nametbl_size - 1));
+}
+
+/**
+ * publ_create - create a publication structure
+ */
+
+static struct publication *publ_create(u32 type, u32 lower, u32 upper, 
+                                      u32 scope, u32 node, u32 port_ref,   
+                                      u32 key)
+{
+       struct publication *publ =
+               (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
+       if (publ == NULL) {
+               warn("Memory squeeze; failed to create publication\n");
+               return 0;
+       }
+
+       memset(publ, 0, sizeof(*publ));
+       publ->type = type;
+       publ->lower = lower;
+       publ->upper = upper;
+       publ->scope = scope;
+       publ->node = node;
+       publ->ref = port_ref;
+       publ->key = key;
+       INIT_LIST_HEAD(&publ->local_list);
+       INIT_LIST_HEAD(&publ->pport_list);
+       INIT_LIST_HEAD(&publ->subscr.nodesub_list);
+       return publ;
+}
+
+/**
+ * subseq_alloc - allocate a specified number of sub-sequence structures
+ */
+
+struct sub_seq *subseq_alloc(u32 cnt)
+{
+       u32 sz = cnt * sizeof(struct sub_seq);
+       struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
+
+       if (sseq)
+               memset(sseq, 0, sz);
+       return sseq;
+}
+
+/**
+ * nameseq_create - create a name sequence structure for the specified 'type'
+ * 
+ * Allocates a single sub-sequence structure and sets it to all 0's.
+ */
+
+struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
+{
+       struct name_seq *nseq = 
+               (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
+       struct sub_seq *sseq = subseq_alloc(1);
+
+       if (!nseq || !sseq) {
+               warn("Memory squeeze; failed to create name sequence\n");
+               kfree(nseq);
+               kfree(sseq);
+               return 0;
+       }
+
+       memset(nseq, 0, sizeof(*nseq));
+       nseq->lock = SPIN_LOCK_UNLOCKED;
+       nseq->type = type;
+       nseq->sseqs = sseq;
+       dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
+           nseq, type, nseq->sseqs, nseq->first_free);
+       nseq->alloc = 1;
+       INIT_HLIST_NODE(&nseq->ns_list);
+       INIT_LIST_HEAD(&nseq->subscriptions);
+       hlist_add_head(&nseq->ns_list, seq_head);
+       return nseq;
+}
+
+/**
+ * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
+ *  
+ * Very time-critical, so binary searches through sub-sequence array.
+ */
+
+static inline struct sub_seq *nameseq_find_subseq(struct name_seq *nseq, 
+                                                 u32 instance)
+{
+       struct sub_seq *sseqs = nseq->sseqs;
+       int low = 0;
+       int high = nseq->first_free - 1;
+       int mid;
+
+       while (low <= high) {
+               mid = (low + high) / 2;
+               if (instance < sseqs[mid].lower)
+                       high = mid - 1;
+               else if (instance > sseqs[mid].upper)
+                       low = mid + 1;
+               else
+                       return &sseqs[mid];
+       }
+       return 0;
+}
+
+/**
+ * nameseq_locate_subseq - determine position of name instance in sub-sequence
+ * 
+ * Returns index in sub-sequence array of the entry that contains the specified
+ * instance value; if no entry contains that value, returns the position
+ * where a new entry for it would be inserted in the array.
+ *
+ * Note: Similar to binary search code for locating a sub-sequence.
+ */
+
+static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
+{
+       struct sub_seq *sseqs = nseq->sseqs;
+       int low = 0;
+       int high = nseq->first_free - 1;
+       int mid;
+
+       while (low <= high) {
+               mid = (low + high) / 2;
+               if (instance < sseqs[mid].lower)
+                       high = mid - 1;
+               else if (instance > sseqs[mid].upper)
+                       low = mid + 1;
+               else
+                       return mid;
+       }
+       return low;
+}
+
+/**
+ * nameseq_insert_publ - insert a publication into a name sequence
+ */
+
+struct publication *nameseq_insert_publ(struct name_seq *nseq,
+                                       u32 type, u32 lower, u32 upper,
+                                       u32 scope, u32 node, u32 port, u32 key)
+{
+       struct subscription *s;
+       struct subscription *st;
+       struct publication *publ;
+       struct sub_seq *sseq;
+       int created_subseq = 0;
+
+       assert(nseq->first_free <= nseq->alloc);
+       sseq = nameseq_find_subseq(nseq, lower);
+       dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
+           nseq, type, lower, sseq);
+       if (sseq) {
+
+               /* Lower end overlaps existing entry => need an exact match */
+
+               if ((sseq->lower != lower) || (sseq->upper != upper)) {
+                       warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+                       return 0;
+               }
+       } else {
+               u32 inspos;
+               struct sub_seq *freesseq;
+
+               /* Find where lower end should be inserted */
+
+               inspos = nameseq_locate_subseq(nseq, lower);
+
+               /* Fail if upper end overlaps into an existing entry */
+
+               if ((inspos < nseq->first_free) &&
+                   (upper >= nseq->sseqs[inspos].lower)) {
+                       warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
+                       return 0;
+               }
+
+               /* Ensure there is space for new sub-sequence */
+
+               if (nseq->first_free == nseq->alloc) {
+                       struct sub_seq *sseqs = nseq->sseqs;
+                       nseq->sseqs = subseq_alloc(nseq->alloc * 2);
+                       if (nseq->sseqs != NULL) {
+                               memcpy(nseq->sseqs, sseqs,
+                                      nseq->alloc * sizeof (struct sub_seq));
+                               kfree(sseqs);
+                               dbg("Allocated %u sseqs\n", nseq->alloc);
+                               nseq->alloc *= 2;
+                       } else {
+                               warn("Memory squeeze; failed to create sub-sequence\n");
+                               return 0;
+                       }
+               }
+               dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
+
+               /* Insert new sub-sequence */
+
+               dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
+               sseq = &nseq->sseqs[inspos];
+               freesseq = &nseq->sseqs[nseq->first_free];
+               memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
+               memset(sseq, 0, sizeof (*sseq));
+               nseq->first_free++;
+               sseq->lower = lower;
+               sseq->upper = upper;
+               created_subseq = 1;
+       }
+       dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
+           type, lower, upper, node, port, sseq,
+           sseq->lower, sseq->upper, nseq);
+
+       /* Insert a publication: */
+
+       publ = publ_create(type, lower, upper, scope, node, port, key);
+       if (!publ)
+               return 0;
+       dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
+           publ, node, publ->node, publ->subscr.node);
+
+       if (!sseq->zone_list)
+               sseq->zone_list = publ->zone_list_next = publ;
+       else {
+               publ->zone_list_next = sseq->zone_list->zone_list_next;
+               sseq->zone_list->zone_list_next = publ;
+       }
+
+       if (in_own_cluster(node)) {
+               if (!sseq->cluster_list)
+                       sseq->cluster_list = publ->cluster_list_next = publ;
+               else {
+                       publ->cluster_list_next =
+                       sseq->cluster_list->cluster_list_next;
+                       sseq->cluster_list->cluster_list_next = publ;
+               }
+       }
+
+       if (node == tipc_own_addr) {
+               if (!sseq->node_list)
+                       sseq->node_list = publ->node_list_next = publ;
+               else {
+                       publ->node_list_next = sseq->node_list->node_list_next;
+                       sseq->node_list->node_list_next = publ;
+               }
+       }
+
+       /* 
+        * Any subscriptions waiting for notification? 
+        */
+       list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+               dbg("calling report_overlap()\n");
+               subscr_report_overlap(s,
+                                     publ->lower,
+                                     publ->upper,
+                                     TIPC_PUBLISHED,
+                                     publ->ref, 
+                                     publ->node,
+                                     created_subseq);
+       }
+       return publ;
+}
+
+/**
+ * nameseq_remove_publ - remove a publication from a name sequence
+ */
+
+struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
+                                       u32 node, u32 ref, u32 key)
+{
+       struct publication *publ;
+       struct publication *prev;
+       struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
+       struct sub_seq *free;
+       struct subscription *s, *st;
+       int removed_subseq = 0;
+
+       assert(nseq);
+
+       if (!sseq) {
+               int i;
+
+               warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
+               assert(nseq->sseqs);
+               dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
+                   nseq->sseqs, nseq, nseq->alloc, 
+                   nseq->first_free);
+               for (i = 0; i < nseq->first_free; i++) {
+                       dbg("Subseq %u(%x): lower = %u,upper = %u\n",
+                           i, &nseq->sseqs[i], nseq->sseqs[i].lower,
+                           nseq->sseqs[i].upper);
+               }
+               return 0;
+       }
+       dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
+           nseq, sseq, nseq->type, inst, key);
+
+       prev = sseq->zone_list;
+       publ = sseq->zone_list->zone_list_next;
+       while ((publ->key != key) || (publ->ref != ref) || 
+              (publ->node && (publ->node != node))) {
+               prev = publ;
+               publ = publ->zone_list_next;
+               assert(prev != sseq->zone_list);
+       }
+       if (publ != sseq->zone_list)
+               prev->zone_list_next = publ->zone_list_next;
+       else if (publ->zone_list_next != publ) {
+               prev->zone_list_next = publ->zone_list_next;
+               sseq->zone_list = publ->zone_list_next;
+       } else {
+               sseq->zone_list = 0;
+       }
+
+       if (in_own_cluster(node)) {
+               prev = sseq->cluster_list;
+               publ = sseq->cluster_list->cluster_list_next;
+               while ((publ->key != key) || (publ->ref != ref) || 
+                      (publ->node && (publ->node != node))) {
+                       prev = publ;
+                       publ = publ->cluster_list_next;
+                       assert(prev != sseq->cluster_list);
+               }
+               if (publ != sseq->cluster_list)
+                       prev->cluster_list_next = publ->cluster_list_next;
+               else if (publ->cluster_list_next != publ) {
+                       prev->cluster_list_next = publ->cluster_list_next;
+                       sseq->cluster_list = publ->cluster_list_next;
+               } else {
+                       sseq->cluster_list = 0;
+               }
+       }
+
+       if (node == tipc_own_addr) {
+               prev = sseq->node_list;
+               publ = sseq->node_list->node_list_next;
+               while ((publ->key != key) || (publ->ref != ref) || 
+                      (publ->node && (publ->node != node))) {
+                       prev = publ;
+                       publ = publ->node_list_next;
+                       assert(prev != sseq->node_list);
+               }
+               if (publ != sseq->node_list)
+                       prev->node_list_next = publ->node_list_next;
+               else if (publ->node_list_next != publ) {
+                       prev->node_list_next = publ->node_list_next;
+                       sseq->node_list = publ->node_list_next;
+               } else {
+                       sseq->node_list = 0;
+               }
+       }
+       assert(!publ->node || (publ->node == node));
+       assert(publ->ref == ref);
+       assert(publ->key == key);
+
+       /* 
+        * Contract subseq list if no more publications:
+        */
+       if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
+               free = &nseq->sseqs[nseq->first_free--];
+               memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
+               removed_subseq = 1;
+       }
+
+       /* 
+        * Any subscriptions waiting ? 
+        */
+       list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
+               subscr_report_overlap(s,
+                                     publ->lower,
+                                     publ->upper,
+                                     TIPC_WITHDRAWN, 
+                                     publ->ref, 
+                                     publ->node,
+                                     removed_subseq);
+       }
+       return publ;
+}
+
+/**
+ * nameseq_subscribe - attach a subscription, and issue the prescribed number
+ * of events if any sub-sequence overlaps with the requested sequence
+ */
+
+void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
+{
+       struct sub_seq *sseq = nseq->sseqs;
+
+       list_add(&s->nameseq_list, &nseq->subscriptions);
+
+       if (!sseq)
+               return;
+
+       while (sseq != &nseq->sseqs[nseq->first_free]) {
+               struct publication *zl = sseq->zone_list;
+               if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) {
+                       struct publication *crs = zl;
+                       int must_report = 1;
+
+                       do {
+                               subscr_report_overlap(s, 
+                                                      sseq->lower, 
+                                                      sseq->upper,
+                                                      TIPC_PUBLISHED,
+                                                      crs->ref,
+                                                      crs->node,
+                                                      must_report);
+                               must_report = 0;
+                               crs = crs->zone_list_next;
+                       } while (crs != zl);
+               }
+               sseq++;
+       }
+}
+
+static struct name_seq *nametbl_find_seq(u32 type)
+{
+       struct hlist_head *seq_head;
+       struct hlist_node *seq_node;
+       struct name_seq *ns;
+
+       dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
+           type, ntohl(type), type, table.types, hash(type));
+
+       seq_head = &table.types[hash(type)];
+       hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
+               if (ns->type == type) {
+                       dbg("found %x\n", ns);
+                       return ns;
+               }
+       }
+
+       return 0;
+};
+
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+                   u32 scope, u32 node, u32 port, u32 key)
+{
+       struct name_seq *seq = nametbl_find_seq(type);
+
+       dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
+       if (lower > upper) {
+               warn("Failed to publish illegal <%u,%u,%u>\n",
+                    type, lower, upper);
+               return 0;
+       }
+
+       dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
+       if (!seq) {
+               seq = nameseq_create(type, &table.types[hash(type)]);
+               dbg("nametbl_insert_publ: created %x\n", seq);
+       }
+       if (!seq)
+               return 0;
+
+       assert(seq->type == type);
+       return nameseq_insert_publ(seq, type, lower, upper,
+                                  scope, node, port, key);
+}
+
+struct publication *nametbl_remove_publ(u32 type, u32 lower, 
+                                       u32 node, u32 ref, u32 key)
+{
+       struct publication *publ;
+       struct name_seq *seq = nametbl_find_seq(type);
+
+       if (!seq)
+               return 0;
+
+       dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
+       publ = nameseq_remove_publ(seq, lower, node, ref, key);
+
+       if (!seq->first_free && list_empty(&seq->subscriptions)) {
+               hlist_del_init(&seq->ns_list);
+               kfree(seq->sseqs);
+               kfree(seq);
+       }
+       return publ;
+}
+
+/*
+ * nametbl_translate(): Translate tipc_name -> tipc_portid.
+ *                      Very time-critical.
+ *
+ * Note: on entry 'destnode' is the search domain used during translation;
+ *       on exit it passes back the node address of the matching port (if any)
+ */
+
+u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
+{
+       struct sub_seq *sseq;
+       struct publication *publ = 0;
+       struct name_seq *seq;
+       u32 ref;
+
+       if (!in_scope(*destnode, tipc_own_addr))
+               return 0;
+
+       read_lock_bh(&nametbl_lock);
+       seq = nametbl_find_seq(type);
+       if (unlikely(!seq))
+               goto not_found;
+       sseq = nameseq_find_subseq(seq, instance);
+       if (unlikely(!sseq))
+               goto not_found;
+       spin_lock_bh(&seq->lock);
+
+       /* Closest-First Algorithm: */
+       if (likely(!*destnode)) {
+               publ = sseq->node_list;
+               if (publ) {
+                       sseq->node_list = publ->node_list_next;
+found:
+                       ref = publ->ref;
+                       *destnode = publ->node;
+                       spin_unlock_bh(&seq->lock);
+                       read_unlock_bh(&nametbl_lock);
+                       return ref;
+               }
+               publ = sseq->cluster_list;
+               if (publ) {
+                       sseq->cluster_list = publ->cluster_list_next;
+                       goto found;
+               }
+               publ = sseq->zone_list;
+               if (publ) {
+                       sseq->zone_list = publ->zone_list_next;
+                       goto found;
+               }
+       }
+
+       /* Round-Robin Algorithm: */
+       else if (*destnode == tipc_own_addr) {
+               publ = sseq->node_list;
+               if (publ) {
+                       sseq->node_list = publ->node_list_next;
+                       goto found;
+               }
+       } else if (in_own_cluster(*destnode)) {
+               publ = sseq->cluster_list;
+               if (publ) {
+                       sseq->cluster_list = publ->cluster_list_next;
+                       goto found;
+               }
+       } else {
+               publ = sseq->zone_list;
+               if (publ) {
+                       sseq->zone_list = publ->zone_list_next;
+                       goto found;
+               }
+       }
+       spin_unlock_bh(&seq->lock);
+not_found:
+       *destnode = 0;
+       read_unlock_bh(&nametbl_lock);
+       return 0;
+}
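
For the node-scope case above, the lookup takes the current head of the circular node_list and re-points the head at the next entry, so successive lookups cycle through all matching ports. A minimal stand-alone sketch of that rotation, using a simplified circular list and hypothetical names (not the kernel structures):

#include <stdio.h>

typedef unsigned int u32;

/* Simplified stand-in for struct publication's node_list chaining. */
struct pub {
        u32 ref;
        struct pub *next;       /* circular: last entry points back to first */
};

/* Return the current head's ref and advance the head, mimicking
 * "publ = sseq->node_list; sseq->node_list = publ->node_list_next;". */
static u32 pick_round_robin(struct pub **head)
{
        struct pub *p = *head;

        if (!p)
                return 0;
        *head = p->next;
        return p->ref;
}

int main(void)
{
        struct pub c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
        struct pub *head = &a;
        int i;

        c.next = &a;            /* close the circle */
        for (i = 0; i < 5; i++)
                printf("%u ", pick_round_robin(&head));   /* 1 2 3 1 2 */
        printf("\n");
        return 0;
}
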
+
+/**
+ * nametbl_mc_translate - find multicast destinations
+ * 
+ * Creates list of all local ports that overlap the given multicast address;
+ * also determines if any off-node ports overlap.
+ *
+ * Note: Publications with a scope narrower than 'limit' are ignored.
+ * (i.e. local node-scope publications mustn't receive messages arriving
+ * from another node, even if the multicast link brought them here)
+ * 
+ * Returns non-zero if any off-node ports overlap
+ */
+
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
+                        struct port_list *dports)
+{
+       struct name_seq *seq;
+       struct sub_seq *sseq;
+       struct sub_seq *sseq_stop;
+       int res = 0;
+
+       read_lock_bh(&nametbl_lock);
+       seq = nametbl_find_seq(type);
+       if (!seq)
+               goto exit;
+
+       spin_lock_bh(&seq->lock);
+
+       sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
+       sseq_stop = seq->sseqs + seq->first_free;
+       for (; sseq != sseq_stop; sseq++) {
+               struct publication *publ;
+
+               if (sseq->lower > upper)
+                       break;
+               publ = sseq->cluster_list;
+               if (publ && (publ->scope <= limit))
+                       do {
+                               if (publ->node == tipc_own_addr)
+                                       port_list_add(dports, publ->ref);
+                               else
+                                       res = 1;
+                               publ = publ->cluster_list_next;
+                       } while (publ != sseq->cluster_list);
+       }
+
+       spin_unlock_bh(&seq->lock);
+exit:
+       read_unlock_bh(&nametbl_lock);
+       return res;
+}
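
The scan above starts at the first sub-sequence that can overlap the requested range and stops once a sub-sequence begins above the upper bound, collecting local ports and merely flagging remote ones. A stand-alone sketch of that walk over a sorted array of ranges (hypothetical names; the scope check against 'limit' is omitted):

#include <stdio.h>

typedef unsigned int u32;

struct range { u32 lower, upper, node; };

#define OWN_NODE 1u

/* Scan ranges sorted by 'lower'; collect local matches, flag remote ones. */
static int mc_scan(const struct range *r, int n, u32 lower, u32 upper,
                   u32 *local, int *nlocal)
{
        int i, remote = 0;

        *nlocal = 0;
        for (i = 0; i < n; i++) {
                if (r[i].lower > upper)
                        break;                  /* sorted: nothing more can match */
                if (r[i].upper < lower)
                        continue;               /* entirely below the requested range */
                if (r[i].node == OWN_NODE)
                        local[(*nlocal)++] = r[i].lower;
                else
                        remote = 1;
        }
        return remote;
}

int main(void)
{
        struct range tbl[] = { {0, 9, 1}, {10, 19, 2}, {20, 29, 1}, {40, 49, 1} };
        u32 local[4];
        int nlocal;
        int remote = mc_scan(tbl, 4, 5, 25, local, &nlocal);

        printf("remote=%d local_matches=%d\n", remote, nlocal);  /* remote=1 local_matches=2 */
        return 0;
}
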
+
+/**
+ * nametbl_publish_rsv - publish port name using a reserved name type
+ */
+
+int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+                       struct tipc_name_seq const *seq)
+{
+       int res;
+
+       atomic_inc(&rsv_publ_ok);
+       res = tipc_publish(ref, scope, seq);
+       atomic_dec(&rsv_publ_ok);
+       return res;
+}
+
+/**
+ * nametbl_publish - add name publication to network name tables
+ */
+
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 
+                                   u32 scope, u32 port_ref, u32 key)
+{
+       struct publication *publ;
+
+       if (table.local_publ_count >= tipc_max_publications) {
+               warn("Failed publish: max %u local publications\n", 
+                    tipc_max_publications);
+               return 0;
+       }
+       if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
+               warn("Failed to publish reserved name <%u,%u,%u>\n",
+                    type, lower, upper);
+               return 0;
+       }
+
+       write_lock_bh(&nametbl_lock);
+       table.local_publ_count++;
+       publ = nametbl_insert_publ(type, lower, upper, scope,
+                                  tipc_own_addr, port_ref, key);
+       if (publ && (scope != TIPC_NODE_SCOPE)) {
+               named_publish(publ);
+       }
+       write_unlock_bh(&nametbl_lock);
+       return publ;
+}
+
+/**
+ * nametbl_withdraw - withdraw name publication from network name tables
+ */
+
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
+{
+       struct publication *publ;
+
+       dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
+       write_lock_bh(&nametbl_lock);
+       publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
+       if (publ) {
+               table.local_publ_count--;
+               if (publ->scope != TIPC_NODE_SCOPE)
+                       named_withdraw(publ);
+               write_unlock_bh(&nametbl_lock);
+               list_del_init(&publ->pport_list);
+               kfree(publ);
+               return 1;
+       }
+       write_unlock_bh(&nametbl_lock);
+       return 0;
+}
+
+/**
+ * nametbl_subscribe - add a subscription object to the name table
+ */
+
+void
+nametbl_subscribe(struct subscription *s)
+{
+       u32 type = s->seq.type;
+       struct name_seq *seq;
+
+        write_lock_bh(&nametbl_lock);
+       seq = nametbl_find_seq(type);
+       if (!seq) {
+               seq = nameseq_create(type, &table.types[hash(type)]);
+       }
+        if (seq){
+                spin_lock_bh(&seq->lock);
+                dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n",
+                    seq, type, s->seq.lower, s->seq.upper);
+                assert(seq->type == type);
+                nameseq_subscribe(seq, s);
+                spin_unlock_bh(&seq->lock);
+        }
+        write_unlock_bh(&nametbl_lock);
+}
+
+/**
+ * nametbl_unsubscribe - remove a subscription object from name table
+ */
+
+void
+nametbl_unsubscribe(struct subscription *s)
+{
+       struct name_seq *seq;
+
+        write_lock_bh(&nametbl_lock);
+        seq = nametbl_find_seq(s->seq.type);
+       if (seq != NULL){
+                spin_lock_bh(&seq->lock);
+                list_del_init(&s->nameseq_list);
+                spin_unlock_bh(&seq->lock);
+                if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
+                        hlist_del_init(&seq->ns_list);
+                        kfree(seq->sseqs);
+                        kfree(seq);
+                }
+        }
+        write_unlock_bh(&nametbl_lock);
+}
+
+
+/**
+ * subseq_list: print specified sub-sequence contents into the given buffer
+ */
+
+static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
+                       u32 index)
+{
+       char portIdStr[27];
+       char *scopeStr;
+       struct publication *publ = sseq->zone_list;
+
+       tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
+
+       if (depth == 2 || !publ) {
+               tipc_printf(buf, "\n");
+               return;
+       }
+
+       do {
+               sprintf (portIdStr, "<%u.%u.%u:%u>",
+                        tipc_zone(publ->node), tipc_cluster(publ->node),
+                        tipc_node(publ->node), publ->ref);
+               tipc_printf(buf, "%-26s ", portIdStr);
+               if (depth > 3) {
+                       if (publ->node != tipc_own_addr)
+                               scopeStr = "";
+                       else if (publ->scope == TIPC_NODE_SCOPE)
+                               scopeStr = "node";
+                       else if (publ->scope == TIPC_CLUSTER_SCOPE)
+                               scopeStr = "cluster";
+                       else
+                               scopeStr = "zone";
+                       tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
+               }
+
+               publ = publ->zone_list_next;
+               if (publ == sseq->zone_list)
+                       break;
+
+               tipc_printf(buf, "\n%33s", " ");
+       } while (1);
+
+       tipc_printf(buf, "\n");
+}
+
+/**
+ * nameseq_list: print specified name sequence contents into the given buffer
+ */
+
+static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
+                        u32 type, u32 lowbound, u32 upbound, u32 index)
+{
+       struct sub_seq *sseq;
+       char typearea[11];
+
+       sprintf(typearea, "%-10u", seq->type);
+
+       if (depth == 1) {
+               tipc_printf(buf, "%s\n", typearea);
+               return;
+       }
+
+       for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+               if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
+                       tipc_printf(buf, "%s ", typearea);
+                       subseq_list(sseq, buf, depth, index);
+                       sprintf(typearea, "%10s", " ");
+               }
+       }
+}
+
+/**
+ * nametbl_header - print name table header into the given buffer
+ */
+
+static void nametbl_header(struct print_buf *buf, u32 depth)
+{
+       tipc_printf(buf, "Type       ");
+
+       if (depth > 1)
+               tipc_printf(buf, "Lower      Upper      ");
+       if (depth > 2)
+               tipc_printf(buf, "Port Identity              ");
+       if (depth > 3)
+               tipc_printf(buf, "Publication");
+
+       tipc_printf(buf, "\n-----------");
+
+       if (depth > 1)
+               tipc_printf(buf, "--------------------- ");
+       if (depth > 2)
+               tipc_printf(buf, "-------------------------- ");
+       if (depth > 3)
+               tipc_printf(buf, "------------------");
+
+       tipc_printf(buf, "\n");
+}
+
+/**
+ * nametbl_list - print specified name table contents into the given buffer
+ */
+
+static void nametbl_list(struct print_buf *buf, u32 depth_info, 
+                        u32 type, u32 lowbound, u32 upbound)
+{
+       struct hlist_head *seq_head;
+       struct hlist_node *seq_node;
+       struct name_seq *seq;
+       int all_types;
+       u32 depth;
+       u32 i;
+
+       all_types = (depth_info & TIPC_NTQ_ALLTYPES);
+       depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
+
+       if (depth == 0)
+               return;
+
+       if (all_types) {
+               /* display all entries in name table to specified depth */
+               nametbl_header(buf, depth);
+               lowbound = 0;
+               upbound = ~0;
+               for (i = 0; i < tipc_nametbl_size; i++) {
+                       seq_head = &table.types[i];
+                       hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+                               nameseq_list(seq, buf, depth, seq->type, 
+                                            lowbound, upbound, i);
+                       }
+               }
+       } else {
+               /* display only the sequence that matches the specified type */
+               if (upbound < lowbound) {
+                       tipc_printf(buf, "invalid name sequence specified\n");
+                       return;
+               }
+               nametbl_header(buf, depth);
+               i = hash(type);
+               seq_head = &table.types[i];
+               hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
+                       if (seq->type == type) {
+                               nameseq_list(seq, buf, depth, type, 
+                                            lowbound, upbound, i);
+                               break;
+                       }
+               }
+       }
+}
+
+void nametbl_print(struct print_buf *buf, const char *str)
+{
+       tipc_printf(buf, str);
+       read_lock_bh(&nametbl_lock);
+       nametbl_list(buf, 0, 0, 0, 0);
+       read_unlock_bh(&nametbl_lock);
+}
+
+#define MAX_NAME_TBL_QUERY 32768
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
+{
+       struct sk_buff *buf;
+       struct tipc_name_table_query *argv;
+       struct tlv_desc *rep_tlv;
+       struct print_buf b;
+       int str_len;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
+       if (!buf)
+               return NULL;
+
+       rep_tlv = (struct tlv_desc *)buf->data;
+       printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
+       argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
+       read_lock_bh(&nametbl_lock);
+       nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 
+                    ntohl(argv->lowbound), ntohl(argv->upbound));
+       read_unlock_bh(&nametbl_lock);
+       str_len = printbuf_validate(&b);
+
+       skb_put(buf, TLV_SPACE(str_len));
+       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+       return buf;
+}
+
+void nametbl_dump(void)
+{
+       nametbl_list(CONS, 0, 0, 0, 0);
+}
+
+int nametbl_init(void)
+{
+       int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
+
+       table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
+       if (!table.types)
+               return -ENOMEM;
+
+       write_lock_bh(&nametbl_lock);
+       memset(table.types, 0, array_size);
+       table.local_publ_count = 0;
+       write_unlock_bh(&nametbl_lock);
+       return 0;
+}
+
+void nametbl_stop(void)
+{
+       struct hlist_head *seq_head;
+       struct hlist_node *seq_node;
+       struct hlist_node *tmp;
+       struct name_seq *seq;
+       u32 i;
+
+       if (!table.types)
+               return;
+
+       write_lock_bh(&nametbl_lock);
+       for (i = 0; i < tipc_nametbl_size; i++) {
+               seq_head = &table.types[i];
+               hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
+                       struct sub_seq *sseq = seq->sseqs;
+
+                       for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
+                               struct publication *publ = sseq->zone_list;
+                               assert(publ);
+                               do {
+                                       struct publication *next =
+                                               publ->zone_list_next;
+                                       kfree(publ);
+                                       publ = next;
+                               }
+                               while (publ != sseq->zone_list);
+                       }
+               }
+       }
+       kfree(table.types);
+       table.types = NULL;
+       write_unlock_bh(&nametbl_lock);
+}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644 (file)
index 0000000..f826933
--- /dev/null
@@ -0,0 +1,108 @@
+/*
+ * net/tipc/name_table.h: Include file for TIPC name table code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NAME_TABLE_H
+#define _TIPC_NAME_TABLE_H
+
+#include "node_subscr.h"
+
+struct subscription;
+struct port_list;
+
+/*
+ * TIPC name types reserved for internal TIPC use (both current and planned)
+ */
+
+#define TIPC_ZM_SRV 3                  /* zone master service name type */
+
+
+/**
+ * struct publication - info about a published name or name sequence
+ * @type: name sequence type
+ * @lower: name sequence lower bound
+ * @upper: name sequence upper bound
+ * @scope: scope of publication
+ * @node: network address of publishing port's node
+ * @ref: publishing port
+ * @key: publication key
+ * @subscr: subscription to "node down" event (for off-node publications only)
+ * @local_list: adjacent entries in list of publications made by this node
+ * @pport_list: adjacent entries in list of publications made by this port
+ * @node_list: next matching name seq publication with >= node scope
+ * @cluster_list: next matching name seq publication with >= cluster scope
+ * @zone_list: next matching name seq publication with >= zone scope
+ * 
+ * Note that the node list, cluster list, and zone list are circular lists.
+ */
+
+struct publication {
+       u32 type;
+       u32 lower;
+       u32 upper;
+       u32 scope;
+       u32 node;
+       u32 ref;
+       u32 key;
+       struct node_subscr subscr;
+       struct list_head local_list;
+       struct list_head pport_list;
+       struct publication *node_list_next;
+       struct publication *cluster_list_next;
+       struct publication *zone_list_next;
+};
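
Since the per-scope chains are circular, traversals elsewhere in this patch use do/while loops that stop when they come back around to the starting entry. A stand-alone sketch of that traversal pattern, with hypothetical names:

#include <stdio.h>

typedef unsigned int u32;

/* Minimal stand-in for the circular zone_list chaining. */
struct demo_publ {
        u32 ref;
        struct demo_publ *zone_list_next;
};

/* Visit every entry exactly once: start at the list head and stop when
 * the walk returns to it. */
static void print_zone_list(struct demo_publ *head)
{
        struct demo_publ *p = head;

        if (!p)
                return;
        do {
                printf("%u ", p->ref);
                p = p->zone_list_next;
        } while (p != head);
        printf("\n");
}

int main(void)
{
        struct demo_publ c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };

        c.zone_list_next = &a;          /* close the circle */
        print_zone_list(&a);            /* 10 20 30 */
        return 0;
}
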
+
+
+extern rwlock_t nametbl_lock;
+
+struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space);
+u32 nametbl_translate(u32 type, u32 instance, u32 *node);
+int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 
+                        struct port_list *dports);
+int nametbl_publish_rsv(u32 ref, unsigned int scope, 
+                       struct tipc_name_seq const *seq);
+struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
+                                   u32 scope, u32 port_ref, u32 key);
+int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
+struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
+                                       u32 scope, u32 node, u32 ref, u32 key);
+struct publication *nametbl_remove_publ(u32 type, u32 lower, 
+                                       u32 node, u32 ref, u32 key);
+void nametbl_subscribe(struct subscription *s);
+void nametbl_unsubscribe(struct subscription *s);
+int nametbl_init(void);
+void nametbl_stop(void);
+
+#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644 (file)
index 0000000..6826b49
--- /dev/null
@@ -0,0 +1,311 @@
+/*
+ * net/tipc/net.c: TIPC network routing code
+ * 
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "bearer.h"
+#include "net.h"
+#include "zone.h"
+#include "addr.h"
+#include "name_table.h"
+#include "name_distr.h"
+#include "subscr.h"
+#include "link.h"
+#include "msg.h"
+#include "port.h"
+#include "bcast.h"
+#include "discover.h"
+#include "config.h"
+
+/* 
+ * The TIPC locking policy is designed to ensure a very fine locking
+ * granularity, permitting complete parallel access to individual
+ * port and node/link instances. The code consists of three major 
+ * locking domains, each protected by its own disjoint set of locks.
+ *
+ * 1: The routing hierarchy.
+ *    Comprises the structures 'zone', 'cluster', 'node', 'link' 
+ *    and 'bearer'. The whole hierarchy is protected by a big 
+ *    read/write lock, net_lock, to ensure that nothing is added 
+ *    or removed while code is accessing any of these structures. 
+ *    This layer must not be called from the two others while they 
+ *    hold any of their own locks.
+ *    Neither must it itself do any upcalls to the other two before
+ *    it has released net_lock and other protective locks.
+ *
+ *   Within the net_lock domain there are two sub-domains; 'node' and 
+ *   'bearer', where local write operations are permitted,
+ *   provided that those are protected by individual spin_locks
+ *   per instance. Code holding net_lock(read) and a node spin_lock 
+ *   is permitted to poke around in both the node itself and its
+ *   subordinate links. I.e, it can update link counters and queues, 
+ *   change link state, send protocol messages, and alter the 
+ *   "active_links" array in the node; but it can _not_ remove a link 
+ *   or a node from the overall structure.
+ *   Correspondingly, individual bearers may change status within a 
+ *   net_lock(read), protected by an individual spin_lock per bearer 
+ *   instance, but it needs net_lock(write) to remove/add any bearers.
+ *     
+ *
+ *  2: The transport level of the protocol. 
+ *     This consists of the structures port, (and its user level 
+ *     representations, such as user_port and tipc_sock), reference and 
+ *     tipc_user (port.c, reg.c, socket.c). 
+ *
+ *     This layer has four different locks:
+ *     - The tipc_port spin_lock. This is protecting each port instance
+ *       from parallel data access and removal. Since we can not place 
+ *       this lock in the port itself, it has been placed in the 
+ *       corresponding reference table entry, which has the same life
+ *       cycle as the module. This entry is difficult to access from 
+ *       outside the TIPC core, however, so a pointer to the lock has 
+ *       been added in the port instance, to be used for unlocking 
+ *       only.
+ *     - A read/write lock to protect the reference table itself (reg.c). 
+ *       (Nobody is using read-only access to this, so it can just as 
+ *       well be changed to a spin_lock)
+ *     - A spin lock to protect the registry of kernel/driver users (reg.c)
+ *     - A global spin_lock (port_lock), whose only task is to ensure 
+ *       consistency where more than one port is involved in an operation,
+ *       i.e., when a port is part of a linked list of ports.
+ *       There are two such lists; 'port_list', which is used for management,
+ *       and 'wait_list', which is used to queue ports during congestion.
+ *     
+ *  3: The name table (name_table.c, name_distr.c, subscription.c)
+ *     - There is one big read/write-lock (nametbl_lock) protecting the 
+ *       overall name table structure. Nothing must be added to or removed 
+ *       from this structure without holding write access to it.
+ *     - There is one local spin_lock per sub_sequence, which can be seen
+ *       as a sub-domain to the nametbl_lock domain. It is used only
+ *       for translation operations, and is needed because a translation
+ *       steps the root of the 'publication' linked list between each lookup.
+ *       This is always used within the scope of a nametbl_lock(read).
+ *     - A local spin_lock protecting the queue of subscriber events.
+*/
+
+rwlock_t net_lock = RW_LOCK_UNLOCKED;
+struct network net = { 0 };
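
The nesting described above (net_lock taken for reading plus a per-instance spin lock for local updates; net_lock taken for writing only to add or remove structures) can be sketched in user space with pthread primitives. The names below are hypothetical and only the lock ordering is the point:

#include <pthread.h>
#include <stdio.h>

/* Hypothetical user-space analogue of the net_lock / node->lock nesting. */
static pthread_rwlock_t net_lock_demo = PTHREAD_RWLOCK_INITIALIZER;

struct demo_node {
        pthread_spinlock_t lock;
        int link_up;
};

/* Readers may update one node while holding net_lock shared... */
static void update_node(struct demo_node *n)
{
        pthread_rwlock_rdlock(&net_lock_demo);
        pthread_spin_lock(&n->lock);
        n->link_up = 1;                 /* e.g. change link state */
        pthread_spin_unlock(&n->lock);
        pthread_rwlock_unlock(&net_lock_demo);
}

/* ...but adding/removing structures requires net_lock exclusively. */
static void remove_node(struct demo_node *n)
{
        (void)n;                        /* unlinking 'n' would happen here */
        pthread_rwlock_wrlock(&net_lock_demo);
        pthread_rwlock_unlock(&net_lock_demo);
}

int main(void)
{
        struct demo_node n;

        n.link_up = 0;
        pthread_spin_init(&n.lock, PTHREAD_PROCESS_PRIVATE);
        update_node(&n);
        remove_node(&n);
        printf("link_up=%d\n", n.link_up);
        pthread_spin_destroy(&n.lock);
        return 0;
}
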
+
+struct node *net_select_remote_node(u32 addr, u32 ref) 
+{
+       return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+u32 net_select_router(u32 addr, u32 ref)
+{
+       return zone_select_router(net.zones[tipc_zone(addr)], addr, ref);
+}
+
+
+u32 net_next_node(u32 a)
+{
+       if (net.zones[tipc_zone(a)])
+               return zone_next_node(a);
+       return 0;
+}
+
+void net_remove_as_router(u32 router)
+{
+       u32 z_num;
+
+       for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+               if (!net.zones[z_num])
+                       continue;
+               zone_remove_as_router(net.zones[z_num], router);
+       }
+}
+
+void net_send_external_routes(u32 dest)
+{
+       u32 z_num;
+
+       for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+               if (net.zones[z_num])
+                       zone_send_external_routes(net.zones[z_num], dest);
+       }
+}
+
+int net_init(void)
+{
+       u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
+
+       memset(&net, 0, sizeof(net));
+       net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
+       if (!net.zones) {
+               return -ENOMEM;
+       }
+       memset(net.zones, 0, sz);
+       return TIPC_OK;
+}
+
+void net_stop(void)
+{
+       u32 z_num;
+
+       if (!net.zones)
+               return;
+
+       for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
+               zone_delete(net.zones[z_num]);
+       }
+       kfree(net.zones);
+       net.zones = 0;
+}
+
+static void net_route_named_msg(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 dnode;
+       u32 dport;
+
+       if (!msg_named(msg)) {
+               msg_dbg(msg, "net->drop_nam:");
+               buf_discard(buf);
+               return;
+       }
+
+       dnode = addr_domain(msg_lookup_scope(msg));
+       dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
+       dbg("net->lookup<%u,%u>-><%u,%x>\n",
+           msg_nametype(msg), msg_nameinst(msg), dport, dnode);
+       if (dport) {
+               msg_set_destnode(msg, dnode);
+               msg_set_destport(msg, dport);
+               net_route_msg(buf);
+               return;
+       }
+       msg_dbg(msg, "net->rej:NO NAME: ");
+       tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+void net_route_msg(struct sk_buff *buf)
+{
+       struct tipc_msg *msg;
+       u32 dnode;
+
+       if (!buf)
+               return;
+       msg = buf_msg(buf);
+
+       msg_incr_reroute_cnt(msg);
+       if (msg_reroute_cnt(msg) > 6) {
+               if (msg_errcode(msg)) {
+                       msg_dbg(msg, "NET>DISC>:");
+                       buf_discard(buf);
+               } else {
+                       msg_dbg(msg, "NET>REJ>:");
+                       tipc_reject_msg(buf, msg_destport(msg) ? 
+                                       TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
+               }
+               return;
+       }
+
+       msg_dbg(msg, "net->rout: ");
+
+       /* Handle message for this node */
+       dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
+       if (in_scope(dnode, tipc_own_addr)) {
+               if (msg_isdata(msg)) {
+                       if (msg_mcast(msg)) 
+                               port_recv_mcast(buf, NULL);
+                       else if (msg_destport(msg))
+                               port_recv_msg(buf);
+                       else
+                               net_route_named_msg(buf);
+                       return;
+               }
+               switch (msg_user(msg)) {
+               case ROUTE_DISTRIBUTOR:
+                       cluster_recv_routing_table(buf);
+                       break;
+               case NAME_DISTRIBUTOR:
+                       named_recv(buf);
+                       break;
+               case CONN_MANAGER:
+                       port_recv_proto_msg(buf);
+                       break;
+               default:
+                       msg_dbg(msg,"DROP/NET/<REC<");
+                       buf_discard(buf);
+               }
+               return;
+       }
+
+       /* Handle message for another node */
+       msg_dbg(msg, "NET>SEND>: ");
+       link_send(buf, dnode, msg_link_selector(msg));
+}
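
The reroute counter above acts as a loop guard: once the counter has been bumped past six, the message is dropped if it already carries an error code, or rejected back to the sender otherwise. A stand-alone sketch of just that verdict logic (hypothetical names, no TIPC structures):

#include <stdio.h>

/* Hypothetical stand-in for the loop guard in net_route_msg(): the count is
 * the value after it has been incremented for the current hop. */
enum verdict { FORWARD, DELIVER_LOCAL, REJECT, DROP };

static enum verdict route(unsigned reroute_cnt, int has_errcode, int is_local)
{
        if (reroute_cnt > 6)
                return has_errcode ? DROP : REJECT;
        return is_local ? DELIVER_LOCAL : FORWARD;
}

int main(void)
{
        printf("%d %d %d\n",
               route(0, 0, 1),          /* DELIVER_LOCAL */
               route(7, 0, 0),          /* REJECT        */
               route(7, 1, 0));         /* DROP          */
        return 0;
}
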
+
+int tipc_start_net(void)
+{
+       char addr_string[16];
+       int res;
+
+       if (tipc_mode != TIPC_NODE_MODE)
+               return -ENOPROTOOPT;
+
+       tipc_mode = TIPC_NET_MODE;
+       named_reinit();
+       port_reinit();
+
+       if ((res = bearer_init()) ||
+           (res = net_init()) ||
+           (res = cluster_init()) ||
+           (res = bclink_init())) {
+               return res;
+       }
+        subscr_stop();
+       cfg_stop();
+       k_signal((Handler)subscr_start, 0);
+       k_signal((Handler)cfg_init, 0);
+       info("Started in network mode\n");
+       info("Own node address %s, network identity %u\n",
+            addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
+       return TIPC_OK;
+}
+
+void tipc_stop_net(void)
+{
+       if (tipc_mode != TIPC_NET_MODE)
+               return;
+        write_lock_bh(&net_lock);
+       bearer_stop();
+       tipc_mode = TIPC_NODE_MODE;
+       bclink_stop();
+       net_stop();
+        write_unlock_bh(&net_lock);
+       info("Left network mode\n");
+}
+
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644 (file)
index 0000000..948c6d4
--- /dev/null
@@ -0,0 +1,66 @@
+/*
+ * net/tipc/net.h: Include file for TIPC network routing code
+ * 
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NET_H
+#define _TIPC_NET_H
+
+struct _zone;
+
+/**
+ * struct network - TIPC network structure
+ * @zones: array of pointers to all zones within network
+ */
+struct network {
+       struct _zone **zones;
+};
+
+
+extern struct network net;
+extern rwlock_t net_lock;
+
+int net_init(void);
+void net_stop(void);
+void net_remove_as_router(u32 router);
+void net_send_external_routes(u32 dest);
+void net_route_msg(struct sk_buff *buf);
+struct node *net_select_remote_node(u32 addr, u32 ref);
+u32 net_select_router(u32 addr, u32 ref);
+
+int tipc_start_net(void);
+void tipc_stop_net(void);
+
+#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644 (file)
index 0000000..19b3f40
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * net/tipc/netlink.c: TIPC configuration handling
+ * 
+ * Copyright (c) 2005-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include <net/genetlink.h>
+
+static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
+{
+       struct sk_buff *rep_buf;
+       struct nlmsghdr *rep_nlh;
+       struct nlmsghdr *req_nlh = info->nlhdr;
+       struct tipc_genlmsghdr *req_userhdr = info->userhdr;
+       int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
+
+       if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+               rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
+       else
+               rep_buf = cfg_do_cmd(req_userhdr->dest,
+                                    req_userhdr->cmd,
+                                    NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
+                                    NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
+                                    hdr_space);
+
+       if (rep_buf) {
+               skb_push(rep_buf, hdr_space);
+               rep_nlh = (struct nlmsghdr *)rep_buf->data;
+               memcpy(rep_nlh, req_nlh, hdr_space);
+               rep_nlh->nlmsg_len = rep_buf->len;
+               genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
+       }
+
+        return 0;
+}
+
+static struct genl_family family = {
+        .id            = GENL_ID_GENERATE,
+        .name          = TIPC_GENL_NAME,
+        .version       = TIPC_GENL_VERSION,
+        .hdrsize       = TIPC_GENL_HDRLEN,
+        .maxattr       = 0,
+};
+
+static struct genl_ops ops = {
+       .cmd            = TIPC_GENL_CMD,
+       .doit           = handle_cmd,
+};
+
+static int family_registered = 0;
+
+int netlink_start(void)
+{
+       if (genl_register_family(&family))
+               goto err;
+
+       family_registered = 1;
+
+       if (genl_register_ops(&family, &ops))
+               goto err_unregister;
+
+        return 0;
+
+ err_unregister:
+       genl_unregister_family(&family);
+       family_registered = 0;
+ err:
+       err("Failed to register netlink interface\n");
+       return -EFAULT;
+}
+
+void netlink_stop(void)
+{
+       if (family_registered) {
+               genl_unregister_family(&family);
+               family_registered = 0;
+       }
+}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644 (file)
index 0000000..05688d0
--- /dev/null
@@ -0,0 +1,679 @@
+/*
+ * net/tipc/node.c: TIPC node management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "node.h"
+#include "cluster.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "link.h"
+#include "port.h"
+#include "bearer.h"
+#include "name_distr.h"
+#include "net.h"
+
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
+static void node_lost_contact(struct node *n_ptr);
+static void node_established_contact(struct node *n_ptr);
+
+struct node *nodes = NULL;     /* sorted list of nodes within cluster */
+
+u32 tipc_own_tag = 0;
+
+struct node *node_create(u32 addr)
+{
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+        struct node **curr_node;
+
+       n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
+        if (n_ptr != NULL) {
+                memset(n_ptr, 0, sizeof(*n_ptr));
+                n_ptr->addr = addr;
+                n_ptr->lock =  SPIN_LOCK_UNLOCKED;     
+                INIT_LIST_HEAD(&n_ptr->nsub);
+       
+               c_ptr = cluster_find(addr);
+                if (c_ptr == NULL)
+                        c_ptr = cluster_create(addr);
+                if (c_ptr != NULL) {
+                        n_ptr->owner = c_ptr;
+                        cluster_attach_node(c_ptr, n_ptr);
+                        n_ptr->last_router = -1;
+
+                        /* Insert node into ordered list */
+                        for (curr_node = &nodes; *curr_node; 
+                            curr_node = &(*curr_node)->next) {
+                                if (addr < (*curr_node)->addr) {
+                                        n_ptr->next = *curr_node;
+                                        break;
+                                }
+                        }
+                        (*curr_node) = n_ptr;
+                } else {
+                        kfree(n_ptr);
+                        n_ptr = NULL;
+                }
+        }
+       return n_ptr;
+}
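
The ordered insertion above walks a pointer-to-pointer over the sorted node list, which avoids special-casing the head of the list. A stand-alone sketch of that idiom with hypothetical names:

#include <stdio.h>

typedef unsigned int u32;

struct demo_node {
        u32 addr;
        struct demo_node *next;
};

/* Insert 'n' into a singly-linked list kept sorted by ascending addr,
 * walking a pointer-to-pointer so the head needs no special case. */
static void insert_sorted(struct demo_node **list, struct demo_node *n)
{
        struct demo_node **curr;

        for (curr = list; *curr; curr = &(*curr)->next) {
                if (n->addr < (*curr)->addr) {
                        n->next = *curr;
                        break;
                }
        }
        *curr = n;
}

int main(void)
{
        struct demo_node a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
        struct demo_node *list = NULL, *p;

        insert_sorted(&list, &a);
        insert_sorted(&list, &b);
        insert_sorted(&list, &c);
        for (p = list; p; p = p->next)
                printf("%u ", p->addr);         /* 10 20 30 */
        printf("\n");
        return 0;
}
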
+
+void node_delete(struct node *n_ptr)
+{
+       if (!n_ptr)
+               return;
+
+#if 0
+       /* Not needed because links are already deleted via bearer_stop() */
+
+       u32 l_num;
+
+       for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
+               link_delete(n_ptr->links[l_num]);
+       }
+#endif
+
+       dbg("node %x deleted\n", n_ptr->addr);
+       kfree(n_ptr);
+}
+
+
+/**
+ * node_link_up - handle addition of link
+ * 
+ * Link becomes active (alone or shared) or standby, depending on its priority.
+ */
+
+void node_link_up(struct node *n_ptr, struct link *l_ptr)
+{
+       struct link **active = &n_ptr->active_links[0];
+
+       info("Established link <%s> on network plane %c\n",
+            l_ptr->name, l_ptr->b_ptr->net_plane);
+       
+       if (!active[0]) {
+               dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
+               active[0] = active[1] = l_ptr;
+               node_established_contact(n_ptr);
+               return;
+       }
+       if (l_ptr->priority < active[0]->priority) { 
+               info("Link is standby\n");
+               return;
+       }
+       link_send_duplicate(active[0], l_ptr);
+       if (l_ptr->priority == active[0]->priority) { 
+               active[0] = l_ptr;
+               return;
+       }
+       info("Link <%s> on network plane %c becomes standby\n",
+            active[0]->name, active[0]->b_ptr->net_plane);
+       active[0] = active[1] = l_ptr;
+}
+
+/**
+ * node_select_active_links - select active link
+ */
+
+static void node_select_active_links(struct node *n_ptr)
+{
+       struct link **active = &n_ptr->active_links[0];
+       u32 i;
+       u32 highest_prio = 0;
+
+        active[0] = active[1] = 0;
+
+       for (i = 0; i < MAX_BEARERS; i++) {
+                struct link *l_ptr = n_ptr->links[i];
+
+               if (!l_ptr || !link_is_up(l_ptr) ||
+                   (l_ptr->priority < highest_prio))
+                       continue;
+
+               if (l_ptr->priority > highest_prio) {
+                        highest_prio = l_ptr->priority;
+                       active[0] = active[1] = l_ptr;
+               } else {
+                       active[1] = l_ptr;
+               }
+       }
+}
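
Both active slots end up pointing at the single best link unless a second link shares the highest priority, in which case it takes the second slot and traffic can be shared across the pair. A stand-alone sketch of that selection over an array of candidate links (hypothetical names):

#include <stdio.h>

typedef unsigned int u32;

struct demo_link { int up; u32 priority; const char *name; };

/* Fill active[0]/active[1]: both point at the highest-priority link that is
 * up; if a second link shares that priority it takes the second slot. */
static void select_active(struct demo_link *links, int n,
                          struct demo_link *active[2])
{
        u32 highest = 0;
        int i;

        active[0] = active[1] = NULL;
        for (i = 0; i < n; i++) {
                struct demo_link *l = &links[i];

                if (!l->up || l->priority < highest)
                        continue;
                if (l->priority > highest) {
                        highest = l->priority;
                        active[0] = active[1] = l;
                } else {
                        active[1] = l;
                }
        }
}

int main(void)
{
        struct demo_link links[] = {
                { 1, 10, "eth0" }, { 1, 10, "eth1" }, { 1, 5, "eth2" },
        };
        struct demo_link *active[2];

        select_active(links, 3, active);
        printf("%s %s\n", active[0]->name, active[1]->name);   /* eth0 eth1 */
        return 0;
}
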
+
+/**
+ * node_link_down - handle loss of link
+ */
+
+void node_link_down(struct node *n_ptr, struct link *l_ptr)
+{
+       struct link **active;
+
+       if (!link_is_active(l_ptr)) {
+               info("Lost standby link <%s> on network plane %c\n",
+                    l_ptr->name, l_ptr->b_ptr->net_plane);
+               return;
+       }
+       info("Lost link <%s> on network plane %c\n",
+               l_ptr->name, l_ptr->b_ptr->net_plane);
+
+       active = &n_ptr->active_links[0];
+       if (active[0] == l_ptr)
+               active[0] = active[1];
+       if (active[1] == l_ptr)
+               active[1] = active[0];
+       if (active[0] == l_ptr)
+               node_select_active_links(n_ptr);
+       if (node_is_up(n_ptr)) 
+               link_changeover(l_ptr);
+       else 
+               node_lost_contact(n_ptr);
+}
+
+int node_has_active_links(struct node *n_ptr)
+{
+       return (n_ptr && 
+               ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
+}
+
+int node_has_redundant_links(struct node *n_ptr)
+{
+       return (node_has_active_links(n_ptr) &&
+               (n_ptr->active_links[0] != n_ptr->active_links[1]));
+}
+
+int node_has_active_routes(struct node *n_ptr)
+{
+       return (n_ptr && (n_ptr->last_router >= 0));
+}
+
+int node_is_up(struct node *n_ptr)
+{
+       return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr));
+}
+
+struct node *node_attach_link(struct link *l_ptr)
+{
+       struct node *n_ptr = node_find(l_ptr->addr);
+
+       if (!n_ptr)
+               n_ptr = node_create(l_ptr->addr);
+        if (n_ptr) {
+               u32 bearer_id = l_ptr->b_ptr->identity;
+               char addr_string[16];
+
+                assert(bearer_id < MAX_BEARERS);
+                if (n_ptr->link_cnt >= 2) {
+                        err("Attempt to create third link to %s\n",
+                           addr_string_fill(addr_string, n_ptr->addr));
+                        return 0;
+                }
+
+                if (!n_ptr->links[bearer_id]) {
+                        n_ptr->links[bearer_id] = l_ptr;
+                        net.zones[tipc_zone(l_ptr->addr)]->links++;
+                        n_ptr->link_cnt++;
+                        return n_ptr;
+                }
+                err("Attempt to establish second link on <%s> to <%s> \n",
+                    l_ptr->b_ptr->publ.name, 
+                   addr_string_fill(addr_string, l_ptr->addr));
+        }
+       return 0;
+}
+
+void node_detach_link(struct node *n_ptr, struct link *l_ptr)
+{
+       n_ptr->links[l_ptr->b_ptr->identity] = 0;
+       net.zones[tipc_zone(l_ptr->addr)]->links--;
+       n_ptr->link_cnt--;
+}
+
+/*
+ * Routing table management - five cases to handle:
+ *
+ * 1: A link towards a zone/cluster external node comes up.
+ *    => Send a multicast message updating routing tables of all 
+ *    system nodes within own cluster that the new destination 
+ *    can be reached via this node. 
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *
+ * 2: A link towards a slave node comes up.
+ *    => Send a multicast message updating routing tables of all 
+ *    system nodes within own cluster that the new destination 
+ *    can be reached via this node. 
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *    => Send a  message to the slave node about existence 
+ *    of all system nodes within cluster:
+ *    (node.establishedContact()=>cluster.sendLocalRoutes())
+ *
+ * 3: A new cluster local system node becomes available.
+ *    => Send message(s) to this particular node containing
+ *    information about all cluster external and slave
+ *     nodes which can be reached via this node.
+ *    (node.establishedContact()==>network.sendExternalRoutes())
+ *    (node.establishedContact()==>network.sendSlaveRoutes())
+ *    => Send messages to all directly connected slave nodes 
+ *    containing information about the existence of the new node
+ *    (node.establishedContact()=>cluster.multicastNewRoute())
+ *    
+ * 4: The link towards a zone/cluster external node or slave
+ *    node goes down.
+ *    => Send a multicast message updating routing tables of all
+ *    nodes within the cluster that the new destination can no
+ *    longer be reached via this node.
+ *    (node.lostAllLinks()=>cluster.bcastLostRoute())
+ *
+ * 5: A cluster local system node becomes unavailable.
+ *    => Remove all references to this node from the local
+ *    routing tables. Note: This is a completely node
+ *    local operation.
+ *    (node.lostAllLinks()=>network.removeAsRouter())
+ *    => Send messages to all directly connected slave nodes 
+ *    containing information about loss of the node
+ *    (node.establishedContact()=>cluster.multicastLostRoute())
+ *
+ */
+
+static void node_established_contact(struct node *n_ptr)
+{
+       struct cluster *c_ptr;
+
+       dbg("node_established_contact:-> %x\n", n_ptr->addr);
+       if (!node_has_active_routes(n_ptr)) { 
+               k_signal((Handler)named_node_up, n_ptr->addr);
+       }
+
+        /* Synchronize broadcast acks */
+        n_ptr->bclink.acked = bclink_get_last_sent();
+
+       if (is_slave(tipc_own_addr))
+               return;
+       if (!in_own_cluster(n_ptr->addr)) {
+               /* Usage case 1 (see above) */
+               c_ptr = cluster_find(tipc_own_addr);
+               if (!c_ptr)
+                       c_ptr = cluster_create(tipc_own_addr);
+                if (c_ptr)
+                        cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, 
+                                               tipc_max_nodes);
+               return;
+       } 
+
+       c_ptr = n_ptr->owner;
+       if (is_slave(n_ptr->addr)) {
+               /* Usage case 2 (see above) */
+               cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
+               cluster_send_local_routes(c_ptr, n_ptr->addr);
+               return;
+       }
+
+       if (n_ptr->bclink.supported) {
+               nmap_add(&cluster_bcast_nodes, n_ptr->addr);
+               if (n_ptr->addr < tipc_own_addr)
+                       tipc_own_tag++;
+       }
+
+       /* Case 3 (see above) */
+       net_send_external_routes(n_ptr->addr);
+       cluster_send_slave_routes(c_ptr, n_ptr->addr);
+       cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
+                               highest_allowed_slave);
+}
+
+static void node_lost_contact(struct node *n_ptr)
+{
+       struct cluster *c_ptr;
+       struct node_subscr *ns, *tns;
+       char addr_string[16];
+       u32 i;
+
+        /* Clean up broadcast reception remains */
+        n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
+        while (n_ptr->bclink.deferred_head) {
+                struct sk_buff* buf = n_ptr->bclink.deferred_head;
+                n_ptr->bclink.deferred_head = buf->next;
+                buf_discard(buf);
+        }
+        if (n_ptr->bclink.defragm) {
+                buf_discard(n_ptr->bclink.defragm);  
+                n_ptr->bclink.defragm = NULL;
+        }            
+        if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 
+                bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
+        }
+
+        /* Update routing tables */
+       if (is_slave(tipc_own_addr)) {
+               net_remove_as_router(n_ptr->addr);
+       } else {
+               if (!in_own_cluster(n_ptr->addr)) { 
+                       /* Case 4 (see above) */
+                       c_ptr = cluster_find(tipc_own_addr);
+                       cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+                                                tipc_max_nodes);
+               } else {
+                       /* Case 5 (see above) */
+                       c_ptr = cluster_find(n_ptr->addr);
+                       if (is_slave(n_ptr->addr)) {
+                               cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
+                                                        tipc_max_nodes);
+                       } else {
+                               if (n_ptr->bclink.supported) {
+                                       nmap_remove(&cluster_bcast_nodes, 
+                                                   n_ptr->addr);
+                                       if (n_ptr->addr < tipc_own_addr)
+                                               tipc_own_tag--;
+                               }
+                               net_remove_as_router(n_ptr->addr);
+                               cluster_bcast_lost_route(c_ptr, n_ptr->addr,
+                                                        LOWEST_SLAVE,
+                                                        highest_allowed_slave);
+                       }
+               }
+       }
+       if (node_has_active_routes(n_ptr))
+               return;
+
+       info("Lost contact with %s\n", 
+            addr_string_fill(addr_string, n_ptr->addr));
+
+       /* Abort link changeover */
+       for (i = 0; i < MAX_BEARERS; i++) {
+               struct link *l_ptr = n_ptr->links[i];
+               if (!l_ptr) 
+                       continue;
+               l_ptr->reset_checkpoint = l_ptr->next_in_no;
+               l_ptr->exp_msg_count = 0;
+               link_reset_fragments(l_ptr);
+       }
+
+       /* Notify subscribers */
+       list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
+                ns->node = 0;
+               list_del_init(&ns->nodesub_list);
+               k_signal((Handler)ns->handle_node_down,
+                        (unsigned long)ns->usr_handle);
+       }
+}
+
+/**
+ * node_select_next_hop - find the next-hop node for a message
+ * 
+ * Called when cluster local lookup has failed.
+ */
+
+struct node *node_select_next_hop(u32 addr, u32 selector)
+{
+       struct node *n_ptr;
+       u32 router_addr;
+
+        if (!addr_domain_valid(addr))
+                return 0;
+
+       /* Look for direct link to destination processor */
+       n_ptr = node_find(addr);
+       if (n_ptr && node_has_active_links(n_ptr))
+                return n_ptr;
+
+       /* Cluster local system nodes *must* have direct links */
+       if (!is_slave(addr) && in_own_cluster(addr))
+               return 0;
+
+       /* Look for cluster local router with direct link to node */
+       router_addr = node_select_router(n_ptr, selector);
+       if (router_addr) 
+                return node_select(router_addr, selector);
+
+       /* Slave nodes can only be accessed within own cluster via a 
+          known router with direct link -- if no router was found, give up */
+       if (is_slave(addr))
+               return 0;
+
+       /* Inter zone/cluster -- find any direct link to remote cluster */
+       addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
+       n_ptr = net_select_remote_node(addr, selector);
+       if (n_ptr && node_has_active_links(n_ptr))
+                return n_ptr;
+
+       /* Last resort -- look for any router to anywhere in remote zone */
+       router_addr =  net_select_router(addr, selector);
+       if (router_addr) 
+                return node_select(router_addr, selector);
+
+        return 0;
+}
+
+/**
+ * node_select_router - select router to reach specified node
+ * 
+ * Uses a deterministic and fair algorithm to select the router node.
+ */
+
+u32 node_select_router(struct node *n_ptr, u32 ref)
+{
+       u32 ulim;
+       u32 mask;
+       u32 start;
+       u32 r;
+
+        if (!n_ptr)
+                return 0;
+
+       if (n_ptr->last_router < 0)
+               return 0;
+       ulim = ((n_ptr->last_router + 1) * 32) - 1;
+
+       /* Derive a pseudo-random start entry from the reference value */
+       mask = tipc_max_nodes;
+       while (mask > ulim)
+               mask >>= 1;
+       start = ref & mask;
+       r = start;
+
+       /* Lookup upwards with wrap-around */
+       do {
+               if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+                       break;
+       } while (++r <= ulim);
+       if (r > ulim) {
+               r = 1;
+               do {
+                       if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
+                               break;
+               } while (++r < start);
+               assert(r != start);
+       }
+       assert(r && (r <= ulim));
+       return tipc_addr(own_zone(), own_cluster(), r);
+}
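+
+/*
+ * Example (a minimal sketch, not part of the algorithm above): each set
+ * bit 'r' in n_ptr->routers[] marks node <own zone.own cluster.r> as a
+ * usable router, so after
+ *
+ *     node_add_router(n_ptr, tipc_addr(1, 1, 5));
+ *     node_add_router(n_ptr, tipc_addr(1, 1, 9));
+ *
+ * two calls with different (hypothetical) selector values
+ *
+ *     r1 = node_select_router(n_ptr, ref_a);  // may yield <1.1.5>
+ *     r2 = node_select_router(n_ptr, ref_b);  // may yield <1.1.9>
+ *
+ * spread the load, while a given selector always maps to the same router.
+ */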
+
+void node_add_router(struct node *n_ptr, u32 router)
+{
+       u32 r_num = tipc_node(router);
+
+       n_ptr->routers[r_num / 32] = 
+               ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
+       n_ptr->last_router = tipc_max_nodes / 32;
+       while ((--n_ptr->last_router >= 0) && 
+              !n_ptr->routers[n_ptr->last_router]);
+}
+
+void node_remove_router(struct node *n_ptr, u32 router)
+{
+       u32 r_num = tipc_node(router);
+
+       if (n_ptr->last_router < 0)
+               return;         /* No routes */
+
+       n_ptr->routers[r_num / 32] =
+               ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
+       n_ptr->last_router = tipc_max_nodes / 32;
+       while ((--n_ptr->last_router >= 0) && 
+              !n_ptr->routers[n_ptr->last_router]);
+
+       if (!node_is_up(n_ptr))
+               node_lost_contact(n_ptr);
+}
+
+#if 0
+void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
+{
+       u32 i;
+
+       tipc_printf(buf, "\n\n%s", str);
+       for (i = 0; i < MAX_BEARERS; i++) {
+               if (!n_ptr->links[i]) 
+                       continue;
+               tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
+       }
+       tipc_printf(buf, "Active links: [%x,%x]\n",
+                   n_ptr->active_links[0], n_ptr->active_links[1]);
+}
+#endif
+
+u32 tipc_available_nodes(const u32 domain)
+{
+       struct node *n_ptr;
+       u32 cnt = 0;
+
+       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+               if (!in_scope(domain, n_ptr->addr))
+                       continue;
+               if (node_is_up(n_ptr))
+                       cnt++;
+       }
+       return cnt;
+}
+
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
+{
+       u32 domain;
+       struct sk_buff *buf;
+       struct node *n_ptr;
+        struct tipc_node_info node_info;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       domain = *(u32 *)TLV_DATA(req_tlv_area);
+       domain = ntohl(domain);
+       if (!addr_domain_valid(domain))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (network address)");
+
+        if (!nodes)
+                return cfg_reply_none();
+
+       /* For now, get space for all other nodes
+          (will need to modify this when slave nodes are supported) */
+
+       buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
+                           (tipc_max_nodes - 1));
+       if (!buf)
+               return NULL;
+
+       /* Add TLVs for all nodes in scope */
+
+       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+               if (!in_scope(domain, n_ptr->addr))
+                       continue;
+                node_info.addr = htonl(n_ptr->addr);
+                node_info.up = htonl(node_is_up(n_ptr));
+               cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 
+                              &node_info, sizeof(node_info));
+       }
+
+       return buf;
+}
+
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
+{
+       u32 domain;
+       struct sk_buff *buf;
+       struct node *n_ptr;
+        struct tipc_link_info link_info;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       domain = *(u32 *)TLV_DATA(req_tlv_area);
+       domain = ntohl(domain);
+       if (!addr_domain_valid(domain))
+               return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
+                                             " (network address)");
+
+        if (!nodes)
+                return cfg_reply_none();
+
+       /* For now, get space for 2 links to all other nodes + bcast link
+          (will need to modify this when slave nodes are supported) */
+
+       buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
+                           (2 * (tipc_max_nodes - 1) + 1));
+       if (!buf)
+               return NULL;
+
+       /* Add TLV for broadcast link */
+
+        link_info.dest = tipc_own_addr & 0xfffff00;
+       link_info.dest = htonl(link_info.dest);
+        link_info.up = htonl(1);
+       strcpy(link_info.str, bc_link_name);
+       cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
+
+       /* Add TLVs for any other links in scope */
+
+       for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
+                u32 i;
+
+               if (!in_scope(domain, n_ptr->addr))
+                       continue;
+                for (i = 0; i < MAX_BEARERS; i++) {
+                        if (!n_ptr->links[i]) 
+                                continue;
+                        link_info.dest = htonl(n_ptr->addr);
+                        link_info.up = htonl(link_is_up(n_ptr->links[i]));
+                        strcpy(link_info.str, n_ptr->links[i]->name);
+                       cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 
+                                      &link_info, sizeof(link_info));
+                }
+       }
+
+       return buf;
+}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644 (file)
index 0000000..b39442b
--- /dev/null
@@ -0,0 +1,144 @@
+/*
+ * net/tipc/node.h: Include file for TIPC node management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_H
+#define _TIPC_NODE_H
+
+#include "node_subscr.h"
+#include "addr.h"
+#include "cluster.h"
+#include "bearer.h"
+
+/**
+ * struct node - TIPC node structure
+ * @addr: network address of node
+ * @lock: spinlock governing access to structure
+ * @owner: pointer to cluster that node belongs to
+ * @next: pointer to next node in sorted list of cluster's nodes
+ * @nsub: list of "node down" subscriptions monitoring node
+ * @active_links: pointers to active links to node
+ * @links: pointers to all links to node
+ * @link_cnt: number of links to node
+ * @permit_changeover: non-zero if node has redundant links to this system
+ * @routers: bitmap (used for multicluster communication)
+ * @last_router: (used for multicluster communication)
+ * @bclink: broadcast-related info
+ *    @supported: non-zero if node supports TIPC b'cast capability
+ *    @acked: sequence # of last outbound b'cast message acknowledged by node
+ *    @last_in: sequence # of last in-sequence b'cast message received from node
+ *    @gap_after: sequence # of last message not requiring a NAK request
+ *    @gap_to: sequence # of last message requiring a NAK request
+ *    @nack_sync: counter that determines when NAK requests should be sent
+ *    @deferred_head: oldest OOS b'cast message received from node
+ *    @deferred_tail: newest OOS b'cast message received from node
+ *    @defragm: list of partially reassembled b'cast message fragments from node
+ */
+struct node {
+       u32 addr;
+       spinlock_t lock;
+       struct cluster *owner;
+       struct node *next;
+       struct list_head nsub;
+       struct link *active_links[2];
+       struct link *links[MAX_BEARERS];
+       int link_cnt;
+       int permit_changeover;
+       u32 routers[512/32];
+       int last_router;
+       struct {
+               int supported;
+               u32 acked;
+               u32 last_in;
+               u32 gap_after; 
+               u32 gap_to; 
+               u32 nack_sync;
+               struct sk_buff *deferred_head;
+               struct sk_buff *deferred_tail;
+               struct sk_buff *defragm;
+       } bclink;
+};
+
+extern struct node *nodes;
+extern u32 tipc_own_tag;
+
+struct node *node_create(u32 addr);
+void node_delete(struct node *n_ptr);
+struct node *node_attach_link(struct link *l_ptr);
+void node_detach_link(struct node *n_ptr, struct link *l_ptr);
+void node_link_down(struct node *n_ptr, struct link *l_ptr);
+void node_link_up(struct node *n_ptr, struct link *l_ptr);
+int node_has_active_links(struct node *n_ptr);
+int node_has_redundant_links(struct node *n_ptr);
+u32 node_select_router(struct node *n_ptr, u32 ref);
+struct node *node_select_next_hop(u32 addr, u32 selector);
+int node_is_up(struct node *n_ptr);
+void node_add_router(struct node *n_ptr, u32 router);
+void node_remove_router(struct node *n_ptr, u32 router);
+struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space);
+struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space);
+
+static inline struct node *node_find(u32 addr)
+{
+       if (likely(in_own_cluster(addr)))
+               return local_nodes[tipc_node(addr)];
+       else if (addr_domain_valid(addr)) {
+               struct cluster *c_ptr = cluster_find(addr);
+
+               if (c_ptr)
+                       return c_ptr->nodes[tipc_node(addr)];
+       }
+       return 0;
+}
+
+static inline struct node *node_select(u32 addr, u32 selector)
+{
+       if (likely(in_own_cluster(addr)))
+               return local_nodes[tipc_node(addr)];
+       return node_select_next_hop(addr, selector);
+}
+
+static inline void node_lock(struct node *n_ptr)
+{
+       spin_lock_bh(&n_ptr->lock);
+}
+
+static inline void node_unlock(struct node *n_ptr)
+{
+       spin_unlock_bh(&n_ptr->lock);
+}
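+
+/*
+ * Usage sketch (illustrative only, assuming 'addr' was already validated
+ * by the caller): the usual lookup-then-lock sequence is
+ *
+ *     struct node *n_ptr = node_find(addr);
+ *
+ *     if (n_ptr) {
+ *             node_lock(n_ptr);
+ *             // inspect or update n_ptr->links[], n_ptr->bclink, ...
+ *             node_unlock(n_ptr);
+ *     }
+ */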
+
+#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
new file mode 100644 (file)
index 0000000..7937592
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * net/tipc/node_subscr.c: TIPC "node down" subscription handling
+ * 
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "node_subscr.h"
+#include "node.h"
+#include "addr.h"
+
+/**
+ * nodesub_subscribe - create "node down" subscription for specified node
+ */
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 
+                      void *usr_handle, net_ev_handler handle_down)
+{
+       node_sub->node = 0;
+       if (addr == tipc_own_addr)
+               return;
+       if (!addr_node_valid(addr)) {
+               warn("node_subscr with illegal %x\n", addr);
+               return;
+       }
+
+       node_sub->handle_node_down = handle_down;
+       node_sub->usr_handle = usr_handle;
+       node_sub->node = node_find(addr);
+       assert(node_sub->node);
+       node_lock(node_sub->node);
+       list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
+       node_unlock(node_sub->node);
+}
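+
+/*
+ * Usage sketch (illustrative; 'my_conn', 'my_node_down' and 'peer_addr'
+ * are hypothetical caller-side names): a user embeds a node_subscr in
+ * its own state and registers/cancels it around the connection:
+ *
+ *     static void my_node_down(void *usr_handle)
+ *     {
+ *             struct my_conn *conn = usr_handle;
+ *             // tear down 'conn' here
+ *     }
+ *
+ *     nodesub_subscribe(&conn->subscr, peer_addr, conn, my_node_down);
+ *     ...
+ *     nodesub_unsubscribe(&conn->subscr);
+ *
+ * The handler is invoked via k_signal() from node_lost_contact(), i.e.
+ * not in the context that detected the failure.
+ */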
+
+/**
+ * nodesub_unsubscribe - cancel "node down" subscription (if any)
+ */
+
+void nodesub_unsubscribe(struct node_subscr *node_sub)
+{
+       if (!node_sub->node)
+               return;
+
+       node_lock(node_sub->node);
+       list_del_init(&node_sub->nodesub_list);
+       node_unlock(node_sub->node);
+}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
new file mode 100644 (file)
index 0000000..a3b87ac
--- /dev/null
@@ -0,0 +1,63 @@
+/*
+ * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
+ * 
+ * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_NODE_SUBSCR_H
+#define _TIPC_NODE_SUBSCR_H
+
+#include "addr.h"
+
+typedef void (*net_ev_handler) (void *usr_handle);
+
+/**
+ * struct node_subscr - "node down" subscription entry
+ * @node: ptr to node structure of interest (or NULL, if none)
+ * @handle_node_down: routine to invoke when node fails
+ * @usr_handle: argument to pass to routine when node fails
+ * @nodesub_list: adjacent entries in list of subscriptions for the node
+ */
+
+struct node_subscr {
+       struct node *node;
+       net_ev_handler handle_node_down;
+       void *usr_handle;
+       struct list_head nodesub_list;
+};
+
+void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
+                      void *usr_handle, net_ev_handler handle_down);
+void nodesub_unsubscribe(struct node_subscr *node_sub);
+
+#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
new file mode 100644 (file)
index 0000000..66caca7
--- /dev/null
@@ -0,0 +1,1708 @@
+/*
+ * net/tipc/port.c: TIPC port code
+ * 
+ * Copyright (c) 1992-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "config.h"
+#include "dbg.h"
+#include "port.h"
+#include "addr.h"
+#include "link.h"
+#include "node.h"
+#include "port.h"
+#include "name_table.h"
+#include "user_reg.h"
+#include "msg.h"
+#include "bcast.h"
+
+/* Connection management: */
+#define PROBING_INTERVAL 3600000       /* [ms] => 1 h */
+#define CONFIRMED 0
+#define PROBING 1
+
+#define MAX_REJECT_SIZE 1024
+
+static struct sk_buff *msg_queue_head = 0;
+static struct sk_buff *msg_queue_tail = 0;
+
+spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED;
+static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
+
+LIST_HEAD(ports);
+static void port_handle_node_down(unsigned long ref);
+static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
+static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
+static void port_timeout(unsigned long ref);
+
+
+static inline u32 port_peernode(struct port *p_ptr)
+{
+       return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_peerport(struct port *p_ptr)
+{
+       return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 port_out_seqno(struct port *p_ptr)
+{
+       return msg_transp_seqno(&p_ptr->publ.phdr);
+}
+
+static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno) 
+{
+       msg_set_transp_seqno(&p_ptr->publ.phdr,seqno);
+}
+
+static inline void port_incr_out_seqno(struct port *p_ptr)
+{
+       struct tipc_msg *m = &p_ptr->publ.phdr;
+
+       if (likely(!msg_routed(m)))
+               return;
+       msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
+}
+
+/**
+ * tipc_multicast - send a multicast message to local and remote destinations
+ */
+
+int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
+                  u32 num_sect, struct iovec const *msg_sect)
+{
+       struct tipc_msg *hdr;
+       struct sk_buff *buf;
+       struct sk_buff *ibuf = NULL;
+       struct port_list dports = {0, NULL, };
+       struct port *oport = port_deref(ref);
+       int ext_targets;
+       int res;
+
+       if (unlikely(!oport))
+               return -EINVAL;
+
+       /* Create multicast message */
+
+       hdr = &oport->publ.phdr;
+       msg_set_type(hdr, TIPC_MCAST_MSG);
+       msg_set_nametype(hdr, seq->type);
+       msg_set_namelower(hdr, seq->lower);
+       msg_set_nameupper(hdr, seq->upper);
+       msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+       res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
+                       !oport->user_port, &buf);
+       if (unlikely(!buf))
+               return res;
+
+       /* Figure out where to send multicast message */
+
+       ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper,
+                                          TIPC_NODE_SCOPE, &dports);
+       
+       /* Send message to destinations (duplicate it only if necessary) */ 
+
+       if (ext_targets) {
+               if (dports.count != 0) {
+                       ibuf = skb_copy(buf, GFP_ATOMIC);
+                       if (ibuf == NULL) {
+                               port_list_free(&dports);
+                               buf_discard(buf);
+                               return -ENOMEM;
+                       }
+               }
+               res = bclink_send_msg(buf);
+               if ((res < 0) && (dports.count != 0)) {
+                       buf_discard(ibuf);
+               }
+       } else {
+               ibuf = buf;
+       }
+
+       if (res >= 0) {
+               if (ibuf)
+                       port_recv_mcast(ibuf, &dports);
+       } else {
+               port_list_free(&dports);
+       }
+       return res;
+}
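+
+/*
+ * Usage sketch (illustrative; 'my_ref', 'payload' and 'payload_len' are
+ * hypothetical): one buffer is delivered to every port bound in the
+ * cluster to name type 1000, instances 0 through 99:
+ *
+ *     struct tipc_name_seq seq = { 1000, 0, 99 };
+ *     struct iovec iov = { payload, payload_len };
+ *     int rc = tipc_multicast(my_ref, &seq, 0, 1, &iov);
+ *
+ * The routine builds a single TIPC_MCAST_MSG, sends it over the broadcast
+ * link when there are off-node destinations, and loops a copy back
+ * through port_recv_mcast() for local ones.
+ */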
+
+/**
+ * port_recv_mcast - deliver multicast message to all destination ports
+ * 
+ * If there is no port list, perform a lookup to create one
+ */
+
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
+{
+       struct tipc_msg* msg;
+       struct port_list dports = {0, NULL, };
+       struct port_list *item = dp;
+       int cnt = 0;
+
+       assert(buf);
+       msg = buf_msg(buf);
+
+       /* Create destination port list, if one wasn't supplied */
+
+       if (dp == NULL) {
+               nametbl_mc_translate(msg_nametype(msg),
+                                    msg_namelower(msg),
+                                    msg_nameupper(msg),
+                                    TIPC_CLUSTER_SCOPE,
+                                    &dports);
+               item = dp = &dports;
+       }
+
+       /* Deliver a copy of message to each destination port */
+
+       if (dp->count != 0) {
+               if (dp->count == 1) {
+                       msg_set_destport(msg, dp->ports[0]);
+                       port_recv_msg(buf);
+                       port_list_free(dp);
+                       return;
+               }
+               for (; cnt < dp->count; cnt++) {
+                       int index = cnt % PLSIZE;
+                       struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
+
+                       if (b == NULL) {
+                               warn("Buffer allocation failure\n");
+                               msg_dbg(msg, "LOST:");
+                               goto exit;
+                       }
+                       if ((index == 0) && (cnt != 0)) {
+                               item = item->next;
+                       }
+                       msg_set_destport(buf_msg(b),item->ports[index]);
+                       port_recv_msg(b);
+               }
+       }
+exit:
+       buf_discard(buf);
+       port_list_free(dp);
+}
+
+/**
+ * tipc_createport_raw - create a native TIPC port
+ * 
+ * Returns local port reference
+ */
+
+u32 tipc_createport_raw(void *usr_handle,
+                       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+                       void (*wakeup)(struct tipc_port *),
+                       const u32 importance)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       u32 ref;
+
+       p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
+       if (p_ptr == NULL) {
+               warn("Memory squeeze; failed to create port\n");
+               return 0;
+       }
+       memset(p_ptr, 0, sizeof(*p_ptr));
+       ref = ref_acquire(p_ptr, &p_ptr->publ.lock);
+       if (!ref) {
+               warn("Reference table exhausted\n");
+               kfree(p_ptr);
+               return 0;
+       }
+
+       port_lock(ref);
+       p_ptr->publ.ref = ref;
+       msg = &p_ptr->publ.phdr;
+       msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
+       msg_set_orignode(msg, tipc_own_addr);
+       msg_set_prevnode(msg, tipc_own_addr);
+       msg_set_origport(msg, ref);
+       msg_set_importance(msg,importance);
+       p_ptr->last_in_seqno = 41;
+       p_ptr->sent = 1;
+       p_ptr->publ.usr_handle = usr_handle;
+       INIT_LIST_HEAD(&p_ptr->wait_list);
+       INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
+       p_ptr->congested_link = 0;
+       p_ptr->max_pkt = MAX_PKT_DEFAULT;
+       p_ptr->dispatcher = dispatcher;
+       p_ptr->wakeup = wakeup;
+       p_ptr->user_port = 0;
+       k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
+       spin_lock_bh(&port_list_lock);
+       INIT_LIST_HEAD(&p_ptr->publications);
+       INIT_LIST_HEAD(&p_ptr->port_list);
+       list_add_tail(&p_ptr->port_list, &ports);
+       spin_unlock_bh(&port_list_lock);
+       port_unlock(p_ptr);
+       return ref;
+}
+
+int tipc_deleteport(u32 ref)
+{
+       struct port *p_ptr;
+       struct sk_buff *buf = 0;
+
+       tipc_withdraw(ref, 0, 0);
+       p_ptr = port_lock(ref);
+       if (!p_ptr) 
+               return -EINVAL;
+
+       ref_discard(ref);
+       port_unlock(p_ptr);
+
+       k_cancel_timer(&p_ptr->timer);
+       if (p_ptr->publ.connected) {
+               buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+               nodesub_unsubscribe(&p_ptr->subscription);
+       }
+       if (p_ptr->user_port) {
+               reg_remove_port(p_ptr->user_port);
+               kfree(p_ptr->user_port);
+       }
+
+       spin_lock_bh(&port_list_lock);
+       list_del(&p_ptr->port_list);
+       list_del(&p_ptr->wait_list);
+       spin_unlock_bh(&port_list_lock);
+       k_term_timer(&p_ptr->timer);
+       kfree(p_ptr);
+       dbg("Deleted port %u\n", ref);
+       net_route_msg(buf);
+       return TIPC_OK;
+}
+
+/**
+ * tipc_get_port() - return port associated with 'ref'
+ * 
+ * Note: Port is not locked.
+ */
+
+struct tipc_port *tipc_get_port(const u32 ref)
+{
+       return (struct tipc_port *)ref_deref(ref);
+}
+
+/**
+ * tipc_get_handle - return user handle associated with port 'ref'
+ */
+
+void *tipc_get_handle(const u32 ref)
+{
+       struct port *p_ptr;
+       void * handle;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return 0;
+       handle = p_ptr->publ.usr_handle;
+       port_unlock(p_ptr);
+       return handle;
+}
+
+static inline int port_unreliable(struct port *p_ptr)
+{
+       return msg_src_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       *isunreliable = port_unreliable(p_ptr);
+       spin_unlock_bh(p_ptr->publ.lock);
+       return TIPC_OK;
+}
+
+int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
+       port_unlock(p_ptr);
+       return TIPC_OK;
+}
+
+static inline int port_unreturnable(struct port *p_ptr)
+{
+       return msg_dest_droppable(&p_ptr->publ.phdr);
+}
+
+int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       *isunrejectable = port_unreturnable(p_ptr);
+       spin_unlock_bh(p_ptr->publ.lock);
+       return TIPC_OK;
+}
+
+int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
+       port_unlock(p_ptr);
+       return TIPC_OK;
+}
+
+/*
+ * port_build_proto_msg(): build a port-level protocol message
+ * or a connection abort message. Called with the port lock held.
+ */
+static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
+                                           u32 origport, u32 orignode,
+                                           u32 usr, u32 type, u32 err, 
+                                           u32 seqno, u32 ack)
+{
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       
+       buf = buf_acquire(LONG_H_SIZE);
+       if (buf) {
+               msg = buf_msg(buf);
+               msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
+               msg_set_destport(msg, destport);
+               msg_set_origport(msg, origport);
+               msg_set_destnode(msg, destnode);
+               msg_set_orignode(msg, orignode);
+               msg_set_transp_seqno(msg, seqno);
+               msg_set_msgcnt(msg, ack);
+               msg_dbg(msg, "PORT>SEND>:");
+       }
+       return buf;
+}
+
+int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
+{
+       msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
+       msg_set_options(&tp_ptr->phdr, opt, sz);
+       return TIPC_OK;
+}
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct sk_buff *rbuf;
+       struct tipc_msg *rmsg;
+       int hdr_sz;
+       u32 imp = msg_importance(msg);
+       u32 data_sz = msg_data_sz(msg);
+
+       if (data_sz > MAX_REJECT_SIZE)
+               data_sz = MAX_REJECT_SIZE;
+       if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
+               imp++;
+       msg_dbg(msg, "port->rej: ");
+
+       /* discard rejected message if it shouldn't be returned to sender */
+       if (msg_errcode(msg) || msg_dest_droppable(msg)) {
+               buf_discard(buf);
+               return data_sz;
+       }
+
+       /* construct rejected message */
+       if (msg_mcast(msg))
+               hdr_sz = MCAST_H_SIZE;
+       else
+               hdr_sz = LONG_H_SIZE;
+       rbuf = buf_acquire(data_sz + hdr_sz);
+       if (rbuf == NULL) {
+               buf_discard(buf);
+               return data_sz;
+       }
+       rmsg = buf_msg(rbuf);
+       msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
+       msg_set_destport(rmsg, msg_origport(msg));
+       msg_set_prevnode(rmsg, tipc_own_addr);
+       msg_set_origport(rmsg, msg_destport(msg));
+       if (msg_short(msg))
+               msg_set_orignode(rmsg, tipc_own_addr);
+       else
+               msg_set_orignode(rmsg, msg_destnode(msg));
+       msg_set_size(rmsg, data_sz + hdr_sz); 
+       msg_set_nametype(rmsg, msg_nametype(msg));
+       msg_set_nameinst(rmsg, msg_nameinst(msg));
+       memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
+
+       /* send self-abort message when rejecting on a connected port */
+       if (msg_connected(msg)) {
+               struct sk_buff *abuf = 0;
+               struct port *p_ptr = port_lock(msg_destport(msg));
+
+               if (p_ptr) {
+                       if (p_ptr->publ.connected)
+                               abuf = port_build_self_abort_msg(p_ptr, err);
+                       port_unlock(p_ptr);
+               }
+               net_route_msg(abuf);
+       }
+
+       /* send rejected message */
+       buf_discard(buf);
+       net_route_msg(rbuf);
+       return data_sz;
+}
+
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+                        struct iovec const *msg_sect, u32 num_sect,
+                        int err)
+{
+       struct sk_buff *buf;
+       int res;
+
+       res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE, 
+                       !p_ptr->user_port, &buf);
+       if (!buf)
+               return res;
+
+       return tipc_reject_msg(buf, err);
+}
+
+static void port_timeout(unsigned long ref)
+{
+       struct port *p_ptr = port_lock(ref);
+       struct sk_buff *buf = 0;
+
+       if (!p_ptr)
+               return;
+       if (!p_ptr->publ.connected) {
+               port_unlock(p_ptr);
+               return;
+       }
+
+       /* Last probe answered ? */
+       if (p_ptr->probing_state == PROBING) {
+               buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
+       } else {
+               buf = port_build_proto_msg(port_peerport(p_ptr),
+                                          port_peernode(p_ptr),
+                                          p_ptr->publ.ref,
+                                          tipc_own_addr,
+                                          CONN_MANAGER,
+                                          CONN_PROBE,
+                                          TIPC_OK, 
+                                          port_out_seqno(p_ptr),
+                                          0);
+               port_incr_out_seqno(p_ptr);
+               p_ptr->probing_state = PROBING;
+               k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+       }
+       port_unlock(p_ptr);
+       net_route_msg(buf);
+}
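+
+/*
+ * Connection supervision in brief:
+ *
+ *     CONFIRMED --timer-->  send CONN_PROBE, probing_state = PROBING
+ *     PROBING   --valid peer msg (port_recv_proto_msg)-->  CONFIRMED
+ *     PROBING   --timer-->  peer presumed gone, send self-abort message
+ *
+ * The timer is re-armed with probing_interval each time a probe is sent.
+ */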
+
+
+static void port_handle_node_down(unsigned long ref)
+{
+       struct port *p_ptr = port_lock(ref);
+       struct sk_buff* buf = 0;
+
+       if (!p_ptr)
+               return;
+       buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
+       port_unlock(p_ptr);
+       net_route_msg(buf);
+}
+
+
+static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
+{
+       u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+       if (!p_ptr->publ.connected)
+               return 0;
+       if (imp < TIPC_CRITICAL_IMPORTANCE)
+               imp++;
+       return port_build_proto_msg(p_ptr->publ.ref,
+                                   tipc_own_addr,
+                                   port_peerport(p_ptr),
+                                   port_peernode(p_ptr),
+                                   imp,
+                                   TIPC_CONN_MSG,
+                                   err, 
+                                   p_ptr->last_in_seqno + 1,
+                                   0);
+}
+
+
+static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
+{
+       u32 imp = msg_importance(&p_ptr->publ.phdr);
+
+       if (!p_ptr->publ.connected)
+               return 0;
+       if (imp < TIPC_CRITICAL_IMPORTANCE)
+               imp++;
+       return port_build_proto_msg(port_peerport(p_ptr),
+                                   port_peernode(p_ptr),
+                                   p_ptr->publ.ref,
+                                   tipc_own_addr,
+                                   imp,
+                                   TIPC_CONN_MSG,
+                                   err, 
+                                   port_out_seqno(p_ptr),
+                                   0);
+}
+
+void port_recv_proto_msg(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct port *p_ptr = port_lock(msg_destport(msg));
+       u32 err = TIPC_OK;
+       struct sk_buff *r_buf = 0;
+       struct sk_buff *abort_buf = 0;
+
+       msg_dbg(msg, "PORT<RECV<:");
+
+       if (!p_ptr) {
+               err = TIPC_ERR_NO_PORT;
+       } else if (p_ptr->publ.connected) {
+               if (port_peernode(p_ptr) != msg_orignode(msg))
+                       err = TIPC_ERR_NO_PORT;
+               if (port_peerport(p_ptr) != msg_origport(msg))
+                       err = TIPC_ERR_NO_PORT;
+               if (!err && msg_routed(msg)) {
+                       u32 seqno = msg_transp_seqno(msg);
+                       u32 myno =  ++p_ptr->last_in_seqno;
+                       if (seqno != myno) {
+                               err = TIPC_ERR_NO_PORT;
+                               abort_buf = port_build_self_abort_msg(p_ptr, err);
+                       }
+               }
+               if (msg_type(msg) == CONN_ACK) {
+                       int wakeup = port_congested(p_ptr) && 
+                                    p_ptr->publ.congested &&
+                                    p_ptr->wakeup;
+                       p_ptr->acked += msg_msgcnt(msg);
+                       if (port_congested(p_ptr))
+                               goto exit;
+                       p_ptr->publ.congested = 0;
+                       if (!wakeup)
+                               goto exit;
+                       p_ptr->wakeup(&p_ptr->publ);
+                       goto exit;
+               }
+       } else if (p_ptr->publ.published) {
+               err = TIPC_ERR_NO_PORT;
+       }
+       if (err) {
+               r_buf = port_build_proto_msg(msg_origport(msg),
+                                            msg_orignode(msg), 
+                                            msg_destport(msg), 
+                                            tipc_own_addr,
+                                            DATA_HIGH,
+                                            TIPC_CONN_MSG,
+                                            err,
+                                            0,
+                                            0);
+               goto exit;
+       }
+
+       /* All is fine */
+       if (msg_type(msg) == CONN_PROBE) {
+               r_buf = port_build_proto_msg(msg_origport(msg), 
+                                            msg_orignode(msg), 
+                                            msg_destport(msg), 
+                                            tipc_own_addr, 
+                                            CONN_MANAGER,
+                                            CONN_PROBE_REPLY,
+                                            TIPC_OK,
+                                            port_out_seqno(p_ptr),
+                                            0);
+       }
+       p_ptr->probing_state = CONFIRMED;
+       port_incr_out_seqno(p_ptr);
+exit:
+       if (p_ptr)
+               port_unlock(p_ptr);
+       net_route_msg(r_buf);
+       net_route_msg(abort_buf);
+       buf_discard(buf);
+}
+
+static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
+{
+        struct publication *publ;
+
+       if (full_id)
+               tipc_printf(buf, "<%u.%u.%u:%u>:", 
+                           tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
+                            tipc_node(tipc_own_addr), p_ptr->publ.ref);
+       else
+               tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
+
+        if (p_ptr->publ.connected) {
+                u32 dport = port_peerport(p_ptr);
+                u32 destnode = port_peernode(p_ptr);
+
+                tipc_printf(buf, " connected to <%u.%u.%u:%u>",
+                            tipc_zone(destnode), tipc_cluster(destnode),
+                            tipc_node(destnode), dport);
+                if (p_ptr->publ.conn_type != 0)
+                        tipc_printf(buf, " via {%u,%u}",
+                                    p_ptr->publ.conn_type,
+                                    p_ptr->publ.conn_instance);
+        }
+        else if (p_ptr->publ.published) {
+                tipc_printf(buf, " bound to");
+                list_for_each_entry(publ, &p_ptr->publications, pport_list) {
+                       if (publ->lower == publ->upper)
+                               tipc_printf(buf, " {%u,%u}", publ->type,
+                                           publ->lower);
+                       else
+                               tipc_printf(buf, " {%u,%u,%u}", publ->type, 
+                                           publ->lower, publ->upper);
+                }
+        }
+        tipc_printf(buf, "\n");
+}
+
+#define MAX_PORT_QUERY 32768
+
+struct sk_buff *port_get_ports(void)
+{
+       struct sk_buff *buf;
+       struct tlv_desc *rep_tlv;
+       struct print_buf pb;
+       struct port *p_ptr;
+       int str_len;
+
+       buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
+       if (!buf)
+               return NULL;
+       rep_tlv = (struct tlv_desc *)buf->data;
+
+       printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
+       spin_lock_bh(&port_list_lock);
+       list_for_each_entry(p_ptr, &ports, port_list) {
+               spin_lock_bh(p_ptr->publ.lock);
+               port_print(p_ptr, &pb, 0);
+               spin_unlock_bh(p_ptr->publ.lock);
+       }
+       spin_unlock_bh(&port_list_lock);
+       str_len = printbuf_validate(&pb);
+
+       skb_put(buf, TLV_SPACE(str_len));
+       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+       return buf;
+}
+
+#if 0
+
+#define MAX_PORT_STATS 2000
+
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
+{
+       u32 ref;
+       struct port *p_ptr;
+       struct sk_buff *buf;
+       struct tlv_desc *rep_tlv;
+       struct print_buf pb;
+       int str_len;
+
+       if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
+               return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
+
+       ref = *(u32 *)TLV_DATA(req_tlv_area);
+       ref = ntohl(ref);
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return cfg_reply_error_string("port not found");
+
+       buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
+       if (!buf) {
+               port_unlock(p_ptr);
+               return NULL;
+       }
+       rep_tlv = (struct tlv_desc *)buf->data;
+
+       printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
+       port_print(p_ptr, &pb, 1);
+       /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
+       port_unlock(p_ptr);
+       str_len = printbuf_validate(&pb);
+
+       skb_put(buf, TLV_SPACE(str_len));
+       TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
+
+       return buf;
+}
+
+#endif
+
+void port_reinit(void)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+
+       spin_lock_bh(&port_list_lock);
+       list_for_each_entry(p_ptr, &ports, port_list) {
+               msg = &p_ptr->publ.phdr;
+               if (msg_orignode(msg) == tipc_own_addr)
+                       break;
+               msg_set_orignode(msg, tipc_own_addr);
+       }
+       spin_unlock_bh(&port_list_lock);
+}
+
+
+/*
+ *  port_dispatcher_sigh(): Signal handler for messages destined
+ *                          to the tipc_port interface.
+ */
+
+static void port_dispatcher_sigh(void *dummy)
+{
+       struct sk_buff *buf;
+
+       spin_lock_bh(&queue_lock);
+       buf = msg_queue_head;
+       msg_queue_head = 0;
+       spin_unlock_bh(&queue_lock);
+
+       while (buf) {
+               struct port *p_ptr;
+               struct user_port *up_ptr;
+               struct tipc_portid orig;
+               struct tipc_name_seq dseq;
+               void *usr_handle;
+               int connected;
+               int published;
+
+               struct sk_buff *next = buf->next;
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 dref = msg_destport(msg);
+               
+               p_ptr = port_lock(dref);
+               if (!p_ptr) {
+                       /* Port deleted while msg in queue */
+                       tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+                       buf = next;
+                       continue;
+               }
+               orig.ref = msg_origport(msg);
+               orig.node = msg_orignode(msg);
+               up_ptr = p_ptr->user_port;
+               usr_handle = up_ptr->usr_handle;
+               connected = p_ptr->publ.connected;
+               published = p_ptr->publ.published;
+
+               if (unlikely(msg_errcode(msg)))
+                       goto err;
+
+               switch (msg_type(msg)) {
+               
+               case TIPC_CONN_MSG:{
+                               tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
+                               u32 peer_port = port_peerport(p_ptr);
+                               u32 peer_node = port_peernode(p_ptr);
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (unlikely(!connected)) {
+                                       if (unlikely(published))
+                                               goto reject;
+                                       tipc_connect2port(dref,&orig);
+                               }
+                               if (unlikely(msg_origport(msg) != peer_port))
+                                       goto reject;
+                               if (unlikely(msg_orignode(msg) != peer_node))
+                                       goto reject;
+                               if (unlikely(!cb))
+                                       goto reject;
+                               if (unlikely(++p_ptr->publ.conn_unacked >= 
+                                            TIPC_FLOW_CONTROL_WIN))
+                                       tipc_acknowledge(dref, 
+                                                        p_ptr->publ.conn_unacked);
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg),
+                                  msg_data_sz(msg));
+                               break;
+                       }
+               case TIPC_DIRECT_MSG:{
+                               tipc_msg_event cb = up_ptr->msg_cb;
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (unlikely(connected))
+                                       goto reject;
+                               if (unlikely(!cb))
+                                       goto reject;
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg), 
+                                  msg_data_sz(msg), msg_importance(msg),
+                                  &orig);
+                               break;
+                       }
+               case TIPC_NAMED_MSG:{
+                               tipc_named_msg_event cb = up_ptr->named_msg_cb;
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (unlikely(connected))
+                                       goto reject;
+                               if (unlikely(!cb))
+                                       goto reject;
+                               if (unlikely(!published))
+                                       goto reject;
+                               dseq.type =  msg_nametype(msg);
+                               dseq.lower = msg_nameinst(msg);
+                               dseq.upper = dseq.lower;
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg), 
+                                  msg_data_sz(msg), msg_importance(msg),
+                                  &orig, &dseq);
+                               break;
+                       }
+               }
+               if (buf)
+                       buf_discard(buf);
+               buf = next;
+               continue;
+err:
+               switch (msg_type(msg)) {
+               
+               case TIPC_CONN_MSG:{
+                               tipc_conn_shutdown_event cb = 
+                                       up_ptr->conn_err_cb;
+                               u32 peer_port = port_peerport(p_ptr);
+                               u32 peer_node = port_peernode(p_ptr);
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (!connected || !cb)
+                                       break;
+                               if (msg_origport(msg) != peer_port)
+                                       break;
+                               if (msg_orignode(msg) != peer_node)
+                                       break;
+                               tipc_disconnect(dref);
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg),
+                                  msg_data_sz(msg), msg_errcode(msg));
+                               break;
+                       }
+               case TIPC_DIRECT_MSG:{
+                               tipc_msg_err_event cb = up_ptr->err_cb;
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (connected || !cb)
+                                       break;
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg),
+                                  msg_data_sz(msg), msg_errcode(msg), &orig);
+                               break;
+                       }
+               case TIPC_NAMED_MSG:{
+                               tipc_named_msg_err_event cb = 
+                                       up_ptr->named_err_cb;
+
+                               spin_unlock_bh(p_ptr->publ.lock);
+                               if (connected || !cb)
+                                       break;
+                               dseq.type =  msg_nametype(msg);
+                               dseq.lower = msg_nameinst(msg);
+                               dseq.upper = dseq.lower;
+                               skb_pull(buf, msg_hdr_sz(msg));
+                               cb(usr_handle, dref, &buf, msg_data(msg), 
+                                  msg_data_sz(msg), msg_errcode(msg), &dseq);
+                               break;
+                       }
+               }
+               if (buf)
+                       buf_discard(buf);
+               buf = next;
+               continue;
+reject:
+               tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+               buf = next;
+       }
+}
+
+/*
+ *  port_dispatcher(): Dispatcher for messages destined
+ *  to the tipc_port interface. Called with port locked.
+ */
+
+static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
+{
+       buf->next = NULL;
+       spin_lock_bh(&queue_lock);
+       if (msg_queue_head) {
+               msg_queue_tail->next = buf;
+               msg_queue_tail = buf;
+       } else {
+               msg_queue_tail = msg_queue_head = buf;
+               k_signal((Handler)port_dispatcher_sigh, 0);
+       }
+       spin_unlock_bh(&queue_lock);
+       return TIPC_OK;
+}
+
+/*
+ * Wake up port after congestion: Called with port locked.
+ */
+
+static void port_wakeup_sh(unsigned long ref)
+{
+       struct port *p_ptr;
+       struct user_port *up_ptr;
+       tipc_continue_event cb = 0;
+       void *uh = 0;
+
+       p_ptr = port_lock(ref);
+       if (p_ptr) {
+               up_ptr = p_ptr->user_port;
+               if (up_ptr) {
+                       cb = up_ptr->continue_event_cb;
+                       uh = up_ptr->usr_handle;
+               }
+               port_unlock(p_ptr);
+       }
+       if (cb)
+               cb(uh, ref);
+}
+
+
+static void port_wakeup(struct tipc_port *p_ptr)
+{
+       k_signal((Handler)port_wakeup_sh, p_ptr->ref);
+}
+
+void tipc_acknowledge(u32 ref, u32 ack)
+{
+       struct port *p_ptr;
+       struct sk_buff *buf = 0;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return;
+       if (p_ptr->publ.connected) {
+               p_ptr->publ.conn_unacked -= ack;
+               buf = port_build_proto_msg(port_peerport(p_ptr),
+                                          port_peernode(p_ptr),
+                                          ref,
+                                          tipc_own_addr,
+                                          CONN_MANAGER,
+                                          CONN_ACK,
+                                          TIPC_OK, 
+                                          port_out_seqno(p_ptr),
+                                          ack);
+       }
+       port_unlock(p_ptr);
+       net_route_msg(buf);
+}
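+
+/*
+ * Flow-control sketch (illustrative; 'my_ref' and 'rcvd' are hypothetical
+ * caller-side names): a connected receiver that bypasses the built-in
+ * dispatcher should acknowledge consumed messages itself, mirroring what
+ * port_dispatcher_sigh() does with conn_unacked:
+ *
+ *     rcvd++;
+ *     if (rcvd >= TIPC_FLOW_CONTROL_WIN) {
+ *             tipc_acknowledge(my_ref, rcvd);
+ *             rcvd = 0;
+ *     }
+ *
+ * Without the CONN_ACK this generates, the sending port can remain marked
+ * congested and its traffic stalls.
+ */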
+
+/*
+ * tipc_createport(): user-level call. Adds the port to the
+ *                    registry if user_ref is non-zero.
+ */
+
+int tipc_createport(u32 user_ref, 
+                   void *usr_handle, 
+                   unsigned int importance, 
+                   tipc_msg_err_event error_cb, 
+                   tipc_named_msg_err_event named_error_cb, 
+                   tipc_conn_shutdown_event conn_error_cb, 
+                   tipc_msg_event msg_cb, 
+                   tipc_named_msg_event named_msg_cb, 
+                   tipc_conn_msg_event conn_msg_cb, 
+                   tipc_continue_event continue_event_cb,/* May be zero */
+                   u32 *portref)
+{
+       struct user_port *up_ptr;
+       struct port *p_ptr; 
+       u32 ref;
+
+       up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
+       if (up_ptr == NULL) {
+               return -ENOMEM;
+       }
+       ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
+       p_ptr = port_lock(ref);
+       if (!p_ptr) {
+               kfree(up_ptr);
+               return -ENOMEM;
+       }
+
+       p_ptr->user_port = up_ptr;
+       up_ptr->user_ref = user_ref;
+       up_ptr->usr_handle = usr_handle;
+       up_ptr->ref = p_ptr->publ.ref;
+       up_ptr->err_cb = error_cb;
+       up_ptr->named_err_cb = named_error_cb;
+       up_ptr->conn_err_cb = conn_error_cb;
+       up_ptr->msg_cb = msg_cb;
+       up_ptr->named_msg_cb = named_msg_cb;
+       up_ptr->conn_msg_cb = conn_msg_cb;
+       up_ptr->continue_event_cb = continue_event_cb;
+       INIT_LIST_HEAD(&up_ptr->uport_list);
+       reg_add_port(up_ptr);
+       *portref = p_ptr->publ.ref;
+       dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);        
+       port_unlock(p_ptr);
+       return TIPC_OK;
+}
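
A minimal usage sketch of the native-port API above, assuming unused callbacks may be left NULL (as the continue_event_cb comment suggests) and using a hypothetical name {1000, 1}:

    static int example_create_named_port(void)
    {
            struct tipc_name_seq seq = { .type = 1000, .lower = 1, .upper = 1 };
            u32 portref;
            int res;

            /* assumption: callbacks that are not needed may be passed as NULL */
            res = tipc_createport(0, NULL, TIPC_LOW_IMPORTANCE,
                                  NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                                  &portref);
            if (res != TIPC_OK)
                    return res;

            /* make the port reachable under name {1000, 1} on this node */
            return tipc_publish(portref, TIPC_NODE_SCOPE, &seq);
    }
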
+
+int tipc_ownidentity(u32 ref, struct tipc_portid *id)
+{
+       id->ref = ref;
+       id->node = tipc_own_addr;
+       return TIPC_OK;
+}
+
+int tipc_portimportance(u32 ref, unsigned int *importance)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
+       spin_unlock_bh(p_ptr->publ.lock);
+       return TIPC_OK;
+}
+
+int tipc_set_portimportance(u32 ref, unsigned int imp)
+{
+       struct port *p_ptr;
+
+       if (imp > TIPC_CRITICAL_IMPORTANCE)
+               return -EINVAL;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
+       spin_unlock_bh(p_ptr->publ.lock);
+       return TIPC_OK;
+}
+
+
+int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+       struct port *p_ptr;
+       struct publication *publ;
+       u32 key;
+       int res = -EINVAL;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
+           "lower = %u, upper = %u\n",
+           ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
+       if (p_ptr->publ.connected)
+               goto exit;
+       if (seq->lower > seq->upper)
+               goto exit;
+       if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
+               goto exit;
+       key = ref + p_ptr->pub_count + 1;
+       if (key == ref) {
+               res = -EADDRINUSE;
+               goto exit;
+       }
+       publ = nametbl_publish(seq->type, seq->lower, seq->upper,
+                              scope, p_ptr->publ.ref, key);
+       if (publ) {
+               list_add(&publ->pport_list, &p_ptr->publications);
+               p_ptr->pub_count++;
+               p_ptr->publ.published = 1;
+               res = TIPC_OK;
+       }
+exit:
+       port_unlock(p_ptr);
+       return res;
+}
+
+int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+{
+       struct port *p_ptr;
+       struct publication *publ;
+       struct publication *tpubl;
+       int res = -EINVAL;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       if (!p_ptr->publ.published)
+               goto exit;
+       if (!seq) {
+               list_for_each_entry_safe(publ, tpubl, 
+                                        &p_ptr->publications, pport_list) {
+                       nametbl_withdraw(publ->type, publ->lower, 
+                                        publ->ref, publ->key);
+               }
+               res = TIPC_OK;
+       } else {
+               list_for_each_entry_safe(publ, tpubl, 
+                                        &p_ptr->publications, pport_list) {
+                       if (publ->scope != scope)
+                               continue;
+                       if (publ->type != seq->type)
+                               continue;
+                       if (publ->lower != seq->lower)
+                               continue;
+                       if (publ->upper != seq->upper)
+                               break;
+                       nametbl_withdraw(publ->type, publ->lower, 
+                                        publ->ref, publ->key);
+                       res = TIPC_OK;
+                       break;
+               }
+       }
+       if (list_empty(&p_ptr->publications))
+               p_ptr->publ.published = 0;
+exit:
+       port_unlock(p_ptr);
+       return res;
+}
+
+int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       int res = -EINVAL;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       if (p_ptr->publ.published || p_ptr->publ.connected)
+               goto exit;
+       if (!peer->ref)
+               goto exit;
+
+       msg = &p_ptr->publ.phdr;
+       msg_set_destnode(msg, peer->node);
+       msg_set_destport(msg, peer->ref);
+       msg_set_orignode(msg, tipc_own_addr);
+       msg_set_origport(msg, p_ptr->publ.ref);
+       msg_set_transp_seqno(msg, 42);
+       msg_set_type(msg, TIPC_CONN_MSG);
+       if (!may_route(peer->node))
+               msg_set_hdr_sz(msg, SHORT_H_SIZE);
+       else
+               msg_set_hdr_sz(msg, LONG_H_SIZE);
+
+       p_ptr->probing_interval = PROBING_INTERVAL;
+       p_ptr->probing_state = CONFIRMED;
+       p_ptr->publ.connected = 1;
+       k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
+
+       nodesub_subscribe(&p_ptr->subscription,peer->node,
+                         (void *)(unsigned long)ref,
+                         (net_ev_handler)port_handle_node_down);
+       res = TIPC_OK;
+exit:
+       p_ptr->max_pkt = link_get_max_pkt(peer->node, ref);
+       port_unlock(p_ptr);
+       return res;
+}
+
+/*
+ * tipc_disconnect(): Disconnect port from peer.
+ *                    This is a node local operation.
+ */
+
+int tipc_disconnect(u32 ref)
+{
+       struct port *p_ptr;
+       int res = -ENOTCONN;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       if (p_ptr->publ.connected) {
+               p_ptr->publ.connected = 0;
+               /* let timer expire on its own to avoid deadlock! */
+               nodesub_unsubscribe(&p_ptr->subscription);
+               res = TIPC_OK;
+       }
+       port_unlock(p_ptr);
+       return res;
+}
+
+/*
+ * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ */
+int tipc_shutdown(u32 ref)
+{
+       struct port *p_ptr;
+       struct sk_buff *buf = 0;
+
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+
+       if (p_ptr->publ.connected) {
+               u32 imp = msg_importance(&p_ptr->publ.phdr);
+               if (imp < TIPC_CRITICAL_IMPORTANCE)
+                       imp++;
+               buf = port_build_proto_msg(port_peerport(p_ptr),
+                                          port_peernode(p_ptr),
+                                          ref,
+                                          tipc_own_addr,
+                                          imp,
+                                          TIPC_CONN_MSG,
+                                          TIPC_CONN_SHUTDOWN, 
+                                          port_out_seqno(p_ptr),
+                                          0);
+       }
+       port_unlock(p_ptr);
+       net_route_msg(buf);
+       return tipc_disconnect(ref);
+}
+
+int tipc_isconnected(u32 ref, int *isconnected)
+{
+       struct port *p_ptr;
+       
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       *isconnected = p_ptr->publ.connected;
+       port_unlock(p_ptr);
+       return TIPC_OK;
+}
+
+int tipc_peer(u32 ref, struct tipc_portid *peer)
+{
+       struct port *p_ptr;
+       int res;
+        
+       p_ptr = port_lock(ref);
+       if (!p_ptr)
+               return -EINVAL;
+       if (p_ptr->publ.connected) {
+               peer->ref = port_peerport(p_ptr);
+               peer->node = port_peernode(p_ptr);
+               res = TIPC_OK;
+       } else
+               res = -ENOTCONN;
+       port_unlock(p_ptr);
+       return res;
+}
+
+int tipc_ref_valid(u32 ref)
+{
+       /* Works irrespective of type */
+       return !!ref_deref(ref);
+}
+
+
+/*
+ *  port_recv_sections(): Concatenate and deliver sectioned
+ *                        message for this node.
+ */
+
+int port_recv_sections(struct port *sender, unsigned int num_sect,
+                      struct iovec const *msg_sect)
+{
+       struct sk_buff *buf;
+       int res;
+        
+       res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
+                       MAX_MSG_SIZE, !sender->user_port, &buf);
+       if (likely(buf))
+               port_recv_msg(buf);
+       return res;
+}
+
+/**
+ * tipc_send - send message sections on connection
+ */
+
+int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
+{
+       struct port *p_ptr;
+       u32 destnode;
+       int res;
+
+       p_ptr = port_deref(ref);
+       if (!p_ptr || !p_ptr->publ.connected)
+               return -EINVAL;
+
+       p_ptr->publ.congested = 1;
+       if (!port_congested(p_ptr)) {
+               destnode = port_peernode(p_ptr);
+               if (likely(destnode != tipc_own_addr))
+                       res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
+                                                     destnode);
+               else
+                       res = port_recv_sections(p_ptr, num_sect, msg_sect);
+
+               if (likely(res != -ELINKCONG)) {
+                       port_incr_out_seqno(p_ptr);
+                       p_ptr->publ.congested = 0;
+                       p_ptr->sent++;
+                       return res;
+               }
+       }
+       if (port_unreliable(p_ptr)) {
+               p_ptr->publ.congested = 0;
+               /* Just calculate msg length and return */
+               return msg_calc_data_size(msg_sect, num_sect);
+       }
+       return -ELINKCONG;
+}
+
+/** 
+ * tipc_send_buf - send message buffer on connection
+ */
+
+int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       u32 destnode;
+       u32 hsz;
+       u32 sz;
+       int res;
+        
+       p_ptr = port_deref(ref);
+       if (!p_ptr || !p_ptr->publ.connected)
+               return -EINVAL;
+
+       msg = &p_ptr->publ.phdr;
+       hsz = msg_hdr_sz(msg);
+       sz = hsz + dsz;
+       msg_set_size(msg, sz);
+       if (skb_cow(buf, hsz))
+               return -ENOMEM;
+
+       skb_push(buf, hsz);
+       memcpy(buf->data, (unchar *)msg, hsz);
+       destnode = msg_destnode(msg);
+       p_ptr->publ.congested = 1;
+       if (!port_congested(p_ptr)) {
+               if (likely(destnode != tipc_own_addr))
+                       res = tipc_send_buf_fast(buf, destnode);
+               else {
+                       port_recv_msg(buf);
+                       res = sz;
+               }
+               if (likely(res != -ELINKCONG)) {
+                       port_incr_out_seqno(p_ptr);
+                       p_ptr->sent++;
+                       p_ptr->publ.congested = 0;
+                       return res;
+               }
+       }
+       if (port_unreliable(p_ptr)) {
+               p_ptr->publ.congested = 0;
+               return dsz;
+       }
+       return -ELINKCONG;
+}
+
+/**
+ * tipc_forward2name - forward message sections to port name
+ */
+
+int tipc_forward2name(u32 ref, 
+                     struct tipc_name const *name, 
+                     u32 domain,
+                     u32 num_sect, 
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *orig, 
+                     unsigned int importance)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       u32 destnode = domain;
+       u32 destport = 0;
+       int res;
+
+       p_ptr = port_deref(ref);
+       if (!p_ptr || p_ptr->publ.connected)
+               return -EINVAL;
+
+       msg = &p_ptr->publ.phdr;
+       msg_set_type(msg, TIPC_NAMED_MSG);
+       msg_set_orignode(msg, orig->node);
+       msg_set_origport(msg, orig->ref);
+       msg_set_hdr_sz(msg, LONG_H_SIZE);
+       msg_set_nametype(msg, name->type);
+       msg_set_nameinst(msg, name->instance);
+       msg_set_lookup_scope(msg, addr_scope(domain));
+       if (importance <= TIPC_CRITICAL_IMPORTANCE)
+               msg_set_importance(msg,importance);
+       destport = nametbl_translate(name->type, name->instance, &destnode);
+       msg_set_destnode(msg, destnode);
+       msg_set_destport(msg, destport);
+
+       if (likely(destport || destnode)) {
+               p_ptr->sent++;
+               if (likely(destnode == tipc_own_addr))
+                       return port_recv_sections(p_ptr, num_sect, msg_sect);
+               res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 
+                                             destnode);
+               if (likely(res != -ELINKCONG))
+                       return res;
+               if (port_unreliable(p_ptr)) {
+                       /* Just calculate msg length and return */
+                       return msg_calc_data_size(msg_sect, num_sect);
+               }
+               return -ELINKCONG;
+       }
+       return port_reject_sections(p_ptr, msg, msg_sect, num_sect, 
+                                   TIPC_ERR_NO_NAME);
+}
+
+/**
+ * tipc_send2name - send message sections to port name
+ */
+
+int tipc_send2name(u32 ref, 
+                  struct tipc_name const *name,
+                  unsigned int domain, 
+                  unsigned int num_sect, 
+                  struct iovec const *msg_sect)
+{
+       struct tipc_portid orig;
+
+       orig.ref = ref;
+       orig.node = tipc_own_addr;
+       return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
+                                TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward_buf2name - forward message buffer to port name
+ */
+
+int tipc_forward_buf2name(u32 ref,
+                         struct tipc_name const *name,
+                         u32 domain,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       u32 destnode = domain;
+       u32 destport = 0;
+       int res;
+
+       p_ptr = (struct port *)ref_deref(ref);
+       if (!p_ptr || p_ptr->publ.connected)
+               return -EINVAL;
+
+       msg = &p_ptr->publ.phdr;
+       if (importance <= TIPC_CRITICAL_IMPORTANCE)
+               msg_set_importance(msg, importance);
+       msg_set_type(msg, TIPC_NAMED_MSG);
+       msg_set_orignode(msg, orig->node);
+       msg_set_origport(msg, orig->ref);
+       msg_set_nametype(msg, name->type);
+       msg_set_nameinst(msg, name->instance);
+       msg_set_lookup_scope(msg, addr_scope(domain));
+       msg_set_hdr_sz(msg, LONG_H_SIZE);
+       msg_set_size(msg, LONG_H_SIZE + dsz);
+       destport = nametbl_translate(name->type, name->instance, &destnode);
+       msg_set_destnode(msg, destnode);
+       msg_set_destport(msg, destport);
+       msg_dbg(msg, "forw2name ==> ");
+       if (skb_cow(buf, LONG_H_SIZE))
+               return -ENOMEM;
+       skb_push(buf, LONG_H_SIZE);
+       memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
+       msg_dbg(buf_msg(buf),"PREP:");
+       if (likely(destport || destnode)) {
+               p_ptr->sent++;
+               if (destnode == tipc_own_addr)
+                       return port_recv_msg(buf);
+               res = tipc_send_buf_fast(buf, destnode);
+               if (likely(res != -ELINKCONG))
+                       return res;
+               if (port_unreliable(p_ptr))
+                       return dsz;
+               return -ELINKCONG;
+       }
+       return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
+}
+
+/** 
+ * tipc_send_buf2name - send message buffer to port name
+ */
+
+int tipc_send_buf2name(u32 ref, 
+                      struct tipc_name const *dest, 
+                      u32 domain,
+                      struct sk_buff *buf, 
+                      unsigned int dsz)
+{
+       struct tipc_portid orig;
+
+       orig.ref = ref;
+       orig.node = tipc_own_addr;
+       return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
+                                    TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward2port - forward message sections to port identity
+ */
+
+int tipc_forward2port(u32 ref,
+                     struct tipc_portid const *dest,
+                     unsigned int num_sect, 
+                     struct iovec const *msg_sect,
+                     struct tipc_portid const *orig, 
+                     unsigned int importance)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       int res;
+
+       p_ptr = port_deref(ref);
+       if (!p_ptr || p_ptr->publ.connected)
+               return -EINVAL;
+
+       msg = &p_ptr->publ.phdr;
+       msg_set_type(msg, TIPC_DIRECT_MSG);
+       msg_set_orignode(msg, orig->node);
+       msg_set_origport(msg, orig->ref);
+       msg_set_destnode(msg, dest->node);
+       msg_set_destport(msg, dest->ref);
+       msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+       if (importance <= TIPC_CRITICAL_IMPORTANCE)
+               msg_set_importance(msg, importance);
+       p_ptr->sent++;
+       if (dest->node == tipc_own_addr)
+               return port_recv_sections(p_ptr, num_sect, msg_sect);
+       res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
+       if (likely(res != -ELINKCONG))
+               return res;
+       if (port_unreliable(p_ptr)) {
+               /* Just calculate msg length and return */
+               return msg_calc_data_size(msg_sect, num_sect);
+       }
+       return -ELINKCONG;
+}
+
+/** 
+ * tipc_send2port - send message sections to port identity 
+ */
+
+int tipc_send2port(u32 ref, 
+                  struct tipc_portid const *dest,
+                  unsigned int num_sect, 
+                  struct iovec const *msg_sect)
+{
+       struct tipc_portid orig;
+
+       orig.ref = ref;
+       orig.node = tipc_own_addr;
+       return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig, 
+                                TIPC_PORT_IMPORTANCE);
+}
+
+/** 
+ * tipc_forward_buf2port - forward message buffer to port identity
+ */
+int tipc_forward_buf2port(u32 ref,
+                         struct tipc_portid const *dest,
+                         struct sk_buff *buf,
+                         unsigned int dsz,
+                         struct tipc_portid const *orig,
+                         unsigned int importance)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg;
+       int res;
+
+       p_ptr = (struct port *)ref_deref(ref);
+       if (!p_ptr || p_ptr->publ.connected)
+               return -EINVAL;
+
+       msg = &p_ptr->publ.phdr;
+       msg_set_type(msg, TIPC_DIRECT_MSG);
+       msg_set_orignode(msg, orig->node);
+       msg_set_origport(msg, orig->ref);
+       msg_set_destnode(msg, dest->node);
+       msg_set_destport(msg, dest->ref);
+       msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
+       if (importance <= TIPC_CRITICAL_IMPORTANCE)
+               msg_set_importance(msg, importance);
+       msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
+       if (skb_cow(buf, DIR_MSG_H_SIZE))
+               return -ENOMEM;
+
+       skb_push(buf, DIR_MSG_H_SIZE);
+       memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
+       msg_dbg(msg, "buf2port: ");
+       p_ptr->sent++;
+       if (dest->node == tipc_own_addr)
+               return port_recv_msg(buf);
+       res = tipc_send_buf_fast(buf, dest->node);
+       if (likely(res != -ELINKCONG))
+               return res;
+       if (port_unreliable(p_ptr))
+               return dsz;
+       return -ELINKCONG;
+}
+
+/** 
+ * tipc_send_buf2port - send message buffer to port identity
+ */
+
+int tipc_send_buf2port(u32 ref, 
+                      struct tipc_portid const *dest,
+                      struct sk_buff *buf, 
+                      unsigned int dsz)
+{
+       struct tipc_portid orig;
+
+       orig.ref = ref;
+       orig.node = tipc_own_addr;
+       return tipc_forward_buf2port(ref, dest, buf, dsz, &orig, 
+                                    TIPC_PORT_IMPORTANCE);
+}
+
diff --git a/net/tipc/port.h b/net/tipc/port.h
new file mode 100644 (file)
index 0000000..e829a99
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ * net/tipc/port.h: Include file for TIPC port code
+ * 
+ * Copyright (c) 1994-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_PORT_H
+#define _TIPC_PORT_H
+
+#include <net/tipc/tipc_port.h>
+#include "ref.h"
+#include "net.h"
+#include "msg.h"
+#include "dbg.h"
+#include "node_subscr.h"
+
+/**
+ * struct user_port - TIPC user port (used with native API)
+ * @user_ref: id of user who created user port
+ * @usr_handle: user-specified field
+ * @ref: object reference to associated TIPC port
+ * <various callback routines>
+ * @uport_list: adjacent user ports in list of ports held by user
+ */
+struct user_port {
+       u32 user_ref;
+       void *usr_handle; 
+       u32 ref;
+       tipc_msg_err_event err_cb; 
+       tipc_named_msg_err_event named_err_cb; 
+       tipc_conn_shutdown_event conn_err_cb; 
+       tipc_msg_event msg_cb; 
+       tipc_named_msg_event named_msg_cb; 
+       tipc_conn_msg_event conn_msg_cb; 
+       tipc_continue_event continue_event_cb;
+       struct list_head uport_list;
+};
+
+/**
+ * struct port - TIPC port structure
+ * @publ: TIPC port info available to privileged users
+ * @port_list: adjacent ports in TIPC's global list of ports
+ * @dispatcher: ptr to routine which handles received messages
+ * @wakeup: ptr to routine to call when port is no longer congested
+ * @user_port: ptr to user port associated with port (if any)
+ * @wait_list: adjacent ports in list of ports waiting on link congestion
+ * @congested_link: ptr to congested link port is waiting on
+ * @waiting_pkts:
+ * @sent:
+ * @acked:
+ * @publications: list of publications for port
+ * @pub_count: total # of publications port has made during its lifetime
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @probing_state:
+ * @probing_interval:
+ * @last_in_seqno:
+ * @timer:
+ * @subscription: "node down" subscription used to terminate failed connections
+ */
+
+struct port {
+       struct tipc_port publ;
+       struct list_head port_list;
+       u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
+       void (*wakeup)(struct tipc_port *);
+       struct user_port *user_port;
+       struct list_head wait_list;
+       struct link *congested_link;
+       u32 waiting_pkts;
+       u32 sent;
+       u32 acked;
+       struct list_head publications;
+       u32 pub_count;
+       u32 max_pkt;
+       u32 probing_state;
+       u32 probing_interval;
+       u32 last_in_seqno;
+       struct timer_list timer;
+       struct node_subscr subscription;
+};
+
+extern spinlock_t port_list_lock;
+struct port_list;
+
+int port_recv_sections(struct port *p_ptr, u32 num_sect, 
+                      struct iovec const *msg_sect);
+int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
+                        struct iovec const *msg_sect, u32 num_sect,
+                        int err);
+struct sk_buff *port_get_ports(void);
+struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
+void port_recv_proto_msg(struct sk_buff *buf);
+void port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
+void port_reinit(void);
+
+/**
+ * port_lock - lock port instance referred to and return its pointer
+ */
+
+static inline struct port *port_lock(u32 ref)
+{
+       return (struct port *)ref_lock(ref);
+}
+
+/** 
+ * port_unlock - unlock a port instance
+ * 
+ * Can use pointer instead of ref_unlock() since port is already locked.
+ */
+
+static inline void port_unlock(struct port *p_ptr)
+{
+       spin_unlock_bh(p_ptr->publ.lock);
+}
+
+static inline struct port* port_deref(u32 ref)
+{
+       return (struct port *)ref_deref(ref);
+}
+
+static inline u32 peer_port(struct port *p_ptr)
+{
+       return msg_destport(&p_ptr->publ.phdr);
+}
+
+static inline u32 peer_node(struct port *p_ptr)
+{
+       return msg_destnode(&p_ptr->publ.phdr);
+}
+
+static inline int port_congested(struct port *p_ptr)
+{
+       return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
+}
+
+/** 
+ * port_recv_msg - receive message from lower layer and deliver to port user
+ */
+
+static inline int port_recv_msg(struct sk_buff *buf)
+{
+       struct port *p_ptr;
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 destport = msg_destport(msg);
+       u32 dsz = msg_data_sz(msg);
+       u32 err;
+       
+       /* forward unresolved named message */
+       if (unlikely(!destport)) {
+               net_route_msg(buf);
+               return dsz;
+       }
+
+       /* validate destination & pass to port, otherwise reject message */
+       p_ptr = port_lock(destport);
+       if (likely(p_ptr)) {
+               if (likely(p_ptr->publ.connected)) {
+                       if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) ||
+                           (unlikely(msg_orignode(msg) != peer_node(p_ptr))) ||
+                           (unlikely(!msg_connected(msg)))) {
+                               err = TIPC_ERR_NO_PORT;
+                               port_unlock(p_ptr);
+                               goto reject;
+                       }
+               }
+               err = p_ptr->dispatcher(&p_ptr->publ, buf);
+               port_unlock(p_ptr);
+               if (likely(!err))
+                       return dsz;
+       } else {
+               err = TIPC_ERR_NO_PORT;
+       }
+reject:
+       dbg("port->rejecting, err = %x..\n",err);
+       return tipc_reject_msg(buf, err);
+}
+
+#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
new file mode 100644 (file)
index 0000000..944093f
--- /dev/null
@@ -0,0 +1,189 @@
+/*
+ * net/tipc/ref.c: TIPC object registry code
+ * 
+ * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "ref.h"
+#include "port.h"
+#include "subscr.h"
+#include "name_distr.h"
+#include "name_table.h"
+#include "config.h"
+#include "discover.h"
+#include "bearer.h"
+#include "node.h"
+#include "bcast.h"
+
+/*
+ * Object reference table consists of 2**N entries.
+ *
+ * A used entry has object ptr != 0, reference == XXXX|own index
+ *                                  (XXXX changes each time entry is acquired) 
+ * A free entry has object ptr == 0, reference == YYYY|next free index
+ *                                  (YYYY is one more than last used XXXX)
+ *
+ * Free list is initially chained from entry (2**N)-1 to entry 1. 
+ * Entry 0 is not used to allow index 0 to indicate the end of the free list.
+ *
+ * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
+ * because entry 0's reference field has the form XXXX|1--1.
+ */
+
+struct ref_table ref_table = { 0 };
+
+rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
+
+/**
+ * ref_table_init - create reference table for objects
+ */
+
+int ref_table_init(u32 requested_size, u32 start)
+{
+       struct reference *table;
+       u32 sz = 1 << 4;
+       u32 index_mask;
+       int i;
+
+       while (sz < requested_size) {
+               sz <<= 1;
+       }
+       table = (struct reference *)vmalloc(sz * sizeof(struct reference));
+       if (table == NULL)
+               return -ENOMEM;
+
+       write_lock_bh(&reftbl_lock);
+       index_mask = sz - 1;
+       for (i = sz - 1; i >= 0; i--) {
+               table[i].object = 0;
+               table[i].lock = SPIN_LOCK_UNLOCKED;
+               table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
+       }
+       ref_table.entries = table;
+       ref_table.index_mask = index_mask;
+       ref_table.first_free = sz - 1;
+       ref_table.last_free = 1;
+       write_unlock_bh(&reftbl_lock);
+       return TIPC_OK;
+}
+
+/**
+ * ref_table_stop - destroy reference table for objects
+ */
+
+void ref_table_stop(void)
+{
+       if (!ref_table.entries)
+               return;
+
+       vfree(ref_table.entries);
+       ref_table.entries = 0;
+}
+
+/**
+ * ref_acquire - create reference to an object
+ * 
+ * Return a unique reference value which can be translated back to the pointer
+ * 'object' at a later time.  Also, pass back a pointer to the lock protecting 
+ * the object, but without locking it.
+ */
+
+u32 ref_acquire(void *object, spinlock_t **lock)
+{
+       struct reference *entry;
+       u32 index;
+       u32 index_mask;
+       u32 next_plus_upper;
+       u32 reference = 0;
+
+       assert(ref_table.entries && object);
+
+       write_lock_bh(&reftbl_lock);
+       if (ref_table.first_free) {
+               index = ref_table.first_free;
+               entry = &(ref_table.entries[index]);
+               index_mask = ref_table.index_mask;
+               /* take lock in case a previous user of entry still holds it */ 
+               spin_lock_bh(&entry->lock);  
+               next_plus_upper = entry->data.next_plus_upper;
+               ref_table.first_free = next_plus_upper & index_mask;
+               reference = (next_plus_upper & ~index_mask) + index;
+               entry->data.reference = reference;
+               entry->object = object;
+                if (lock != 0)
+                        *lock = &entry->lock;
+               spin_unlock_bh(&entry->lock);
+       }
+       write_unlock_bh(&reftbl_lock);
+       return reference;
+}
+
+/**
+ * ref_discard - invalidate references to an object
+ * 
+ * Disallow future references to an object and free up the entry for re-use.
+ * Note: The entry's spin_lock may still be busy after discard
+ */
+
+void ref_discard(u32 ref)
+{
+       struct reference *entry;
+       u32 index; 
+       u32 index_mask;
+
+       assert(ref_table.entries);
+       assert(ref != 0);
+
+       write_lock_bh(&reftbl_lock);
+       index_mask = ref_table.index_mask;
+       index = ref & index_mask;
+       entry = &(ref_table.entries[index]);
+       assert(entry->object != 0);
+       assert(entry->data.reference == ref);
+
+       /* mark entry as unused */
+       entry->object = 0;
+       if (ref_table.first_free == 0)
+               ref_table.first_free = index;
+       else
+               /* next_plus_upper is always XXXX|0--0 for last free entry */
+               ref_table.entries[ref_table.last_free].data.next_plus_upper 
+                       |= index;
+       ref_table.last_free = index;
+
+       /* increment upper bits of entry to invalidate subsequent references */
+       entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
+       write_unlock_bh(&reftbl_lock);
+}
+
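
A worked example of the reference encoding described at the top of this file; a user-space sketch assuming a 16-entry table, with arithmetic mirroring ref_acquire() and ref_discard():

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t index_mask = 16 - 1;             /* 2**N - 1, here 0xF       */
            uint32_t ref        = 0x0025;             /* as handed out by acquire */
            uint32_t index      = ref & index_mask;   /* slot 5                   */
            uint32_t instance   = ref & ~index_mask;  /* 0x0020                   */

            /* after ref_discard(), the slot's upper bits advance one step, so   */
            /* the next reference issued for slot 5 is 0x0035; the stale value   */
            /* 0x0025 then fails the "data.reference == ref" check               */
            uint32_t next_ref = (instance + (index_mask + 1)) + index;

            printf("index=%u instance=0x%x next_ref=0x%x\n", index, instance, next_ref);
            return 0;
    }
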
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
new file mode 100644 (file)
index 0000000..429cde5
--- /dev/null
@@ -0,0 +1,131 @@
+/*
+ * net/tipc/ref.h: Include file for TIPC object registry code
+ * 
+ * Copyright (c) 1991-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_REF_H
+#define _TIPC_REF_H
+
+/**
+ * struct reference - TIPC object reference entry
+ * @object: pointer to object associated with reference entry
+ * @lock: spinlock controlling access to object
+ * @data: reference value associated with object (or link to next unused entry)
+ */
+struct reference {
+       void *object;
+       spinlock_t lock;
+       union {
+               u32 next_plus_upper;
+               u32 reference;
+       } data;
+};
+
+/**
+ * struct ref_table - table of TIPC object reference entries
+ * @entries: pointer to array of reference entries
+ * @index_mask: bitmask for array index portion of reference values
+ * @first_free: array index of first unused object reference entry
+ * @last_free: array index of last unused object reference entry
+ */
+
+struct ref_table {
+       struct reference *entries;
+       u32 index_mask;
+       u32 first_free;
+       u32 last_free;
+};
+
+extern struct ref_table ref_table;
+
+int ref_table_init(u32 requested_size, u32 start);
+void ref_table_stop(void);
+
+u32 ref_acquire(void *object, spinlock_t **lock);
+void ref_discard(u32 ref);
+
+
+/**
+ * ref_lock - lock referenced object and return pointer to it
+ */
+
+static inline void *ref_lock(u32 ref)
+{
+       if (likely(ref_table.entries)) {
+               struct reference *r =
+                       &ref_table.entries[ref & ref_table.index_mask];
+
+               spin_lock_bh(&r->lock);
+               if (likely(r->data.reference == ref))
+                       return r->object;
+               spin_unlock_bh(&r->lock);
+       }
+       return 0;
+}
+
+/**
+ * ref_unlock - unlock referenced object 
+ */
+
+static inline void ref_unlock(u32 ref)
+{
+       if (likely(ref_table.entries)) {
+               struct reference *r =
+                       &ref_table.entries[ref & ref_table.index_mask];
+
+               if (likely(r->data.reference == ref))
+                       spin_unlock_bh(&r->lock);
+               else
+                       err("ref_unlock() invoked using obsolete reference\n");
+       }
+}
+
+/**
+ * ref_deref - return pointer to referenced object (without locking it)
+ */
+
+static inline void *ref_deref(u32 ref)
+{
+       if (likely(ref_table.entries)) {
+               struct reference *r = 
+                       &ref_table.entries[ref & ref_table.index_mask];
+
+               if (likely(r->data.reference == ref))
+                       return r->object;
+       }
+       return 0;
+}
+
+#endif
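
A minimal sketch of the intended acquire/lock/discard cycle, assuming kernel context and an initialised reference table:

    static int example_ref_usage(void *obj)
    {
            spinlock_t *lock;
            u32 ref;

            ref = ref_acquire(obj, &lock);     /* hand out a reference value     */
            if (!ref)
                    return -ENOMEM;            /* reference table exhausted      */

            if (ref_lock(ref)) {               /* translate and lock in one step */
                    /* object may be used safely here */
                    ref_unlock(ref);
            }

            ref_discard(ref);                  /* 'ref' is now permanently stale */
            return TIPC_OK;
    }
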
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644 (file)
index 0000000..d21f8c0
--- /dev/null
@@ -0,0 +1,1726 @@
+/*
+ * net/tipc/socket.c: TIPC socket API
+ * 
+ * Copyright (c) 2001-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/version.h>
+#include <linux/fcntl.h>
+#include <linux/version.h>
+#include <asm/semaphore.h>
+#include <asm/string.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+
+#include <linux/tipc.h>
+#include <linux/tipc_config.h>
+#include <net/tipc/tipc_msg.h>
+#include <net/tipc/tipc_port.h>
+
+#include "core.h"
+
+#define SS_LISTENING   -1      /* socket is listening */
+#define SS_READY       -2      /* socket is connectionless */
+
+#define OVERLOAD_LIMIT_BASE    5000
+
+struct tipc_sock {
+       struct sock sk;
+       struct tipc_port *p;
+       struct semaphore sem;
+};
+
+#define tipc_sk(sk) ((struct tipc_sock*)sk)
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
+static void wakeupdispatch(struct tipc_port *tport);
+
+static struct proto_ops packet_ops;
+static struct proto_ops stream_ops;
+static struct proto_ops msg_ops;
+
+static struct proto tipc_proto;
+
+static int sockets_enabled = 0;
+
+static atomic_t tipc_queue_size = ATOMIC_INIT(0);
+
+
+/* 
+ * sock_lock(): Lock a port/socket pair. lock_sock() cannot
+ * be used here, since the same lock must protect ports
+ * with non-socket interfaces.
+ * See net.c for description of locking policy.
+ */
+static inline void sock_lock(struct tipc_sock* tsock)
+{
+        spin_lock_bh(tsock->p->lock);       
+}
+
+/* 
+ * sock_unlock(): Unlock a port/socket pair
+ */
+static inline void sock_unlock(struct tipc_sock* tsock)
+{
+        spin_unlock_bh(tsock->p->lock);
+}
+
+/**
+ * pollmask - determine the current set of poll() events for a socket
+ * @sock: socket structure
+ * 
+ * TIPC sets the returned events as follows:
+ * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
+ *    or if a connection-oriented socket does not have an active connection
+ *    (i.e. a read operation will not block).
+ * b) POLLOUT is set except when a socket's connection has been terminated
+ *    (i.e. a write operation will not block).
+ * c) POLLHUP is set when a socket's connection has been terminated.
+ *
+ * IMPORTANT: The fact that a read or write operation will not block does NOT
+ * imply that the operation will succeed!
+ * 
+ * Returns pollmask value
+ */
+
+static inline u32 pollmask(struct socket *sock)
+{
+       u32 mask;
+
+       if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
+           (sock->state == SS_UNCONNECTED) ||
+           (sock->state == SS_DISCONNECTING))
+               mask = (POLLRDNORM | POLLIN);
+       else
+               mask = 0;
+
+       if (sock->state == SS_DISCONNECTING) 
+               mask |= POLLHUP;
+       else
+               mask |= POLLOUT;
+
+       return mask;
+}
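
From user space, the mapping above is typically consumed through POLLIN and POLLHUP; a minimal sketch, assuming sd is an already-created AF_TIPC socket descriptor:

    #include <poll.h>

    static int wait_for_data(int sd)
    {
            struct pollfd pfd = { .fd = sd, .events = POLLIN };

            if (poll(&pfd, 1, -1) < 0)
                    return -1;
            if (pfd.revents & POLLHUP)
                    return 0;                    /* connection terminated */
            return (pfd.revents & POLLIN) != 0;  /* data or accept ready  */
    }
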
+
+
+/**
+ * advance_queue - discard first buffer in queue
+ * @tsock: TIPC socket
+ */
+
+static inline void advance_queue(struct tipc_sock *tsock)
+{
+        sock_lock(tsock);
+       buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
+        sock_unlock(tsock);
+       atomic_dec(&tipc_queue_size);
+}
+
+/**
+ * tipc_create - create a TIPC socket
+ * @sock: pre-allocated socket structure
+ * @protocol: protocol indicator (must be 0)
+ * 
+ * This routine creates and attaches a 'struct sock' to the 'struct socket',
+ * then creates and attaches a TIPC port to the 'struct sock' part.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+static int tipc_create(struct socket *sock, int protocol)
+{
+       struct tipc_sock *tsock;
+       struct tipc_port *port;
+       struct sock *sk;
+        u32 ref;
+
+       if ((sock->type != SOCK_STREAM) && 
+           (sock->type != SOCK_SEQPACKET) &&
+           (sock->type != SOCK_DGRAM) &&
+           (sock->type != SOCK_RDM))
+               return -EPROTOTYPE;
+
+       if (unlikely(protocol != 0))
+               return -EPROTONOSUPPORT;
+
+       ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
+       if (unlikely(!ref))
+               return -ENOMEM;
+
+       sock->state = SS_UNCONNECTED;
+
+       switch (sock->type) {
+       case SOCK_STREAM:
+               sock->ops = &stream_ops;
+               break;
+       case SOCK_SEQPACKET:
+               sock->ops = &packet_ops;
+               break;
+       case SOCK_DGRAM:
+               tipc_set_portunreliable(ref, 1);
+               /* fall through */
+       case SOCK_RDM:
+               tipc_set_portunreturnable(ref, 1);
+               sock->ops = &msg_ops;
+               sock->state = SS_READY;
+               break;
+       }
+
+       sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
+       if (!sk) {
+               tipc_deleteport(ref);
+               return -ENOMEM;
+       }
+
+       sock_init_data(sock, sk);
+       init_waitqueue_head(sk->sk_sleep);
+       sk->sk_rcvtimeo = 8 * HZ;   /* default connect timeout = 8s */
+
+       tsock = tipc_sk(sk);
+       port = tipc_get_port(ref);
+
+       tsock->p = port;
+       port->usr_handle = tsock;
+
+       init_MUTEX(&tsock->sem);
+
+       dbg("sock_create: %x\n",tsock);
+
+       atomic_inc(&tipc_user_count);
+
+       return 0;
+}
+
+/**
+ * release - destroy a TIPC socket
+ * @sock: socket to destroy
+ *
+ * This routine cleans up any messages that are still queued on the socket.
+ * For DGRAM and RDM socket types, all queued messages are rejected.
+ * For SEQPACKET and STREAM socket types, the first message is rejected
+ * and any others are discarded.  (If the first message on a STREAM socket
+ * is partially-read, it is discarded and the next one is rejected instead.)
+ * 
+ * NOTE: Rejected messages are not necessarily returned to the sender!  They
+ * are returned or discarded according to the "destination droppable" setting
+ * specified for the message by the sender.
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int release(struct socket *sock)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sock *sk = sock->sk;
+       int res = TIPC_OK;
+       struct sk_buff *buf;
+
+        dbg("sock_delete: %x\n",tsock);
+       if (!tsock)
+               return 0;
+       down_interruptible(&tsock->sem);
+       if (!sock->sk) {
+               up(&tsock->sem);
+               return 0;
+       }
+       
+       /* Reject unreceived messages, unless no longer connected */
+
+       while (sock->state != SS_DISCONNECTING) {
+               sock_lock(tsock);
+               buf = skb_dequeue(&sk->sk_receive_queue);
+               if (!buf)
+                       tsock->p->usr_handle = 0;
+               sock_unlock(tsock);
+               if (!buf)
+                       break;
+               if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
+                       buf_discard(buf);
+               else
+                       tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+               atomic_dec(&tipc_queue_size);
+       }
+
+       /* Delete TIPC port */
+
+       res = tipc_deleteport(tsock->p->ref);
+       sock->sk = NULL;
+
+       /* Discard any remaining messages */
+
+       while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
+               buf_discard(buf);
+               atomic_dec(&tipc_queue_size);
+       }
+
+       up(&tsock->sem);
+
+       sock_put(sk);
+
+        atomic_dec(&tipc_user_count);
+       return res;
+}
+
+/**
+ * bind - associate or disassociate TIPC name(s) with a socket
+ * @sock: socket structure
+ * @uaddr: socket address describing name(s) and desired operation
+ * @uaddr_len: size of socket address data structure
+ * 
+ * Name and name sequence binding is indicated using a positive scope value;
+ * a negative scope value unbinds the specified name.  Specifying no name
+ * (i.e. a socket address length of 0) unbinds all names from the socket.
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+       int res;
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+       
+       if (unlikely(!uaddr_len)) {
+               res = tipc_withdraw(tsock->p->ref, 0, 0);
+               goto exit;
+       }
+
+       if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+               res = -EINVAL;
+               goto exit;
+       }
+
+       if (addr->family != AF_TIPC) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
+       if (addr->addrtype == TIPC_ADDR_NAME)
+               addr->addr.nameseq.upper = addr->addr.nameseq.lower;
+       else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+               res = -EAFNOSUPPORT;
+               goto exit;
+       }
+
+       if (addr->scope > 0)
+               res = tipc_publish(tsock->p->ref, addr->scope,
+                                  &addr->addr.nameseq);
+       else
+               res = tipc_withdraw(tsock->p->ref, -addr->scope,
+                                   &addr->addr.nameseq);
+exit:
+       up(&tsock->sem);
+       return res;
+}
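
A user-space sketch of the binding rules above, assuming linux/tipc.h provides sockaddr_tipc and using a hypothetical name sequence {1000, 1..10}:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    static int bind_name_range(int sd)
    {
            struct sockaddr_tipc a;

            memset(&a, 0, sizeof(a));
            a.family = AF_TIPC;
            a.addrtype = TIPC_ADDR_NAMESEQ;
            a.scope = TIPC_NODE_SCOPE;           /* positive scope binds      */
            a.addr.nameseq.type = 1000;          /* hypothetical service type */
            a.addr.nameseq.lower = 1;
            a.addr.nameseq.upper = 10;

            return bind(sd, (struct sockaddr *)&a, sizeof(a));
    }
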
+
+/** 
+ * get_name - get port ID of socket or peer socket
+ * @sock: socket structure
+ * @uaddr: area for returned socket address
+ * @uaddr_len: area for returned length of socket address
+ * @peer: 0 to obtain socket name, 1 to obtain peer socket name
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int get_name(struct socket *sock, struct sockaddr *uaddr, 
+                   int *uaddr_len, int peer)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
+       int res;
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+
+       *uaddr_len = sizeof(*addr);
+       addr->addrtype = TIPC_ADDR_ID;
+       addr->family = AF_TIPC;
+       addr->scope = 0;
+       if (peer)
+               res = tipc_peer(tsock->p->ref, &addr->addr.id);
+       else
+               res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
+       addr->addr.name.domain = 0;
+
+       up(&tsock->sem);
+       return res;
+}
+
+/**
+ * poll - read and possibly block on pollmask
+ * @file: file structure associated with the socket
+ * @sock: socket for which to calculate the poll bits
+ * @wait: poll table supplied by the caller
+ *
+ * Returns the pollmask
+ */
+
+static unsigned int poll(struct file *file, struct socket *sock, 
+                        poll_table *wait)
+{
+       poll_wait(file, sock->sk->sk_sleep, wait);
+       /* NEED LOCK HERE? */
+       return pollmask(sock);
+}
+
+/** 
+ * dest_name_check - verify user is permitted to send to specified port name
+ * @dest: destination address
+ * @m: descriptor for message to be sent
+ * 
+ * Prevents restricted configuration commands from being issued by
+ * unauthorized users.
+ * 
+ * Returns 0 if permission is granted, otherwise errno
+ */
+
+static inline int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
+{
+       struct tipc_cfg_msg_hdr hdr;
+
+        if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
+                return 0;
+        if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
+                return 0;
+
+        if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
+                return -EACCES;
+
+        if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
+               return -EFAULT;
+       if ((ntohs(hdr.tcm_type) & 0xC000) && !capable(CAP_NET_ADMIN))
+               return -EACCES;
+        
+       return 0;
+}
+
+/**
+ * send_msg - send message in connectionless manner
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ * 
+ * Message must have a destination specified explicitly.
+ * Used for SOCK_RDM and SOCK_DGRAM messages, 
+ * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
+ * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_msg(struct kiocb *iocb, struct socket *sock,
+                   struct msghdr *m, size_t total_len)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+        struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+       struct sk_buff *buf;
+       int needs_conn;
+       int res = -EINVAL;
+
+       if (unlikely(!dest))
+               return -EDESTADDRREQ;
+       if (unlikely(dest->family != AF_TIPC))
+               return -EINVAL;
+
+       needs_conn = (sock->state != SS_READY);
+       if (unlikely(needs_conn)) {
+               if (sock->state == SS_LISTENING)
+                       return -EPIPE;
+               if (sock->state != SS_UNCONNECTED)
+                       return -EISCONN;
+               if ((tsock->p->published) ||
+                   ((sock->type == SOCK_STREAM) && (total_len != 0)))
+                       return -EOPNOTSUPP;
+       }
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+
+       if (needs_conn) {
+
+               /* Abort any pending connection attempts (very unlikely) */
+
+               while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+                       tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+                       atomic_dec(&tipc_queue_size);
+               }
+
+               sock->state = SS_CONNECTING;
+       }
+
+        do {
+                if (dest->addrtype == TIPC_ADDR_NAME) {
+                        if ((res = dest_name_check(dest, m)))
+                                goto exit;
+                        res = tipc_send2name(tsock->p->ref,
+                                             &dest->addr.name.name,
+                                             dest->addr.name.domain, 
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                else if (dest->addrtype == TIPC_ADDR_ID) {
+                        res = tipc_send2port(tsock->p->ref,
+                                             &dest->addr.id,
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                else if (dest->addrtype == TIPC_ADDR_MCAST) {
+                       if (needs_conn) {
+                               res = -EOPNOTSUPP;
+                               goto exit;
+                       }
+                        if ((res = dest_name_check(dest, m)))
+                                goto exit;
+                        res = tipc_multicast(tsock->p->ref,
+                                             &dest->addr.nameseq,
+                                             0,
+                                             m->msg_iovlen,
+                                             m->msg_iov);
+                }
+                if (likely(res != -ELINKCONG)) {
+exit:                                
+                        up(&tsock->sem);
+                        return res;
+                }
+               if (m->msg_flags & MSG_DONTWAIT) {
+                       res = -EWOULDBLOCK;
+                       goto exit;
+               }
+                if (wait_event_interruptible(*sock->sk->sk_sleep,
+                                             !tsock->p->congested)) {
+                    res = -ERESTARTSYS;
+                    goto exit;
+                }
+        } while (1);
+}
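
The corresponding user-space path is an ordinary sendto(); a sketch for a connectionless SOCK_RDM socket sending one datagram to a hypothetical name {1000, 1}, assuming AF_TIPC and sockaddr_tipc are available from linux/tipc.h:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/tipc.h>

    static int send_hello(void)
    {
            struct sockaddr_tipc dest;
            int sd = socket(AF_TIPC, SOCK_RDM, 0);

            if (sd < 0)
                    return -1;

            memset(&dest, 0, sizeof(dest));
            dest.family = AF_TIPC;
            dest.addrtype = TIPC_ADDR_NAME;
            dest.addr.name.name.type = 1000;     /* hypothetical service type */
            dest.addr.name.name.instance = 1;
            dest.addr.name.domain = 0;           /* look up within own zone   */

            return sendto(sd, "hello", 5, 0,
                          (struct sockaddr *)&dest, sizeof(dest));
    }
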
+
+/** 
+ * send_packet - send a connection-oriented message
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: message to send
+ * @total_len: (unused)
+ * 
+ * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_packet(struct kiocb *iocb, struct socket *sock,
+                      struct msghdr *m, size_t total_len)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+        struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
+       int res;
+
+       /* Handle implied connection establishment */
+
+       if (unlikely(dest))
+               return send_msg(iocb, sock, m, total_len);
+
+       if (down_interruptible(&tsock->sem)) {
+               return -ERESTARTSYS;
+        }
+
+        if (unlikely(sock->state != SS_CONNECTED)) {
+                if (sock->state == SS_DISCONNECTING)
+                        res = -EPIPE;   
+                else
+                        res = -ENOTCONN;
+                goto exit;
+        }
+
+        do {
+                res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
+                if (likely(res != -ELINKCONG)) {
+exit:
+                        up(&tsock->sem);
+                        return res;
+                }
+               if (m->msg_flags & MSG_DONTWAIT) {
+                       res = -EWOULDBLOCK;
+                       goto exit;
+               }
+                if (wait_event_interruptible(*sock->sk->sk_sleep,
+                                             !tsock->p->congested)) {
+                    res = -ERESTARTSYS;
+                    goto exit;
+                }
+        } while (1);
+}
+
+/** 
+ * send_stream - send stream-oriented data
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: data to send
+ * @total_len: total length of data to be sent
+ * 
+ * Used for SOCK_STREAM data.
+ * 
+ * Returns the number of bytes sent on success, or errno otherwise
+ */
+
+static int send_stream(struct kiocb *iocb, struct socket *sock,
+                      struct msghdr *m, size_t total_len)
+{
+       struct msghdr my_msg;
+       struct iovec my_iov;
+       struct iovec *curr_iov;
+       int curr_iovlen;
+       char __user *curr_start;
+       int curr_left;
+       int bytes_to_send;
+       int res;
+       
+       if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
+               return send_packet(iocb, sock, m, total_len);
+
+       /* Can only send large data streams if already connected */
+
+        if (unlikely(sock->state != SS_CONNECTED)) {
+                if (sock->state == SS_DISCONNECTING)
+                        return -EPIPE;   
+                else
+                        return -ENOTCONN;
+        }
+
+       /* 
+        * Send each iovec entry using one or more messages
+        *
+        * Note: This algorithm is good for the most likely case 
+        * (i.e. one large iovec entry), but could be improved to pass sets
+        * of small iovec entries into send_packet().
+        */
+
+       my_msg = *m;
+       curr_iov = my_msg.msg_iov;
+       curr_iovlen = my_msg.msg_iovlen;
+       my_msg.msg_iov = &my_iov;
+       my_msg.msg_iovlen = 1;
+
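+       /* Walk each iovec entry, sending at most TIPC_MAX_USER_MSG_SIZE bytes per message */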
+       while (curr_iovlen--) {
+               curr_start = curr_iov->iov_base;
+               curr_left = curr_iov->iov_len;
+
+               while (curr_left) {
+                       bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE)
+                               ? curr_left : TIPC_MAX_USER_MSG_SIZE;
+                       my_iov.iov_base = curr_start;
+                       my_iov.iov_len = bytes_to_send;
+                        if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
+                                return res;
+                       curr_left -= bytes_to_send;
+                       curr_start += bytes_to_send;
+               }
+
+               curr_iov++;
+       }
+
+       return total_len;
+}
+
+/**
+ * auto_connect - complete connection setup to a remote port
+ * @sock: socket structure
+ * @tsock: TIPC-specific socket structure
+ * @msg: peer's response message
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int auto_connect(struct socket *sock, struct tipc_sock *tsock, 
+                       struct tipc_msg *msg)
+{
+       struct tipc_portid peer;
+
+       if (msg_errcode(msg)) {
+               sock->state = SS_DISCONNECTING;
+               return -ECONNREFUSED;
+       }
+
+       peer.ref = msg_origport(msg);
+       peer.node = msg_orignode(msg);
+       tipc_connect2port(tsock->p->ref, &peer);
+       tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
+       sock->state = SS_CONNECTED;
+       return 0;
+}
+
+/**
+ * set_orig_addr - capture sender's address for received message
+ * @m: descriptor for message info
+ * @msg: received message header
+ * 
+ * Note: Address is not captured if not requested by receiver.
+ */
+
+static inline void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
+{
+        struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
+
+        if (addr) {
+               addr->family = AF_TIPC;
+               addr->addrtype = TIPC_ADDR_ID;
+               addr->addr.id.ref = msg_origport(msg);
+               addr->addr.id.node = msg_orignode(msg);
+               addr->addr.name.domain = 0;     /* could leave uninitialized */
+               addr->scope = 0;                /* could leave uninitialized */
+               m->msg_namelen = sizeof(struct sockaddr_tipc);
+       }
+}
+
+/**
+ * anc_data_recv - optionally capture ancillary data for received message 
+ * @m: descriptor for message info
+ * @msg: received message header
+ * @tport: TIPC port associated with message
+ * 
+ * Note: Ancillary data is not captured if not requested by receiver.
+ * 
+ * Returns 0 if successful, otherwise errno
+ */
+
+static inline int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 
+                               struct tipc_port *tport)
+{
+       u32 anc_data[3];
+       u32 err;
+       u32 dest_type;
+       int res;
+
+       if (likely(m->msg_controllen == 0))
+               return 0;
+
+       /* Optionally capture errored message object(s) */
+
+       err = msg ? msg_errcode(msg) : 0;
+       if (unlikely(err)) {
+               anc_data[0] = err;
+               anc_data[1] = msg_data_sz(msg);
+               if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
+                       return res;
+               if (anc_data[1] &&
+                   (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1], 
+                                   msg_data(msg))))
+                       return res;
+       }
+
+       /* Optionally capture message destination object */
+
+       dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
+       switch (dest_type) {
+       case TIPC_NAMED_MSG:
+               anc_data[0] = msg_nametype(msg);
+               anc_data[1] = msg_namelower(msg);
+               anc_data[2] = msg_namelower(msg);
+               break;
+       case TIPC_MCAST_MSG:
+               anc_data[0] = msg_nametype(msg);
+               anc_data[1] = msg_namelower(msg);
+               anc_data[2] = msg_nameupper(msg);
+               break;
+       case TIPC_CONN_MSG:
+               anc_data[0] = tport->conn_type;
+               anc_data[1] = tport->conn_instance;
+               anc_data[2] = tport->conn_instance;
+               break;
+       default:
+               anc_data[0] = 0;
+       }
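+       /* anc_data[] = {name type, lower instance, upper instance}: three u32s, hence 12 bytes */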
+       if (anc_data[0] &&
+           (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
+               return res;
+
+       return 0;
+}
+
+/** 
+ * recv_msg - receive packet-oriented message
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ * 
+ * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
+ * If the complete message doesn't fit in user area, truncate it.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_msg(struct kiocb *iocb, struct socket *sock,
+                   struct msghdr *m, size_t buf_len, int flags)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       unsigned int q_len;
+       unsigned int sz;
+       u32 err;
+       int res;
+
+       /* Currently doesn't support receiving into multiple iovec entries */
+
+       if (m->msg_iovlen != 1)
+               return -EOPNOTSUPP;
+
+       /* Catch invalid receive attempts */
+
+       if (unlikely(!buf_len))
+               return -EINVAL;
+
+       if (sock->type == SOCK_SEQPACKET) {
+               if (unlikely(sock->state == SS_UNCONNECTED))
+                       return -ENOTCONN;
+               if (unlikely((sock->state == SS_DISCONNECTING) && 
+                            (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
+                       return -ENOTCONN;
+       }
+
+       /* Look for a message in receive queue; wait if necessary */
+
+       if (unlikely(down_interruptible(&tsock->sem)))
+               return -ERESTARTSYS;
+
+restart:
+       if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+                    (flags & MSG_DONTWAIT))) {
+               res = -EWOULDBLOCK;
+               goto exit;
+       }
+
+       if ((res = wait_event_interruptible(
+               *sock->sk->sk_sleep, 
+               ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+                (sock->state == SS_DISCONNECTING))) )) {
+               goto exit;
+       }
+
+       /* Catch attempt to receive on an already terminated connection */
+       /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+       if (!q_len) {
+               res = -ENOTCONN;
+               goto exit;
+       }
+
+       /* Get access to first message in receive queue */
+
+       buf = skb_peek(&sock->sk->sk_receive_queue);
+       msg = buf_msg(buf);
+       sz = msg_data_sz(msg);
+       err = msg_errcode(msg);
+
+       /* Complete connection setup for an implied connect */
+
+       if (unlikely(sock->state == SS_CONNECTING)) {
+               if ((res = auto_connect(sock, tsock, msg)))
+                       goto exit;
+       }
+
+       /* Discard an empty non-errored message & try again */
+
+       if ((!sz) && (!err)) {
+               advance_queue(tsock);
+               goto restart;
+       }
+
+       /* Capture sender's address (optional) */
+
+       set_orig_addr(m, msg);
+
+       /* Capture ancillary data (optional) */
+
+       if ((res = anc_data_recv(m, msg, tsock->p)))
+               goto exit;
+
+       /* Capture message data (if valid) & compute return value (always) */
+       
+       if (!err) {
+               if (unlikely(buf_len < sz)) {
+                       sz = buf_len;
+                       m->msg_flags |= MSG_TRUNC;
+               }
+               if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
+                                         sz))) {
+                       res = -EFAULT;
+                       goto exit;
+               }
+               res = sz;
+       } else {
+               if ((sock->state == SS_READY) ||
+                   ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
+                       res = 0;
+               else
+                       res = -ECONNRESET;
+       }
+
+       /* Consume received message (optional) */
+
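+       /* Unless peeking, ack the connection once TIPC_FLOW_CONTROL_WIN messages are unacked */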
+       if (likely(!(flags & MSG_PEEK))) {
+                if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                        tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+               advance_queue(tsock);
+        }
+exit:
+       up(&tsock->sem);
+       return res;
+}
+
+/** 
+ * recv_stream - receive stream-oriented data
+ * @iocb: (unused)
+ * @sock: socket structure
+ * @m: descriptor for message info
+ * @buf_len: total size of user buffer area
+ * @flags: receive flags
+ * 
+ * Used for SOCK_STREAM messages only.  If not enough data is available,
+ * it will optionally wait for more; it never truncates data.
+ *
+ * Returns size of returned message data, errno otherwise
+ */
+
+static int recv_stream(struct kiocb *iocb, struct socket *sock,
+                      struct msghdr *m, size_t buf_len, int flags)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       unsigned int q_len;
+       unsigned int sz;
+       int sz_to_copy;
+       int sz_copied = 0;
+       int needed;
+       char *crs = m->msg_iov->iov_base;
+       unsigned char *buf_crs;
+       u32 err;
+       int res;
+
+       /* Currently doesn't support receiving into multiple iovec entries */
+
+       if (m->msg_iovlen != 1)
+               return -EOPNOTSUPP;
+
+       /* Catch invalid receive attempts */
+
+       if (unlikely(!buf_len))
+               return -EINVAL;
+
+       if (unlikely(sock->state == SS_DISCONNECTING)) {
+               if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
+                       return -ENOTCONN;
+       } else if (unlikely(sock->state != SS_CONNECTED))
+               return -ENOTCONN;
+
+       /* Look for a message in receive queue; wait if necessary */
+
+       if (unlikely(down_interruptible(&tsock->sem)))
+               return -ERESTARTSYS;
+
+restart:
+       if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
+                    (flags & MSG_DONTWAIT))) {
+               res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
+               goto exit;
+       }
+
+       if ((res = wait_event_interruptible(
+               *sock->sk->sk_sleep, 
+               ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
+                (sock->state == SS_DISCONNECTING))) )) {
+               goto exit;
+       }
+
+       /* Catch attempt to receive on an already terminated connection */
+       /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
+
+       if (!q_len) {
+               res = -ENOTCONN;
+               goto exit;
+       }
+
+       /* Get access to first message in receive queue */
+
+       buf = skb_peek(&sock->sk->sk_receive_queue);
+       msg = buf_msg(buf);
+       sz = msg_data_sz(msg);
+       err = msg_errcode(msg);
+
+       /* Discard an empty non-errored message & try again */
+
+       if ((!sz) && (!err)) {
+               advance_queue(tsock);
+               goto restart;
+       }
+
+       /* Optionally capture sender's address & ancillary data of first msg */
+
+       if (sz_copied == 0) {
+               set_orig_addr(m, msg);
+               if ((res = anc_data_recv(m, msg, tsock->p)))
+                       goto exit;
+       }
+
+       /* Capture message data (if valid) & compute return value (always) */
+       
+       if (!err) {
+               buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
+               sz = buf->tail - buf_crs;
+
+               needed = (buf_len - sz_copied);
+               sz_to_copy = (sz <= needed) ? sz : needed;
+               if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
+                       res = -EFAULT;
+                       goto exit;
+               }
+               sz_copied += sz_to_copy;
+
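+               /* Only part of the message fit: remember the resume point (unless peeking) and stop */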
+               if (sz_to_copy < sz) {
+                       if (!(flags & MSG_PEEK))
+                               TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
+                       goto exit;
+               }
+
+               crs += sz_to_copy;
+       } else {
+               if (sz_copied != 0)
+                       goto exit; /* can't add error msg to valid data */
+
+               if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
+                       res = 0;
+               else
+                       res = -ECONNRESET;
+       }
+
+       /* Consume received message (optional) */
+
+       if (likely(!(flags & MSG_PEEK))) {
+                if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
+                        tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
+               advance_queue(tsock);
+        }
+
+       /* Loop around if more data is required */
+
+       if ((sz_copied < buf_len)    /* didn't get all requested data */ 
+           && (flags & MSG_WAITALL) /* ... and need to wait for more */
+           && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
+           && (!err)                /* ... and haven't reached a FIN */
+           )
+               goto restart;
+
+exit:
+       up(&tsock->sem);
+       return res ? res : sz_copied;
+}
+
+/**
+ * queue_overloaded - test if queue overload condition exists
+ * @queue_size: current size of queue
+ * @base: nominal maximum size of queue
+ * @msg: message to be added to queue
+ * 
+ * Returns 1 if queue is currently overloaded, 0 otherwise
+ */
+
+static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
+{
+       u32 threshold;
+       u32 imp = msg_importance(msg);
+
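+       /* Permitted backlog grows with message importance; critical messages are never refused here */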
+       if (imp == TIPC_LOW_IMPORTANCE)
+               threshold = base;
+       else if (imp == TIPC_MEDIUM_IMPORTANCE)
+               threshold = base * 2;
+       else if (imp == TIPC_HIGH_IMPORTANCE)
+               threshold = base * 100;
+       else
+               return 0;
+
+       if (msg_connected(msg))
+               threshold *= 4;
+
+       return (queue_size > threshold);
+}
+
+/** 
+ * async_disconnect - wrapper function used to disconnect port
+ * @portref: TIPC port reference (passed as pointer-sized value)
+ */
+
+static void async_disconnect(unsigned long portref)
+{
+       tipc_disconnect((u32)portref);
+}
+
+/** 
+ * dispatch - handle arriving message
+ * @tport: TIPC port that received message
+ * @buf: message
+ * 
+ * Called with port locked.  Must not take socket lock to avoid deadlock risk.
+ * 
+ * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ */
+
+static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+       struct socket *sock;
+       u32 recv_q_len;
+
+       /* Reject message if socket is closing */
+
+       if (!tsock)
+               return TIPC_ERR_NO_PORT;
+
+       /* Reject message if it is wrong sort of message for socket */
+
+       /*
+        * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
+        * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
+        * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
+        */
+       sock = tsock->sk.sk_socket;
+       if (sock->state == SS_READY) {
+               if (msg_connected(msg)) {
+                       msg_dbg(msg, "dispatch filter 1\n");
+                       return TIPC_ERR_NO_PORT;
+               }
+       } else {
+               if (msg_mcast(msg)) {
+                       msg_dbg(msg, "dispatch filter 2\n");
+                       return TIPC_ERR_NO_PORT;
+               }
+               if (sock->state == SS_CONNECTED) {
+                       if (!msg_connected(msg)) {
+                               msg_dbg(msg, "dispatch filter 3\n");
+                               return TIPC_ERR_NO_PORT;
+                       }
+               }
+               else if (sock->state == SS_CONNECTING) {
+                       if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
+                               msg_dbg(msg, "dispatch filter 4\n");
+                               return TIPC_ERR_NO_PORT;
+                       }
+               } 
+               else if (sock->state == SS_LISTENING) {
+                       if (msg_connected(msg) || msg_errcode(msg)) {
+                               msg_dbg(msg, "dispatch filter 5\n");
+                               return TIPC_ERR_NO_PORT;
+                       }
+               } 
+               else if (sock->state == SS_DISCONNECTING) {
+                       msg_dbg(msg, "dispatch filter 6\n");
+                       return TIPC_ERR_NO_PORT;
+               }
+               else /* (sock->state == SS_UNCONNECTED) */ {
+                       if (msg_connected(msg) || msg_errcode(msg)) {
+                               msg_dbg(msg, "dispatch filter 7\n");
+                               return TIPC_ERR_NO_PORT;
+                       }
+               }
+       }
+
+       /* Reject message if there isn't room to queue it */
+
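+       /* Check both the node-wide backlog and this socket's own receive queue */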
+       if (unlikely((u32)atomic_read(&tipc_queue_size) > 
+                    OVERLOAD_LIMIT_BASE)) {
+               if (queue_overloaded(atomic_read(&tipc_queue_size), 
+                                    OVERLOAD_LIMIT_BASE, msg))
+                       return TIPC_ERR_OVERLOAD;
+        }
+       recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
+       if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
+               if (queue_overloaded(recv_q_len, 
+                                    OVERLOAD_LIMIT_BASE / 2, msg)) 
+                       return TIPC_ERR_OVERLOAD;
+        }
+
+       /* Initiate connection termination for an incoming 'FIN' */
+
+       if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
+               sock->state = SS_DISCONNECTING;
+               /* Note: Use signal since port lock is already taken! */
+               k_signal((Handler)async_disconnect, tport->ref);
+       }
+
+       /* Enqueue message (finally!) */
+
+       msg_dbg(msg,"<DISP<: ");
+       TIPC_SKB_CB(buf)->handle = msg_data(msg);
+       atomic_inc(&tipc_queue_size);
+       skb_queue_tail(&sock->sk->sk_receive_queue, buf);
+
+        wake_up_interruptible(sock->sk->sk_sleep);
+       return TIPC_OK;
+}
+
+/** 
+ * wakeupdispatch - wake up port after congestion
+ * @tport: port to wakeup
+ * 
+ * Called with port lock on.
+ */
+
+static void wakeupdispatch(struct tipc_port *tport)
+{
+       struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
+
+        wake_up_interruptible(tsock->sk.sk_sleep);
+}
+
+/**
+ * connect - establish a connection to another TIPC port
+ * @sock: socket structure
+ * @dest: socket address for destination port
+ * @destlen: size of socket address data structure
+ * @flags: (unused)
+ *
+ * Returns 0 on success, errno otherwise
+ */
+
+static int connect(struct socket *sock, struct sockaddr *dest, int destlen, 
+                  int flags)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
+       struct msghdr m = {0,};
+       struct sk_buff *buf;
+       struct tipc_msg *msg;
+       int res;
+
+       /* For now, TIPC does not allow use of connect() with DGRAM or RDM types */
+
+       if (sock->state == SS_READY)
+               return -EOPNOTSUPP;
+
+       /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
+       if (sock->state == SS_LISTENING)
+               return -EOPNOTSUPP;
+       if (sock->state == SS_CONNECTING)
+               return -EALREADY;
+       if (sock->state != SS_UNCONNECTED)
+               return -EISCONN;
+
+       if ((dst->family != AF_TIPC) ||
+           ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
+               return -EINVAL;
+
+       /* Send a 'SYN-' to destination */
+
+       m.msg_name = dest;
+       if ((res = send_msg(0, sock, &m, 0)) < 0) {
+               sock->state = SS_DISCONNECTING;
+               return res;
+       }
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+
+       /* Wait for destination's 'ACK' response */
+
+       res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
+                                              skb_queue_len(&sock->sk->sk_receive_queue),
+                                              sock->sk->sk_rcvtimeo);
+       buf = skb_peek(&sock->sk->sk_receive_queue);
+       if (res > 0) {
+               msg = buf_msg(buf);
+               res = auto_connect(sock, tsock, msg);
+               if (!res) {
+                       if (dst->addrtype == TIPC_ADDR_NAME) {
+                               tsock->p->conn_type = dst->addr.name.name.type;
+                               tsock->p->conn_instance = dst->addr.name.name.instance;
+                       }
+                       if (!msg_data_sz(msg))
+                               advance_queue(tsock);
+               }
+       } else {
+               if (res == 0) {
+                       res = -ETIMEDOUT;
+               } else {
+                       /* leave "res" unchanged */
+               }
+               sock->state = SS_DISCONNECTING;
+       }
+
+       up(&tsock->sem);
+       return res;
+}
+
+/** 
+ * listen - allow socket to listen for incoming connections
+ * @sock: socket structure
+ * @len: (unused)
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int listen(struct socket *sock, int len)
+{
+       /* REQUIRES SOCKET LOCKING OF SOME SORT? */
+
+       if (sock->state == SS_READY)
+               return -EOPNOTSUPP;
+       if (sock->state != SS_UNCONNECTED)
+               return -EINVAL;
+       sock->state = SS_LISTENING;
+        return 0;
+}
+
+/** 
+ * accept - wait for connection request
+ * @sock: listening socket
+ * @newsock: new socket that is to be connected
+ * @flags: file-related flags associated with socket
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int accept(struct socket *sock, struct socket *newsock, int flags)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       struct sk_buff *buf;
+       int res = -EFAULT;
+
+       if (sock->state == SS_READY)
+               return -EOPNOTSUPP;
+       if (sock->state != SS_LISTENING)
+               return -EINVAL;
+       
+       if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) && 
+                    (flags & O_NONBLOCK)))
+               return -EWOULDBLOCK;
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+
+       if (wait_event_interruptible(*sock->sk->sk_sleep, 
+                                    skb_queue_len(&sock->sk->sk_receive_queue))) {
+               res = -ERESTARTSYS;
+               goto exit;
+       }
+       buf = skb_peek(&sock->sk->sk_receive_queue);
+
+       res = tipc_create(newsock, 0);
+       if (!res) {
+               struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
+               struct tipc_portid id;
+               struct tipc_msg *msg = buf_msg(buf);
+               u32 new_ref = new_tsock->p->ref;
+
+               id.ref = msg_origport(msg);
+               id.node = msg_orignode(msg);
+               tipc_connect2port(new_ref, &id);
+               newsock->state = SS_CONNECTED;
+
+               tipc_set_portimportance(new_ref, msg_importance(msg));
+               if (msg_named(msg)) {
+                       new_tsock->p->conn_type = msg_nametype(msg);
+                       new_tsock->p->conn_instance = msg_nameinst(msg);
+               }
+
+               /* 
+                * Respond to 'SYN-' by discarding it & returning 'ACK-'.
+                * Respond to 'SYN+' by queuing it on new socket.
+                */
+
+               msg_dbg(msg,"<ACC<: ");
+                if (!msg_data_sz(msg)) {
+                        struct msghdr m = {0,};
+
+                        send_packet(0, newsock, &m, 0);      
+                        advance_queue(tsock);
+                } else {
+                       sock_lock(tsock);
+                       skb_dequeue(&sock->sk->sk_receive_queue);
+                       sock_unlock(tsock);
+                       skb_queue_head(&newsock->sk->sk_receive_queue, buf);
+               }
+       }
+exit:
+       up(&tsock->sem);
+       return res;
+}
+
+/**
+ * shutdown - shutdown socket connection
+ * @sock: socket structure
+ * @how: direction to close (always treated as read + write)
+ *
+ * Terminates connection (if necessary), then purges socket's receive queue.
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int shutdown(struct socket *sock, int how)
+{
+       struct tipc_sock* tsock = tipc_sk(sock->sk);
+       struct sk_buff *buf;
+       int res;
+
+       /* Could return -EINVAL for an invalid "how", but why bother? */
+
+       if (down_interruptible(&tsock->sem))
+               return -ERESTARTSYS;
+
+       sock_lock(tsock);
+
+       switch (sock->state) {
+       case SS_CONNECTED:
+
+               /* Send 'FIN+' or 'FIN-' message to peer */
+
+               sock_unlock(tsock);
+restart:
+               if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+                       atomic_dec(&tipc_queue_size);
+                       if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
+                               buf_discard(buf);
+                               goto restart;
+                       }
+                       tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
+               }
+               else {
+                       tipc_shutdown(tsock->p->ref);
+               }
+               sock_lock(tsock);
+
+               /* fall through */
+
+       case SS_DISCONNECTING:
+
+               /* Discard any unreceived messages */
+
+               while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
+                       atomic_dec(&tipc_queue_size);
+                       buf_discard(buf);
+               }
+               tsock->p->conn_unacked = 0;
+
+               /* fall through */
+
+       case SS_CONNECTING:
+               sock->state = SS_DISCONNECTING;
+               res = 0;
+               break;
+
+       default:
+               res = -ENOTCONN;
+       }
+
+       sock_unlock(tsock);
+
+       up(&tsock->sem);
+       return res;
+}
+
+/**
+ * setsockopt - set socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: pointer to new option value
+ * @ol: length of option value
+ * 
+ * For stream sockets only, accepts and ignores all IPPROTO_TCP options 
+ * (to ease compatibility).
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+       u32 value;
+       int res;
+
+        if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+                return 0;
+       if (lvl != SOL_TIPC)
+               return -ENOPROTOOPT;
+       if (ol < sizeof(value))
+               return -EINVAL;
+        if ((res = get_user(value, (u32 *)ov)))
+               return res;
+
+       if (down_interruptible(&tsock->sem)) 
+               return -ERESTARTSYS;
+       
+       switch (opt) {
+       case TIPC_IMPORTANCE:
+               res = tipc_set_portimportance(tsock->p->ref, value);
+               break;
+       case TIPC_SRC_DROPPABLE:
+               if (sock->type != SOCK_STREAM)
+                       res = tipc_set_portunreliable(tsock->p->ref, value);
+               else 
+                       res = -ENOPROTOOPT;
+               break;
+       case TIPC_DEST_DROPPABLE:
+               res = tipc_set_portunreturnable(tsock->p->ref, value);
+               break;
+       case TIPC_CONN_TIMEOUT:
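+               /* option value is in ms; sk_rcvtimeo is kept in jiffies */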
+               sock->sk->sk_rcvtimeo = (value * HZ / 1000);
+               break;
+       default:
+               res = -EINVAL;
+       }
+
+       up(&tsock->sem);
+       return res;
+}
+
+/**
+ * getsockopt - get socket option
+ * @sock: socket structure
+ * @lvl: option level
+ * @opt: option identifier
+ * @ov: receptacle for option value
+ * @ol: receptacle for length of option value
+ * 
+ * For stream sockets only, returns 0 length result for all IPPROTO_TCP options 
+ * (to ease compatibility).
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+
+static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
+{
+       struct tipc_sock *tsock = tipc_sk(sock->sk);
+        int len;
+       u32 value;
+        int res;
+
+        if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
+                return put_user(0, ol);
+       if (lvl != SOL_TIPC)
+               return -ENOPROTOOPT;
+        if ((res = get_user(len, ol)))
+                return res;
+
+       if (down_interruptible(&tsock->sem)) 
+               return -ERESTARTSYS;
+
+       switch (opt) {
+       case TIPC_IMPORTANCE:
+               res = tipc_portimportance(tsock->p->ref, &value);
+               break;
+       case TIPC_SRC_DROPPABLE:
+               res = tipc_portunreliable(tsock->p->ref, &value);
+               break;
+       case TIPC_DEST_DROPPABLE:
+               res = tipc_portunreturnable(tsock->p->ref, &value);
+               break;
+       case TIPC_CONN_TIMEOUT:
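+               /* convert stored jiffies back to ms */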
+               value = (sock->sk->sk_rcvtimeo * 1000) / HZ;
+               break;
+       default:
+               res = -EINVAL;
+       }
+
+       if (res) {
+               /* "get" failed */
+       }
+       else if (len < sizeof(value)) {
+               res = -EINVAL;
+       }
+       else if ((res = copy_to_user(ov, &value, sizeof(value)))) {
+               /* couldn't return value */
+       }
+       else {
+               res = put_user(sizeof(value), ol);
+       }
+
+        up(&tsock->sem);
+       return res;
+}
+
+/**
+ * Placeholders for non-implemented functionality
+ * 
+ * Returns error code (POSIX-compliant where defined)
+ */
+
+static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
+{
+        return -EINVAL;
+}
+
+static int no_mmap(struct file *file, struct socket *sock,
+                   struct vm_area_struct *vma)
+{
+        return -EINVAL;
+}
+static ssize_t no_sendpage(struct socket *sock, struct page *page,
+                           int offset, size_t size, int flags)
+{
+        return -EINVAL;
+}
+
+static int no_skpair(struct socket *s1, struct socket *s2)
+{
+       return -EOPNOTSUPP;
+}
+
+/**
+ * Protocol switches for the various types of TIPC sockets
+ */
+
+static struct proto_ops msg_ops = {
+       .owner          = THIS_MODULE,
+       .family         = AF_TIPC,
+       .release        = release,
+       .bind           = bind,
+       .connect        = connect,
+       .socketpair     = no_skpair,
+       .accept         = accept,
+       .getname        = get_name,
+       .poll           = poll,
+       .ioctl          = ioctl,
+       .listen         = listen,
+       .shutdown       = shutdown,
+       .setsockopt     = setsockopt,
+       .getsockopt     = getsockopt,
+       .sendmsg        = send_msg,
+       .recvmsg        = recv_msg,
+        .mmap          = no_mmap,
+        .sendpage      = no_sendpage
+};
+
+static struct proto_ops packet_ops = {
+       .owner          = THIS_MODULE,
+       .family         = AF_TIPC,
+       .release        = release,
+       .bind           = bind,
+       .connect        = connect,
+       .socketpair     = no_skpair,
+       .accept         = accept,
+       .getname        = get_name,
+       .poll           = poll,
+       .ioctl          = ioctl,
+       .listen         = listen,
+       .shutdown       = shutdown,
+       .setsockopt     = setsockopt,
+       .getsockopt     = getsockopt,
+       .sendmsg        = send_packet,
+       .recvmsg        = recv_msg,
+        .mmap          = no_mmap,
+        .sendpage      = no_sendpage
+};
+
+static struct proto_ops stream_ops = {
+       .owner          = THIS_MODULE,
+       .family         = AF_TIPC,
+       .release        = release,
+       .bind           = bind,
+       .connect        = connect,
+       .socketpair     = no_skpair,
+       .accept         = accept,
+       .getname        = get_name,
+       .poll           = poll,
+       .ioctl          = ioctl,
+       .listen         = listen,
+       .shutdown       = shutdown,
+       .setsockopt     = setsockopt,
+       .getsockopt     = getsockopt,
+       .sendmsg        = send_stream,
+       .recvmsg        = recv_stream,
+        .mmap          = no_mmap,
+        .sendpage      = no_sendpage
+};
+
+static struct net_proto_family tipc_family_ops = {
+       .owner          = THIS_MODULE,
+       .family         = AF_TIPC,
+       .create         = tipc_create
+};
+
+static struct proto tipc_proto = {
+       .name           = "TIPC",
+       .owner          = THIS_MODULE,
+       .obj_size       = sizeof(struct tipc_sock)
+};
+
+/**
+ * socket_init - initialize TIPC socket interface
+ * 
+ * Returns 0 on success, errno otherwise
+ */
+int socket_init(void)
+{
+       int res;
+
+        res = proto_register(&tipc_proto, 1);
+       if (res) {
+               err("Failed to register TIPC protocol type\n");
+               goto out;
+       }
+
+       res = sock_register(&tipc_family_ops);
+       if (res) {
+               err("Failed to register TIPC socket type\n");
+               proto_unregister(&tipc_proto);
+               goto out;
+       }
+
+       sockets_enabled = 1;
+ out:
+       return res;
+}
+
+/**
+ * socket_stop - stop TIPC socket interface
+ */
+void socket_stop(void)
+{
+       if (!sockets_enabled)
+               return;
+
+       sockets_enabled = 0;
+       sock_unregister(tipc_family_ops.family);
+       proto_unregister(&tipc_proto);
+}
+
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644 (file)
index 0000000..80e219b
--- /dev/null
@@ -0,0 +1,527 @@
+/*
+ * net/tipc/subscr.c: TIPC subscription service
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "dbg.h"
+#include "subscr.h"
+#include "name_table.h"
+#include "ref.h"
+
+/**
+ * struct subscriber - TIPC network topology subscriber
+ * @ref: object reference to subscriber object itself
+ * @lock: pointer to spinlock controlling access to subscriber object
+ * @subscriber_list: adjacent subscribers in top. server's list of subscribers
+ * @subscription_list: list of subscription objects for this subscriber
+ * @port_ref: object reference to port used to communicate with subscriber
+ * @swap: indicates if subscriber uses opposite endianness in its messages
+ */
+struct subscriber {
+       u32 ref;
+        spinlock_t *lock;
+       struct list_head subscriber_list;
+       struct list_head subscription_list;
+       u32 port_ref;
+       int swap;
+};
+
+/**
+ * struct top_srv - TIPC network topology subscription service
+ * @user_ref: TIPC userid of subscription service
+ * @setup_port: reference to TIPC port that handles subscription requests
+ * @subscription_count: number of active subscriptions (not subscribers!)
+ * @subscriber_list: list of ports subscribing to service
+ * @lock: spinlock governing access to subscriber list
+ */
+
+struct top_srv {
+       u32 user_ref;
+       u32 setup_port;
+       atomic_t subscription_count;
+       struct list_head subscriber_list;
+       spinlock_t lock;
+};
+
+static struct top_srv topsrv = { 0 };
+
+/**
+ * htohl - convert value to endianness used by destination
+ * @in: value to convert
+ * @swap: non-zero if endianness must be reversed
+ * 
+ * Returns converted value
+ */
+
+static inline u32 htohl(u32 in, int swap)
+{
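+       /* Reverse the four bytes of 'in' when the peer's byte order differs from ours */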
+       unsigned char *c = (unsigned char *)&in;
+
+       return swap ? ((c[3] << 24) | (c[2] << 16) | (c[1] << 8) | c[0]) : in;
+}
+
+/**
+ * subscr_send_event - send a message containing a tipc_event to the subscriber
+ */
+
+static void subscr_send_event(struct subscription *sub, 
+                             u32 found_lower, 
+                             u32 found_upper,
+                             u32 event, 
+                             u32 port_ref, 
+                             u32 node)
+{
+       struct iovec msg_sect;
+
+       msg_sect.iov_base = (void *)&sub->evt;
+       msg_sect.iov_len = sizeof(struct tipc_event);
+
+       sub->evt.event = htohl(event, sub->owner->swap);
+       sub->evt.found_lower = htohl(found_lower, sub->owner->swap);
+       sub->evt.found_upper = htohl(found_upper, sub->owner->swap);
+       sub->evt.port.ref = htohl(port_ref, sub->owner->swap);
+       sub->evt.port.node = htohl(node, sub->owner->swap);
+       tipc_send(sub->owner->port_ref, 1, &msg_sect);
+}
+
+/**
+ * subscr_overlap - test for subscription overlap with the given values
+ *
+ * Returns 1 if there is overlap, otherwise 0.
+ */
+
+int subscr_overlap(struct subscription *sub, 
+                  u32 found_lower, 
+                  u32 found_upper)
+
+{
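+       /* Clamp the published range to the subscribed range; an empty result means no overlap */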
+       if (found_lower < sub->seq.lower)
+               found_lower = sub->seq.lower;
+       if (found_upper > sub->seq.upper)
+               found_upper = sub->seq.upper;
+       if (found_lower > found_upper)
+               return 0;
+       return 1;
+}
+
+/**
+ * subscr_report_overlap - issue event if there is subscription overlap
+ * 
+ * Protected by nameseq.lock in name_table.c
+ */
+
+void subscr_report_overlap(struct subscription *sub, 
+                          u32 found_lower, 
+                          u32 found_upper,
+                          u32 event, 
+                          u32 port_ref, 
+                          u32 node,
+                          int must)
+{
+       dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
+           sub->seq.upper, found_lower, found_upper);
+       if (!subscr_overlap(sub, found_lower, found_upper))
+               return;
+       if (!must && (sub->filter != TIPC_SUB_PORTS))
+               return;
+       subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
+}
+
+/**
+ * subscr_timeout - subscription timeout has occurred
+ */
+
+static void subscr_timeout(struct subscription *sub)
+{
+       struct subscriber *subscriber;
+       u32 subscriber_ref;
+
+       /* Validate subscriber reference (in case subscriber is terminating) */
+
+       subscriber_ref = sub->owner->ref;
+       subscriber = (struct subscriber *)ref_lock(subscriber_ref);
+       if (subscriber == NULL)
+               return;
+
+       /* Unlink subscription from name table */
+
+       nametbl_unsubscribe(sub);
+
+       /* Notify subscriber of timeout, then unlink subscription */
+
+       subscr_send_event(sub, 
+                         sub->evt.s.seq.lower, 
+                         sub->evt.s.seq.upper,
+                         TIPC_SUBSCR_TIMEOUT, 
+                         0, 
+                         0);
+       list_del(&sub->subscription_list);
+
+       /* Now destroy subscription */
+
+       ref_unlock(subscriber_ref);
+       k_term_timer(&sub->timer);
+       kfree(sub);
+       atomic_dec(&topsrv.subscription_count);
+}
+
+/**
+ * subscr_terminate - terminate communication with a subscriber
+ * 
+ * Called with subscriber locked.  Routine must temporarily release this lock
+ * to enable subscription timeout routine(s) to finish without deadlocking; 
+ * the lock is then reclaimed to allow caller to release it upon return.
+ * (This should work even in the unlikely event some other thread creates 
+ * a new object reference in the interim that uses this lock; this routine will
+ * simply wait for it to be released, then claim it.)
+ */
+
+static void subscr_terminate(struct subscriber *subscriber)
+{
+       struct subscription *sub;
+       struct subscription *sub_temp;
+
+       /* Invalidate subscriber reference */
+
+       ref_discard(subscriber->ref);
+       spin_unlock_bh(subscriber->lock);
+
+       /* Destroy any existing subscriptions for subscriber */
+       
+       list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
+                                subscription_list) {
+               if (sub->timeout != TIPC_WAIT_FOREVER) {
+                       k_cancel_timer(&sub->timer);
+                       k_term_timer(&sub->timer);
+               }
+               nametbl_unsubscribe(sub);
+               list_del(&sub->subscription_list);
+               dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
+                   sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
+               kfree(sub);
+               atomic_dec(&topsrv.subscription_count);
+       }
+
+       /* Sever connection to subscriber */
+
+       tipc_shutdown(subscriber->port_ref);
+       tipc_deleteport(subscriber->port_ref);
+
+       /* Remove subscriber from topology server's subscriber list */
+
+       spin_lock_bh(&topsrv.lock);
+       list_del(&subscriber->subscriber_list);
+       spin_unlock_bh(&topsrv.lock);
+
+       /* Now destroy subscriber */
+
+       spin_lock_bh(subscriber->lock);
+       kfree(subscriber);
+}
+
+/**
+ * subscr_subscribe - create subscription for subscriber
+ * 
+ * Called with subscriber locked
+ */
+
+static void subscr_subscribe(struct tipc_subscr *s,
+                            struct subscriber *subscriber)
+{
+       struct subscription *sub;
+
+       /* Refuse subscription if global limit exceeded */
+
+       if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
+               warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
+               subscr_terminate(subscriber);
+               return;
+       }
+
+       /* Allocate subscription object */
+
+       sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
+       if (sub == NULL) {
+               warn("Memory squeeze; ignoring subscription\n");
+               subscr_terminate(subscriber);
+               return;
+       }
+
+       /* Determine/update subscriber's endianness */
+
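+       /*
+        * If the filter field is a recognizable value in native byte order the
+        * peer shares our endianness; otherwise every field must be byte-swapped.
+        */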
+       if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
+               subscriber->swap = 0;
+       else
+               subscriber->swap = 1;
+
+       /* Initialize subscription object */
+
+       memset(sub, 0, sizeof(*sub));
+       sub->seq.type = htohl(s->seq.type, subscriber->swap);
+       sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
+       sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
+       sub->timeout = htohl(s->timeout, subscriber->swap);
+       sub->filter = htohl(s->filter, subscriber->swap);
+       if ((((sub->filter != TIPC_SUB_PORTS) 
+             && (sub->filter != TIPC_SUB_SERVICE)))
+           || (sub->seq.lower > sub->seq.upper)) {
+               warn("Rejecting illegal subscription %u,%u,%u\n",
+                    sub->seq.type, sub->seq.lower, sub->seq.upper);
+               kfree(sub);
+               subscr_terminate(subscriber);
+               return;
+       }
+       memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
+       INIT_LIST_HEAD(&sub->subscription_list);
+       INIT_LIST_HEAD(&sub->nameseq_list);
+       list_add(&sub->subscription_list, &subscriber->subscription_list);
+       atomic_inc(&topsrv.subscription_count);
+       if (sub->timeout != TIPC_WAIT_FOREVER) {
+               k_init_timer(&sub->timer,
+                            (Handler)subscr_timeout, (unsigned long)sub);
+               k_start_timer(&sub->timer, sub->timeout);
+       }
+       sub->owner = subscriber;
+       nametbl_subscribe(sub);
+}
+
+/**
+ * subscr_conn_shutdown_event - handle termination request from subscriber
+ */
+
+static void subscr_conn_shutdown_event(void *usr_handle,
+                                      u32 portref,
+                                      struct sk_buff **buf,
+                                      unsigned char const *data,
+                                      unsigned int size,
+                                      int reason)
+{
+       struct subscriber *subscriber;
+       spinlock_t *subscriber_lock;
+
+       subscriber = ref_lock((u32)(unsigned long)usr_handle);
+       if (subscriber == NULL)
+               return;
+
+       subscriber_lock = subscriber->lock;
+       subscr_terminate(subscriber);
+       spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_conn_msg_event - handle new subscription request from subscriber
+ */
+
+static void subscr_conn_msg_event(void *usr_handle,
+                                 u32 port_ref,
+                                 struct sk_buff **buf,
+                                 const unchar *data,
+                                 u32 size)
+{
+       struct subscriber *subscriber;
+       spinlock_t *subscriber_lock;
+
+       subscriber = ref_lock((u32)(unsigned long)usr_handle);
+       if (subscriber == NULL)
+               return;
+
+       subscriber_lock = subscriber->lock;
+       if (size != sizeof(struct tipc_subscr))
+               subscr_terminate(subscriber);
+       else
+               subscr_subscribe((struct tipc_subscr *)data, subscriber);
+       
+       spin_unlock_bh(subscriber_lock);
+}
+
+/**
+ * subscr_named_msg_event - handle request to establish a new subscriber
+ */
+
+static void subscr_named_msg_event(void *usr_handle,
+                                  u32 port_ref,
+                                  struct sk_buff **buf,
+                                  const unchar *data,
+                                  u32 size,
+                                  u32 importance, 
+                                  struct tipc_portid const *orig,
+                                  struct tipc_name_seq const *dest)
+{
+       struct subscriber *subscriber;
+       struct iovec msg_sect = {0, 0};
+       spinlock_t *subscriber_lock;
+
+       dbg("subscr_named_msg_event: orig = %x own = %x,\n",
+           orig->node, tipc_own_addr);
+       if (size && (size != sizeof(struct tipc_subscr))) {
+               warn("Received tipc_subscr of invalid size\n");
+               return;
+       }
+
+       /* Create subscriber object */
+
+       subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
+       if (subscriber == NULL) {
+               warn("Memory squeeze; ignoring subscriber setup\n");
+               return;
+       }
+       memset(subscriber, 0, sizeof(struct subscriber));
+       INIT_LIST_HEAD(&subscriber->subscription_list);
+       INIT_LIST_HEAD(&subscriber->subscriber_list);
+       subscriber->ref = ref_acquire(subscriber, &subscriber->lock);
+       if (subscriber->ref == 0) {
+               warn("Failed to acquire subscriber reference\n");
+               kfree(subscriber);
+               return;
+       }
+
+       /* Establish a connection to subscriber */
+
+       tipc_createport(topsrv.user_ref,
+                       (void *)(unsigned long)subscriber->ref,
+                       importance,
+                       0,
+                       0,
+                       subscr_conn_shutdown_event,
+                       0,
+                       0,
+                       subscr_conn_msg_event,
+                       0,
+                       &subscriber->port_ref);
+       if (subscriber->port_ref == 0) {
+               warn("Memory squeeze; failed to create subscription port\n");
+               ref_discard(subscriber->ref);
+               kfree(subscriber);
+               return;
+       }
+       tipc_connect2port(subscriber->port_ref, orig);
+
+       /* Add subscriber to topology server's subscriber list */
+
+       ref_lock(subscriber->ref);
+       spin_lock_bh(&topsrv.lock);
+       list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
+       spin_unlock_bh(&topsrv.lock);
+
+       /*
+        * Subscribe now if message contains a subscription,
+        * otherwise send an empty response to complete connection handshaking
+        */
+
+       subscriber_lock = subscriber->lock;
+       if (size)
+               subscr_subscribe((struct tipc_subscr *)data, subscriber);
+       else
+               tipc_send(subscriber->port_ref, 1, &msg_sect);
+
+       spin_unlock_bh(subscriber_lock);
+}
+
+int subscr_start(void)
+{
+       struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
+       int res = -1;
+
+       memset(&topsrv, 0, sizeof (topsrv));
+       topsrv.lock = SPIN_LOCK_UNLOCKED;
+       INIT_LIST_HEAD(&topsrv.subscriber_list);
+
+       spin_lock_bh(&topsrv.lock);
+       res = tipc_attach(&topsrv.user_ref, 0, 0);
+       if (res) {
+               spin_unlock_bh(&topsrv.lock);
+               return res;
+       }
+
+       res = tipc_createport(topsrv.user_ref,
+                             0,
+                             TIPC_CRITICAL_IMPORTANCE,
+                             0,
+                             0,
+                             0,
+                             0,
+                             subscr_named_msg_event,
+                             0,
+                             0,
+                             &topsrv.setup_port);
+       if (res)
+               goto failed;
+
+       res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
+       if (res)
+               goto failed;
+
+       spin_unlock_bh(&topsrv.lock);
+       return 0;
+
+failed:
+       err("Failed to create subscription service\n");
+       tipc_detach(topsrv.user_ref);
+       topsrv.user_ref = 0;
+       spin_unlock_bh(&topsrv.lock);
+       return res;
+}
+
+void subscr_stop(void)
+{
+       struct subscriber *subscriber;
+       struct subscriber *subscriber_temp;
+       spinlock_t *subscriber_lock;
+
+       if (topsrv.user_ref) {
+               tipc_deleteport(topsrv.setup_port);
+               list_for_each_entry_safe(subscriber, subscriber_temp, 
+                                        &topsrv.subscriber_list,
+                                        subscriber_list) {
+                       ref_lock(subscriber->ref);
+                       subscriber_lock = subscriber->lock;
+                       subscr_terminate(subscriber);
+                       spin_unlock_bh(subscriber_lock);
+               }
+               tipc_detach(topsrv.user_ref);
+               topsrv.user_ref = 0;
+       }
+}
+
+
+int tipc_ispublished(struct tipc_name const *name)
+{
+       u32 domain = 0;
+
+       return (nametbl_translate(name->type, name->instance, &domain) != 0);
+}
+
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644 (file)
index 0000000..ccff4ef
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * net/tipc/subscr.h: Include file for TIPC subscription service
+ * 
+ * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_SUBSCR_H
+#define _TIPC_SUBSCR_H
+
+/**
+ * struct subscription - TIPC network topology subscription object
+ * @seq: name sequence associated with subscription
+ * @timeout: duration of subscription (in ms)
+ * @filter: event filtering to be done for subscription
+ * @evt: template for events generated by subscription
+ * @subscription_list: adjacent subscriptions in subscriber's subscription list
+ * @nameseq_list: adjacent subscriptions in name sequence's subscription list
+ * @timer: timer governing subscription duration (optional)
+ * @owner: pointer to subscriber object associated with this subscription
+ */
+struct subscription {
+       struct tipc_name_seq seq;
+       u32 timeout;
+       u32 filter;
+       struct tipc_event evt;
+       struct list_head subscription_list;
+       struct list_head nameseq_list;
+       struct timer_list timer;
+       struct subscriber *owner;
+};
+
+int subscr_overlap(struct subscription * sub, 
+                  u32 found_lower, 
+                  u32 found_upper);
+
+void subscr_report_overlap(struct subscription * sub, 
+                          u32 found_lower, 
+                          u32 found_upper,
+                          u32 event, 
+                          u32 port_ref, 
+                          u32 node,
+                          int must_report);
+
+int subscr_start(void);
+
+void subscr_stop(void);
+
+
+#endif
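
subscr_overlap(), declared above, is implemented in subscr.c earlier in this patch; its name and parameters suggest a range-intersection test between a published range of name-sequence instances and the range the subscription covers. A hedged, stand-alone sketch of that test, using invented names rather than the real struct subscription, is:

#include <assert.h>

typedef unsigned int u32;

struct range {
        u32 lower;
        u32 upper;
};

/* Returns 1 if [found_lower, found_upper] intersects the subscribed range. */
static int ranges_overlap(const struct range *sub, u32 found_lower, u32 found_upper)
{
        if (found_lower < sub->lower)
                found_lower = sub->lower;
        if (found_upper > sub->upper)
                found_upper = sub->upper;
        return found_lower <= found_upper;
}

int main(void)
{
        struct range sub = { .lower = 100, .upper = 200 };

        assert(ranges_overlap(&sub, 150, 300));     /* partial overlap  */
        assert(ranges_overlap(&sub, 100, 100));     /* single instance  */
        assert(!ranges_overlap(&sub, 201, 300));    /* disjoint         */
        return 0;
}
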
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
new file mode 100644 (file)
index 0000000..35ec7dc
--- /dev/null
@@ -0,0 +1,265 @@
+/*
+ * net/tipc/user_reg.c: TIPC user registry code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2004-2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "user_reg.h"
+
+/*
+ * TIPC user registry keeps track of users of the tipc_port interface.
+ *
+ * The registry uses an array of "TIPC user" entries;
+ * a user's ID is the index of its associated array entry.
+ * Array entry 0 is not used, so user ID 0 is not valid;
+ * TIPC sometimes uses this value to denote an anonymous user.
+ * The list of free entries is initially chained from the last entry down to entry 1.
+ */
+
+/**
+ * struct tipc_user - registered TIPC user info
+ * @next: index of next free registry entry (or -1 for an allocated entry)
+ * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
+ * @usr_handle: user-defined value passed to callback routine 
+ * @ports: list of user ports owned by the user
+ */
+
+struct tipc_user {
+       int next;
+       tipc_mode_event callback;
+       void *usr_handle;
+       struct list_head ports;
+};
+
+#define MAX_USERID 64
+#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
+
+static struct tipc_user *users = 0;
+static u32 next_free_user = MAX_USERID + 1;
+static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
+
+/**
+ * reg_init - create TIPC user registry (but don't activate it)
+ * 
+ * If the registry has already been initialized, it is left "as is".
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+static int reg_init(void)
+{
+       u32 i;
+       
+       spin_lock_bh(&reg_lock);
+       if (!users) {
+               users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
+               if (users) {
+                       memset(users, 0, USER_LIST_SIZE);
+                       for (i = 1; i <= MAX_USERID; i++) {
+                               users[i].next = i - 1;
+                       }
+                       next_free_user = MAX_USERID;
+               }
+       }
+       spin_unlock_bh(&reg_lock);
+       return users ? TIPC_OK : -ENOMEM;
+}
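
The function above is the whole allocator: reg_init() chains entry N to entry N-1, and tipc_attach()/tipc_detach() further down pop and push the head of that chain. A minimal user-space sketch of the same index-chained free list, with invented names and a tiny table purely for illustration:

#include <stdio.h>

#define MAX_ID 4                        /* small registry, for illustration  */

struct entry {
        int next;                       /* next free index, or -1 if in use  */
};

static struct entry tbl[MAX_ID + 1];    /* slot 0 is deliberately unused     */
static int free_head;

static void registry_init(void)
{
        int i;

        for (i = 1; i <= MAX_ID; i++)   /* chain N -> N-1, ending at 1 -> 0  */
                tbl[i].next = i - 1;
        free_head = MAX_ID;
}

static int registry_attach(void)        /* returns an ID, or 0 if exhausted  */
{
        int id = free_head;

        if (!id)
                return 0;               /* ID 0 means "no user", as in TIPC  */
        free_head = tbl[id].next;
        tbl[id].next = -1;              /* mark the slot as allocated        */
        return id;
}

static void registry_detach(int id)
{
        tbl[id].next = free_head;       /* push the slot back onto the chain */
        free_head = id;
}

int main(void)
{
        registry_init();
        printf("first id: %d\n", registry_attach());    /* 4                 */
        printf("second id: %d\n", registry_attach());   /* 3                 */
        registry_detach(4);
        printf("reused id: %d\n", registry_attach());   /* 4 again           */
        return 0;
}
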
+
+/**
+ * reg_callback - inform TIPC user about current operating mode
+ */
+
+static void reg_callback(struct tipc_user *user_ptr)
+{
+       tipc_mode_event cb;
+       void *arg;
+
+       spin_lock_bh(&reg_lock);
+       cb = user_ptr->callback;
+       arg = user_ptr->usr_handle;
+       spin_unlock_bh(&reg_lock);
+
+       if (cb)
+               cb(arg, tipc_mode, tipc_own_addr);
+}
+
+/**
+ * reg_start - activate TIPC user registry
+ */
+
+int reg_start(void)
+{
+       u32 u;
+       int res;
+
+       if ((res = reg_init()))
+               return res;
+
+       for (u = 1; u <= MAX_USERID; u++) {
+               if (users[u].callback)
+                       k_signal((Handler)reg_callback,
+                                (unsigned long)&users[u]);
+       }
+       return TIPC_OK;
+}
+
+/**
+ * reg_stop - shut down & delete TIPC user registry
+ */
+
+void reg_stop(void)
+{               
+       int id;
+
+       if (!users)
+               return;
+
+       for (id = 1; id <= MAX_USERID; id++) {
+               if (users[id].callback)
+                       reg_callback(&users[id]);
+       }
+       kfree(users);
+       users = 0;
+}
+
+/**
+ * tipc_attach - register a TIPC user
+ *
+ * NOTE: This routine may be called when TIPC is inactive.
+ */
+
+int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
+{
+       struct tipc_user *user_ptr;
+
+       if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
+               return -ENOPROTOOPT;
+       if (!users && reg_init())
+               return -ENOMEM;
+
+       spin_lock_bh(&reg_lock);
+       if (!next_free_user) {
+               spin_unlock_bh(&reg_lock);
+               return -EBUSY;
+       }
+       user_ptr = &users[next_free_user];
+       *userid = next_free_user;
+       next_free_user = user_ptr->next;
+       user_ptr->next = -1; 
+       spin_unlock_bh(&reg_lock);
+
+       user_ptr->callback = cb;
+       user_ptr->usr_handle = usr_handle;
+       INIT_LIST_HEAD(&user_ptr->ports);
+       atomic_inc(&tipc_user_count);
+       
+       if (cb && (tipc_mode != TIPC_NOT_RUNNING))
+               k_signal((Handler)reg_callback, (unsigned long)user_ptr);
+       return TIPC_OK;
+}
+
+/**
+ * tipc_detach - deregister a TIPC user
+ */
+
+void tipc_detach(u32 userid)
+{
+       struct tipc_user *user_ptr;
+       struct list_head ports_temp;
+       struct user_port *up_ptr, *temp_up_ptr;
+
+       if ((userid == 0) || (userid > MAX_USERID))
+               return;
+
+       spin_lock_bh(&reg_lock);
+       if ((!users) || (users[userid].next >= 0)) {
+               spin_unlock_bh(&reg_lock);
+               return;
+       }
+
+       user_ptr = &users[userid];
+        user_ptr->callback = NULL;              
+       INIT_LIST_HEAD(&ports_temp);
+        list_splice(&user_ptr->ports, &ports_temp);
+       user_ptr->next = next_free_user;
+       next_free_user = userid;
+       spin_unlock_bh(&reg_lock);
+
+       atomic_dec(&tipc_user_count);
+
+        list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
+               tipc_deleteport(up_ptr->ref);
+       }
+}
+
+/**
+ * reg_add_port - register a user's driver port
+ */
+
+int reg_add_port(struct user_port *up_ptr)
+{
+       struct tipc_user *user_ptr;
+
+       if (up_ptr->user_ref == 0)
+               return TIPC_OK;
+       if (up_ptr->user_ref > MAX_USERID)
+               return -EINVAL;
+       if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
+               return -ENOPROTOOPT;
+
+       spin_lock_bh(&reg_lock);
+       user_ptr = &users[up_ptr->user_ref];
+       list_add(&up_ptr->uport_list, &user_ptr->ports);
+       spin_unlock_bh(&reg_lock);
+       return TIPC_OK;
+}
+
+/**
+ * reg_remove_port - deregister a user's driver port
+ */
+
+int reg_remove_port(struct user_port *up_ptr)
+{
+       if (up_ptr->user_ref == 0)
+               return TIPC_OK;
+       if (up_ptr->user_ref > MAX_USERID)
+               return -EINVAL;
+       if (!users )
+               return -ENOPROTOOPT;
+
+       spin_lock_bh(&reg_lock);
+       list_del_init(&up_ptr->uport_list);
+       spin_unlock_bh(&reg_lock);
+       return TIPC_OK;
+}
+
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
new file mode 100644 (file)
index 0000000..122ca9b
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * net/tipc/user_reg.h: Include file for TIPC user registry code
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_USER_REG_H
+#define _TIPC_USER_REG_H
+
+#include "port.h"
+
+int reg_start(void);
+void reg_stop(void);
+
+int reg_add_port(struct user_port *up_ptr);
+int reg_remove_port(struct user_port *up_ptr);
+
+#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
new file mode 100644 (file)
index 0000000..4eaef66
--- /dev/null
@@ -0,0 +1,169 @@
+/*
+ * net/tipc/zone.c: TIPC zone management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "core.h"
+#include "zone.h"
+#include "net.h"
+#include "addr.h"
+#include "node_subscr.h"
+#include "cluster.h"
+#include "node.h"
+
+struct _zone *zone_create(u32 addr)
+{
+       struct _zone *z_ptr = 0;
+       u32 z_num;
+
+       if (!addr_domain_valid(addr))
+               return 0;
+
+       z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
+       if (z_ptr != NULL) {
+               memset(z_ptr, 0, sizeof(*z_ptr));
+               z_num = tipc_zone(addr);
+               z_ptr->addr = tipc_addr(z_num, 0, 0);
+               net.zones[z_num] = z_ptr;
+       }
+       return z_ptr;
+}
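
zone_create() extracts the zone number from the 32-bit network address with tipc_zone() and rebuilds a <Z.0.0> zone address with tipc_addr(); those helpers live in addr.h and are not part of this hunk. Assuming the usual TIPC <zone.cluster.node> packing (8-bit zone, 12-bit cluster, 12-bit node), which is only an assumption here, a stand-alone sketch of that arithmetic looks like this:

#include <stdio.h>

typedef unsigned int u32;

/* Assumed <Z.C.N> packing: 8-bit zone, 12-bit cluster, 12-bit node. */
static u32 addr_build(u32 zone, u32 cluster, u32 node)
{
        return (zone << 24) | (cluster << 12) | node;
}

static u32 addr_zone(u32 addr)    { return addr >> 24; }
static u32 addr_cluster(u32 addr) { return (addr >> 12) & 0xfff; }
static u32 addr_node(u32 addr)    { return addr & 0xfff; }

int main(void)
{
        u32 addr = addr_build(1, 1, 10);                    /* address <1.1.10>         */
        u32 zone_addr = addr_build(addr_zone(addr), 0, 0);  /* <1.0.0>, as zone_create()
                                                               does for its zone entry  */

        printf("<%u.%u.%u> zone address 0x%08x\n",
               addr_zone(addr), addr_cluster(addr), addr_node(addr), zone_addr);
        return 0;
}
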
+
+void zone_delete(struct _zone *z_ptr)
+{
+       u32 c_num;
+
+       if (!z_ptr)
+               return;
+       for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+               cluster_delete(z_ptr->clusters[c_num]);
+       }
+       kfree(z_ptr);
+}
+
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
+{
+       u32 c_num = tipc_cluster(c_ptr->addr);
+
+       assert(c_ptr->addr);
+       assert(c_num <= tipc_max_clusters);
+       assert(z_ptr->clusters[c_num] == 0);
+       z_ptr->clusters[c_num] = c_ptr;
+}
+
+void zone_remove_as_router(struct _zone *z_ptr, u32 router)
+{
+       u32 c_num;
+
+       for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+               if (z_ptr->clusters[c_num]) {
+                       cluster_remove_as_router(z_ptr->clusters[c_num], 
+                                                router);
+               }
+       }
+}
+
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
+{
+       u32 c_num;
+
+       for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+               if (z_ptr->clusters[c_num]) {
+                       if (in_own_cluster(z_ptr->addr))
+                               continue;
+                       cluster_send_ext_routes(z_ptr->clusters[c_num], dest);
+               }
+       }
+}
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+       struct cluster *c_ptr;
+       struct node *n_ptr;
+       u32 c_num;
+
+       if (!z_ptr)
+               return 0;
+       c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+       if (!c_ptr)
+               return 0;
+       n_ptr = cluster_select_node(c_ptr, ref);
+       if (n_ptr)
+               return n_ptr;
+
+       /* Links to any other clusters within this zone ? */
+       for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+               c_ptr = z_ptr->clusters[c_num];
+               if (!c_ptr)
+                       return 0;
+               n_ptr = cluster_select_node(c_ptr, ref);
+               if (n_ptr)
+                       return n_ptr;
+       }
+       return 0;
+}
+
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
+{
+       struct cluster *c_ptr;
+       u32 c_num;
+       u32 router;
+
+       if (!z_ptr)
+               return 0;
+       c_ptr = z_ptr->clusters[tipc_cluster(addr)];
+       router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+       if (router)
+               return router;
+
+       /* Links to any other clusters within the zone? */
+       for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
+               c_ptr = z_ptr->clusters[c_num];
+               router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
+               if (router)
+                       return router;
+       }
+       return 0;
+}
+
+
+u32 zone_next_node(u32 addr)
+{
+       struct cluster *c_ptr = cluster_find(addr);
+
+       if (c_ptr)
+               return cluster_next_node(c_ptr, addr);
+       return 0;
+}
+
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
new file mode 100644 (file)
index 0000000..4326f78
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * net/tipc/zone.h: Include file for TIPC zone management routines
+ * 
+ * Copyright (c) 2000-2006, Ericsson AB
+ * Copyright (c) 2005, Wind River Systems
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ *    contributors may be used to endorse or promote products derived from
+ *    this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TIPC_ZONE_H
+#define _TIPC_ZONE_H
+
+#include "node_subscr.h"
+#include "net.h"
+
+
+/**
+ * struct _zone - TIPC zone structure
+ * @addr: network address of zone
+ * @clusters: array of pointers to all clusters within zone
+ * @links: (used for inter-zone communication)
+ */
+struct _zone {
+       u32 addr;
+       struct cluster *clusters[2]; /* currently limited to just 1 cluster */
+       u32 links;
+};
+
+struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
+u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
+void zone_remove_as_router(struct _zone *z_ptr, u32 router);
+void zone_send_external_routes(struct _zone *z_ptr, u32 dest);
+struct _zone *zone_create(u32 addr);
+void zone_delete(struct _zone *z_ptr);
+void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
+u32 zone_next_node(u32 addr);
+
+static inline struct _zone *zone_find(u32 addr)
+{
+       return net.zones[tipc_zone(addr)];
+}
+
+#endif
index 12e4fb72bf0f46bfc22a5e1a8a75efd93792d650..53d6c7bbf56459f6aa7145a7c3649a754588abc1 100644 (file)
@@ -494,8 +494,7 @@ static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
                                       char *name1, char *name2)
 {
        if (!ipv6_addr_any(addr))
-               audit_log_format(ab, " %s=%04x:%04x:%04x:%04x:%04x:"
-                                "%04x:%04x:%04x", name1, NIP6(*addr));
+               audit_log_format(ab, " %s=" NIP6_FMT, name1, NIP6(*addr));
        if (port)
                audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
@@ -504,7 +503,7 @@ static inline void avc_print_ipv4_addr(struct audit_buffer *ab, u32 addr,
                                       __be16 port, char *name1, char *name2)
 {
        if (addr)
-               audit_log_format(ab, " %s=%d.%d.%d.%d", name1, NIPQUAD(addr));
+               audit_log_format(ab, " %s=" NIPQUAD_FMT, name1, NIPQUAD(addr));
        if (port)
                audit_log_format(ab, " %s=%d", name2, ntohs(port));
 }
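
The two hunks above replace open-coded address format strings with NIP6_FMT and NIPQUAD_FMT, so the format string and the matching NIP6()/NIPQUAD() argument expansions stay defined in one place in the kernel headers. As a rough user-space illustration (simplified stand-ins, not the exact kernel definitions), the IPv4 pair behaves like this:

#include <stdio.h>
#include <arpa/inet.h>

/* Simplified stand-ins for the kernel's NIPQUAD/NIPQUAD_FMT pair. */
#define NIPQUAD_FMT "%u.%u.%u.%u"
#define NIPQUAD(addr)                           \
        ((unsigned char *)&(addr))[0],          \
        ((unsigned char *)&(addr))[1],          \
        ((unsigned char *)&(addr))[2],          \
        ((unsigned char *)&(addr))[3]

int main(void)
{
        unsigned int addr = htonl(0xc0a80001);  /* 192.168.0.1 in network byte order */

        /* One macro supplies the format, the other the four byte arguments. */
        printf("src=" NIPQUAD_FMT "\n", NIPQUAD(addr));
        return 0;
}
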
index 222014cafc1a9050c5670b28e55a2ebc5a1c91c9..a1b0b92af4b5acfdcf5c7e60aa75008604cf9db7 100644 (file)
@@ -270,7 +270,6 @@ extern int dmasound_catchRadius;
 #define SW_INPUT_VOLUME_SCALE  4
 #define SW_INPUT_VOLUME_DEFAULT        (128 / SW_INPUT_VOLUME_SCALE)
 
-extern int expand_bal; /* Balance factor for expanding (not volume!) */
 extern int expand_read_bal;    /* Balance factor for reading */
 extern uint software_input_volume; /* software implemented recording volume! */
 
index 59eb53f893184a22a26a2fa52b2bca055179c758..dc31373069a5f3231fa1a7bb044e99e8dd3b08af 100644 (file)
@@ -67,46 +67,46 @@ static int expand_data;     /* Data for expanding */
  * ++geert: split in even more functions (one per format)
  */
 
-static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft);
-static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
                         u_char frame[], ssize_t *frameUsed,
                         ssize_t frameLeft);
-static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
                         u_char frame[], ssize_t *frameUsed,
                         ssize_t frameLeft);
-static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft);
-static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft);
-static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft);
-static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft);
-static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft);
-static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft);
-static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft);
-static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft);
-static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft);
-static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft);
-static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft);
 
@@ -151,7 +151,7 @@ static int FalconStateInfo(char *buffer, size_t space);
 /*** Translations ************************************************************/
 
 
-static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_law(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft)
 {
@@ -176,7 +176,7 @@ static ssize_t ata_ct_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s8(const u_char __user *userPtr, size_t userCount,
                         u_char frame[], ssize_t *frameUsed,
                         ssize_t frameLeft)
 {
@@ -194,7 +194,7 @@ static ssize_t ata_ct_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u8(const u_char __user *userPtr, size_t userCount,
                         u_char frame[], ssize_t *frameUsed,
                         ssize_t frameLeft)
 {
@@ -217,8 +217,9 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
                used = count*2;
                while (count > 0) {
                        u_short data;
-                       if (get_user(data, ((u_short *)userPtr)++))
+                       if (get_user(data, (u_short __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 2;
                        *p++ = data ^ 0x8080;
                        count--;
                }
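
This change, repeated throughout the dmasound translation routines, splits get_user(data, ((u_short *)userPtr)++) into a plain cast plus an explicit userPtr += 2: incrementing a pointer through a cast relies on an old GCC extension that newer compilers reject, and the added __user annotation lets sparse check that user-space pointers are only dereferenced via get_user()/copy_from_user(). A small user-space approximation of the resulting shape (get_user is a kernel macro, so a plain read stands in for it here):

#include <stdio.h>

typedef unsigned short u_short;
typedef unsigned char u_char;

/* After the change: cast only for the read, pointer advanced separately. */
static u_short read_sample(const u_char **userPtr)
{
        u_short data = *(const u_short *)*userPtr;

        *userPtr += 2;          /* step over the 16-bit sample just read */
        return data;
}

int main(void)
{
        static const u_short samples[2] = { 0x1234, 0x5678 };
        const u_char *p = (const u_char *)samples;
        u_short a = read_sample(&p);
        u_short b = read_sample(&p);

        printf("%04x %04x\n", a, b);    /* 1234 5678 */
        return 0;
}
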
@@ -228,7 +229,7 @@ static ssize_t ata_ct_u8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16be(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -240,8 +241,9 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
                used = count*2;
                while (count > 0) {
                        u_short data;
-                       if (get_user(data, ((u_short *)userPtr)++))
+                       if (get_user(data, (u_short __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 2;
                        *p++ = data;
                        *p++ = data;
                        count--;
@@ -259,7 +261,7 @@ static ssize_t ata_ct_s16be(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16be(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -271,8 +273,9 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
                used = count*2;
                while (count > 0) {
                        u_short data;
-                       if (get_user(data, ((u_short *)userPtr)++))
+                       if (get_user(data, (u_short __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 2;
                        data ^= 0x8000;
                        *p++ = data;
                        *p++ = data;
@@ -284,9 +287,10 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
                count = min_t(unsigned long, userCount, frameLeft)>>2;
                used = count*4;
                while (count > 0) {
-                       u_long data;
-                       if (get_user(data, ((u_int *)userPtr)++))
+                       u_int data;
+                       if (get_user(data, (u_int __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 4;
                        *p++ = data ^ 0x80008000;
                        count--;
                }
@@ -296,7 +300,7 @@ static ssize_t ata_ct_u16be(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_s16le(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -309,8 +313,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
                used = count*2;
                while (count > 0) {
                        u_short data;
-                       if (get_user(data, ((u_short *)userPtr)++))
+                       if (get_user(data, (u_short __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 2;
                        data = le2be16(data);
                        *p++ = data;
                        *p++ = data;
@@ -323,8 +328,9 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
                used = count*4;
                while (count > 0) {
                        u_long data;
-                       if (get_user(data, ((u_int *)userPtr)++))
+                       if (get_user(data, (u_int __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 4;
                        data = le2be16dbl(data);
                        *p++ = data;
                        count--;
@@ -335,7 +341,7 @@ static ssize_t ata_ct_s16le(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ct_u16le(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -348,8 +354,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
                used = count*2;
                while (count > 0) {
                        u_short data;
-                       if (get_user(data, ((u_short *)userPtr)++))
+                       if (get_user(data, (u_short __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 2;
                        data = le2be16(data) ^ 0x8000;
                        *p++ = data;
                        *p++ = data;
@@ -361,8 +368,9 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
                used = count;
                while (count > 0) {
                        u_long data;
-                       if (get_user(data, ((u_int *)userPtr)++))
+                       if (get_user(data, (u_int __user *)userPtr))
                                return -EFAULT;
+                       userPtr += 4;
                        data = le2be16dbl(data) ^ 0x80008000;
                        *p++ = data;
                        count--;
@@ -373,7 +381,7 @@ static ssize_t ata_ct_u16le(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_law(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
@@ -435,7 +443,7 @@ static ssize_t ata_ctx_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft)
 {
@@ -470,8 +478,9 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                userCount -= 2;
                                bal += hSpeed;
                        }
@@ -488,7 +497,7 @@ static ssize_t ata_ctx_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft)
 {
@@ -524,8 +533,9 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                data ^= 0x8080;
                                userCount -= 2;
                                bal += hSpeed;
@@ -543,7 +553,7 @@ static ssize_t ata_ctx_u8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16be(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft)
 {
@@ -561,8 +571,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                userCount -= 2;
                                bal += hSpeed;
                        }
@@ -579,8 +590,9 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 4)
                                        break;
-                               if (get_user(data, ((u_int *)userPtr)++))
+                               if (get_user(data, (u_int __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 4;
                                userCount -= 4;
                                bal += hSpeed;
                        }
@@ -597,7 +609,7 @@ static ssize_t ata_ctx_s16be(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16be(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft)
 {
@@ -615,8 +627,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                data ^= 0x8000;
                                userCount -= 2;
                                bal += hSpeed;
@@ -634,8 +647,9 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 4)
                                        break;
-                               if (get_user(data, ((u_int *)userPtr)++))
+                               if (get_user(data, (u_int __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 4;
                                data ^= 0x80008000;
                                userCount -= 4;
                                bal += hSpeed;
@@ -653,7 +667,7 @@ static ssize_t ata_ctx_u16be(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_s16le(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft)
 {
@@ -671,8 +685,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                data = le2be16(data);
                                userCount -= 2;
                                bal += hSpeed;
@@ -690,8 +705,9 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 4)
                                        break;
-                               if (get_user(data, ((u_int *)userPtr)++))
+                               if (get_user(data, (u_int __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 4;
                                data = le2be16dbl(data);
                                userCount -= 4;
                                bal += hSpeed;
@@ -709,7 +725,7 @@ static ssize_t ata_ctx_s16le(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
+static ssize_t ata_ctx_u16le(const u_char __user *userPtr, size_t userCount,
                             u_char frame[], ssize_t *frameUsed,
                             ssize_t frameLeft)
 {
@@ -727,8 +743,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 2)
                                        break;
-                               if (get_user(data, ((u_short *)userPtr)++))
+                               if (get_user(data, (u_short __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 2;
                                data = le2be16(data) ^ 0x8000;
                                userCount -= 2;
                                bal += hSpeed;
@@ -746,8 +763,9 @@ static ssize_t ata_ctx_u16le(const u_char *userPtr, size_t userCount,
                        if (bal < 0) {
                                if (userCount < 4)
                                        break;
-                               if (get_user(data, ((u_int *)userPtr)++))
+                               if (get_user(data, (u_int __user *)userPtr))
                                        return -EFAULT;
+                               userPtr += 4;
                                data = le2be16dbl(data) ^ 0x80008000;
                                userCount -= 4;
                                bal += hSpeed;
index d59f60b2641096119a898f67319812aac988b6e4..494070a3f8702e27d95673f4b0253ca24eb29d13 100644 (file)
@@ -34,6 +34,7 @@
 #define DMASOUND_PAULA_REVISION 0
 #define DMASOUND_PAULA_EDITION 4
 
+#define custom amiga_custom
    /*
     *  The minimum period for audio depends on htotal (for OCS/ECS/AGA)
     *  (Imported from arch/m68k/amiga/amisound.c)
@@ -156,7 +157,7 @@ static int AmiStateInfo(char *buffer, size_t space);
      *  Native format
      */
 
-static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t ami_ct_s8(const u_char __user *userPtr, size_t userCount,
                         u_char frame[], ssize_t *frameUsed, ssize_t frameLeft)
 {
        ssize_t count, used;
@@ -189,7 +190,7 @@ static ssize_t ami_ct_s8(const u_char *userPtr, size_t userCount,
      */
 
 #define GENERATE_AMI_CT8(funcname, convsample)                         \
-static ssize_t funcname(const u_char *userPtr, size_t userCount,       \
+static ssize_t funcname(const u_char __user *userPtr, size_t userCount,        \
                        u_char frame[], ssize_t *frameUsed,             \
                        ssize_t frameLeft)                              \
 {                                                                      \
@@ -240,10 +241,11 @@ GENERATE_AMI_CT8(ami_ct_u8, AMI_CT_U8)
      */
 
 #define GENERATE_AMI_CT_16(funcname, convsample)                       \
-static ssize_t funcname(const u_char *userPtr, size_t userCount,       \
+static ssize_t funcname(const u_char __user *userPtr, size_t userCount,        \
                        u_char frame[], ssize_t *frameUsed,             \
                        ssize_t frameLeft)                              \
 {                                                                      \
+       const u_short __user *ptr = (const u_short __user *)userPtr;    \
        ssize_t count, used;                                            \
        u_short data;                                                   \
                                                                        \
@@ -253,7 +255,7 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount,    \
                count = min_t(size_t, userCount, frameLeft)>>1 & ~1;    \
                used = count*2;                                         \
                while (count > 0) {                                     \
-                       if (get_user(data, ((u_short *)userPtr)++))     \
+                       if (get_user(data, ptr++))                      \
                                return -EFAULT;                         \
                        data = convsample(data);                        \
                        *high++ = data>>8;                              \
@@ -268,12 +270,12 @@ static ssize_t funcname(const u_char *userPtr, size_t userCount,  \
                count = min_t(size_t, userCount, frameLeft)>>2 & ~1;    \
                used = count*4;                                         \
                while (count > 0) {                                     \
-                       if (get_user(data, ((u_short *)userPtr)++))     \
+                       if (get_user(data, ptr++))                      \
                                return -EFAULT;                         \
                        data = convsample(data);                        \
                        *lefth++ = data>>8;                             \
                        *leftl++ = (data>>2) & 0x3f;                    \
-                       if (get_user(data, ((u_short *)userPtr)++))     \
+                       if (get_user(data, ptr++))                      \
                                return -EFAULT;                         \
                        data = convsample(data);                        \
                        *righth++ = data>>8;                            \
index 1ddaa6284b08f745d800fffaf428bd41450d486a..e2081f32b0c45904246961f450889956e962ba6b 100644 (file)
@@ -58,7 +58,7 @@ static void Q40Interrupt(void);
 
 
 /* userCount, frameUsed, frameLeft == byte counts */
-static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_law(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
@@ -79,7 +79,7 @@ static ssize_t q40_ct_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_s8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft)
 {
@@ -98,7 +98,7 @@ static ssize_t q40_ct_s8(const u_char *userPtr, size_t userCount,
        return used;
 }
 
-static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ct_u8(const u_char __user *userPtr, size_t userCount,
                          u_char frame[], ssize_t *frameUsed,
                          ssize_t frameLeft)
 {
@@ -114,7 +114,7 @@ static ssize_t q40_ct_u8(const u_char *userPtr, size_t userCount,
 
 
 /* a bit too complicated to optimise right now ..*/
-static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_law(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -152,7 +152,7 @@ static ssize_t q40_ctx_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_s8(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
@@ -189,7 +189,7 @@ static ssize_t q40_ctx_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctx_u8(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
@@ -224,7 +224,7 @@ static ssize_t q40_ctx_u8(const u_char *userPtr, size_t userCount,
 }
 
 /* compressing versions */
-static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_law(const u_char __user *userPtr, size_t userCount,
                            u_char frame[], ssize_t *frameUsed,
                            ssize_t frameLeft)
 {
@@ -265,7 +265,7 @@ static ssize_t q40_ctc_law(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_s8(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
@@ -304,7 +304,7 @@ static ssize_t q40_ctc_s8(const u_char *userPtr, size_t userCount,
 }
 
 
-static ssize_t q40_ctc_u8(const u_char *userPtr, size_t userCount,
+static ssize_t q40_ctc_u8(const u_char __user *userPtr, size_t userCount,
                           u_char frame[], ssize_t *frameUsed,
                           ssize_t frameLeft)
 {
index 23562e947806bb73f05fc37b3e6305af6fda56cb..ca973ac2a30a653e698af58de3ac7a5f875b8bca 100644 (file)
@@ -17,6 +17,7 @@
 #include <asm/uaccess.h>
 #include "dmasound.h"
 
+extern int expand_bal; /* Balance factor for expanding (not volume!) */
 static short dmasound_alaw2dma16[] ;
 static short dmasound_ulaw2dma16[] ;