Merge tag 'lsk-v3.10-android-15.02'
authorHuang, Tao <huangtao@rock-chips.com>
Thu, 5 Mar 2015 09:11:40 +0000 (17:11 +0800)
committerHuang, Tao <huangtao@rock-chips.com>
Thu, 5 Mar 2015 09:11:40 +0000 (17:11 +0800)
LSK Android 15.02 v3.10

Conflicts:
drivers/Kconfig
drivers/regulator/core.c
include/linux/of.h

334 files changed:
Documentation/ABI/testing/sysfs-firmware-ofw [new file with mode: 0644]
Documentation/devicetree/changesets.txt [new file with mode: 0644]
Documentation/devicetree/dynamic-resolution-notes.txt [new file with mode: 0644]
Documentation/devicetree/overlay-notes.txt [new file with mode: 0644]
Documentation/kernel-parameters.txt
Makefile
arch/arc/boot/dts/nsimosci.dts
arch/arm/Kconfig
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/testcases/tests-phandle.dtsi
arch/arm/crypto/.gitignore [new file with mode: 0644]
arch/arm/crypto/Makefile
arch/arm/crypto/aes_glue.c
arch/arm/crypto/aes_glue.h [new file with mode: 0644]
arch/arm/crypto/aesbs-core.S_shipped [new file with mode: 0644]
arch/arm/crypto/aesbs-glue.c [new file with mode: 0644]
arch/arm/crypto/bsaes-armv7.pl [new file with mode: 0644]
arch/arm/crypto/sha1-armv7-neon.S [new file with mode: 0644]
arch/arm/crypto/sha1_glue.c
arch/arm/crypto/sha1_neon_glue.c [new file with mode: 0644]
arch/arm/crypto/sha512-armv7-neon.S [new file with mode: 0644]
arch/arm/crypto/sha512_neon_glue.c [new file with mode: 0644]
arch/arm/include/asm/Kbuild
arch/arm/include/asm/atomic.h
arch/arm/include/asm/crypto/sha1.h [new file with mode: 0644]
arch/arm/include/asm/memory.h
arch/arm/include/asm/module.h
arch/arm/include/asm/neon.h [new file with mode: 0644]
arch/arm/include/asm/page.h
arch/arm/include/asm/pgtable-3level-hwdef.h
arch/arm/include/asm/pgtable-3level.h
arch/arm/include/asm/pgtable.h
arch/arm/kernel/head.S
arch/arm/kernel/module.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-shmobile/setup-sh73a0.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-v7-3level.S
arch/arm/vfp/vfpmodule.c
arch/powerpc/crypto/sha1.c
arch/powerpc/kernel/prom.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/platforms/pseries/iommu.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/sysdev/msi_bitmap.c
arch/powerpc/xmon/xmon.c
arch/s390/crypto/aes_s390.c
arch/s390/crypto/des_s390.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/sha1_s390.c
arch/s390/crypto/sha256_s390.c
arch/s390/crypto/sha512_s390.c
arch/sparc/crypto/aes_glue.c
arch/sparc/crypto/camellia_glue.c
arch/sparc/crypto/crc32c_glue.c
arch/sparc/crypto/des_glue.c
arch/sparc/crypto/md5_glue.c
arch/sparc/crypto/sha1_glue.c
arch/sparc/crypto/sha256_glue.c
arch/sparc/crypto/sha512_glue.c
arch/um/Kconfig.common
arch/x86/crypto/aes_glue.c
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/blowfish_avx2_glue.c
arch/x86/crypto/blowfish_glue.c
arch/x86/crypto/camellia_aesni_avx2_glue.c
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/crypto/camellia_glue.c
arch/x86/crypto/cast5_avx_glue.c
arch/x86/crypto/cast6_avx_glue.c
arch/x86/crypto/crc32-pclmul_glue.c
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/crypto/fpu.c
arch/x86/crypto/ghash-clmulni-intel_glue.c
arch/x86/crypto/salsa20_glue.c
arch/x86/crypto/serpent_avx2_glue.c
arch/x86/crypto/serpent_avx_glue.c
arch/x86/crypto/serpent_sse2_glue.c
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/crypto/sha256_ssse3_glue.c
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/crypto/twofish_avx2_glue.c
arch/x86/crypto/twofish_avx_glue.c
arch/x86/crypto/twofish_glue.c
arch/x86/crypto/twofish_glue_3way.c
arch/x86/include/asm/desc.h
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/tls.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/um/sys_call_table_32.c
arch/x86/um/sys_call_table_64.c
crypto/842.c
crypto/Kconfig
crypto/aes_generic.c
crypto/algapi.c
crypto/ansi_cprng.c
crypto/anubis.c
crypto/api.c
crypto/arc4.c
crypto/authenc.c
crypto/authencesn.c
crypto/blowfish_generic.c
crypto/camellia_generic.c
crypto/cast5_generic.c
crypto/cast6_generic.c
crypto/cbc.c
crypto/ccm.c
crypto/chainiv.c
crypto/cmac.c
crypto/crc32.c
crypto/cryptd.c
crypto/crypto_null.c
crypto/ctr.c
crypto/cts.c
crypto/deflate.c
crypto/des_generic.c
crypto/ecb.c
crypto/eseqiv.c
crypto/fcrypt.c
crypto/gcm.c
crypto/ghash-generic.c
crypto/hmac.c
crypto/khazad.c
crypto/krng.c
crypto/lrw.c
crypto/lzo.c
crypto/md4.c
crypto/md5.c
crypto/michael_mic.c
crypto/pcbc.c
crypto/pcrypt.c
crypto/rmd128.c
crypto/rmd160.c
crypto/rmd256.c
crypto/rmd320.c
crypto/salsa20_generic.c
crypto/seed.c
crypto/seqiv.c
crypto/serpent_generic.c
crypto/sha1_generic.c
crypto/sha256_generic.c
crypto/sha512_generic.c
crypto/tea.c
crypto/tgr192.c
crypto/twofish_generic.c
crypto/vmac.c
crypto/wp512.c
crypto/xcbc.c
crypto/xts.c
crypto/zlib.c
drivers/Kconfig
drivers/Makefile
drivers/android/Kconfig [new file with mode: 0644]
drivers/android/Makefile [new file with mode: 0644]
drivers/android/binder.c [new file with mode: 0644]
drivers/android/binder_trace.h [new file with mode: 0644]
drivers/ata/libata-sff.c
drivers/ata/sata_dwc_460ex.c
drivers/base/platform.c
drivers/block/drbd/drbd_req.c
drivers/bus/mvebu-mbus.c
drivers/clocksource/exynos_mct.c
drivers/crypto/nx/nx-842.c
drivers/crypto/padlock-aes.c
drivers/crypto/padlock-sha.c
drivers/crypto/ux500/cryp/cryp_core.c
drivers/crypto/ux500/hash/hash_core.c
drivers/firmware/efi/efi-pstore.c
drivers/gpio/gpiolib-of.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/i2c/i2c-core.c
drivers/i2c/i2c-mux.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/i8042.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-thin.c
drivers/md/raid5.c
drivers/media/i2c/smiapp-pll.c
drivers/media/i2c/smiapp/smiapp-core.c
drivers/media/usb/dvb-usb/af9005.c
drivers/media/usb/uvc/uvc_driver.c
drivers/mmc/host/sdhci.c
drivers/net/can/dev.c
drivers/net/can/usb/kvaser_usb.c
drivers/net/ethernet/atheros/alx/main.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/ti/cpsw.c
drivers/of/Kconfig
drivers/of/Makefile
drivers/of/address.c
drivers/of/base.c
drivers/of/device.c
drivers/of/dynamic.c [new file with mode: 0644]
drivers/of/fdt.c
drivers/of/irq.c
drivers/of/of_i2c.c [deleted file]
drivers/of/of_private.h
drivers/of/overlay.c [new file with mode: 0644]
drivers/of/pdt.c
drivers/of/platform.c
drivers/of/resolver.c [new file with mode: 0644]
drivers/of/selftest.c
drivers/of/testcase-data/testcases.dtsi [new file with mode: 0644]
drivers/pinctrl/core.c
drivers/platform/x86/hp_accel.c
drivers/regulator/core.c
drivers/s390/char/con3215.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/storvsc_drv.c
drivers/spi/spi-dw-mid.c
drivers/spi/spi-pxa2xx.c
drivers/staging/android/Kconfig
drivers/staging/android/Makefile
drivers/staging/android/alarm-dev.c
drivers/staging/android/binder.c [deleted file]
drivers/staging/android/binder.h [deleted file]
drivers/staging/android/binder_trace.h [deleted file]
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_heap.c
drivers/staging/android/ion/ion_priv.h
drivers/staging/android/ion/ion_system_heap.c
drivers/staging/android/logger.c
drivers/staging/android/sw_sync.c
drivers/staging/android/sync.c
drivers/staging/android/timed_gpio.c
drivers/staging/android/uapi/ashmem.h
drivers/staging/android/uapi/binder.h [deleted file]
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/android.c
drivers/usb/gadget/f_accessory.c
drivers/usb/host/pci-quirks.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/keyspan.c
drivers/vfio/pci/vfio_pci.c
drivers/vhost/scsi.c
drivers/video/logo/logo.c
drivers/xen/swiotlb-xen.c
fs/ext4/ext4.h
fs/ext4/extents.c
fs/ext4/mballoc.c
fs/lockd/svc.c
fs/nfs/direct.c
fs/nfs/nfs4client.c
fs/notify/inode_mark.c
fs/proc/Makefile
fs/proc/internal.h
fs/proc/proc_devtree.c [deleted file]
fs/proc/root.c
fs/pstore/Kconfig
fs/pstore/Makefile
fs/pstore/inode.c
fs/pstore/internal.h
fs/pstore/platform.c
fs/pstore/pmsg.c [new file with mode: 0644]
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/quota/dquot.c
fs/stat.c
include/linux/crypto.h
include/linux/fs.h
include/linux/i2c.h
include/linux/of.h
include/linux/of_i2c.h
include/linux/of_platform.h
include/linux/pstore.h
include/linux/pstore_ram.h
include/linux/quotaops.h
include/linux/time.h
include/linux/wlan_plat.h
include/net/addrconf.h
include/uapi/linux/Kbuild
include/uapi/linux/android/Kbuild [new file with mode: 0644]
include/uapi/linux/android/binder.h [new file with mode: 0644]
kernel/irq/pm.c
kernel/sys.c
kernel/time.c
kernel/time/ntp.c
kernel/workqueue.c
lib/decompress_bunzip2.c
net/ipv4/tcp_output.c
net/ipv6/addrconf.c
net/ipv6/anycast.c
net/netfilter/ipset/ip_set_core.c
net/netfilter/ipvs/ip_vs_ftp.c
net/netfilter/xt_qtaguid.c
net/netlink/af_netlink.c
net/wireless/chan.c
net/wireless/nl80211.c
scripts/Makefile.modinst
scripts/recordmcount.pl
security/keys/gc.c
security/selinux/hooks.c
security/selinux/include/netif.h
security/selinux/include/netnode.h
security/selinux/include/netport.h
security/selinux/include/objsec.h
security/selinux/include/security.h
security/selinux/netif.c
security/selinux/netnode.c
security/selinux/netport.c
security/selinux/ss/constraint.h
security/selinux/ss/policydb.c
security/selinux/ss/policydb.h
sound/core/seq/seq_dummy.c
sound/soc/codecs/wm8960.c
sound/usb/mixer.c

diff --git a/Documentation/ABI/testing/sysfs-firmware-ofw b/Documentation/ABI/testing/sysfs-firmware-ofw
new file mode 100644 (file)
index 0000000..f562b18
--- /dev/null
@@ -0,0 +1,28 @@
+What:          /sys/firmware/devicetree/*
+Date:          November 2013
+Contact:       Grant Likely <grant.likely@linaro.org>
+Description:
+               When using OpenFirmware or a Flattened Device Tree to enumerate
+               hardware, the device tree structure will be exposed in this
+               directory.
+
+               It is possible for multiple device-tree directories to exist.
+               Some device drivers use a separate detached device tree which
+               have no attachment to the system tree and will appear in a
+               different subdirectory under /sys/firmware/devicetree.
+
+               Userspace must not use the /sys/firmware/devicetree/base
+               path directly, but instead should follow /proc/device-tree
+               symlink. It is possible that the absolute path will change
+               in the future, but the symlink is the stable ABI.
+
+               The /proc/device-tree symlink replaces the devicetree /proc
+               filesystem support, and has largely the same semantics and
+               should be compatible with existing userspace.
+
+               The contents of /sys/firmware/devicetree/ is a
+               hierarchy of directories, one per device tree node. The
+               directory name is the resolved path component name (node
+               name plus address). Properties are represented as files
+               in the directory. The contents of each file is the exact
+               binary data from the device tree.
diff --git a/Documentation/devicetree/changesets.txt b/Documentation/devicetree/changesets.txt
new file mode 100644 (file)
index 0000000..935ba5a
--- /dev/null
@@ -0,0 +1,40 @@
+A DT changeset is a method which allows one to apply changes
+in the live tree in such a way that either the full set of changes
+will be applied, or none of them will be. If an error occurs partway
+through applying the changeset, then the tree will be rolled back to the
+previous state. A changeset can also be removed after it has been
+applied.
+
+When a changeset is applied, all of the changes get applied to the tree
+at once before emitting OF_RECONFIG notifiers. This is so that the
+receiver sees a complete and consistent state of the tree when it
+receives the notifier.
+
+The sequence of a changeset is as follows.
+
+1. of_changeset_init() - initializes a changeset
+
+2. A number of DT tree change calls, of_changeset_attach_node(),
+of_changeset_detach_node(), of_changeset_add_property(),
+of_changeset_remove_property, of_changeset_update_property() to prepare
+a set of changes. No changes to the active tree are made at this point.
+All the change operations are recorded in the of_changeset 'entries'
+list.
+
+3. mutex_lock(of_mutex) - starts a changeset; The global of_mutex
+ensures there can only be one editor at a time.
+
+4. of_changeset_apply() - Apply the changes to the tree. Either the
+entire changeset will get applied, or if there is an error the tree will
+be restored to the previous state
+
+5. mutex_unlock(of_mutex) - All operations complete, release the mutex
+
+If a successfully applied changeset needs to be removed, it can be done
+with the following sequence.
+
+1. mutex_lock(of_mutex)
+
+2. of_changeset_revert()
+
+3. mutex_unlock(of_mutex)
diff --git a/Documentation/devicetree/dynamic-resolution-notes.txt b/Documentation/devicetree/dynamic-resolution-notes.txt
new file mode 100644 (file)
index 0000000..083d232
--- /dev/null
@@ -0,0 +1,25 @@
+Device Tree Dynamic Resolver Notes
+----------------------------------
+
+This document describes the implementation of the in-kernel
+Device Tree resolver, residing in drivers/of/resolver.c and is a
+companion document to Documentation/devicetree/dt-object-internal.txt[1]
+
+How the resolver works
+----------------------
+
+The resolver is given as an input an arbitrary tree compiled with the
+proper dtc option and having a /plugin/ tag. This generates the
+appropriate __fixups__ & __local_fixups__ nodes as described in [1].
+
+In sequence the resolver works by the following steps:
+
+1. Get the maximum device tree phandle value from the live tree + 1.
+2. Adjust all the local phandles of the tree to resolve by that amount.
+3. Using the __local__fixups__ node information adjust all local references
+   by the same amount.
+4. For each property in the __fixups__ node locate the node it references
+   in the live tree. This is the label used to tag the node.
+5. Retrieve the phandle of the target of the fixup.
+6. For each fixup in the property locate the node:property:offset location
+   and replace it with the phandle value.
diff --git a/Documentation/devicetree/overlay-notes.txt b/Documentation/devicetree/overlay-notes.txt
new file mode 100644 (file)
index 0000000..30ae758
--- /dev/null
@@ -0,0 +1,133 @@
+Device Tree Overlay Notes
+-------------------------
+
+This document describes the implementation of the in-kernel
+device tree overlay functionality residing in drivers/of/overlay.c and is a
+companion document to Documentation/devicetree/dt-object-internal.txt[1] &
+Documentation/devicetree/dynamic-resolution-notes.txt[2]
+
+How overlays work
+-----------------
+
+A Device Tree's overlay purpose is to modify the kernel's live tree, and
+have the modification affecting the state of the the kernel in a way that
+is reflecting the changes.
+Since the kernel mainly deals with devices, any new device node that result
+in an active device should have it created while if the device node is either
+disabled or removed all together, the affected device should be deregistered.
+
+Lets take an example where we have a foo board with the following base tree
+which is taken from [1].
+
+---- foo.dts -----------------------------------------------------------------
+       /* FOO platform */
+       / {
+               compatible = "corp,foo";
+
+               /* shared resources */
+               res: res {
+               };
+
+               /* On chip peripherals */
+               ocp: ocp {
+                       /* peripherals that are always instantiated */
+                       peripheral1 { ... };
+               }
+       };
+---- foo.dts -----------------------------------------------------------------
+
+The overlay bar.dts, when loaded (and resolved as described in [2]) should
+
+---- bar.dts -----------------------------------------------------------------
+/plugin/;      /* allow undefined label references and record them */
+/ {
+       ....    /* various properties for loader use; i.e. part id etc. */
+       fragment@0 {
+               target = <&ocp>;
+               __overlay__ {
+                       /* bar peripheral */
+                       bar {
+                               compatible = "corp,bar";
+                               ... /* various properties and child nodes */
+                       }
+               };
+       };
+};
+---- bar.dts -----------------------------------------------------------------
+
+result in foo+bar.dts
+
+---- foo+bar.dts -------------------------------------------------------------
+       /* FOO platform + bar peripheral */
+       / {
+               compatible = "corp,foo";
+
+               /* shared resources */
+               res: res {
+               };
+
+               /* On chip peripherals */
+               ocp: ocp {
+                       /* peripherals that are always instantiated */
+                       peripheral1 { ... };
+
+                       /* bar peripheral */
+                       bar {
+                               compatible = "corp,bar";
+                               ... /* various properties and child nodes */
+                       }
+               }
+       };
+---- foo+bar.dts -------------------------------------------------------------
+
+As a result of the the overlay, a new device node (bar) has been created
+so a bar platform device will be registered and if a matching device driver
+is loaded the device will be created as expected.
+
+Overlay in-kernel API
+--------------------------------
+
+The API is quite easy to use.
+
+1. Call of_overlay_create() to create and apply an overlay. The return value
+is a cookie identifying this overlay.
+
+2. Call of_overlay_destroy() to remove and cleanup the overlay previously
+created via the call to of_overlay_create(). Removal of an overlay that
+is stacked by another will not be permitted.
+
+Finally, if you need to remove all overlays in one-go, just call
+of_overlay_destroy_all() which will remove every single one in the correct
+order.
+
+Overlay DTS Format
+------------------
+
+The DTS of an overlay should have the following format:
+
+{
+       /* ignored properties by the overlay */
+
+       fragment@0 {    /* first child node */
+
+               target=<phandle>;       /* phandle target of the overlay */
+       or
+               target-path="/path";    /* target path of the overlay */
+
+               __overlay__ {
+                       property-a;     /* add property-a to the target */
+                       node-a {        /* add to an existing, or create a node-a */
+                               ...
+                       };
+               };
+       }
+       fragment@1 {    /* second child node */
+               ...
+       };
+       /* more fragments follow */
+}
+
+Using the non-phandle based target method allows one to use a base DT which does
+not contain a __symbols__ node, i.e. it was not compiled with the -@ option.
+The __symbols__ node is only required for the target=<phandle> method, since it
+contains the information required to map from a phandle to a tree location.
index 15b24a2be6b17faf1a70b98c845dd4c60f3b7c36..d592974d77d743f63613ba7bd833f61814c7df72 100644 (file)
@@ -1061,6 +1061,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        i8042.notimeout [HW] Ignore timeout condition signalled by controller
        i8042.reset     [HW] Reset the controller during init and cleanup
        i8042.unlock    [HW] Unlock (ignore) the keylock
+       i8042.kbdreset  [HW] Reset device connected to KBD port
 
        i810=           [HW,DRM]
 
index 78c59160e678d49277a0da016b0efa6f63f54153..40ba8a1511472bf78198228b02b80e75e096f496 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 65
+SUBLEVEL = 68
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
index 398064cef746015563166c83e8a1d4db335f9eb9..4c169d825415f3407168f53c9ee7055d8c18b2c4 100644 (file)
@@ -20,7 +20,7 @@
                /* this is for console on PGU */
                /* bootargs = "console=tty0 consoleblank=0"; */
                /* this is for console on serial */
-               bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
+               bootargs = "earlycon=uart8250,mmio32,0xf0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
        };
 
        aliases {
@@ -46,9 +46,9 @@
                        #interrupt-cells = <1>;
                };
 
-               uart0: serial@c0000000 {
+               uart0: serial@f0000000 {
                        compatible = "ns8250";
-                       reg = <0xc0000000 0x2000>;
+                       reg = <0xf0000000 0x2000>;
                        interrupts = <11>;
                        clock-frequency = <3686400>;
                        baud = <115200>;
                        no-loopback-test = <1>;
                };
 
-               pgu0: pgu@c9000000 {
+               pgu0: pgu@f9000000 {
                        compatible = "snps,arcpgufb";
-                       reg = <0xc9000000 0x400>;
+                       reg = <0xf9000000 0x400>;
                };
 
-               ps2: ps2@c9001000 {
+               ps2: ps2@f9001000 {
                        compatible = "snps,arc_ps2";
-                       reg = <0xc9000400 0x14>;
+                       reg = <0xf9000400 0x14>;
                        interrupts = <13>;
                        interrupt-names = "arc_ps2_irq";
                };
 
-               eth0: ethernet@c0003000 {
+               eth0: ethernet@f0003000 {
                        compatible = "snps,oscilan";
-                       reg = <0xc0003000 0x44>;
+                       reg = <0xf0003000 0x44>;
                        interrupts = <7>, <8>;
                        interrupt-names = "rx", "tx";
                };
index eaa50a640a95ccdfc271b81e1bc8613cf117b450..67a7acc911829e30b0f21365af306f17a280e7bd 100644 (file)
@@ -2386,6 +2386,13 @@ config NEON
          Say Y to include support code for NEON, the ARMv7 Advanced SIMD
          Extension.
 
+config KERNEL_MODE_NEON
+       bool "Support for NEON in kernel mode"
+       default n
+       depends on NEON
+       help
+         Say Y to include support for NEON in kernel mode.
+
 endmenu
 
 menu "Userspace binary formats"
index 701153992c695bb5455c8cdfc13d1a796f3ceebd..97d1a550eb98f831bc5a75582e049c5b83ee669e 100644 (file)
                                #size-cells = <0>;
                                compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
                                reg = <0x43fa4000 0x4000>;
-                               clocks = <&clks 62>, <&clks 62>;
+                               clocks = <&clks 78>, <&clks 78>;
                                clock-names = "ipg", "per";
                                interrupts = <14>;
                                status = "disabled";
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa0000 0x4000>;
-                               clocks = <&clks 106>, <&clks 36>;
+                               clocks = <&clks 106>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <36>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fa8000 0x4000>;
-                               clocks = <&clks 107>, <&clks 36>;
+                               clocks = <&clks 107>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <41>;
                        };
                        pwm4: pwm@53fc8000 {
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                reg = <0x53fc8000 0x4000>;
-                               clocks = <&clks 108>, <&clks 36>;
+                               clocks = <&clks 108>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <42>;
                        };
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
                                #pwm-cells = <2>;
                                reg = <0x53fe0000 0x4000>;
-                               clocks = <&clks 105>, <&clks 36>;
+                               clocks = <&clks 105>, <&clks 52>;
                                clock-names = "ipg", "per";
                                interrupts = <26>;
                        };
index 0007d3cd7dc25c5d674309079a24bd6e725b7195..788a4c24b8f5b60f0467b97a8cf98126e5d9b8dd 100644 (file)
@@ -1,6 +1,9 @@
 
 / {
        testcase-data {
+               security-password = "password";
+               duplicate-name = "duplicate";
+               duplicate-name { };
                phandle-tests {
                        provider0: provider0 {
                                #phandle-cells = <0>;
diff --git a/arch/arm/crypto/.gitignore b/arch/arm/crypto/.gitignore
new file mode 100644 (file)
index 0000000..6231d36
--- /dev/null
@@ -0,0 +1 @@
+aesbs-core.S
index a2c83851bc90a29f5f1d06415cb4a0db4dd726e1..b48fa341648d1766a49bf4553e3149836d264c66 100644 (file)
@@ -3,7 +3,21 @@
 #
 
 obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
+obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
 obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
+obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
+obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
 
-aes-arm-y  := aes-armv4.o aes_glue.o
-sha1-arm-y := sha1-armv4-large.o sha1_glue.o
+aes-arm-y      := aes-armv4.o aes_glue.o
+aes-arm-bs-y   := aesbs-core.o aesbs-glue.o
+sha1-arm-y     := sha1-armv4-large.o sha1_glue.o
+sha1-arm-neon-y        := sha1-armv7-neon.o sha1_neon_glue.o
+sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
+
+quiet_cmd_perl = PERL    $@
+      cmd_perl = $(PERL) $(<) > $(@)
+
+$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
+       $(call cmd,perl)
+
+.PRECIOUS: $(obj)/aesbs-core.S
index 59f7877ead6ac9ee3f8a31b43c6e0458de26cd8f..0409b8f897823f815af5581f0c9a73e29f80eaf8 100644 (file)
@@ -6,22 +6,12 @@
 #include <linux/crypto.h>
 #include <crypto/aes.h>
 
-#define AES_MAXNR 14
+#include "aes_glue.h"
 
-typedef struct {
-       unsigned int rd_key[4 *(AES_MAXNR + 1)];
-       int rounds;
-} AES_KEY;
-
-struct AES_CTX {
-       AES_KEY enc_key;
-       AES_KEY dec_key;
-};
-
-asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
-asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
-asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
+EXPORT_SYMBOL(AES_encrypt);
+EXPORT_SYMBOL(AES_decrypt);
+EXPORT_SYMBOL(private_AES_set_encrypt_key);
+EXPORT_SYMBOL(private_AES_set_decrypt_key);
 
 static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
 {
@@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
                .cipher = {
                        .cia_min_keysize        = AES_MIN_KEY_SIZE,
                        .cia_max_keysize        = AES_MAX_KEY_SIZE,
-                       .cia_setkey                     = aes_set_key,
+                       .cia_setkey             = aes_set_key,
                        .cia_encrypt            = aes_encrypt,
                        .cia_decrypt            = aes_decrypt
                }
@@ -103,6 +93,6 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm (ASM)");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
 MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/aes_glue.h b/arch/arm/crypto/aes_glue.h
new file mode 100644 (file)
index 0000000..cca3e51
--- /dev/null
@@ -0,0 +1,19 @@
+
+#define AES_MAXNR 14
+
+struct AES_KEY {
+       unsigned int rd_key[4 * (AES_MAXNR + 1)];
+       int rounds;
+};
+
+struct AES_CTX {
+       struct AES_KEY enc_key;
+       struct AES_KEY dec_key;
+};
+
+asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
+asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
+asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
+                                          const int bits, struct AES_KEY *key);
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
new file mode 100644 (file)
index 0000000..71e5fc7
--- /dev/null
@@ -0,0 +1,2544 @@
+
+@ ====================================================================
+@ Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+@ project. The module is, however, dual licensed under OpenSSL and
+@ CRYPTOGAMS licenses depending on where you obtain it. For further
+@ details see http://www.openssl.org/~appro/cryptogams/.
+@
+@ Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+@ <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+@ granted.
+@ ====================================================================
+
+@ Bit-sliced AES for ARM NEON
+@
+@ February 2012.
+@
+@ This implementation is direct adaptation of bsaes-x86_64 module for
+@ ARM NEON. Except that this module is endian-neutral [in sense that
+@ it can be compiled for either endianness] by courtesy of vld1.8's
+@ neutrality. Initial version doesn't implement interface to OpenSSL,
+@ only low-level primitives and unsupported entry points, just enough
+@ to collect performance results, which for Cortex-A8 core are:
+@
+@ encrypt      19.5 cycles per byte processed with 128-bit key
+@ decrypt      22.1 cycles per byte processed with 128-bit key
+@ key conv.    440  cycles per 128-bit key/0.18 of 8x block
+@
+@ Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+@ which is [much] worse than anticipated (for further details see
+@ http://www.openssl.org/~appro/Snapdragon-S4.html).
+@
+@ Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+@ manages in 20.0 cycles].
+@
+@ When comparing to x86_64 results keep in mind that NEON unit is
+@ [mostly] single-issue and thus can't [fully] benefit from
+@ instruction-level parallelism. And when comparing to aes-armv4
+@ results keep in mind key schedule conversion overhead (see
+@ bsaes-x86_64.pl for further details)...
+@
+@                                              <appro@openssl.org>
+
+@ April-August 2013
+@
+@ Add CBC, CTR and XTS subroutines, adapt for kernel use.
+@
+@                                      <ard.biesheuvel@linaro.org>
+
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__  7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     r6,_bsaes_decrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       add     r6,r6,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  r6!, {q8}               @ .LM0ISR
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Ldec_sbox:
+        veor   q1, q1, q4
+       veor    q3, q3, q4
+
+       veor    q4, q4, q7
+        veor   q1, q1, q6
+       veor    q2, q2, q7
+       veor    q6, q6, q4
+
+       veor    q0, q0, q1
+       veor    q2, q2, q5
+        veor   q7, q7, q6
+       veor    q3, q3, q0
+       veor    q5, q5, q0
+       veor    q1, q1, q3
+       veor    q11, q3, q0
+       veor    q10, q7, q4
+       veor    q9, q1, q6
+       veor    q13, q4, q0
+        vmov   q8, q10
+       veor    q12, q5, q2
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q6, q2
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q3, q7
+       veor    q12, q1, q5
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q4, q6
+       veor    q9, q9, q14
+       vand    q13, q0, q2
+       vand    q14, q7, q1
+       vorr    q15, q3, q5
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q5, q2
+       veor    q8, q1, q6
+       veor    q10, q15, q14
+       vand    q10, q10, q5
+       veor    q5, q5, q1
+       vand    q11, q1, q15
+       vand    q5, q5, q14
+       veor    q1, q11, q10
+       veor    q5, q5, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q2
+       veor    q12, q12, q8
+        veor   q2, q2, q6
+       vand    q8, q8, q15
+        vand   q6, q6, q13
+       vand    q12, q12, q14
+        vand   q2, q2, q9
+       veor    q8, q8, q12
+        veor   q2, q2, q6
+       veor    q12, q12, q11
+        veor   q6, q6, q10
+       veor    q5, q5, q12
+       veor    q2, q2, q12
+       veor    q1, q1, q8
+       veor    q6, q6, q8
+
+       veor    q12, q3, q0
+       veor    q8, q7, q4
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q4
+       vand    q8, q8, q15
+        vand   q4, q4, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q4
+       veor    q12, q12, q11
+        veor   q4, q4, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q3
+       veor    q3, q3, q7
+       vand    q11, q7, q15
+       vand    q3, q3, q14
+       veor    q7, q11, q10
+       veor    q3, q3, q11
+       veor    q3, q3, q12
+       veor    q0, q0, q12
+       veor    q7, q7, q8
+       veor    q4, q4, q8
+       veor    q1, q1, q7
+       veor    q6, q6, q5
+
+       veor    q4, q4, q1
+       veor    q2, q2, q7
+       veor    q5, q5, q7
+       veor    q4, q4, q2
+        veor   q7, q7, q0
+       veor    q4, q4, q5
+        veor   q3, q3, q6
+        veor   q6, q6, q1
+       veor    q3, q3, q4
+
+       veor    q4, q4, q0
+       veor    q7, q7, q3
+       subs    r5,r5,#1
+       bcc     .Ldec_done
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  q8, q0, q0, #8
+       vext.8  q14, q3, q3, #8
+       vext.8  q15, q5, q5, #8
+       veor    q8, q8, q0
+       vext.8  q9, q1, q1, #8
+       veor    q14, q14, q3
+       vext.8  q10, q6, q6, #8
+       veor    q15, q15, q5
+       vext.8  q11, q4, q4, #8
+       veor    q9, q9, q1
+       vext.8  q12, q2, q2, #8
+       veor    q10, q10, q6
+       vext.8  q13, q7, q7, #8
+       veor    q11, q11, q4
+       veor    q12, q12, q2
+       veor    q13, q13, q7
+
+        veor   q0, q0, q14
+        veor   q1, q1, q14
+        veor   q6, q6, q8
+        veor   q2, q2, q10
+        veor   q4, q4, q9
+        veor   q1, q1, q15
+        veor   q6, q6, q15
+        veor   q2, q2, q14
+        veor   q7, q7, q11
+        veor   q4, q4, q14
+        veor   q3, q3, q12
+        veor   q2, q2, q15
+        veor   q7, q7, q15
+        veor   q5, q5, q13
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q6, q6, #12
+        veor   q1, q1, q9
+       vext.8  q11, q4, q4, #12
+        veor   q6, q6, q10
+       vext.8  q12, q2, q2, #12
+        veor   q4, q4, q11
+       vext.8  q13, q7, q7, #12
+        veor   q2, q2, q12
+       vext.8  q14, q3, q3, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q3, q3, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q2
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q2, q2, #8
+       veor    q12, q12, q4
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q3
+        vext.8 q2, q4, q4, #8
+       veor    q11, q11, q6
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q4, q3, q3, #8
+       veor    q11, q11, q5
+        vext.8 q3, q6, q6, #8
+       veor    q5, q9, q13
+       veor    q11, q11, q2
+       veor    q7, q7, q15
+       veor    q6, q4, q14
+       veor    q4, q8, q12
+       veor    q2, q3, q10
+       vmov    q3, q11
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Ldec_loop
+       vldmia  r6, {q12}               @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q3, #1
+        vshr.u64       q11, q2, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q4
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q2, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q4
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q4, q4, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q4, #4
+        vshr.u64       q11, q6, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q4, q4, q10
+        veor           q6, q6, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q6, q6, q8
+       veor    q4, q4, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q8
+       veor    q3, q3, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     r6,_bsaes_encrypt8
+       vldmia  r4!, {q9}               @ round 0 key
+       sub     r6,r6,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  r6!, {q8}               @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    q10, q0, q9     @ xor with round0 key
+       veor    q11, q1, q9
+        vtbl.8 d0, {q10}, d16
+        vtbl.8 d1, {q10}, d17
+       veor    q12, q2, q9
+        vtbl.8 d2, {q11}, d16
+        vtbl.8 d3, {q11}, d17
+       veor    q13, q3, q9
+        vtbl.8 d4, {q12}, d16
+        vtbl.8 d5, {q12}, d17
+       veor    q14, q4, q9
+        vtbl.8 d6, {q13}, d16
+        vtbl.8 d7, {q13}, d17
+       veor    q15, q5, q9
+        vtbl.8 d8, {q14}, d16
+        vtbl.8 d9, {q14}, d17
+       veor    q10, q6, q9
+        vtbl.8 d10, {q15}, d16
+        vtbl.8 d11, {q15}, d17
+       veor    q11, q7, q9
+        vtbl.8 d12, {q10}, d16
+        vtbl.8 d13, {q10}, d17
+        vtbl.8 d14, {q11}, d16
+        vtbl.8 d15, {q11}, d17
+_bsaes_encrypt8_bitslice:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q6, #1
+        vshr.u64       q11, q4, #1
+       veor            q10, q10, q7
+        veor           q11, q11, q5
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #1
+        veor           q5, q5, q11
+        vshl.u64       q11, q11, #1
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q3
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q5, #2
+        vshr.u64       q11, q4, #2
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #2
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #2
+       veor            q5, q5, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q3
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q3, q3, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q3, #4
+        vshr.u64       q11, q2, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q6
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q6, q6, q11
+        vshl.u64       q11, q11, #4
+       veor            q3, q3, q10
+        veor           q2, q2, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q4
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       sub     r5,r5,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+       vldmia  r4!, {q8-q11}
+       veor    q8, q8, q0
+       veor    q9, q9, q1
+       vtbl.8  d0, {q8}, d24
+       vtbl.8  d1, {q8}, d25
+       vldmia  r4!, {q8}
+       veor    q10, q10, q2
+       vtbl.8  d2, {q9}, d24
+       vtbl.8  d3, {q9}, d25
+       vldmia  r4!, {q9}
+       veor    q11, q11, q3
+       vtbl.8  d4, {q10}, d24
+       vtbl.8  d5, {q10}, d25
+       vldmia  r4!, {q10}
+       vtbl.8  d6, {q11}, d24
+       vtbl.8  d7, {q11}, d25
+       vldmia  r4!, {q11}
+       veor    q8, q8, q4
+       veor    q9, q9, q5
+       vtbl.8  d8, {q8}, d24
+       vtbl.8  d9, {q8}, d25
+       veor    q10, q10, q6
+       vtbl.8  d10, {q9}, d24
+       vtbl.8  d11, {q9}, d25
+       veor    q11, q11, q7
+       vtbl.8  d12, {q10}, d24
+       vtbl.8  d13, {q10}, d25
+       vtbl.8  d14, {q11}, d24
+       vtbl.8  d15, {q11}, d25
+.Lenc_sbox:
+       veor    q2, q2, q1
+       veor    q5, q5, q6
+       veor    q3, q3, q0
+       veor    q6, q6, q2
+       veor    q5, q5, q0
+
+       veor    q6, q6, q3
+       veor    q3, q3, q7
+       veor    q7, q7, q5
+       veor    q3, q3, q4
+       veor    q4, q4, q5
+
+       veor    q2, q2, q7
+       veor    q3, q3, q1
+       veor    q1, q1, q5
+       veor    q11, q7, q4
+       veor    q10, q1, q2
+       veor    q9, q5, q3
+       veor    q13, q2, q4
+        vmov   q8, q10
+       veor    q12, q6, q0
+
+       vorr    q10, q10, q9
+       veor    q15, q11, q8
+       vand    q14, q11, q12
+       vorr    q11, q11, q12
+       veor    q12, q12, q9
+       vand    q8, q8, q9
+       veor    q9, q3, q0
+       vand    q15, q15, q12
+       vand    q13, q13, q9
+       veor    q9, q7, q1
+       veor    q12, q5, q6
+       veor    q11, q11, q13
+       veor    q10, q10, q13
+       vand    q13, q9, q12
+       vorr    q9, q9, q12
+       veor    q11, q11, q15
+       veor    q8, q8, q13
+       veor    q10, q10, q14
+       veor    q9, q9, q15
+       veor    q8, q8, q14
+       vand    q12, q2, q3
+       veor    q9, q9, q14
+       vand    q13, q4, q0
+       vand    q14, q1, q5
+       vorr    q15, q7, q6
+       veor    q11, q11, q12
+       veor    q9, q9, q14
+       veor    q8, q8, q15
+       veor    q10, q10, q13
+
+       @ Inv_GF16      0,      1,      2,      3, s0, s1, s2, s3
+
+       @ new smaller inversion
+
+       vand    q14, q11, q9
+       vmov    q12, q8
+
+       veor    q13, q10, q14
+       veor    q15, q8, q14
+       veor    q14, q8, q14    @ q14=q15
+
+       vbsl    q13, q9, q8
+       vbsl    q15, q11, q10
+       veor    q11, q11, q10
+
+       vbsl    q12, q13, q14
+       vbsl    q8, q14, q13
+
+       vand    q14, q12, q15
+       veor    q9, q9, q8
+
+       veor    q14, q14, q11
+       veor    q12, q6, q0
+       veor    q8, q5, q3
+       veor    q10, q15, q14
+       vand    q10, q10, q6
+       veor    q6, q6, q5
+       vand    q11, q5, q15
+       vand    q6, q6, q14
+       veor    q5, q11, q10
+       veor    q6, q6, q11
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q0
+       veor    q12, q12, q8
+        veor   q0, q0, q3
+       vand    q8, q8, q15
+        vand   q3, q3, q13
+       vand    q12, q12, q14
+        vand   q0, q0, q9
+       veor    q8, q8, q12
+        veor   q0, q0, q3
+       veor    q12, q12, q11
+        veor   q3, q3, q10
+       veor    q6, q6, q12
+       veor    q0, q0, q12
+       veor    q5, q5, q8
+       veor    q3, q3, q8
+
+       veor    q12, q7, q4
+       veor    q8, q1, q2
+       veor    q11, q15, q14
+        veor   q10, q13, q9
+       vand    q11, q11, q12
+        vand   q10, q10, q4
+       veor    q12, q12, q8
+        veor   q4, q4, q2
+       vand    q8, q8, q15
+        vand   q2, q2, q13
+       vand    q12, q12, q14
+        vand   q4, q4, q9
+       veor    q8, q8, q12
+        veor   q4, q4, q2
+       veor    q12, q12, q11
+        veor   q2, q2, q10
+       veor    q15, q15, q13
+       veor    q14, q14, q9
+       veor    q10, q15, q14
+       vand    q10, q10, q7
+       veor    q7, q7, q1
+       vand    q11, q1, q15
+       vand    q7, q7, q14
+       veor    q1, q11, q10
+       veor    q7, q7, q11
+       veor    q7, q7, q12
+       veor    q4, q4, q12
+       veor    q1, q1, q8
+       veor    q2, q2, q8
+       veor    q7, q7, q0
+       veor    q1, q1, q6
+       veor    q6, q6, q0
+       veor    q4, q4, q7
+       veor    q0, q0, q1
+
+       veor    q1, q1, q5
+       veor    q5, q5, q2
+       veor    q2, q2, q3
+       veor    q3, q3, q5
+       veor    q4, q4, q5
+
+       veor    q6, q6, q3
+       subs    r5,r5,#1
+       bcc     .Lenc_done
+       vext.8  q8, q0, q0, #12 @ x0 <<< 32
+       vext.8  q9, q1, q1, #12
+        veor   q0, q0, q8              @ x0 ^ (x0 <<< 32)
+       vext.8  q10, q4, q4, #12
+        veor   q1, q1, q9
+       vext.8  q11, q6, q6, #12
+        veor   q4, q4, q10
+       vext.8  q12, q3, q3, #12
+        veor   q6, q6, q11
+       vext.8  q13, q7, q7, #12
+        veor   q3, q3, q12
+       vext.8  q14, q2, q2, #12
+        veor   q7, q7, q13
+       vext.8  q15, q5, q5, #12
+        veor   q2, q2, q14
+
+       veor    q9, q9, q0
+        veor   q5, q5, q15
+        vext.8 q0, q0, q0, #8          @ (x0 ^ (x0 <<< 32)) <<< 64
+       veor    q10, q10, q1
+       veor    q8, q8, q5
+       veor    q9, q9, q5
+        vext.8 q1, q1, q1, #8
+       veor    q13, q13, q3
+        veor   q0, q0, q8
+       veor    q14, q14, q7
+        veor   q1, q1, q9
+        vext.8 q8, q3, q3, #8
+       veor    q12, q12, q6
+        vext.8 q9, q7, q7, #8
+       veor    q15, q15, q2
+        vext.8 q3, q6, q6, #8
+       veor    q11, q11, q4
+        vext.8 q7, q5, q5, #8
+       veor    q12, q12, q5
+        vext.8 q6, q2, q2, #8
+       veor    q11, q11, q5
+        vext.8 q2, q4, q4, #8
+       veor    q5, q9, q13
+       veor    q4, q8, q12
+       veor    q3, q3, q11
+       veor    q7, q7, q15
+       veor    q6, q6, q14
+        @ vmov q4, q8
+       veor    q2, q2, q10
+        @ vmov q5, q9
+       vldmia  r6, {q12}               @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   r6,r6,#0x10
+       bne     .Lenc_loop
+       vldmia  r6, {q12}               @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
+       vmov.i8 q8,#0x55                        @ compose .LBS0
+       vmov.i8 q9,#0x33                        @ compose .LBS1
+       vshr.u64        q10, q2, #1
+        vshr.u64       q11, q3, #1
+       veor            q10, q10, q5
+        veor           q11, q11, q7
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #1
+        veor           q7, q7, q11
+        vshl.u64       q11, q11, #1
+       veor            q2, q2, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q4, #1
+        vshr.u64       q11, q0, #1
+       veor            q10, q10, q6
+        veor           q11, q11, q1
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #1
+        veor           q1, q1, q11
+        vshl.u64       q11, q11, #1
+       veor            q4, q4, q10
+        veor           q0, q0, q11
+       vmov.i8 q8,#0x0f                        @ compose .LBS2
+       vshr.u64        q10, q7, #2
+        vshr.u64       q11, q3, #2
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #2
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #2
+       veor            q7, q7, q10
+        veor           q3, q3, q11
+       vshr.u64        q10, q1, #2
+        vshr.u64       q11, q0, #2
+       veor            q10, q10, q6
+        veor           q11, q11, q4
+       vand            q10, q10, q9
+        vand           q11, q11, q9
+       veor            q6, q6, q10
+       vshl.u64        q10, q10, #2
+        veor           q4, q4, q11
+        vshl.u64       q11, q11, #2
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vshr.u64        q10, q6, #4
+        vshr.u64       q11, q4, #4
+       veor            q10, q10, q5
+        veor           q11, q11, q2
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q5, q5, q10
+       vshl.u64        q10, q10, #4
+        veor           q2, q2, q11
+        vshl.u64       q11, q11, #4
+       veor            q6, q6, q10
+        veor           q4, q4, q11
+       vshr.u64        q10, q1, #4
+        vshr.u64       q11, q0, #4
+       veor            q10, q10, q7
+        veor           q11, q11, q3
+       vand            q10, q10, q8
+        vand           q11, q11, q8
+       veor            q7, q7, q10
+       vshl.u64        q10, q10, #4
+        veor           q3, q3, q11
+        vshl.u64       q11, q11, #4
+       veor            q1, q1, q10
+        veor           q0, q0, q11
+       vldmia  r4, {q8}                        @ last round key
+       veor    q4, q4, q8
+       veor    q6, q6, q8
+       veor    q3, q3, q8
+       veor    q7, q7, q8
+       veor    q2, q2, q8
+       veor    q5, q5, q8
+       veor    q0, q0, q8
+       veor    q1, q1, q8
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
+@ _bsaes_key_convert: convert an expanded AES key schedule into the
+@ bit-sliced form consumed by the _bsaes_* bulk routines.
+@   r4  -> expanded round keys (pointer is advanced past consumed keys)
+@   r5  =  number of rounds to convert (used as loop counter, reaches 0)
+@   r12 -> output buffer for bit-sliced keys (advanced; round 0 key first)
+@ On return q15 holds the last (unconverted) round key and q7 holds the
+@ 0x63 constant; the caller combines them to fix up round 0 / last round.
+@ Clobbers r6 (used for PC-relative access to the .LM0 constant) and q0-q15.
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     r6,_bsaes_key_convert
+       vld1.8  {q7},  [r4]!            @ load round 0 key
+       sub     r6,r6,#_bsaes_key_convert-.LM0
+       vld1.8  {q15}, [r4]!            @ load round 1 key
+
+       vmov.i8 q8,  #0x01                      @ bit masks
+       vmov.i8 q9,  #0x02
+       vmov.i8 q10, #0x04
+       vmov.i8 q11, #0x08
+       vmov.i8 q12, #0x10
+       vmov.i8 q13, #0x20
+       vldmia  r6, {q14}               @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        q7,  q7
+       vrev32.8        q15, q15
+#endif
+       sub     r5,r5,#1
+       vstmia  r12!, {q7}              @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       @ permute the incoming round key (q15) through the .LM0 table,
+       @ then split it into eight bit planes with vtst against the masks
+       vtbl.8  d14,{q15},d28
+       vtbl.8  d15,{q15},d29
+       vmov.i8 q6,  #0x40
+       vmov.i8 q15, #0x80
+
+       vtst.8  q0, q7, q8
+       vtst.8  q1, q7, q9
+       vtst.8  q2, q7, q10
+       vtst.8  q3, q7, q11
+       vtst.8  q4, q7, q12
+       vtst.8  q5, q7, q13
+       vtst.8  q6, q7, q6
+       vtst.8  q7, q7, q15
+       vld1.8  {q15}, [r4]!            @ load next round key
+       @ invert selected bit planes ("pnot") to fold the S-box affine
+       @ constant into the key material
+       vmvn    q0, q0          @ "pnot"
+       vmvn    q1, q1
+       vmvn    q5, q5
+       vmvn    q6, q6
+#ifdef __ARMEL__
+       vrev32.8        q15, q15
+#endif
+       subs    r5,r5,#1
+       vstmia  r12!,{q0-q7}            @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 q7,#0x63                        @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+@ bsaes_cbc_encrypt: bit-sliced AES-CBC, decrypt direction only (see the
+@ note below: the caller guarantees enc == 0).
+@   r0   -> input,  r1 -> output,  r2 = length in bytes (>= 16-aligned use),
+@   r3   -> AES key (rounds at [r3,#240]),  [sp] -> 16-byte IV (updated on exit)
+@ Processes 8 blocks per iteration through _bsaes_decrypt8; the remaining
+@ 1..7 blocks fall through to the dedicated .Lcbc_dec_<n> tails, and a
+@ single trailing block is handled with plain AES_decrypt.
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     r2, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ IV is 1st arg on the stack
+       mov     r2, r2, lsr#4           @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp now points at the key schedule
+       bl      _bsaes_key_convert
+       vldmia  sp, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  sp, {q7}
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r3, #248
+       vldmia  r4, {q6}
+       vstmia  r12, {q15}                      @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {q15}, [r8]             @ load IV
+       b       .Lcbc_dec_loop
+
+@ main loop: decrypt 8 blocks at a time, XORing each plaintext with the
+@ previous ciphertext (or the carried-over IV for the first block)
+.align 4
+.Lcbc_dec_loop:
+       subs    r2, r2, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {q0-q1}, [r0]!  @ load input
+       vld1.8  {q2-q3}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       vld1.8  {q4-q5}, [r0]!
+       mov     r5, r10
+       vld1.8  {q6-q7}, [r0]
+       sub     r0, r0, #0x60
+       vstmia  r9, {q15}                       @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q14-q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       veor    q5, q5, q14
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       vst1.8  {q5}, [r1]!
+
+       b       .Lcbc_dec_loop
+
+@ tail: 1..7 remaining blocks; dispatch to the matching fixed-size path
+.Lcbc_dec_loop_finish:
+       adds    r2, r2, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {q0}, [r0]!             @ load input
+       cmp     r2, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {q1}, [r0]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, sp                  @ pass the key
+#else
+       add     r4, r3, #248
+#endif
+       mov     r5, r10
+       vstmia  r9, {q15}                       @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {q2}, [r0]!
+       cmp     r2, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {q3}, [r0]!
+       beq     .Lcbc_dec_four
+       vld1.8  {q4}, [r0]!
+       cmp     r2, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {q5}, [r0]!
+       beq     .Lcbc_dec_six
+       vld1.8  {q6}, [r0]!
+       sub     r0, r0, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12-q13}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q3, q3, q13
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       vst1.8  {q3}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     r0, r0, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  r9,{q14}                        @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q12}, [r0]!
+       veor    q4, q4, q10
+       veor    q2, q2, q11
+       vld1.8  {q15}, [r0]!
+       veor    q7, q7, q12
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       vst1.8  {q7}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     r0, r0, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10-q11}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       veor    q2, q2, q11
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       vst1.8  {q2}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     r0, r0, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q10}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vld1.8  {q15}, [r0]!
+       veor    q4, q4, q10
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       vst1.8  {q4}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     r0, r0, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8-q9}, [r0]!  @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!
+       veor    q1, q1, q8
+       veor    q6, q6, q9
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       vst1.8  {q6}, [r1]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     r0, r0, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  r9, {q14}                       @ reload IV
+       vld1.8  {q8}, [r0]!             @ reload input
+       veor    q0, q0, q14     @ ^= IV
+       vld1.8  {q15}, [r0]!            @ reload input
+       veor    q1, q1, q8
+       vst1.8  {q0-q1}, [r1]!  @ write output
+       b       .Lcbc_dec_done
+@ single block: fall back to the scalar AES_decrypt, using the IV scratch
+@ slot as the output buffer so IV/input survive the call in q4/q5
+.align 4
+.Lcbc_dec_one:
+       sub     r0, r0, #0x10
+       mov     r10, r1                 @ save original out pointer
+       mov     r1, r9                  @ use the iv scratch space as out buffer
+       mov     r2, r3
+       vmov    q4,q15          @ just in case ensure that IV
+       vmov    q5,q0                   @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {q0}, [r9,:64]          @ load result
+       veor    q0, q0, q4      @ ^= IV
+       vmov    q15, q5         @ q5 holds input
+       vst1.8  {q0}, [r10]             @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10                       @ add sp,r9,#0x10 is no good for thumb
+       vst1.8  {q15}, [r8]             @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+.extern        AES_encrypt
+@ bsaes_ctr32_encrypt_blocks: bit-sliced AES in 32-bit big-endian counter
+@ (CTR) mode.
+@   r0 -> input,  r1 -> output,  r2 = number of 16-byte blocks,
+@   r3 -> AES key (rounds at [r3,#240]),  [sp] -> 16-byte counter block
+@ Fewer than 8 blocks are handled one at a time via plain AES_encrypt
+@ (.Lctr_enc_short); otherwise 8 counters are generated per iteration and
+@ encrypted through _bsaes_encrypt8_alt.
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     r2, #8                  @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     r8, [ip]                        @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     r9, sp                          @ save sp
+
+       ldr     r10, [r3, #240]         @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r10, lsl#7             @ 128 bytes per inner round key
+       add     r12, #96                        @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       mov     sp, r12                         @ sp now points at the key schedule
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+       vld1.8  {q0}, [r8]              @ load counter
+       add     r8, r6, #.LREVM0SR-.LM0 @ borrow r8
+       vldmia  sp, {q4}                @ load round0 key
+#else
+       ldr     r12, [r3, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [r3, #244]
+       mov     r4, r3                  @ pass key
+       mov     r5, r10                 @ pass # of rounds
+       add     r12, r3, #248                   @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7,q7,q15       @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+
+.align 2
+0:     add     r12, r3, #248
+       vld1.8  {q0}, [r8]              @ load counter
+       adrl    r8, .LREVM0SR                   @ borrow r8
+       vldmia  r12, {q4}                       @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        q8,#1           @ compose 1<<96
+       veor            q9,q9,q9
+       vrev32.8        q0,q0
+       vext.8          q8,q9,q8,#4
+       vrev32.8        q4,q4
+       vadd.u32        q9,q8,q8        @ compose 2<<96
+       vstmia  sp, {q4}                @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+@ main loop: build counters base+0..base+7 in q0-q7, encrypt all eight,
+@ then XOR the keystream with the input
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        q10, q8, q9     @ compose 3<<96
+       vadd.u32        q1, q0, q8      @ +1
+       vadd.u32        q2, q0, q9      @ +2
+       vadd.u32        q3, q0, q10     @ +3
+       vadd.u32        q4, q1, q10
+       vadd.u32        q5, q2, q10
+       vadd.u32        q6, q3, q10
+       vadd.u32        q7, q4, q10
+       vadd.u32        q10, q5, q10    @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          sp, {q9}                @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x10           @ pass next round key
+#else
+       add             r4, r3, #264
+#endif
+       vldmia          r8, {q8}                        @ .LREVM0SR
+       mov             r5, r10                 @ pass rounds
+       vstmia          r9, {q10}                       @ save next counter
+       sub             r6, r8, #.LREVM0SR-.LSR @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            r2, r2, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {q8-q9}, [r0]!  @ load input
+       vld1.8          {q10-q11}, [r0]!
+       veor            q0, q8
+       veor            q1, q9
+       vld1.8          {q12-q13}, [r0]!
+       veor            q4, q10
+       veor            q6, q11
+       vld1.8          {q14-q15}, [r0]!
+       veor            q3, q12
+       vst1.8          {q0-q1}, [r1]!  @ write output
+       veor            q7, q13
+       veor            q2, q14
+       vst1.8          {q4}, [r1]!
+       veor            q5, q15
+       vst1.8          {q6}, [r1]!
+       vmov.i32        q8, #1                  @ compose 1<<96
+       vst1.8          {q3}, [r1]!
+       veor            q9, q9, q9
+       vst1.8          {q7}, [r1]!
+       vext.8          q8, q9, q8, #4
+       vst1.8          {q2}, [r1]!
+       vadd.u32        q9,q8,q8                @ compose 2<<96
+       vst1.8          {q5}, [r1]!
+       vldmia          r9, {q0}                        @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+@ tail: 1..7 blocks of keystream already sit in q0..; consume as many as
+@ remain, one block at a time
+.align 4
+.Lctr_enc_loop_done:
+       add             r2, r2, #8
+       vld1.8          {q8}, [r0]!     @ load input
+       veor            q0, q8
+       vst1.8          {q0}, [r1]!     @ write output
+       cmp             r2, #2
+       blo             .Lctr_enc_done
+       vld1.8          {q9}, [r0]!
+       veor            q1, q9
+       vst1.8          {q1}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q10}, [r0]!
+       veor            q4, q10
+       vst1.8          {q4}, [r1]!
+       cmp             r2, #4
+       blo             .Lctr_enc_done
+       vld1.8          {q11}, [r0]!
+       veor            q6, q11
+       vst1.8          {q6}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q12}, [r0]!
+       veor            q3, q12
+       vst1.8          {q3}, [r1]!
+       cmp             r2, #6
+       blo             .Lctr_enc_done
+       vld1.8          {q13}, [r0]!
+       veor            q7, q13
+       vst1.8          {q7}, [r1]!
+       beq             .Lctr_enc_done
+       vld1.8          {q14}, [r0]
+       veor            q2, q14
+       vst1.8          {q2}, [r1]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r9
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          sp, {q0-q1}
+#endif
+
+       mov     sp, r9
+       add     sp, #0x10               @ add sp,r9,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+@ short-input path: encrypt the counter with scalar AES_encrypt and XOR
+@ one block at a time, bumping the counter LSW in r8
+.align 4
+.Lctr_enc_short:
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, r0          @ copy arguments
+       mov     r5, r1
+       mov     r6, r2
+       mov     r7, r3
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {q1}, [ip]              @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {q1}, [sp,:64]  @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {q0}, [r4]!     @ load input
+       vld1.8  {q1}, [sp,:64]  @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    q0,q0,q1
+       vst1.8  {q0}, [r5]!     @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+@ bsaes_xts_encrypt: bit-sliced AES-XTS encryption.
+@   r0 -> input,  r1 -> output,  r2 = length in bytes,  r3 -> key1
+@   [sp]   -> tweak:   with XTS_CHAIN_TWEAK, pointer to the input tweak;
+@   [sp,#0]-> key2 and [sp,#4] -> iv[] otherwise (initial tweak is then
+@             generated here by encrypting iv[] with key2 via AES_encrypt).
+@ Tweaks are doubled in GF(2^128) using the .Lxts_magic constant; eight
+@ tweaked blocks go through _bsaes_encrypt8 per main-loop iteration, the
+@ 1..7-block tails use the .Lxts_enc_<n> paths, and a sub-block remainder
+@ is finished with ciphertext stealing (.Lxts_enc_steal).
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}                       @ save last round key
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    q7, q7, q15     @ fix up last round key
+       vstmia  r12, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       subs    r9, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+@ main loop: derive tweaks T..T*2^7 (GF(2^128) doubling via .Lxts_magic),
+@ stash them at [sp], XOR them into 8 input blocks, encrypt, XOR again
+.align 4
+.Lxts_enc_loop:
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_enc_loop
+
+@ tail: generate only as many tweaks as blocks remain, branching out to
+@ the fixed-size .Lxts_enc_<n> handlers as the count runs out
+.Lxts_enc_short:
+       adds            r9, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_enc_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q2, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+       .quad   1, 0x87
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       veor            q10, q3, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q6, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q4, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_enc_done
+@ single block: tweak-XOR, scalar AES_encrypt via stack buffer, XOR again
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+
+       vmov            q8, q9          @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       @ ciphertext stealing for a trailing partial block: swap the last
+       @ r9 input bytes with the tail of the previous ciphertext block,
+       @ then re-encrypt that block with the current tweak
+       adds            r9, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, r8, #0x10
+
+.Lxts_enc_steal:
+       ldrb            r0, [r7], #1
+       ldrb            r1, [r8, #-0x10]
+       strb            r0, [r8, #-0x10]
+       strb            r1, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+@ void bsaes_xts_decrypt(const u8 in[], u8 out[], u32 bytes,
+@                        struct BS_KEY *key, u8 tweak[16])
+@
+@ Bit-sliced AES-XTS decryption: decrypts 8 blocks per NEON pass via
+@ _bsaes_decrypt8, falls back to scalar AES_decrypt for a lone final
+@ block, and performs ciphertext stealing when 'bytes' is not a
+@ multiple of 16 (unless XTS_CHAIN_TWEAK is defined).
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future r3
+
+       mov     r7, r0
+       mov     r8, r1
+       mov     r9, r2
+       mov     r10, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     r1, [r10, #240]         @ get # of rounds
+       mov     r3, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, r1, lsl#7              @ 128 bytes per inner round key
+       @ add   r12, #96                        @ size of bit-sliced key schedule
+       sub     r12, #48                        @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+#else
+       ldr     r12, [r10, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [r10, #244]
+       mov     r4, r10                 @ pass key
+       mov     r5, r1                  @ pass # of rounds
+       add     r12, r10, #248                  @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, r10, #248
+       vldmia  r4, {q6}
+       vstmia  r12,  {q15}             @ save last round key
+       veor    q7, q7, q6      @ fix up round 0 key
+       vstmia  r4, {q7}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {q8}, [r0]                      @ initial tweak
+       adr     r2, .Lxts_magic
+
+       tst     r9, #0xf                        @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   r9, #0x10                       @ subtract another 16 bytes
+       subs    r9, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+@ main loop: derive 8 consecutive tweaks, then decrypt 8 blocks per pass
+.align 4
+.Lxts_dec_loop:
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q6, q8, #63
+       mov             r0, sp
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q9, #63
+       veor            q9, q9, q6
+       vand            q7, q7, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q10, #63
+       veor            q10, q10, q7
+       vand            q6, q6, q5
+       vld1.8          {q0}, [r7]!
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q11, #63
+       veor            q11, q11, q6
+       vand            q7, q7, q5
+       vld1.8          {q1}, [r7]!
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q12, #63
+       veor            q12, q12, q7
+       vand            q6, q6, q5
+       vld1.8          {q2}, [r7]!
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q13, #63
+       veor            q13, q13, q6
+       vand            q7, q7, q5
+       vld1.8          {q3}, [r7]!
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q14, #63
+       veor            q14, q14, q7
+       vand            q6, q6, q5
+       vld1.8          {q4}, [r7]!
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q15, #63
+       veor            q15, q15, q6
+       vand            q7, q7, q5
+       vld1.8          {q5}, [r7]!
+       veor            q4, q4, q12
+       vadd.u64        q8, q15, q15
+       vst1.64         {q15}, [r0,:128]!
+       vswp            d15,d14
+       veor            q8, q8, q7
+       vst1.64         {q8}, [r0,:128]         @ next round tweak
+
+       vld1.8          {q6-q7}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       veor            q7, q7, q15
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14-q15}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       veor            q13, q5, q15
+       vst1.8          {q12-q13}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+
+       subs            r9, #0x80
+       bpl             .Lxts_dec_loop
+
+@ fewer than 8 full blocks remain: dispatch to the per-count tails below
+.Lxts_dec_short:
+       adds            r9, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          r2, {q5}        @ load XTS magic
+       vshr.s64        q7, q8, #63
+       mov             r0, sp
+       vand            q7, q7, q5
+       vadd.u64        q9, q8, q8
+       vst1.64         {q8}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q9, #63
+       veor            q9, q9, q7
+       vand            q6, q6, q5
+       vadd.u64        q10, q9, q9
+       vst1.64         {q9}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q10, #63
+       veor            q10, q10, q6
+       vand            q7, q7, q5
+       vld1.8          {q0}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_1
+       vadd.u64        q11, q10, q10
+       vst1.64         {q10}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q11, #63
+       veor            q11, q11, q7
+       vand            q6, q6, q5
+       vld1.8          {q1}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_2
+       veor            q0, q0, q8
+       vadd.u64        q12, q11, q11
+       vst1.64         {q11}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q12, #63
+       veor            q12, q12, q6
+       vand            q7, q7, q5
+       vld1.8          {q2}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_3
+       veor            q1, q1, q9
+       vadd.u64        q13, q12, q12
+       vst1.64         {q12}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q13, #63
+       veor            q13, q13, q7
+       vand            q6, q6, q5
+       vld1.8          {q3}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_4
+       veor            q2, q2, q10
+       vadd.u64        q14, q13, q13
+       vst1.64         {q13}, [r0,:128]!
+       vswp            d13,d12
+       vshr.s64        q7, q14, #63
+       veor            q14, q14, q6
+       vand            q7, q7, q5
+       vld1.8          {q4}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_5
+       veor            q3, q3, q11
+       vadd.u64        q15, q14, q14
+       vst1.64         {q14}, [r0,:128]!
+       vswp            d15,d14
+       vshr.s64        q6, q15, #63
+       veor            q15, q15, q7
+       vand            q6, q6, q5
+       vld1.8          {q5}, [r7]!
+       subs            r9, #0x10
+       bmi             .Lxts_dec_6
+       veor            q4, q4, q12
+       sub             r9, #0x10
+       vst1.64         {q15}, [r0,:128]                @ next round tweak
+
+       vld1.8          {q6}, [r7]!
+       veor            q5, q5, q13
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q6, q6, q14
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vld1.64         {q14}, [r0,:128]!
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       veor            q12, q3, q14
+       vst1.8          {q10-q11}, [r8]!
+       vst1.8          {q12}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {q14}, [r0,:128]                @ next round tweak
+
+       veor            q4, q4, q12
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q5, q5, q13
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12-q13}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       veor            q11, q7, q13
+       vst1.8          {q10-q11}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {q13}, [r0,:128]                @ next round tweak
+
+       veor            q3, q3, q11
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q4, q4, q12
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       vld1.64         {q12}, [r0,:128]!
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       veor            q10, q2, q12
+       vst1.8          {q8-q9}, [r8]!
+       vst1.8          {q10}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {q12}, [r0,:128]                @ next round tweak
+
+       veor            q2, q2, q10
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q3, q3, q11
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10-q11}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       veor            q9, q4, q11
+       vst1.8          {q8-q9}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {q11}, [r0,:128]                @ next round tweak
+
+       veor            q1, q1, q9
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q2, q2, q10
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       vld1.64         {q10}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       veor            q8, q6, q10
+       vst1.8          {q0-q1}, [r8]!
+       vst1.8          {q8}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {q10}, [r0,:128]                @ next round tweak
+
+       veor            q0, q0, q8
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, r10, #248                   @ pass key schedule
+#endif
+       veor            q1, q1, q9
+       mov             r5, r1                  @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {q8-q9}, [r0,:128]!
+       veor            q0, q0, q8
+       veor            q1, q1, q9
+       vst1.8          {q0-q1}, [r8]!
+
+       vld1.64         {q8}, [r0,:128]         @ next round tweak
+       b               .Lxts_dec_done
+@ exactly one block left: too few blocks for the bit-sliced path, use
+@ the scalar AES_decrypt instead
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                          @ preserve fp
+       mov             r5, r2                  @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r8]!
+       mov             r3, r4
+       mov             r2, r5
+
+       vmov            q8, q9          @ next round tweak
+
+@ handle a trailing partial block via ciphertext stealing: the last
+@ full block is decrypted with one extra tweak, then its tail is
+@ swapped with the final partial block and decrypted again
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            r9, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          r2, {q5}
+       vshr.s64        q6, q8, #63
+       vand            q6, q6, q5
+       vadd.u64        q9, q8, q8
+       vswp            d13,d12
+       veor            q9, q9, q6
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {q0}, [r7]!
+       mov             r0, sp
+       veor            q0, q0, q9
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+       mov             r4, r3                  @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q9
+       vst1.8          {q0}, [r8]
+
+       mov             r6, r8
+.Lxts_dec_steal:
+       ldrb            r1, [r8]
+       ldrb            r0, [r7], #1
+       strb            r1, [r8, #0x10]
+       strb            r0, [r8], #1
+
+       subs            r9, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {q0}, [r6]
+       mov             r0, sp
+       veor            q0, q8
+       mov             r1, sp
+       vst1.8          {q0}, [sp,:128]
+       mov             r2, r10
+
+       bl              AES_decrypt
+
+       vld1.8          {q0}, [sp,:128]
+       veor            q0, q0, q8
+       vst1.8          {q0}, [r6]
+       mov             r3, r4
+#endif
+
+@ zero the stack area used for the key schedule/tweaks, restore sp,
+@ optionally store the chained tweak, and return
+.Lxts_dec_ret:
+       bic             r0, r3, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [r3, #0x20+VFP_ABI_FRAME]   @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, r3
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {q8}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+#endif
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
new file mode 100644 (file)
index 0000000..15468fb
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/neon.h>
+#include <crypto/aes.h>
+#include <crypto/ablk_helper.h>
+#include <crypto/algapi.h>
+#include <linux/module.h>
+
+#include "aes_glue.h"
+
+#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
+
+/*
+ * AES key with an attached bit-sliced expansion. The scalar schedule
+ * 'rk' is the source of truth; 'bs' holds the NEON bit-sliced copy,
+ * regenerated by the asm code whenever 'converted' is 0.
+ */
+struct BS_KEY {
+       struct AES_KEY  rk;             /* regular scalar key schedule */
+       int             converted;      /* 0 => 'bs' is stale, reconvert */
+       u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];     /* bit-sliced schedule */
+} __aligned(8);
+
+asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
+asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
+
+asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 iv[]);
+
+asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
+                                          struct BS_KEY *key, u8 const iv[]);
+
+asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
+                                 struct BS_KEY *key, u8 tweak[]);
+
+/* CBC: scalar schedule for (serial) encryption, bit-sliced for decryption. */
+struct aesbs_cbc_ctx {
+       struct AES_KEY  enc;
+       struct BS_KEY   dec;
+};
+
+/* CTR: one bit-sliced schedule; CTR decryption equals encryption. */
+struct aesbs_ctr_ctx {
+       struct BS_KEY   enc;
+};
+
+/* XTS: bit-sliced data keys plus a scalar key for the tweak (key2). */
+struct aesbs_xts_ctx {
+       struct BS_KEY   enc;
+       struct BS_KEY   dec;
+       struct AES_KEY  twkey;
+};
+
+/*
+ * Program the CBC key: build the scalar encryption schedule, derive the
+ * decryption schedule from the same key material, and mark the
+ * bit-sliced decryption copy stale so it is reconverted on next use.
+ * Returns 0 or -EINVAL (with CRYPTO_TFM_RES_BAD_KEY_LEN set) for an
+ * unsupported key length.
+ */
+static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;         /* key size in bits */
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc;         /* start from the encryption schedule */
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       ctx->dec.converted = 0;         /* invalidate bit-sliced copy */
+       return 0;
+}
+
+/*
+ * Program the CTR key. Only an encryption schedule is needed since CTR
+ * mode decrypts with the encryption transform; the bit-sliced copy is
+ * invalidated and rebuilt lazily.
+ */
+static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 8;         /* key size in bits */
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->enc.converted = 0;         /* invalidate bit-sliced copy */
+       return 0;
+}
+
+/*
+ * Program the XTS key. 'in_key' holds two concatenated AES keys of
+ * key_len/2 bytes each, hence bits = key_len * 4 = (key_len / 2) * 8:
+ * the first half drives the data encrypt/decrypt schedules, the second
+ * half programs the scalar tweak key (key2).
+ */
+static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
+                            unsigned int key_len)
+{
+       struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
+       int bits = key_len * 4;         /* bits per half-key */
+
+       if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
+               tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+               return -EINVAL;
+       }
+       ctx->dec.rk = ctx->enc.rk;
+       private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
+       private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
+       ctx->enc.converted = ctx->dec.converted = 0;    /* invalidate bit-sliced copies */
+       return 0;
+}
+
+/*
+ * CBC encryption. Each block chains on the previous ciphertext, so the
+ * scalar AES_encrypt is used block by block (no NEON/bit-sliced path
+ * here). Two variants: in-place (XOR the IV into the source buffer and
+ * encrypt it there) and out-of-place (XOR into walk.iv, encrypt to dst).
+ * In both cases walk.iv ends up holding the last ciphertext block.
+ */
+static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt(desc, &walk);
+
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *src = walk.src.virt.addr;
+
+               if (walk.dst.virt.addr == walk.src.virt.addr) {
+                       /* in-place: the freshly written ciphertext is the next IV */
+                       u8 *iv = walk.iv;
+
+                       do {
+                               crypto_xor(src, iv, AES_BLOCK_SIZE);
+                               AES_encrypt(src, src, &ctx->enc);
+                               iv = src;
+                               src += AES_BLOCK_SIZE;
+                       } while (--blocks);
+                       memcpy(walk.iv, iv, AES_BLOCK_SIZE);
+               } else {
+                       /* out-of-place: chain through walk.iv */
+                       u8 *dst = walk.dst.virt.addr;
+
+                       do {
+                               crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
+                               AES_encrypt(walk.iv, dst, &ctx->enc);
+                               memcpy(walk.iv, dst, AES_BLOCK_SIZE);
+                               src += AES_BLOCK_SIZE;
+                               dst += AES_BLOCK_SIZE;
+                       } while (--blocks);
+               }
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+       }
+       return err;
+}
+
+/*
+ * CBC decryption. Chunks of 8+ blocks go through the NEON bit-sliced
+ * routine; any remainder (< 8 blocks) is handled with the scalar
+ * AES_decrypt. For the in-place case the scalar loop saves each
+ * ciphertext block in one of two bounce buffers (bk[]) before it is
+ * overwritten, since it is needed as the IV for the following block.
+ */
+static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* bulk path: NEON handles batches of at least 8 blocks */
+       while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
+               kernel_neon_begin();
+               bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+       }
+       /* scalar tail: fewer than 8 blocks left */
+       while (walk.nbytes) {
+               u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
+               u8 *dst = walk.dst.virt.addr;
+               u8 *src = walk.src.virt.addr;
+               u8 bk[2][AES_BLOCK_SIZE];       /* ciphertext bounce buffers */
+               u8 *iv = walk.iv;
+
+               do {
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
+
+                       AES_decrypt(src, dst, &ctx->dec.rk);
+                       crypto_xor(dst, iv, AES_BLOCK_SIZE);
+
+                       /* next IV is this block's ciphertext (saved copy if in-place) */
+                       if (walk.dst.virt.addr == walk.src.virt.addr)
+                               iv = bk[blocks & 1];
+                       else
+                               iv = src;
+
+                       dst += AES_BLOCK_SIZE;
+                       src += AES_BLOCK_SIZE;
+               } while (--blocks);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+       }
+       return err;
+}
+
+/*
+ * Add 'addend' to a 128-bit big-endian counter held as four __be32
+ * words. The carry (addend = 1) propagates from the least significant
+ * word (ctr[3]) upward; n >= addend means the word did not wrap, so
+ * propagation stops.
+ */
+static void inc_be128_ctr(__be32 ctr[], u32 addend)
+{
+       int i;
+
+       for (i = 3; i >= 0; i--, addend = 1) {
+               u32 n = be32_to_cpu(ctr[i]) + addend;
+
+               ctr[i] = cpu_to_be32(n);
+               if (n >= addend)        /* no wrap: carry resolved */
+                       break;
+       }
+}
+
+/*
+ * CTR encryption/decryption (identical operation). Full blocks go
+ * through the NEON routine, which only increments the low 32 counter
+ * bits, so each batch is clamped to the headroom remaining before
+ * ctr[3] wraps; the carry into the upper words is applied here via
+ * inc_be128_ctr(). A final partial block is produced by encrypting
+ * the counter with scalar AES and XORing the keystream into the tail.
+ */
+static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst, struct scatterlist *src,
+                            unsigned int nbytes)
+{
+       struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       u32 blocks;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
+               u32 tail = walk.nbytes % AES_BLOCK_SIZE;
+               __be32 *ctr = (__be32 *)walk.iv;
+               u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
+
+               /* avoid 32 bit counter overflow in the NEON code */
+               if (unlikely(headroom < blocks)) {
+                       blocks = headroom + 1;
+                       tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
+               }
+               kernel_neon_begin();
+               bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
+                                          walk.dst.virt.addr, blocks,
+                                          &ctx->enc, walk.iv);
+               kernel_neon_end();
+               inc_be128_ctr(ctr, blocks);     /* propagate carry into upper words */
+
+               nbytes -= blocks * AES_BLOCK_SIZE;
+               /* keep the final sub-block for the scalar path below */
+               if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
+                       break;
+
+               err = blkcipher_walk_done(desc, &walk, tail);
+       }
+       if (walk.nbytes) {
+               /* partial final block: XOR with one block of keystream */
+               u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
+               u8 ks[AES_BLOCK_SIZE];
+
+               AES_encrypt(walk.iv, ks, &ctx->enc.rk);
+               if (tdst != tsrc)
+                       memcpy(tdst, tsrc, nbytes);
+               crypto_xor(tdst, ks, nbytes);
+               err = blkcipher_walk_done(desc, &walk, 0);
+       }
+       return err;
+}
+
+/*
+ * XTS encryption. The initial tweak is produced by encrypting the IV
+ * with the tweak key (key2); the NEON routine then consumes whole
+ * walk chunks and keeps the running tweak in walk.iv between calls.
+ */
+static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->enc, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+       }
+       return err;
+}
+
+/*
+ * XTS decryption. Same structure as aesbs_xts_encrypt: the tweak key
+ * always runs in the encrypt direction to derive the initial tweak,
+ * while the data blocks go through the bit-sliced decrypt schedule.
+ */
+static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
+                            struct scatterlist *dst,
+                            struct scatterlist *src, unsigned int nbytes)
+{
+       struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
+       struct blkcipher_walk walk;
+       int err;
+
+       blkcipher_walk_init(&walk, dst, src, nbytes);
+       err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
+
+       /* generate the initial tweak */
+       AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
+
+       while (walk.nbytes) {
+               kernel_neon_begin();
+               bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
+                                 walk.nbytes, &ctx->dec, walk.iv);
+               kernel_neon_end();
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
+       }
+       return err;
+}
+
+static struct crypto_alg aesbs_algs[] = { {
+       .cra_name               = "__cbc-aes-neonbs",
+       .cra_driver_name        = "__driver-cbc-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_cbc_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_cbc_set_key,
+               .encrypt        = aesbs_cbc_encrypt,
+               .decrypt        = aesbs_cbc_decrypt,
+       },
+}, {
+       .cra_name               = "__ctr-aes-neonbs",
+       .cra_driver_name        = "__driver-ctr-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct aesbs_ctr_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_ctr_set_key,
+               .encrypt        = aesbs_ctr_encrypt,
+               .decrypt        = aesbs_ctr_encrypt,
+       },
+}, {
+       .cra_name               = "__xts-aes-neonbs",
+       .cra_driver_name        = "__driver-xts-aes-neonbs",
+       .cra_priority           = 0,
+       .cra_flags              = CRYPTO_ALG_TYPE_BLKCIPHER,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct aesbs_xts_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_blkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_blkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = aesbs_xts_set_key,
+               .encrypt        = aesbs_xts_encrypt,
+               .decrypt        = aesbs_xts_decrypt,
+       },
+}, {
+       .cra_name               = "cbc(aes)",
+       .cra_driver_name        = "cbc-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = __ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "ctr(aes)",
+       .cra_driver_name        = "ctr-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = 1,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = AES_MIN_KEY_SIZE,
+               .max_keysize    = AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+}, {
+       .cra_name               = "xts(aes)",
+       .cra_driver_name        = "xts-aes-neonbs",
+       .cra_priority           = 300,
+       .cra_flags              = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
+       .cra_blocksize          = AES_BLOCK_SIZE,
+       .cra_ctxsize            = sizeof(struct async_helper_ctx),
+       .cra_alignmask          = 7,
+       .cra_type               = &crypto_ablkcipher_type,
+       .cra_module             = THIS_MODULE,
+       .cra_init               = ablk_init,
+       .cra_exit               = ablk_exit,
+       .cra_ablkcipher = {
+               .min_keysize    = 2 * AES_MIN_KEY_SIZE,
+               .max_keysize    = 2 * AES_MAX_KEY_SIZE,
+               .ivsize         = AES_BLOCK_SIZE,
+               .setkey         = ablk_set_key,
+               .encrypt        = ablk_encrypt,
+               .decrypt        = ablk_decrypt,
+       }
+} };
+
+static int __init aesbs_mod_init(void)
+{
+       if (!cpu_has_neon())
+               return -ENODEV;
+
+       return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+static void __exit aesbs_mod_exit(void)
+{
+       crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
+}
+
+module_init(aesbs_mod_init);
+module_exit(aesbs_mod_exit);
+
+MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
+MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
new file mode 100644 (file)
index 0000000..be068db
--- /dev/null
@@ -0,0 +1,2467 @@
+#!/usr/bin/env perl
+
+# ====================================================================
+# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
+# project. The module is, however, dual licensed under OpenSSL and
+# CRYPTOGAMS licenses depending on where you obtain it. For further
+# details see http://www.openssl.org/~appro/cryptogams/.
+#
+# Specific modes and adaptation for Linux kernel by Ard Biesheuvel
+# <ard.biesheuvel@linaro.org>. Permission to use under GPL terms is
+# granted.
+# ====================================================================
+
+# Bit-sliced AES for ARM NEON
+#
+# February 2012.
+#
+# This implementation is direct adaptation of bsaes-x86_64 module for
+# ARM NEON. Except that this module is endian-neutral [in sense that
+# it can be compiled for either endianness] by courtesy of vld1.8's
+# neutrality. Initial version doesn't implement interface to OpenSSL,
+# only low-level primitives and unsupported entry points, just enough
+# to collect performance results, which for Cortex-A8 core are:
+#
+# encrypt      19.5 cycles per byte processed with 128-bit key
+# decrypt      22.1 cycles per byte processed with 128-bit key
+# key conv.    440  cycles per 128-bit key/0.18 of 8x block
+#
+# Snapdragon S4 encrypts byte in 17.6 cycles and decrypts in 19.7,
+# which is [much] worse than anticipated (for further details see
+# http://www.openssl.org/~appro/Snapdragon-S4.html).
+#
+# Cortex-A15 manages in 14.2/16.1 cycles [when integer-only code
+# manages in 20.0 cycles].
+#
+# When comparing to x86_64 results keep in mind that NEON unit is
+# [mostly] single-issue and thus can't [fully] benefit from
+# instruction-level parallelism. And when comparing to aes-armv4
+# results keep in mind key schedule conversion overhead (see
+# bsaes-x86_64.pl for further details)...
+#
+#                                              <appro@openssl.org>
+
+# April-August 2013
+#
+# Add CBC, CTR and XTS subroutines, adapt for kernel use.
+#
+#                                      <ard.biesheuvel@linaro.org>
+
+while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
+open STDOUT,">$output";
+
+my ($inp,$out,$len,$key)=("r0","r1","r2","r3");
+my @XMM=map("q$_",(0..15));
+
+{
+my ($key,$rounds,$const)=("r4","r5","r6");
+
+sub Dlo()   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }
+sub Dhi()   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }
+
+sub Sbox {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b0, b1, b4, b6, b3, b7, b2, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InBasisChange  (@b);
+       &Inv_GF256      (@b[6,5,0,3,7,1,4,2],@t,@s);
+       &OutBasisChange (@b[7,1,4,2,6,5,0,3]);
+}
+
+sub InBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b5, b0, b3, b7, b1, b4, b2] < msb 
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[2], @b[2], @b[1]
+       veor    @b[5], @b[5], @b[6]
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[6], @b[6], @b[2]
+       veor    @b[5], @b[5], @b[0]
+
+       veor    @b[6], @b[6], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+       veor    @b[4], @b[4], @b[5]
+
+       veor    @b[2], @b[2], @b[7]
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[1], @b[1], @b[5]
+___
+}
+
+sub OutBasisChange {
+# input in  lsb > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb > [b6, b1, b2, b4, b7, b0, b3, b5] < msb
+my @b=@_[0..7];
+$code.=<<___;
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[2], @b[2], @b[0]
+       veor    @b[6], @b[6], @b[1]
+
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[5], @b[5], @b[3]
+       veor    @b[3], @b[3], @b[7]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[2], @b[2], @b[5]
+
+       veor    @b[4], @b[4], @b[7]
+___
+}
+
+sub InvSbox {
+# input in lsb         > [b0, b1, b2, b3, b4, b5, b6, b7] < msb
+# output in lsb        > [b0, b1, b6, b4, b2, b7, b3, b5] < msb
+my @b=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+       &InvInBasisChange       (@b);
+       &Inv_GF256              (@b[5,1,2,6,3,7,0,4],@t,@s);
+       &InvOutBasisChange      (@b[3,7,0,4,5,1,2,6]);
+}
+
+sub InvInBasisChange {         # OutBasisChange in reverse (with twist)
+my @b=@_[5,1,2,6,3,7,0,4];
+$code.=<<___
+        veor   @b[1], @b[1], @b[7]
+       veor    @b[4], @b[4], @b[7]
+
+       veor    @b[7], @b[7], @b[5]
+        veor   @b[1], @b[1], @b[3]
+       veor    @b[2], @b[2], @b[5]
+       veor    @b[3], @b[3], @b[7]
+
+       veor    @b[6], @b[6], @b[1]
+       veor    @b[2], @b[2], @b[0]
+        veor   @b[5], @b[5], @b[3]
+       veor    @b[4], @b[4], @b[6]
+       veor    @b[0], @b[0], @b[6]
+       veor    @b[1], @b[1], @b[4]
+___
+}
+
+sub InvOutBasisChange {                # InBasisChange in reverse
+my @b=@_[2,5,7,3,6,1,0,4];
+$code.=<<___;
+       veor    @b[1], @b[1], @b[5]
+       veor    @b[2], @b[2], @b[7]
+
+       veor    @b[3], @b[3], @b[1]
+       veor    @b[4], @b[4], @b[5]
+       veor    @b[7], @b[7], @b[5]
+       veor    @b[3], @b[3], @b[4]
+        veor   @b[5], @b[5], @b[0]
+       veor    @b[3], @b[3], @b[7]
+        veor   @b[6], @b[6], @b[2]
+        veor   @b[2], @b[2], @b[1]
+       veor    @b[6], @b[6], @b[3]
+
+       veor    @b[3], @b[3], @b[0]
+       veor    @b[5], @b[5], @b[6]
+___
+}
+
+sub Mul_GF4 {
+#;*************************************************************
+#;* Mul_GF4: Input x0-x1,y0-y1 Output x0-x1 Temp t0 (8) *
+#;*************************************************************
+my ($x0,$x1,$y0,$y1,$t0,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $t1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $t1, $t0
+       veor    $x0, $x0, $t1
+___
+}
+
+sub Mul_GF4_N {                                # not used, see next subroutine
+# multiply and scale by N
+my ($x0,$x1,$y0,$y1,$t0)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+       vand    $t0, $t0, $x0
+       veor    $x0, $x0, $x1
+       vand    $x1, $x1, $y0
+       vand    $x0, $x0, $y1
+       veor    $x1, $x1, $x0
+       veor    $x0, $x0, $t0
+___
+}
+
+sub Mul_GF4_N_GF4 {
+# interleaved Mul_GF4_N and Mul_GF4
+my ($x0,$x1,$y0,$y1,$t0,
+    $x2,$x3,$y2,$y3,$t1)=@_;
+$code.=<<___;
+       veor    $t0, $y0, $y1
+        veor   $t1, $y2, $y3
+       vand    $t0, $t0, $x0
+        vand   $t1, $t1, $x2
+       veor    $x0, $x0, $x1
+        veor   $x2, $x2, $x3
+       vand    $x1, $x1, $y0
+        vand   $x3, $x3, $y2
+       vand    $x0, $x0, $y1
+        vand   $x2, $x2, $y3
+       veor    $x1, $x1, $x0
+        veor   $x2, $x2, $x3
+       veor    $x0, $x0, $t0
+        veor   $x3, $x3, $t1
+___
+}
+sub Mul_GF16_2 {
+my @x=@_[0..7];
+my @y=@_[8..11];
+my @t=@_[12..15];
+$code.=<<___;
+       veor    @t[0], @x[0], @x[2]
+       veor    @t[1], @x[1], @x[3]
+___
+       &Mul_GF4        (@x[0], @x[1], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       Mul_GF4_N_GF4   (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[2], @x[3], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @x[0], @x[0], @t[0]
+       veor    @x[2], @x[2], @t[0]
+       veor    @x[1], @x[1], @t[1]
+       veor    @x[3], @x[3], @t[1]
+
+       veor    @t[0], @x[4], @x[6]
+       veor    @t[1], @x[5], @x[7]
+___
+       &Mul_GF4_N_GF4  (@t[0], @t[1], @y[0], @y[1], @t[3],
+                        @x[6], @x[7], @y[2], @y[3], @t[2]);
+$code.=<<___;
+       veor    @y[0], @y[0], @y[2]
+       veor    @y[1], @y[1], @y[3]
+___
+       &Mul_GF4        (@x[4], @x[5], @y[0], @y[1], @t[2..3]);
+$code.=<<___;
+       veor    @x[4], @x[4], @t[0]
+       veor    @x[6], @x[6], @t[0]
+       veor    @x[5], @x[5], @t[1]
+       veor    @x[7], @x[7], @t[1]
+___
+}
+sub Inv_GF256 {
+#;********************************************************************
+#;* Inv_GF256: Input x0-x7 Output x0-x7 Temp t0-t3,s0-s3 (144)       *
+#;********************************************************************
+my @x=@_[0..7];
+my @t=@_[8..11];
+my @s=@_[12..15];
+# direct optimizations from hardware
+$code.=<<___;
+       veor    @t[3], @x[4], @x[6]
+       veor    @t[2], @x[5], @x[7]
+       veor    @t[1], @x[1], @x[3]
+       veor    @s[1], @x[7], @x[6]
+        vmov   @t[0], @t[2]
+       veor    @s[0], @x[0], @x[2]
+
+       vorr    @t[2], @t[2], @t[1]
+       veor    @s[3], @t[3], @t[0]
+       vand    @s[2], @t[3], @s[0]
+       vorr    @t[3], @t[3], @s[0]
+       veor    @s[0], @s[0], @t[1]
+       vand    @t[0], @t[0], @t[1]
+       veor    @t[1], @x[3], @x[2]
+       vand    @s[3], @s[3], @s[0]
+       vand    @s[1], @s[1], @t[1]
+       veor    @t[1], @x[4], @x[5]
+       veor    @s[0], @x[1], @x[0]
+       veor    @t[3], @t[3], @s[1]
+       veor    @t[2], @t[2], @s[1]
+       vand    @s[1], @t[1], @s[0]
+       vorr    @t[1], @t[1], @s[0]
+       veor    @t[3], @t[3], @s[3]
+       veor    @t[0], @t[0], @s[1]
+       veor    @t[2], @t[2], @s[2]
+       veor    @t[1], @t[1], @s[3]
+       veor    @t[0], @t[0], @s[2]
+       vand    @s[0], @x[7], @x[3]
+       veor    @t[1], @t[1], @s[2]
+       vand    @s[1], @x[6], @x[2]
+       vand    @s[2], @x[5], @x[1]
+       vorr    @s[3], @x[4], @x[0]
+       veor    @t[3], @t[3], @s[0]
+       veor    @t[1], @t[1], @s[2]
+       veor    @t[0], @t[0], @s[3]
+       veor    @t[2], @t[2], @s[1]
+
+       @ Inv_GF16 \t0, \t1, \t2, \t3, \s0, \s1, \s2, \s3
+
+       @ new smaller inversion
+
+       vand    @s[2], @t[3], @t[1]
+       vmov    @s[0], @t[0]
+
+       veor    @s[1], @t[2], @s[2]
+       veor    @s[3], @t[0], @s[2]
+       veor    @s[2], @t[0], @s[2]     @ @s[2]=@s[3]
+
+       vbsl    @s[1], @t[1], @t[0]
+       vbsl    @s[3], @t[3], @t[2]
+       veor    @t[3], @t[3], @t[2]
+
+       vbsl    @s[0], @s[1], @s[2]
+       vbsl    @t[0], @s[2], @s[1]
+
+       vand    @s[2], @s[0], @s[3]
+       veor    @t[1], @t[1], @t[0]
+
+       veor    @s[2], @s[2], @t[3]
+___
+# output in s3, s2, s1, t1
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \t2, \t3, \t0, \t1, \s0, \s1, \s2, \s3
+
+# Mul_GF16_2 \x0, \x1, \x2, \x3, \x4, \x5, \x6, \x7, \s3, \s2, \s1, \t1, \s0, \t0, \t2, \t3
+       &Mul_GF16_2(@x,@s[3,2,1],@t[1],@s[0],@t[0,2,3]);
+
+### output msb > [x3,x2,x1,x0,x7,x6,x5,x4] < lsb
+}
+
+# AES linear components
+
+sub ShiftRows {
+my @x=@_[0..7];
+my @t=@_[8..11];
+my $mask=pop;
+$code.=<<___;
+       vldmia  $key!, {@t[0]-@t[3]}
+       veor    @t[0], @t[0], @x[0]
+       veor    @t[1], @t[1], @x[1]
+       vtbl.8  `&Dlo(@x[0])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[0])`, {@t[0]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[0]}
+       veor    @t[2], @t[2], @x[2]
+       vtbl.8  `&Dlo(@x[1])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[1])`, {@t[1]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[1]}
+       veor    @t[3], @t[3], @x[3]
+       vtbl.8  `&Dlo(@x[2])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[2])`, {@t[2]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[2]}
+       vtbl.8  `&Dlo(@x[3])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[3])`, {@t[3]}, `&Dhi($mask)`
+       vldmia  $key!, {@t[3]}
+       veor    @t[0], @t[0], @x[4]
+       veor    @t[1], @t[1], @x[5]
+       vtbl.8  `&Dlo(@x[4])`, {@t[0]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[4])`, {@t[0]}, `&Dhi($mask)`
+       veor    @t[2], @t[2], @x[6]
+       vtbl.8  `&Dlo(@x[5])`, {@t[1]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[5])`, {@t[1]}, `&Dhi($mask)`
+       veor    @t[3], @t[3], @x[7]
+       vtbl.8  `&Dlo(@x[6])`, {@t[2]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[6])`, {@t[2]}, `&Dhi($mask)`
+       vtbl.8  `&Dlo(@x[7])`, {@t[3]}, `&Dlo($mask)`
+       vtbl.8  `&Dhi(@x[7])`, {@t[3]}, `&Dhi($mask)`
+___
+}
+
+sub MixColumns {
+# modified to emit output in order suitable for feeding back to aesenc[last]
+my @x=@_[0..7];
+my @t=@_[8..15];
+my $inv=@_[16];        # optional
+$code.=<<___;
+       vext.8  @t[0], @x[0], @x[0], #12        @ x0 <<< 32
+       vext.8  @t[1], @x[1], @x[1], #12
+        veor   @x[0], @x[0], @t[0]             @ x0 ^ (x0 <<< 32)
+       vext.8  @t[2], @x[2], @x[2], #12
+        veor   @x[1], @x[1], @t[1]
+       vext.8  @t[3], @x[3], @x[3], #12
+        veor   @x[2], @x[2], @t[2]
+       vext.8  @t[4], @x[4], @x[4], #12
+        veor   @x[3], @x[3], @t[3]
+       vext.8  @t[5], @x[5], @x[5], #12
+        veor   @x[4], @x[4], @t[4]
+       vext.8  @t[6], @x[6], @x[6], #12
+        veor   @x[5], @x[5], @t[5]
+       vext.8  @t[7], @x[7], @x[7], #12
+        veor   @x[6], @x[6], @t[6]
+
+       veor    @t[1], @t[1], @x[0]
+        veor   @x[7], @x[7], @t[7]
+        vext.8 @x[0], @x[0], @x[0], #8         @ (x0 ^ (x0 <<< 32)) <<< 64)
+       veor    @t[2], @t[2], @x[1]
+       veor    @t[0], @t[0], @x[7]
+       veor    @t[1], @t[1], @x[7]
+        vext.8 @x[1], @x[1], @x[1], #8
+       veor    @t[5], @t[5], @x[4]
+        veor   @x[0], @x[0], @t[0]
+       veor    @t[6], @t[6], @x[5]
+        veor   @x[1], @x[1], @t[1]
+        vext.8 @t[0], @x[4], @x[4], #8
+       veor    @t[4], @t[4], @x[3]
+        vext.8 @t[1], @x[5], @x[5], #8
+       veor    @t[7], @t[7], @x[6]
+        vext.8 @x[4], @x[3], @x[3], #8
+       veor    @t[3], @t[3], @x[2]
+        vext.8 @x[5], @x[7], @x[7], #8
+       veor    @t[4], @t[4], @x[7]
+        vext.8 @x[3], @x[6], @x[6], #8
+       veor    @t[3], @t[3], @x[7]
+        vext.8 @x[6], @x[2], @x[2], #8
+       veor    @x[7], @t[1], @t[5]
+___
+$code.=<<___ if (!$inv);
+       veor    @x[2], @t[0], @t[4]
+       veor    @x[4], @x[4], @t[3]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[3], @x[3], @t[6]
+        @ vmov @x[2], @t[0]
+       veor    @x[6], @x[6], @t[2]
+        @ vmov @x[7], @t[1]
+___
+$code.=<<___ if ($inv);
+       veor    @t[3], @t[3], @x[4]
+       veor    @x[5], @x[5], @t[7]
+       veor    @x[2], @x[3], @t[6]
+       veor    @x[3], @t[0], @t[4]
+       veor    @x[4], @x[6], @t[2]
+       vmov    @x[6], @t[3]
+        @ vmov @x[7], @t[1]
+___
+}
+
+sub InvMixColumns_orig {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+$code.=<<___;
+       @ multiplication by 0x0e
+       vext.8  @t[7], @x[7], @x[7], #12
+       vmov    @t[2], @x[2]
+       veor    @x[2], @x[2], @x[5]             @ 2 5
+       veor    @x[7], @x[7], @x[5]             @ 7 5
+       vext.8  @t[0], @x[0], @x[0], #12
+       vmov    @t[5], @x[5]
+       veor    @x[5], @x[5], @x[0]             @ 5 0           [1]
+       veor    @x[0], @x[0], @x[1]             @ 0 1
+       vext.8  @t[1], @x[1], @x[1], #12
+       veor    @x[1], @x[1], @x[2]             @ 1 25
+       veor    @x[0], @x[0], @x[6]             @ 01 6          [2]
+       vext.8  @t[3], @x[3], @x[3], #12
+       veor    @x[1], @x[1], @x[3]             @ 125 3         [4]
+       veor    @x[2], @x[2], @x[0]             @ 25 016        [3]
+       veor    @x[3], @x[3], @x[7]             @ 3 75
+       veor    @x[7], @x[7], @x[6]             @ 75 6          [0]
+       vext.8  @t[6], @x[6], @x[6], #12
+       vmov    @t[4], @x[4]
+       veor    @x[6], @x[6], @x[4]             @ 6 4
+       veor    @x[4], @x[4], @x[3]             @ 4 375         [6]
+       veor    @x[3], @x[3], @x[7]             @ 375 756=36
+       veor    @x[6], @x[6], @t[5]             @ 64 5          [7]
+       veor    @x[3], @x[3], @t[2]             @ 36 2
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @x[3], @x[3], @t[4]             @ 362 4         [5]
+___
+                                       my @y = @x[7,5,0,2,1,3,4,6];
+$code.=<<___;
+       @ multiplication by 0x0b
+       veor    @y[1], @y[1], @y[0]
+       veor    @y[0], @y[0], @t[0]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[1], @y[1], @t[6]
+       veor    @y[0], @y[0], @t[7]
+       veor    @t[7], @t[7], @t[6]             @ clobber t[7]
+
+       veor    @y[3], @y[3], @t[0]
+        veor   @y[1], @y[1], @y[0]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[2], @y[2], @t[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+       veor    @y[2], @y[2], @t[2]
+       veor    @y[3], @y[3], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[7]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[3], @y[3], @t[3]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[4], @y[4], @t[3]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[3], @t[3], @t[3], #12
+       veor    @y[5], @y[5], @t[4]
+       veor    @y[7], @y[7], @t[7]
+       veor    @t[7], @t[7], @t[5]             @ clobber t[7] even more
+       veor    @y[3], @y[3], @t[5]
+       veor    @y[4], @y[4], @t[4]
+
+       veor    @y[5], @y[5], @t[7]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[6], @y[6], @t[7]
+       veor    @y[4], @y[4], @t[7]
+
+       veor    @t[7], @t[7], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+
+       @ multiplication by 0x0d
+       veor    @y[4], @y[4], @y[7]
+        veor   @t[7], @t[7], @t[6]             @ restore t[7]
+       veor    @y[7], @y[7], @t[4]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @y[2], @y[2], @t[0]
+       veor    @y[7], @y[7], @t[5]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @y[2], @y[2], @t[2]
+
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[1], @y[1], @t[1]
+       veor    @y[0], @y[0], @t[0]
+       veor    @y[3], @y[3], @t[0]
+       veor    @y[1], @y[1], @t[5]
+       veor    @y[0], @y[0], @t[5]
+       vext.8  @t[0], @t[0], @t[0], #12
+       veor    @y[1], @y[1], @t[7]
+       veor    @y[0], @y[0], @t[6]
+       veor    @y[3], @y[3], @y[1]
+       veor    @y[4], @y[4], @t[1]
+       vext.8  @t[1], @t[1], @t[1], #12
+
+       veor    @y[7], @y[7], @t[7]
+       veor    @y[4], @y[4], @t[2]
+       veor    @y[5], @y[5], @t[2]
+       veor    @y[2], @y[2], @t[6]
+       veor    @t[6], @t[6], @t[3]             @ clobber t[6]
+       vext.8  @t[2], @t[2], @t[2], #12
+       veor    @y[4], @y[4], @y[7]
+       veor    @y[3], @y[3], @t[6]
+
+       veor    @y[6], @y[6], @t[6]
+       veor    @y[5], @y[5], @t[5]
+       vext.8  @t[5], @t[5], @t[5], #12
+       veor    @y[6], @y[6], @t[4]
+       vext.8  @t[4], @t[4], @t[4], #12
+       veor    @y[5], @y[5], @t[6]
+       veor    @y[6], @y[6], @t[7]
+       vext.8  @t[7], @t[7], @t[7], #12
+       veor    @t[6], @t[6], @t[3]             @ restore t[6]
+       vext.8  @t[3], @t[3], @t[3], #12
+
+       @ multiplication by 0x09
+       veor    @y[4], @y[4], @y[1]
+       veor    @t[1], @t[1], @y[1]             @ t[1]=y[1]
+       veor    @t[0], @t[0], @t[5]             @ clobber t[0]
+       vext.8  @t[6], @t[6], @t[6], #12
+       veor    @t[1], @t[1], @t[5]
+       veor    @y[3], @y[3], @t[0]
+       veor    @t[0], @t[0], @y[0]             @ t[0]=y[0]
+       veor    @t[1], @t[1], @t[6]
+       veor    @t[6], @t[6], @t[7]             @ clobber t[6]
+       veor    @y[4], @y[4], @t[1]
+       veor    @y[7], @y[7], @t[4]
+       veor    @y[6], @y[6], @t[3]
+       veor    @y[5], @y[5], @t[2]
+       veor    @t[4], @t[4], @y[4]             @ t[4]=y[4]
+       veor    @t[3], @t[3], @y[3]             @ t[3]=y[3]
+       veor    @t[5], @t[5], @y[5]             @ t[5]=y[5]
+       veor    @t[2], @t[2], @y[2]             @ t[2]=y[2]
+       veor    @t[3], @t[3], @t[7]
+       veor    @XMM[5], @t[5], @t[6]
+       veor    @XMM[6], @t[6], @y[6]           @ t[6]=y[6]
+       veor    @XMM[2], @t[2], @t[6]
+       veor    @XMM[7], @t[7], @y[7]           @ t[7]=y[7]
+
+       vmov    @XMM[0], @t[0]
+       vmov    @XMM[1], @t[1]
+       @ vmov  @XMM[2], @t[2]
+       vmov    @XMM[3], @t[3]
+       vmov    @XMM[4], @t[4]
+       @ vmov  @XMM[5], @t[5]
+       @ vmov  @XMM[6], @t[6]
+       @ vmov  @XMM[7], @t[7]
+___
+}
+
+sub InvMixColumns {
+my @x=@_[0..7];
+my @t=@_[8..15];
+
+# Thanks to Jussi Kivilinna for providing pointer to
+#
+# | 0e 0b 0d 09 |   | 02 03 01 01 |   | 05 00 04 00 |
+# | 09 0e 0b 0d | = | 01 02 03 01 | x | 00 05 00 04 |
+# | 0d 09 0e 0b |   | 01 01 02 03 |   | 04 00 05 00 |
+# | 0b 0d 09 0e |   | 03 01 01 02 |   | 00 04 00 05 |
+
+$code.=<<___;
+       @ multiplication by 0x05-0x00-0x04-0x00
+       vext.8  @t[0], @x[0], @x[0], #8
+       vext.8  @t[6], @x[6], @x[6], #8
+       vext.8  @t[7], @x[7], @x[7], #8
+       veor    @t[0], @t[0], @x[0]
+       vext.8  @t[1], @x[1], @x[1], #8
+       veor    @t[6], @t[6], @x[6]
+       vext.8  @t[2], @x[2], @x[2], #8
+       veor    @t[7], @t[7], @x[7]
+       vext.8  @t[3], @x[3], @x[3], #8
+       veor    @t[1], @t[1], @x[1]
+       vext.8  @t[4], @x[4], @x[4], #8
+       veor    @t[2], @t[2], @x[2]
+       vext.8  @t[5], @x[5], @x[5], #8
+       veor    @t[3], @t[3], @x[3]
+       veor    @t[4], @t[4], @x[4]
+       veor    @t[5], @t[5], @x[5]
+
+        veor   @x[0], @x[0], @t[6]
+        veor   @x[1], @x[1], @t[6]
+        veor   @x[2], @x[2], @t[0]
+        veor   @x[4], @x[4], @t[2]
+        veor   @x[3], @x[3], @t[1]
+        veor   @x[1], @x[1], @t[7]
+        veor   @x[2], @x[2], @t[7]
+        veor   @x[4], @x[4], @t[6]
+        veor   @x[5], @x[5], @t[3]
+        veor   @x[3], @x[3], @t[6]
+        veor   @x[6], @x[6], @t[4]
+        veor   @x[4], @x[4], @t[7]
+        veor   @x[5], @x[5], @t[7]
+        veor   @x[7], @x[7], @t[5]
+___
+       &MixColumns     (@x,@t,1);      # flipped 2<->3 and 4<->6
+}
+
+sub swapmove {
+my ($a,$b,$n,$mask,$t)=@_;
+$code.=<<___;
+       vshr.u64        $t, $b, #$n
+       veor            $t, $t, $a
+       vand            $t, $t, $mask
+       veor            $a, $a, $t
+       vshl.u64        $t, $t, #$n
+       veor            $b, $b, $t
+___
+}
+sub swapmove2x {
+my ($a0,$b0,$a1,$b1,$n,$mask,$t0,$t1)=@_;
+$code.=<<___;
+       vshr.u64        $t0, $b0, #$n
+        vshr.u64       $t1, $b1, #$n
+       veor            $t0, $t0, $a0
+        veor           $t1, $t1, $a1
+       vand            $t0, $t0, $mask
+        vand           $t1, $t1, $mask
+       veor            $a0, $a0, $t0
+       vshl.u64        $t0, $t0, #$n
+        veor           $a1, $a1, $t1
+        vshl.u64       $t1, $t1, #$n
+       veor            $b0, $b0, $t0
+        veor           $b1, $b1, $t1
+___
+}
+
+sub bitslice {
+my @x=reverse(@_[0..7]);
+my ($t0,$t1,$t2,$t3)=@_[8..11];
+$code.=<<___;
+       vmov.i8 $t0,#0x55                       @ compose .LBS0
+       vmov.i8 $t1,#0x33                       @ compose .LBS1
+___
+       &swapmove2x(@x[0,1,2,3],1,$t0,$t2,$t3);
+       &swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+$code.=<<___;
+       vmov.i8 $t0,#0x0f                       @ compose .LBS2
+___
+       &swapmove2x(@x[0,2,1,3],2,$t1,$t2,$t3);
+       &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+
+       &swapmove2x(@x[0,4,1,5],4,$t0,$t2,$t3);
+       &swapmove2x(@x[2,6,3,7],4,$t0,$t2,$t3);
+}
+
+$code.=<<___;
+#ifndef __KERNEL__
+# include "arm_arch.h"
+
+# define VFP_ABI_PUSH  vstmdb  sp!,{d8-d15}
+# define VFP_ABI_POP   vldmia  sp!,{d8-d15}
+# define VFP_ABI_FRAME 0x40
+#else
+# define VFP_ABI_PUSH
+# define VFP_ABI_POP
+# define VFP_ABI_FRAME 0
+# define BSAES_ASM_EXTENDED_KEY
+# define XTS_CHAIN_TWEAK
+# define __ARM_ARCH__  7
+#endif
+
+#ifdef __thumb__
+# define adrl adr
+#endif
+
+#if __ARM_ARCH__>=7
+.text
+.syntax        unified         @ ARMv7-capable assembler is expected to handle this
+#ifdef __thumb2__
+.thumb
+#else
+.code   32
+#endif
+
+.fpu   neon
+
+.type  _bsaes_decrypt8,%function
+.align 4
+_bsaes_decrypt8:
+       adr     $const,_bsaes_decrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       add     $const,$const,#.LM0ISR-_bsaes_decrypt8
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0ISR
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Ldec_sbox
+.align 4
+.Ldec_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Ldec_sbox:\n";
+       &InvSbox        (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Ldec_done
+___
+       &InvMixColumns  (@XMM[0,1,6,4,2,7,3,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LISR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Ldec_loop
+       vldmia  $const, {@XMM[12]}              @ .LISRM0
+       b       .Ldec_loop
+.align 4
+.Ldec_done:
+___
+       &bitslice       (@XMM[0,1,6,4,2,7,3,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_decrypt8,.-_bsaes_decrypt8
+
+.type  _bsaes_const,%object
+.align 6
+_bsaes_const:
+.LM0ISR:       @ InvShiftRows constants
+       .quad   0x0a0e0206070b0f03, 0x0004080c0d010509
+.LISR:
+       .quad   0x0504070602010003, 0x0f0e0d0c080b0a09
+.LISRM0:
+       .quad   0x01040b0e0205080f, 0x0306090c00070a0d
+.LM0SR:                @ ShiftRows constants
+       .quad   0x0a0e02060f03070b, 0x0004080c05090d01
+.LSR:
+       .quad   0x0504070600030201, 0x0f0e0d0c0a09080b
+.LSRM0:
+       .quad   0x0304090e00050a0f, 0x01060b0c0207080d
+.LM0:
+       .quad   0x02060a0e03070b0f, 0x0004080c0105090d
+.LREVM0SR:
+       .quad   0x090d01050c000408, 0x03070b0f060a0e02
+.asciz "Bit-sliced AES for NEON, CRYPTOGAMS by <appro\@openssl.org>"
+.align 6
+.size  _bsaes_const,.-_bsaes_const
+
+.type  _bsaes_encrypt8,%function
+.align 4
+_bsaes_encrypt8:
+       adr     $const,_bsaes_encrypt8
+       vldmia  $key!, {@XMM[9]}                @ round 0 key
+       sub     $const,$const,#_bsaes_encrypt8-.LM0SR
+
+       vldmia  $const!, {@XMM[8]}              @ .LM0SR
+_bsaes_encrypt8_alt:
+       veor    @XMM[10], @XMM[0], @XMM[9]      @ xor with round0 key
+       veor    @XMM[11], @XMM[1], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[0])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[0])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+       veor    @XMM[12], @XMM[2], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[1])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[1])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+       veor    @XMM[13], @XMM[3], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[2])`, {@XMM[12]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[2])`, {@XMM[12]}, `&Dhi(@XMM[8])`
+       veor    @XMM[14], @XMM[4], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[3])`, {@XMM[13]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[3])`, {@XMM[13]}, `&Dhi(@XMM[8])`
+       veor    @XMM[15], @XMM[5], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[4])`, {@XMM[14]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[4])`, {@XMM[14]}, `&Dhi(@XMM[8])`
+       veor    @XMM[10], @XMM[6], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[5])`, {@XMM[15]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[5])`, {@XMM[15]}, `&Dhi(@XMM[8])`
+       veor    @XMM[11], @XMM[7], @XMM[9]
+        vtbl.8 `&Dlo(@XMM[6])`, {@XMM[10]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[6])`, {@XMM[10]}, `&Dhi(@XMM[8])`
+        vtbl.8 `&Dlo(@XMM[7])`, {@XMM[11]}, `&Dlo(@XMM[8])`
+        vtbl.8 `&Dhi(@XMM[7])`, {@XMM[11]}, `&Dhi(@XMM[8])`
+_bsaes_encrypt8_bitslice:
+___
+       &bitslice       (@XMM[0..7, 8..11]);
+$code.=<<___;
+       sub     $rounds,$rounds,#1
+       b       .Lenc_sbox
+.align 4
+.Lenc_loop:
+___
+       &ShiftRows      (@XMM[0..7, 8..12]);
+$code.=".Lenc_sbox:\n";
+       &Sbox           (@XMM[0..7, 8..15]);
+$code.=<<___;
+       subs    $rounds,$rounds,#1
+       bcc     .Lenc_done
+___
+       &MixColumns     (@XMM[0,1,4,6,3,7,2,5, 8..15]);
+$code.=<<___;
+       vldmia  $const, {@XMM[12]}              @ .LSR
+       ite     eq                              @ Thumb2 thing, sanity check in ARM
+       addeq   $const,$const,#0x10
+       bne     .Lenc_loop
+       vldmia  $const, {@XMM[12]}              @ .LSRM0
+       b       .Lenc_loop
+.align 4
+.Lenc_done:
+___
+       # output in lsb > [t0, t1, t4, t6, t3, t7, t2, t5] < msb
+       &bitslice       (@XMM[0,1,4,6,3,7,2,5, 8..11]);
+$code.=<<___;
+       vldmia  $key, {@XMM[8]}                 @ last round key
+       veor    @XMM[4], @XMM[4], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[8]
+       veor    @XMM[3], @XMM[3], @XMM[8]
+       veor    @XMM[7], @XMM[7], @XMM[8]
+       veor    @XMM[2], @XMM[2], @XMM[8]
+       veor    @XMM[5], @XMM[5], @XMM[8]
+       veor    @XMM[0], @XMM[0], @XMM[8]
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       bx      lr
+.size  _bsaes_encrypt8,.-_bsaes_encrypt8
+___
+}
+{
+my ($out,$inp,$rounds,$const)=("r12","r4","r5","r6");
+
+# bitslice_key: transpose one 128-bit round-key word (replicated) into the
+# bit-sliced layout used by the cipher body.  Because all eight logical
+# inputs are the same key word, the first swap steps of the generic
+# &bitslice degenerate into plain vmov copies (see the commented-out
+# &swapmove calls below that the vmov sequences replace).
+sub bitslice_key {
+my @x=reverse(@_[0..7]);
+my ($bs0,$bs1,$bs2,$t2,$t3)=@_[8..12];
+
+       &swapmove       (@x[0,1],1,$bs0,$t2,$t3);
+$code.=<<___;
+       @ &swapmove(@x[2,3],1,$t0,$t2,$t3);
+       vmov    @x[2], @x[0]
+       vmov    @x[3], @x[1]
+___
+       #&swapmove2x(@x[4,5,6,7],1,$t0,$t2,$t3);
+
+       &swapmove2x     (@x[0,2,1,3],2,$bs1,$t2,$t3);
+$code.=<<___;
+       @ &swapmove2x(@x[4,6,5,7],2,$t1,$t2,$t3);
+       vmov    @x[4], @x[0]
+       vmov    @x[6], @x[2]
+       vmov    @x[5], @x[1]
+       vmov    @x[7], @x[3]
+___
+       &swapmove2x     (@x[0,4,1,5],4,$bs2,$t2,$t3);
+       &swapmove2x     (@x[2,6,3,7],4,$bs2,$t2,$t3);
+}
+
+# _bsaes_key_convert: convert a standard AES key schedule at [$inp] into
+# the bit-sliced representation at [$out].  On entry $rounds holds the
+# round count.  Round 0 is stored unmodified; each subsequent round key is
+# byte-permuted through .LM0, expanded to eight mask registers via vtst,
+# and written out.  The final round key is deliberately NOT stored — it is
+# left in @XMM[15] (with the .L63 constant composed in @XMM[7]) for the
+# caller to fix up and save (see the callers' "fix up last round key").
+$code.=<<___;
+.type  _bsaes_key_convert,%function
+.align 4
+_bsaes_key_convert:
+       adr     $const,_bsaes_key_convert
+       vld1.8  {@XMM[7]},  [$inp]!             @ load round 0 key
+       sub     $const,$const,#_bsaes_key_convert-.LM0
+       vld1.8  {@XMM[15]}, [$inp]!             @ load round 1 key
+
+       vmov.i8 @XMM[8],  #0x01                 @ bit masks
+       vmov.i8 @XMM[9],  #0x02
+       vmov.i8 @XMM[10], #0x04
+       vmov.i8 @XMM[11], #0x08
+       vmov.i8 @XMM[12], #0x10
+       vmov.i8 @XMM[13], #0x20
+       vldmia  $const, {@XMM[14]}              @ .LM0
+
+#ifdef __ARMEL__
+       vrev32.8        @XMM[7],  @XMM[7]
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       sub     $rounds,$rounds,#1
+       vstmia  $out!, {@XMM[7]}                @ save round 0 key
+       b       .Lkey_loop
+
+.align 4
+.Lkey_loop:
+       vtbl.8  `&Dlo(@XMM[7])`,{@XMM[15]},`&Dlo(@XMM[14])`
+       vtbl.8  `&Dhi(@XMM[7])`,{@XMM[15]},`&Dhi(@XMM[14])`
+       vmov.i8 @XMM[6],  #0x40
+       vmov.i8 @XMM[15], #0x80
+
+       vtst.8  @XMM[0], @XMM[7], @XMM[8]
+       vtst.8  @XMM[1], @XMM[7], @XMM[9]
+       vtst.8  @XMM[2], @XMM[7], @XMM[10]
+       vtst.8  @XMM[3], @XMM[7], @XMM[11]
+       vtst.8  @XMM[4], @XMM[7], @XMM[12]
+       vtst.8  @XMM[5], @XMM[7], @XMM[13]
+       vtst.8  @XMM[6], @XMM[7], @XMM[6]
+       vtst.8  @XMM[7], @XMM[7], @XMM[15]
+       vld1.8  {@XMM[15]}, [$inp]!             @ load next round key
+       vmvn    @XMM[0], @XMM[0]                @ "pnot"
+       vmvn    @XMM[1], @XMM[1]
+       vmvn    @XMM[5], @XMM[5]
+       vmvn    @XMM[6], @XMM[6]
+#ifdef __ARMEL__
+       vrev32.8        @XMM[15], @XMM[15]
+#endif
+       subs    $rounds,$rounds,#1
+       vstmia  $out!,{@XMM[0]-@XMM[7]}         @ write bit-sliced round key
+       bne     .Lkey_loop
+
+       vmov.i8 @XMM[7],#0x63                   @ compose .L63
+       @ don't save last round key
+       bx      lr
+.size  _bsaes_key_convert,.-_bsaes_key_convert
+___
+}
+
+if (0) {               # following four functions are unsupported interface
+                       # used for benchmarking...
+# NOTE(review): everything in this section is compiled out by the if (0)
+# guard — no code is ever appended to $code here.  The four raw-block
+# routines below (key convert + 128-bit-key bulk encrypt/decrypt) are kept
+# only as a reference harness.
+$code.=<<___;
+.globl bsaes_enc_key_convert
+.type  bsaes_enc_key_convert,%function
+.align 4
+bsaes_enc_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_enc_key_convert,.-bsaes_enc_key_convert
+
+.globl bsaes_encrypt_128
+.type  bsaes_encrypt_128,%function
+.align 4
+bsaes_encrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Lenc128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_encrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Lenc128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_encrypt_128,.-bsaes_encrypt_128
+
+.globl bsaes_dec_key_convert
+.type  bsaes_dec_key_convert,%function
+.align 4
+bsaes_dec_key_convert:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+
+       ldr     r5,[$inp,#240]                  @ pass rounds
+       mov     r4,$inp                         @ pass key
+       mov     r12,$out                        @ pass key schedule
+       bl      _bsaes_key_convert
+       vldmia  $out, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $out, {@XMM[7]}
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_dec_key_convert,.-bsaes_dec_key_convert
+
+.globl bsaes_decrypt_128
+.type  bsaes_decrypt_128,%function
+.align 4
+bsaes_decrypt_128:
+       stmdb   sp!,{r4-r6,lr}
+       vstmdb  sp!,{d8-d15}            @ ABI specification says so
+.Ldec128_loop:
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+       mov     r4,$key                         @ pass the key
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5,#10                          @ pass rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]!
+
+       bl      _bsaes_decrypt8
+
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       subs    $len,$len,#0x80
+       vst1.8  {@XMM[5]}, [$out]!
+       bhi     .Ldec128_loop
+
+       vldmia  sp!,{d8-d15}
+       ldmia   sp!,{r4-r6,pc}
+.size  bsaes_decrypt_128,.-bsaes_decrypt_128
+___
+}
+{
+my ($inp,$out,$len,$key, $ivp,$fp,$rounds)=map("r$_",(0..3,8..10));
+my ($keysched)=("sp");
+
+# bsaes_cbc_encrypt: bit-sliced CBC bulk routine.  Despite the name it is
+# DECRYPT-only — the caller must guarantee enc == 0 (see comment below).
+# The IV pointer is the first stack argument.  Outside the kernel build,
+# inputs shorter than 128 bytes are punted to the C AES_cbc_encrypt.
+# Unless BSAES_ASM_EXTENDED_KEY is defined, the bit-sliced key schedule is
+# built on the stack ($keysched == sp) and wiped (.Lcbc_dec_bzero) on exit.
+$code.=<<___;
+.extern AES_cbc_encrypt
+.extern AES_decrypt
+
+.global        bsaes_cbc_encrypt
+.type  bsaes_cbc_encrypt,%function
+.align 5
+bsaes_cbc_encrypt:
+#ifndef        __KERNEL__
+       cmp     $len, #128
+#ifndef        __thumb__
+       blo     AES_cbc_encrypt
+#else
+       bhs     1f
+       b       AES_cbc_encrypt
+1:
+#endif
+#endif
+
+       @ it is up to the caller to make sure we are called with enc == 0
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ivp, [ip]                      @ IV is 1st arg on the stack
+       mov     $len, $len, lsr#4               @ len in 16 byte blocks
+       sub     sp, #0x10                       @ scratch space to carry over the IV
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       vldmia  $keysched, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  $keysched, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12, {@XMM[15]}                 @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:
+#endif
+
+       vld1.8  {@XMM[15]}, [$ivp]              @ load IV
+       b       .Lcbc_dec_loop
+
+.align 4
+.Lcbc_dec_loop:
+       subs    $len, $len, #0x8
+       bmi     .Lcbc_dec_loop_finish
+
+       vld1.8  {@XMM[0]-@XMM[1]}, [$inp]!      @ load input
+       vld1.8  {@XMM[2]-@XMM[3]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       vld1.8  {@XMM[4]-@XMM[5]}, [$inp]!
+       mov     r5, $rounds
+       vld1.8  {@XMM[6]-@XMM[7]}, [$inp]
+       sub     $inp, $inp, #0x60
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[14]-@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       veor    @XMM[5], @XMM[5], @XMM[14]
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       vst1.8  {@XMM[5]}, [$out]!
+
+       b       .Lcbc_dec_loop
+
+.Lcbc_dec_loop_finish:
+       adds    $len, $len, #8
+       beq     .Lcbc_dec_done
+
+       vld1.8  {@XMM[0]}, [$inp]!              @ load input
+       cmp     $len, #2
+       blo     .Lcbc_dec_one
+       vld1.8  {@XMM[1]}, [$inp]!
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       mov     r4, $keysched                   @ pass the key
+#else
+       add     r4, $key, #248
+#endif
+       mov     r5, $rounds
+       vstmia  $fp, {@XMM[15]}                 @ put aside IV
+       beq     .Lcbc_dec_two
+       vld1.8  {@XMM[2]}, [$inp]!
+       cmp     $len, #4
+       blo     .Lcbc_dec_three
+       vld1.8  {@XMM[3]}, [$inp]!
+       beq     .Lcbc_dec_four
+       vld1.8  {@XMM[4]}, [$inp]!
+       cmp     $len, #6
+       blo     .Lcbc_dec_five
+       vld1.8  {@XMM[5]}, [$inp]!
+       beq     .Lcbc_dec_six
+       vld1.8  {@XMM[6]}, [$inp]!
+       sub     $inp, $inp, #0x70
+
+       bl      _bsaes_decrypt8
+
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]-@XMM[13]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[3], @XMM[3], @XMM[13]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       vst1.8  {@XMM[3]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_six:
+       sub     $inp, $inp, #0x60
+       bl      _bsaes_decrypt8
+       vldmia  $fp,{@XMM[14]}                  @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[12]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[7], @XMM[7], @XMM[12]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       vst1.8  {@XMM[7]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_five:
+       sub     $inp, $inp, #0x50
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]-@XMM[11]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor    @XMM[2], @XMM[2], @XMM[11]
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       vst1.8  {@XMM[2]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_four:
+       sub     $inp, $inp, #0x40
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[10]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[4], @XMM[4], @XMM[10]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       vst1.8  {@XMM[4]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_three:
+       sub     $inp, $inp, #0x30
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]-@XMM[9]}, [$inp]!      @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       veor    @XMM[6], @XMM[6], @XMM[9]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       vst1.8  {@XMM[6]}, [$out]!
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_two:
+       sub     $inp, $inp, #0x20
+       bl      _bsaes_decrypt8
+       vldmia  $fp, {@XMM[14]}                 @ reload IV
+       vld1.8  {@XMM[8]}, [$inp]!              @ reload input
+       veor    @XMM[0], @XMM[0], @XMM[14]      @ ^= IV
+       vld1.8  {@XMM[15]}, [$inp]!             @ reload input
+       veor    @XMM[1], @XMM[1], @XMM[8]
+       vst1.8  {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       b       .Lcbc_dec_done
+.align 4
+.Lcbc_dec_one:
+       sub     $inp, $inp, #0x10
+       mov     $rounds, $out                   @ save original out pointer
+       mov     $out, $fp                       @ use the iv scratch space as out buffer
+       mov     r2, $key
+       vmov    @XMM[4],@XMM[15]                @ just in case ensure that IV
+       vmov    @XMM[5],@XMM[0]                 @ and input are preserved
+       bl      AES_decrypt
+       vld1.8  {@XMM[0]}, [$fp,:64]            @ load result
+       veor    @XMM[0], @XMM[0], @XMM[4]       @ ^= IV
+       vmov    @XMM[15], @XMM[5]               @ @XMM[5] holds input
+       vst1.8  {@XMM[0]}, [$rounds]            @ write output
+
+.Lcbc_dec_done:
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+.Lcbc_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lcbc_dec_bzero
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10                       @ add sp,$fp,#0x10 is no good for thumb
+       vst1.8  {@XMM[15]}, [$ivp]              @ return IV
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}
+.size  bsaes_cbc_encrypt,.-bsaes_cbc_encrypt
+___
+}
+{
+my ($inp,$out,$len,$key, $ctr,$fp,$rounds)=(map("r$_",(0..3,8..10)));
+my $const = "r6";      # shared with _bsaes_encrypt8_alt
+my $keysched = "sp";
+
+# bsaes_ctr32_encrypt_blocks: CTR-mode bulk encryption, eight blocks per
+# iteration through _bsaes_encrypt8_alt.  The counter block pointer is the
+# first stack argument.  Fewer than 8 blocks take the .Lctr_enc_short path,
+# which calls plain AES_encrypt once per block.  The stack-allocated key
+# schedule (non-BSAES_ASM_EXTENDED_KEY build) is wiped in .Lctr_enc_bzero
+# before returning.  Counters are composed as 1<<96 / 2<<96 increments and
+# byte-swapped via the borrowed _bsaes_encrypt8 prologue (.LREVM0SR).
+$code.=<<___;
+.extern        AES_encrypt
+.global        bsaes_ctr32_encrypt_blocks
+.type  bsaes_ctr32_encrypt_blocks,%function
+.align 5
+bsaes_ctr32_encrypt_blocks:
+       cmp     $len, #8                        @ use plain AES for
+       blo     .Lctr_enc_short                 @ small sizes
+
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}
+       VFP_ABI_PUSH
+       ldr     $ctr, [ip]                      @ ctr is 1st arg on the stack
+       sub     sp, sp, #0x10                   @ scratch space to carry over the ctr
+       mov     $fp, sp                         @ save sp
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       add     r12, #`128-32`                  @ size of bit-sliced key schedule
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12                         @ sp is $keysched
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       add     $ctr, $const, #.LREVM0SR-.LM0   @ borrow $ctr
+       vldmia  $keysched, {@XMM[4]}            @ load round0 key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       @ populate the key schedule
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7],@XMM[7],@XMM[15]        @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+
+.align 2
+0:     add     r12, $key, #248
+       vld1.8  {@XMM[0]}, [$ctr]               @ load counter
+       adrl    $ctr, .LREVM0SR                 @ borrow $ctr
+       vldmia  r12, {@XMM[4]}                  @ load round0 key
+       sub     sp, #0x10                       @ place for adjusted round0 key
+#endif
+
+       vmov.i32        @XMM[8],#1              @ compose 1<<96
+       veor            @XMM[9],@XMM[9],@XMM[9]
+       vrev32.8        @XMM[0],@XMM[0]
+       vext.8          @XMM[8],@XMM[9],@XMM[8],#4
+       vrev32.8        @XMM[4],@XMM[4]
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8] @ compose 2<<96
+       vstmia  $keysched, {@XMM[4]}            @ save adjusted round0 key
+       b       .Lctr_enc_loop
+
+.align 4
+.Lctr_enc_loop:
+       vadd.u32        @XMM[10], @XMM[8], @XMM[9]      @ compose 3<<96
+       vadd.u32        @XMM[1], @XMM[0], @XMM[8]       @ +1
+       vadd.u32        @XMM[2], @XMM[0], @XMM[9]       @ +2
+       vadd.u32        @XMM[3], @XMM[0], @XMM[10]      @ +3
+       vadd.u32        @XMM[4], @XMM[1], @XMM[10]
+       vadd.u32        @XMM[5], @XMM[2], @XMM[10]
+       vadd.u32        @XMM[6], @XMM[3], @XMM[10]
+       vadd.u32        @XMM[7], @XMM[4], @XMM[10]
+       vadd.u32        @XMM[10], @XMM[5], @XMM[10]     @ next counter
+
+       @ Borrow prologue from _bsaes_encrypt8 to use the opportunity
+       @ to flip byte order in 32-bit counter
+
+       vldmia          $keysched, {@XMM[9]}            @ load round0 key
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, $keysched, #0x10            @ pass next round key
+#else
+       add             r4, $key, #`248+16`
+#endif
+       vldmia          $ctr, {@XMM[8]}                 @ .LREVM0SR
+       mov             r5, $rounds                     @ pass rounds
+       vstmia          $fp, {@XMM[10]}                 @ save next counter
+       sub             $const, $ctr, #.LREVM0SR-.LSR   @ pass constants
+
+       bl              _bsaes_encrypt8_alt
+
+       subs            $len, $len, #8
+       blo             .Lctr_enc_loop_done
+
+       vld1.8          {@XMM[8]-@XMM[9]}, [$inp]!      @ load input
+       vld1.8          {@XMM[10]-@XMM[11]}, [$inp]!
+       veor            @XMM[0], @XMM[8]
+       veor            @XMM[1], @XMM[9]
+       vld1.8          {@XMM[12]-@XMM[13]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       veor            @XMM[6], @XMM[11]
+       vld1.8          {@XMM[14]-@XMM[15]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!      @ write output
+       veor            @XMM[7], @XMM[13]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[4]}, [$out]!
+       veor            @XMM[5], @XMM[15]
+       vst1.8          {@XMM[6]}, [$out]!
+       vmov.i32        @XMM[8], #1                     @ compose 1<<96
+       vst1.8          {@XMM[3]}, [$out]!
+       veor            @XMM[9], @XMM[9], @XMM[9]
+       vst1.8          {@XMM[7]}, [$out]!
+       vext.8          @XMM[8], @XMM[9], @XMM[8], #4
+       vst1.8          {@XMM[2]}, [$out]!
+       vadd.u32        @XMM[9],@XMM[8],@XMM[8]         @ compose 2<<96
+       vst1.8          {@XMM[5]}, [$out]!
+       vldmia          $fp, {@XMM[0]}                  @ load counter
+
+       bne             .Lctr_enc_loop
+       b               .Lctr_enc_done
+
+.align 4
+.Lctr_enc_loop_done:
+       add             $len, $len, #8
+       vld1.8          {@XMM[8]}, [$inp]!      @ load input
+       veor            @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!      @ write output
+       cmp             $len, #2
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[9]}, [$inp]!
+       veor            @XMM[1], @XMM[9]
+       vst1.8          {@XMM[1]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[10]}, [$inp]!
+       veor            @XMM[4], @XMM[10]
+       vst1.8          {@XMM[4]}, [$out]!
+       cmp             $len, #4
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[11]}, [$inp]!
+       veor            @XMM[6], @XMM[11]
+       vst1.8          {@XMM[6]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[12]}, [$inp]!
+       veor            @XMM[3], @XMM[12]
+       vst1.8          {@XMM[3]}, [$out]!
+       cmp             $len, #6
+       blo             .Lctr_enc_done
+       vld1.8          {@XMM[13]}, [$inp]!
+       veor            @XMM[7], @XMM[13]
+       vst1.8          {@XMM[7]}, [$out]!
+       beq             .Lctr_enc_done
+       vld1.8          {@XMM[14]}, [$inp]
+       veor            @XMM[2], @XMM[14]
+       vst1.8          {@XMM[2]}, [$out]!
+
+.Lctr_enc_done:
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifndef        BSAES_ASM_EXTENDED_KEY
+.Lctr_enc_bzero:                       @ wipe key schedule [if any]
+       vstmia          $keysched!, {q0-q1}
+       cmp             $keysched, $fp
+       bne             .Lctr_enc_bzero
+#else
+       vstmia          $keysched, {q0-q1}
+#endif
+
+       mov     sp, $fp
+       add     sp, #0x10               @ add sp,$fp,#0x10 is no good for thumb
+       VFP_ABI_POP
+       ldmia   sp!, {r4-r10, pc}       @ return
+
+.align 4
+.Lctr_enc_short:
+       ldr     ip, [sp]                @ ctr pointer is passed on stack
+       stmdb   sp!, {r4-r8, lr}
+
+       mov     r4, $inp                @ copy arguments
+       mov     r5, $out
+       mov     r6, $len
+       mov     r7, $key
+       ldr     r8, [ip, #12]           @ load counter LSW
+       vld1.8  {@XMM[1]}, [ip]         @ load whole counter value
+#ifdef __ARMEL__
+       rev     r8, r8
+#endif
+       sub     sp, sp, #0x10
+       vst1.8  {@XMM[1]}, [sp,:64]     @ copy counter value
+       sub     sp, sp, #0x10
+
+.Lctr_enc_short_loop:
+       add     r0, sp, #0x10           @ input counter value
+       mov     r1, sp                  @ output on the stack
+       mov     r2, r7                  @ key
+
+       bl      AES_encrypt
+
+       vld1.8  {@XMM[0]}, [r4]!        @ load input
+       vld1.8  {@XMM[1]}, [sp,:64]     @ load encrypted counter
+       add     r8, r8, #1
+#ifdef __ARMEL__
+       rev     r0, r8
+       str     r0, [sp, #0x1c]         @ next counter value
+#else
+       str     r8, [sp, #0x1c]         @ next counter value
+#endif
+       veor    @XMM[0],@XMM[0],@XMM[1]
+       vst1.8  {@XMM[0]}, [r5]!        @ store output
+       subs    r6, r6, #1
+       bne     .Lctr_enc_short_loop
+
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+       vstmia          sp!, {q0-q1}
+
+       ldmia   sp!, {r4-r8, pc}
+.size  bsaes_ctr32_encrypt_blocks,.-bsaes_ctr32_encrypt_blocks
+___
+}
+{
+######################################################################
+# void bsaes_xts_[en|de]crypt(const char *inp,char *out,size_t len,
+#      const AES_KEY *key1, const AES_KEY *key2,
+#      const unsigned char iv[16]);
+#
+my ($inp,$out,$len,$key,$rounds,$magic,$fp)=(map("r$_",(7..10,1..3)));
+my $const="r6";                # returned by _bsaes_key_convert
+my $twmask=@XMM[5];
+my @T=@XMM[6..7];
+
+$code.=<<___;
+.globl bsaes_xts_encrypt
+.type  bsaes_xts_encrypt,%function
+.align 4
+bsaes_xts_encrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0,sp                           @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}                  @ save last round key
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       veor    @XMM[7], @XMM[7], @XMM[15]      @ fix up last round key
+       vstmia  r12, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       subs    $len, #0x80
+       blo     .Lxts_enc_short
+       b       .Lxts_enc_loop
+
+.align 4
+.Lxts_enc_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_enc_loop
+
+.Lxts_enc_short:
+       adds            $len, #0x70
+       bmi             .Lxts_enc_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_enc_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[2], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+
+@ put this in range for both ARM and Thumb mode adr instructions
+.align 5
+.Lxts_magic:
+       .quad   1, 0x87
+
+.align 5
+.Lxts_enc_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       veor            @XMM[10], @XMM[3], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[6], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[4], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_encrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_enc_done
+.align 4
+.Lxts_enc_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_enc_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_enc_ret
+       sub             r6, $out, #0x10
+
+.Lxts_enc_steal:
+       ldrb            r0, [$inp], #1
+       ldrb            r1, [$out, #-0x10]
+       strb            r0, [$out, #-0x10]
+       strb            r1, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_enc_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_encrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_enc_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_enc_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_enc_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_encrypt,.-bsaes_xts_encrypt
+
+.globl bsaes_xts_decrypt
+.type  bsaes_xts_decrypt,%function
+.align 4
+bsaes_xts_decrypt:
+       mov     ip, sp
+       stmdb   sp!, {r4-r10, lr}               @ 0x20
+       VFP_ABI_PUSH
+       mov     r6, sp                          @ future $fp
+
+       mov     $inp, r0
+       mov     $out, r1
+       mov     $len, r2
+       mov     $key, r3
+
+       sub     r0, sp, #0x10                   @ 0x10
+       bic     r0, #0xf                        @ align at 16 bytes
+       mov     sp, r0
+
+#ifdef XTS_CHAIN_TWEAK
+       ldr     r0, [ip]                        @ pointer to input tweak
+#else
+       @ generate initial tweak
+       ldr     r0, [ip, #4]                    @ iv[]
+       mov     r1, sp
+       ldr     r2, [ip, #0]                    @ key2
+       bl      AES_encrypt
+       mov     r0, sp                          @ pointer to initial tweak
+#endif
+
+       ldr     $rounds, [$key, #240]           @ get # of rounds
+       mov     $fp, r6
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       @ allocate the key schedule on the stack
+       sub     r12, sp, $rounds, lsl#7         @ 128 bytes per inner round key
+       @ add   r12, #`128-32`                  @ size of bit-sliced key schedule
+       sub     r12, #`32+16`                   @ place for tweak[9]
+
+       @ populate the key schedule
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       mov     sp, r12
+       add     r12, #0x90                      @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, sp, #0x90
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+#else
+       ldr     r12, [$key, #244]
+       eors    r12, #1
+       beq     0f
+
+       str     r12, [$key, #244]
+       mov     r4, $key                        @ pass key
+       mov     r5, $rounds                     @ pass # of rounds
+       add     r12, $key, #248                 @ pass key schedule
+       bl      _bsaes_key_convert
+       add     r4, $key, #248
+       vldmia  r4, {@XMM[6]}
+       vstmia  r12,  {@XMM[15]}                @ save last round key
+       veor    @XMM[7], @XMM[7], @XMM[6]       @ fix up round 0 key
+       vstmia  r4, {@XMM[7]}
+
+.align 2
+0:     sub     sp, #0x90                       @ place for tweak[9]
+#endif
+       vld1.8  {@XMM[8]}, [r0]                 @ initial tweak
+       adr     $magic, .Lxts_magic
+
+       tst     $len, #0xf                      @ if not multiple of 16
+       it      ne                              @ Thumb2 thing, sanity check in ARM
+       subne   $len, #0x10                     @ subtract another 16 bytes
+       subs    $len, #0x80
+
+       blo     .Lxts_dec_short
+       b       .Lxts_dec_loop
+
+.align 4
+.Lxts_dec_loop:
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       vadd.u64        @XMM[8], @XMM[15], @XMM[15]
+       vst1.64         {@XMM[15]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       veor            @XMM[8], @XMM[8], @T[0]
+       vst1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       vld1.8          {@XMM[6]-@XMM[7]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       veor            @XMM[7], @XMM[7], @XMM[15]
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]-@XMM[15]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       veor            @XMM[13], @XMM[5], @XMM[15]
+       vst1.8          {@XMM[12]-@XMM[13]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+
+       subs            $len, #0x80
+       bpl             .Lxts_dec_loop
+
+.Lxts_dec_short:
+       adds            $len, #0x70
+       bmi             .Lxts_dec_done
+
+       vldmia          $magic, {$twmask}       @ load XTS magic
+       vshr.s64        @T[0], @XMM[8], #63
+       mov             r0, sp
+       vand            @T[0], @T[0], $twmask
+___
+for($i=9;$i<16;$i++) {
+$code.=<<___;
+       vadd.u64        @XMM[$i], @XMM[$i-1], @XMM[$i-1]
+       vst1.64         {@XMM[$i-1]}, [r0,:128]!
+       vswp            `&Dhi("@T[0]")`,`&Dlo("@T[0]")`
+       vshr.s64        @T[1], @XMM[$i], #63
+       veor            @XMM[$i], @XMM[$i], @T[0]
+       vand            @T[1], @T[1], $twmask
+___
+       @T=reverse(@T);
+
+$code.=<<___ if ($i>=10);
+       vld1.8          {@XMM[$i-10]}, [$inp]!
+       subs            $len, #0x10
+       bmi             .Lxts_dec_`$i-9`
+___
+$code.=<<___ if ($i>=11);
+       veor            @XMM[$i-11], @XMM[$i-11], @XMM[$i-3]
+___
+}
+$code.=<<___;
+       sub             $len, #0x10
+       vst1.64         {@XMM[15]}, [r0,:128]           @ next round tweak
+
+       vld1.8          {@XMM[6]}, [$inp]!
+       veor            @XMM[5], @XMM[5], @XMM[13]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[6], @XMM[6], @XMM[14]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vld1.64         {@XMM[14]}, [r0,:128]!
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       veor            @XMM[12], @XMM[3], @XMM[14]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+       vst1.8          {@XMM[12]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_6:
+       vst1.64         {@XMM[14]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[4], @XMM[4], @XMM[12]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[5], @XMM[5], @XMM[13]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]-@XMM[13]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       veor            @XMM[11], @XMM[7], @XMM[13]
+       vst1.8          {@XMM[10]-@XMM[11]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_5:
+       vst1.64         {@XMM[13]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[3], @XMM[3], @XMM[11]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[4], @XMM[4], @XMM[12]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       vld1.64         {@XMM[12]}, [r0,:128]!
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       veor            @XMM[10], @XMM[2], @XMM[12]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+       vst1.8          {@XMM[10]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_4:
+       vst1.64         {@XMM[12]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[2], @XMM[2], @XMM[10]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[3], @XMM[3], @XMM[11]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[ 8]-@XMM[ 9]}, [r0,:128]!
+       vld1.64         {@XMM[10]-@XMM[11]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       veor            @XMM[9], @XMM[4], @XMM[11]
+       vst1.8          {@XMM[8]-@XMM[9]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_3:
+       vst1.64         {@XMM[11]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[1], @XMM[1], @XMM[9]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[2], @XMM[2], @XMM[10]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       vld1.64         {@XMM[10]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       veor            @XMM[8], @XMM[6], @XMM[10]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+       vst1.8          {@XMM[8]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_2:
+       vst1.64         {@XMM[10]}, [r0,:128]           @ next round tweak
+
+       veor            @XMM[0], @XMM[0], @XMM[8]
+#ifndef        BSAES_ASM_EXTENDED_KEY
+       add             r4, sp, #0x90                   @ pass key schedule
+#else
+       add             r4, $key, #248                  @ pass key schedule
+#endif
+       veor            @XMM[1], @XMM[1], @XMM[9]
+       mov             r5, $rounds                     @ pass rounds
+       mov             r0, sp
+
+       bl              _bsaes_decrypt8
+
+       vld1.64         {@XMM[8]-@XMM[9]}, [r0,:128]!
+       veor            @XMM[0], @XMM[0], @XMM[ 8]
+       veor            @XMM[1], @XMM[1], @XMM[ 9]
+       vst1.8          {@XMM[0]-@XMM[1]}, [$out]!
+
+       vld1.64         {@XMM[8]}, [r0,:128]            @ next round tweak
+       b               .Lxts_dec_done
+.align 4
+.Lxts_dec_1:
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                         @ preserve fp
+       mov             r5, $magic                      @ preserve magic
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [$out]!
+       mov             $fp, r4
+       mov             $magic, r5
+
+       vmov            @XMM[8], @XMM[9]                @ next round tweak
+
+.Lxts_dec_done:
+#ifndef        XTS_CHAIN_TWEAK
+       adds            $len, #0x10
+       beq             .Lxts_dec_ret
+
+       @ calculate one round of extra tweak for the stolen ciphertext
+       vldmia          $magic, {$twmask}
+       vshr.s64        @XMM[6], @XMM[8], #63
+       vand            @XMM[6], @XMM[6], $twmask
+       vadd.u64        @XMM[9], @XMM[8], @XMM[8]
+       vswp            `&Dhi("@XMM[6]")`,`&Dlo("@XMM[6]")`
+       veor            @XMM[9], @XMM[9], @XMM[6]
+
+       @ perform the final decryption with the last tweak value
+       vld1.8          {@XMM[0]}, [$inp]!
+       mov             r0, sp
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+       mov             r4, $fp                 @ preserve fp
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[9]
+       vst1.8          {@XMM[0]}, [$out]
+
+       mov             r6, $out
+.Lxts_dec_steal:
+       ldrb            r1, [$out]
+       ldrb            r0, [$inp], #1
+       strb            r1, [$out, #0x10]
+       strb            r0, [$out], #1
+
+       subs            $len, #1
+       bhi             .Lxts_dec_steal
+
+       vld1.8          {@XMM[0]}, [r6]
+       mov             r0, sp
+       veor            @XMM[0], @XMM[8]
+       mov             r1, sp
+       vst1.8          {@XMM[0]}, [sp,:128]
+       mov             r2, $key
+
+       bl              AES_decrypt
+
+       vld1.8          {@XMM[0]}, [sp,:128]
+       veor            @XMM[0], @XMM[0], @XMM[8]
+       vst1.8          {@XMM[0]}, [r6]
+       mov             $fp, r4
+#endif
+
+.Lxts_dec_ret:
+       bic             r0, $fp, #0xf
+       vmov.i32        q0, #0
+       vmov.i32        q1, #0
+#ifdef XTS_CHAIN_TWEAK
+       ldr             r1, [$fp, #0x20+VFP_ABI_FRAME]  @ chain tweak
+#endif
+.Lxts_dec_bzero:                               @ wipe key schedule [if any]
+       vstmia          sp!, {q0-q1}
+       cmp             sp, r0
+       bne             .Lxts_dec_bzero
+
+       mov             sp, $fp
+#ifdef XTS_CHAIN_TWEAK
+       vst1.8          {@XMM[8]}, [r1]
+#endif
+       VFP_ABI_POP
+       ldmia           sp!, {r4-r10, pc}       @ return
+
+.size  bsaes_xts_decrypt,.-bsaes_xts_decrypt
+___
+}
+$code.=<<___;
+#endif
+___
+
+$code =~ s/\`([^\`]*)\`/eval($1)/gem;
+
+open SELF,$0;
+while(<SELF>) {
+       next if (/^#!/);
+        last if (!s/^#/@/ and !/^$/);
+        print;
+}
+close SELF;
+
+print $code;
+
+close STDOUT;
diff --git a/arch/arm/crypto/sha1-armv7-neon.S b/arch/arm/crypto/sha1-armv7-neon.S
new file mode 100644 (file)
index 0000000..50013c0
--- /dev/null
@@ -0,0 +1,634 @@
+/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
+ *
+ * Copyright Â© 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/linkage.h>
+
+
+.syntax unified
+.code   32
+.fpu neon
+
+.text
+
+
+/* Context structure */
+
+#define state_h0 0
+#define state_h1 4
+#define state_h2 8
+#define state_h3 12
+#define state_h4 16
+
+
+/* Constants */
+
+#define K1  0x5A827999
+#define K2  0x6ED9EBA1
+#define K3  0x8F1BBCDC
+#define K4  0xCA62C1D6
+.align 4
+.LK_VEC:
+.LK1:  .long K1, K1, K1, K1
+.LK2:  .long K2, K2, K2, K2
+.LK3:  .long K3, K3, K3, K3
+.LK4:  .long K4, K4, K4, K4
+
+
+/* Register macros */
+
+#define RSTATE r0
+#define RDATA r1
+#define RNBLKS r2
+#define ROLDSTACK r3
+#define RWK lr
+
+#define _a r4
+#define _b r5
+#define _c r6
+#define _d r7
+#define _e r8
+
+#define RT0 r9
+#define RT1 r10
+#define RT2 r11
+#define RT3 r12
+
+#define W0 q0
+#define W1 q1
+#define W2 q2
+#define W3 q3
+#define W4 q4
+#define W5 q5
+#define W6 q6
+#define W7 q7
+
+#define tmp0 q8
+#define tmp1 q9
+#define tmp2 q10
+#define tmp3 q11
+
+#define qK1 q12
+#define qK2 q13
+#define qK3 q14
+#define qK4 q15
+
+
+/* Round function macros. */
+
+#define WK_offs(i) (((i) & 15) * 4)
+
+#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+             W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       ldr RT3, [sp, WK_offs(i)]; \
+               pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       bic RT0, d, b; \
+       add e, e, a, ror #(32 - 5); \
+       and RT1, c, b; \
+               pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       add RT0, RT0, RT3; \
+       add e, e, RT1; \
+       ror b, #(32 - 30); \
+               pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       add e, e, RT0;
+
+#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+             W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       ldr RT3, [sp, WK_offs(i)]; \
+               pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       eor RT0, d, b; \
+       add e, e, a, ror #(32 - 5); \
+       eor RT0, RT0, c; \
+               pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       add e, e, RT3; \
+       ror b, #(32 - 30); \
+               pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       add e, e, RT0; \
+
+#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+             W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       ldr RT3, [sp, WK_offs(i)]; \
+               pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       eor RT0, b, c; \
+       and RT1, b, c; \
+       add e, e, a, ror #(32 - 5); \
+               pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       and RT0, RT0, d; \
+       add RT1, RT1, RT3; \
+       add e, e, RT0; \
+       ror b, #(32 - 30); \
+               pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
+       add e, e, RT1;
+
+#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+             W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+             W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
+           W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       _R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
+              W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define R(a,b,c,d,e,f,i) \
+       _R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
+              W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
+
+#define dummy(...)
+
+
+/* Input expansion macros. */
+
+/********* Precalc macros for rounds 0-15 *************************************/
+
+#define W_PRECALC_00_15() \
+       add       RWK, sp, #(WK_offs(0));                       \
+       \
+       vld1.32   {tmp0, tmp1}, [RDATA]!;                       \
+       vrev32.8  W0, tmp0;             /* big => little */     \
+       vld1.32   {tmp2, tmp3}, [RDATA]!;                       \
+       vadd.u32  tmp0, W0, curK;                               \
+       vrev32.8  W7, tmp1;             /* big => little */     \
+       vrev32.8  W6, tmp2;             /* big => little */     \
+       vadd.u32  tmp1, W7, curK;                               \
+       vrev32.8  W5, tmp3;             /* big => little */     \
+       vadd.u32  tmp2, W6, curK;                               \
+       vst1.32   {tmp0, tmp1}, [RWK]!;                         \
+       vadd.u32  tmp3, W5, curK;                               \
+       vst1.32   {tmp2, tmp3}, [RWK];                          \
+
+#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vld1.32   {tmp0, tmp1}, [RDATA]!;                       \
+
+#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       add       RWK, sp, #(WK_offs(0));                       \
+
+#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vrev32.8  W0, tmp0;             /* big => little */     \
+
+#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vld1.32   {tmp2, tmp3}, [RDATA]!;                       \
+
+#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32  tmp0, W0, curK;                               \
+
+#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vrev32.8  W7, tmp1;             /* big => little */     \
+
+#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vrev32.8  W6, tmp2;             /* big => little */     \
+
+#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32  tmp1, W7, curK;                               \
+
+#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vrev32.8  W5, tmp3;             /* big => little */     \
+
+#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32  tmp2, W6, curK;                               \
+
+#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vst1.32   {tmp0, tmp1}, [RWK]!;                         \
+
+#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32  tmp3, W5, curK;                               \
+
+#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vst1.32   {tmp2, tmp3}, [RWK];                          \
+
+
+/********* Precalc macros for rounds 16-31 ************************************/
+
+#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor      tmp0, tmp0;                   \
+       vext.8    W, W_m16, W_m12, #8;          \
+
+#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       add       RWK, sp, #(WK_offs(i));       \
+       vext.8    tmp0, W_m04, tmp0, #4;        \
+
+#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor      tmp0, tmp0, W_m16;            \
+       veor.32   W, W, W_m08;                  \
+
+#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor      tmp1, tmp1;                   \
+       veor      W, W, tmp0;                   \
+
+#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vshl.u32  tmp0, W, #1;                  \
+
+#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vext.8    tmp1, tmp1, W, #(16-12);      \
+       vshr.u32  W, W, #31;                    \
+
+#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vorr      tmp0, tmp0, W;                \
+       vshr.u32  W, tmp1, #30;                 \
+
+#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vshl.u32  tmp1, tmp1, #2;               \
+
+#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor      tmp0, tmp0, W;                \
+
+#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor      W, tmp0, tmp1;                \
+
+#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32  tmp0, W, curK;                \
+
+#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vst1.32   {tmp0}, [RWK];
+
+
+/********* Precalc macros for rounds 32-79 ************************************/
+
+#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor W, W_m28; \
+
+#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vext.8 tmp0, W_m08, W_m04, #8; \
+
+#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor W, W_m16; \
+
+#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       veor W, tmp0; \
+
+#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       add RWK, sp, #(WK_offs(i&~3)); \
+
+#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vshl.u32 tmp1, W, #2; \
+
+#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vshr.u32 tmp0, W, #30; \
+
+#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vorr W, tmp0, tmp1; \
+
+#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vadd.u32 tmp0, W, curK; \
+
+#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
+       vst1.32 {tmp0}, [RWK];
+
+
+/*
+ * Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
+ *
+ * unsigned int
+ * sha1_transform_neon (void *ctx, const unsigned char *data,
+ *                      unsigned int nblks)
+ */
+.align 3
+ENTRY(sha1_transform_neon)
+  /* input:
+   *   r0: ctx, CTX
+   *   r1: data (64*nblks bytes)
+   *   r2: nblks
+   */
+
+  cmp RNBLKS, #0;
+  beq .Ldo_nothing;
+
+  push {r4-r12, lr};
+  /*vpush {q4-q7};*/
+
+  adr RT3, .LK_VEC;
+
+  mov ROLDSTACK, sp;
+
+  /* Align stack. */
+  sub RT0, sp, #(16*4);
+  and RT0, #(~(16-1));
+  mov sp, RT0;
+
+  vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
+
+  /* Get the values of the chaining variables. */
+  ldm RSTATE, {_a-_e};
+
+  vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
+
+#undef curK
+#define curK qK1
+  /* Precalc 0-15. */
+  W_PRECALC_00_15();
+
+.Loop:
+  /* Transform 0-15 + Precalc 16-31. */
+  _R( _a, _b, _c, _d, _e, F1,  0,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  1,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  2,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
+      W4, W5, W6, W7, W0, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1,  3,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
+      W4, W5, W6, W7, W0, _, _, _ );
+
+#undef curK
+#define curK qK2
+  _R( _b, _c, _d, _e, _a, F1,  4,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1,  5,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1,  6,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
+      W3, W4, W5, W6, W7, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F1,  7,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
+      W3, W4, W5, W6, W7, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F1,  8,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1,  9,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 10,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
+      W2, W3, W4, W5, W6, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F1, 11,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
+      W2, W3, W4, W5, W6, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F1, 12,
+      WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F1, 13,
+      WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F1, 14,
+      WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
+      W1, W2, W3, W4, W5, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F1, 15,
+      WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
+      W1, W2, W3, W4, W5, _, _, _ );
+
+  /* Transform 16-63 + Precalc 32-79. */
+  _R( _e, _a, _b, _c, _d, F1, 16,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _d, _e, _a, _b, _c, F1, 17,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _c, _d, _e, _a, _b, F1, 18,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F1, 19,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 32,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _a, _b, _c, _d, _e, F2, 20,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _e, _a, _b, _c, _d, F2, 21,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _d, _e, _a, _b, _c, F2, 22,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F2, 23,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 36,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+#undef curK
+#define curK qK3
+  _R( _b, _c, _d, _e, _a, F2, 24,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _a, _b, _c, _d, _e, F2, 25,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _e, _a, _b, _c, _d, F2, 26,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F2, 27,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 40,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  _R( _c, _d, _e, _a, _b, F2, 28,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _b, _c, _d, _e, _a, F2, 29,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _a, _b, _c, _d, _e, F2, 30,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F2, 31,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 44,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  _R( _d, _e, _a, _b, _c, F2, 32,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _c, _d, _e, _a, _b, F2, 33,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _b, _c, _d, _e, _a, F2, 34,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+  _R( _a, _b, _c, _d, _e, F2, 35,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 48,
+      W4, W5, W6, W7, W0, W1, W2, W3);
+
+  _R( _e, _a, _b, _c, _d, F2, 36,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _d, _e, _a, _b, _c, F2, 37,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _c, _d, _e, _a, _b, F2, 38,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+  _R( _b, _c, _d, _e, _a, F2, 39,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 52,
+      W3, W4, W5, W6, W7, W0, W1, W2);
+
+  _R( _a, _b, _c, _d, _e, F3, 40,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _e, _a, _b, _c, _d, F3, 41,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _d, _e, _a, _b, _c, F3, 42,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+  _R( _c, _d, _e, _a, _b, F3, 43,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 56,
+      W2, W3, W4, W5, W6, W7, W0, W1);
+
+#undef curK
+#define curK qK4
+  _R( _b, _c, _d, _e, _a, F3, 44,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _a, _b, _c, _d, _e, F3, 45,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _e, _a, _b, _c, _d, F3, 46,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+  _R( _d, _e, _a, _b, _c, F3, 47,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 60,
+      W1, W2, W3, W4, W5, W6, W7, W0);
+
+  _R( _c, _d, _e, _a, _b, F3, 48,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _b, _c, _d, _e, _a, F3, 49,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _a, _b, _c, _d, _e, F3, 50,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+  _R( _e, _a, _b, _c, _d, F3, 51,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 64,
+      W0, W1, W2, W3, W4, W5, W6, W7);
+
+  _R( _d, _e, _a, _b, _c, F3, 52,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _c, _d, _e, _a, _b, F3, 53,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _b, _c, _d, _e, _a, F3, 54,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+  _R( _a, _b, _c, _d, _e, F3, 55,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 68,
+      W7, W0, W1, W2, W3, W4, W5, W6);
+
+  _R( _e, _a, _b, _c, _d, F3, 56,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _d, _e, _a, _b, _c, F3, 57,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _c, _d, _e, _a, _b, F3, 58,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+  _R( _b, _c, _d, _e, _a, F3, 59,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 72,
+      W6, W7, W0, W1, W2, W3, W4, W5);
+
+  subs RNBLKS, #1;
+
+  _R( _a, _b, _c, _d, _e, F4, 60,
+      WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _e, _a, _b, _c, _d, F4, 61,
+      WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _d, _e, _a, _b, _c, F4, 62,
+      WPRECALC_32_79_6, dummy,            WPRECALC_32_79_7, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+  _R( _c, _d, _e, _a, _b, F4, 63,
+      WPRECALC_32_79_8, dummy,            WPRECALC_32_79_9, 76,
+      W5, W6, W7, W0, W1, W2, W3, W4);
+
+  beq .Lend;
+
+  /* Transform 64-79 + Precalc 0-15 of next block. */
+#undef curK
+#define curK qK1
+  _R( _b, _c, _d, _e, _a, F4, 64,
+      WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 65,
+      WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 66,
+      WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 67,
+      WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _c, _d, _e, _a, _b, F4, 68,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 69,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 70,
+      WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _e, _a, _b, _c, _d, F4, 71,
+      WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _d, _e, _a, _b, _c, F4, 72,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 73,
+      dummy,            dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 74,
+      WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _a, _b, _c, _d, _e, F4, 75,
+      WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+
+  _R( _e, _a, _b, _c, _d, F4, 76,
+      WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _d, _e, _a, _b, _c, F4, 77,
+      WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _c, _d, _e, _a, _b, F4, 78,
+      WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
+  _R( _b, _c, _d, _e, _a, F4, 79,
+      WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  b .Loop;
+
+.Lend:
+  /* Transform 64-79 */
+  R( _b, _c, _d, _e, _a, F4, 64 );
+  R( _a, _b, _c, _d, _e, F4, 65 );
+  R( _e, _a, _b, _c, _d, F4, 66 );
+  R( _d, _e, _a, _b, _c, F4, 67 );
+  R( _c, _d, _e, _a, _b, F4, 68 );
+  R( _b, _c, _d, _e, _a, F4, 69 );
+  R( _a, _b, _c, _d, _e, F4, 70 );
+  R( _e, _a, _b, _c, _d, F4, 71 );
+  R( _d, _e, _a, _b, _c, F4, 72 );
+  R( _c, _d, _e, _a, _b, F4, 73 );
+  R( _b, _c, _d, _e, _a, F4, 74 );
+  R( _a, _b, _c, _d, _e, F4, 75 );
+  R( _e, _a, _b, _c, _d, F4, 76 );
+  R( _d, _e, _a, _b, _c, F4, 77 );
+  R( _c, _d, _e, _a, _b, F4, 78 );
+  R( _b, _c, _d, _e, _a, F4, 79 );
+
+  mov sp, ROLDSTACK;
+
+  /* Update the chaining variables. */
+  ldm RSTATE, {RT0-RT3};
+  add _a, RT0;
+  ldr RT0, [RSTATE, #state_h4];
+  add _b, RT1;
+  add _c, RT2;
+  add _d, RT3;
+  /*vpop {q4-q7};*/
+  add _e, RT0;
+  stm RSTATE, {_a-_e};
+
+  pop {r4-r12, pc};
+
+.Ldo_nothing:
+  bx lr
+ENDPROC(sha1_transform_neon)
index 76cd976230bc4b7558c18a50daaa5278fe85565f..e31b0440c6139dc932b0efd4b98aaab077cb862d 100644 (file)
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <asm/byteorder.h>
+#include <asm/crypto/sha1.h>
 
-struct SHA1_CTX {
-       uint32_t h0,h1,h2,h3,h4;
-       u64 count;
-       u8 data[SHA1_BLOCK_SIZE];
-};
 
-asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
+asmlinkage void sha1_block_data_order(u32 *digest,
                const unsigned char *data, unsigned int rounds);
 
 
 static int sha1_init(struct shash_desc *desc)
 {
-       struct SHA1_CTX *sctx = shash_desc_ctx(desc);
-       memset(sctx, 0, sizeof(*sctx));
-       sctx->h0 = SHA1_H0;
-       sctx->h1 = SHA1_H1;
-       sctx->h2 = SHA1_H2;
-       sctx->h3 = SHA1_H3;
-       sctx->h4 = SHA1_H4;
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       *sctx = (struct sha1_state){
+               .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+       };
+
        return 0;
 }
 
 
-static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
-                              unsigned int len, unsigned int partial)
+static int __sha1_update(struct sha1_state *sctx, const u8 *data,
+                        unsigned int len, unsigned int partial)
 {
        unsigned int done = 0;
 
@@ -56,43 +51,44 @@ static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
 
        if (partial) {
                done = SHA1_BLOCK_SIZE - partial;
-               memcpy(sctx->data + partial, data, done);
-               sha1_block_data_order(sctx, sctx->data, 1);
+               memcpy(sctx->buffer + partial, data, done);
+               sha1_block_data_order(sctx->state, sctx->buffer, 1);
        }
 
        if (len - done >= SHA1_BLOCK_SIZE) {
                const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
-               sha1_block_data_order(sctx, data + done, rounds);
+               sha1_block_data_order(sctx->state, data + done, rounds);
                done += rounds * SHA1_BLOCK_SIZE;
        }
 
-       memcpy(sctx->data, data + done, len - done);
+       memcpy(sctx->buffer, data + done, len - done);
        return 0;
 }
 
 
-static int sha1_update(struct shash_desc *desc, const u8 *data,
-                            unsigned int len)
+int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+                   unsigned int len)
 {
-       struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+       struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
        int res;
 
        /* Handle the fast case right here */
        if (partial + len < SHA1_BLOCK_SIZE) {
                sctx->count += len;
-               memcpy(sctx->data + partial, data, len);
+               memcpy(sctx->buffer + partial, data, len);
                return 0;
        }
        res = __sha1_update(sctx, data, len, partial);
        return res;
 }
+EXPORT_SYMBOL_GPL(sha1_update_arm);
 
 
 /* Add padding and return the message digest. */
 static int sha1_final(struct shash_desc *desc, u8 *out)
 {
-       struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+       struct sha1_state *sctx = shash_desc_ctx(desc);
        unsigned int i, index, padlen;
        __be32 *dst = (__be32 *)out;
        __be64 bits;
@@ -106,7 +102,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
        /* We need to fill a whole block for __sha1_update() */
        if (padlen <= 56) {
                sctx->count += padlen;
-               memcpy(sctx->data + index, padding, padlen);
+               memcpy(sctx->buffer + index, padding, padlen);
        } else {
                __sha1_update(sctx, padding, padlen, index);
        }
@@ -114,7 +110,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
 
        /* Store state in digest */
        for (i = 0; i < 5; i++)
-               dst[i] = cpu_to_be32(((u32 *)sctx)[i]);
+               dst[i] = cpu_to_be32(sctx->state[i]);
 
        /* Wipe context */
        memset(sctx, 0, sizeof(*sctx));
@@ -124,7 +120,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
 
 static int sha1_export(struct shash_desc *desc, void *out)
 {
-       struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+       struct sha1_state *sctx = shash_desc_ctx(desc);
        memcpy(out, sctx, sizeof(*sctx));
        return 0;
 }
@@ -132,7 +128,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
 
 static int sha1_import(struct shash_desc *desc, const void *in)
 {
-       struct SHA1_CTX *sctx = shash_desc_ctx(desc);
+       struct sha1_state *sctx = shash_desc_ctx(desc);
        memcpy(sctx, in, sizeof(*sctx));
        return 0;
 }
@@ -141,12 +137,12 @@ static int sha1_import(struct shash_desc *desc, const void *in)
 static struct shash_alg alg = {
        .digestsize     =       SHA1_DIGEST_SIZE,
        .init           =       sha1_init,
-       .update         =       sha1_update,
+       .update         =       sha1_update_arm,
        .final          =       sha1_final,
        .export         =       sha1_export,
        .import         =       sha1_import,
-       .descsize       =       sizeof(struct SHA1_CTX),
-       .statesize      =       sizeof(struct SHA1_CTX),
+       .descsize       =       sizeof(struct sha1_state),
+       .statesize      =       sizeof(struct sha1_state),
        .base           =       {
                .cra_name       =       "sha1",
                .cra_driver_name=       "sha1-asm",
@@ -175,5 +171,5 @@ module_exit(sha1_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm (ARM)");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 MODULE_AUTHOR("David McCullough <ucdevel@gmail.com>");
diff --git a/arch/arm/crypto/sha1_neon_glue.c b/arch/arm/crypto/sha1_neon_glue.c
new file mode 100644 (file)
index 0000000..6f1b411
--- /dev/null
@@ -0,0 +1,197 @@
+/*
+ * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
+ * ARM NEON instructions.
+ *
+ * Copyright Â© 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is based on sha1_generic.c and sha1_ssse3_glue.c:
+ *  Copyright (c) Alan Smithee.
+ *  Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
+ *  Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
+ *  Copyright (c) Mathias Krause <minipli@googlemail.com>
+ *  Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <asm/crypto/sha1.h>
+
+
+asmlinkage void sha1_transform_neon(void *state_h, const char *data,
+                                   unsigned int rounds);
+
+
+static int sha1_neon_init(struct shash_desc *desc)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       *sctx = (struct sha1_state){
+               .state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+       };
+
+       return 0;
+}
+
+static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
+                              unsigned int len, unsigned int partial)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int done = 0;
+
+       sctx->count += len;
+
+       if (partial) {
+               done = SHA1_BLOCK_SIZE - partial;
+               memcpy(sctx->buffer + partial, data, done);
+               sha1_transform_neon(sctx->state, sctx->buffer, 1);
+       }
+
+       if (len - done >= SHA1_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
+
+               sha1_transform_neon(sctx->state, data + done, rounds);
+               done += rounds * SHA1_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->buffer, data + done, len - done);
+
+       return 0;
+}
+
+static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
+       int res;
+
+       /* Handle the fast case right here */
+       if (partial + len < SHA1_BLOCK_SIZE) {
+               sctx->count += len;
+               memcpy(sctx->buffer + partial, data, len);
+
+               return 0;
+       }
+
+       if (!may_use_simd()) {
+               res = sha1_update_arm(desc, data, len);
+       } else {
+               kernel_neon_begin();
+               res = __sha1_neon_update(desc, data, len, partial);
+               kernel_neon_end();
+       }
+
+       return res;
+}
+
+
+/* Add padding and return the message digest. */
+static int sha1_neon_final(struct shash_desc *desc, u8 *out)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       __be32 *dst = (__be32 *)out;
+       __be64 bits;
+       static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };
+
+       bits = cpu_to_be64(sctx->count << 3);
+
+       /* Pad out to 56 mod 64 and append length */
+       index = sctx->count % SHA1_BLOCK_SIZE;
+       padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
+       if (!may_use_simd()) {
+               sha1_update_arm(desc, padding, padlen);
+               sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
+       } else {
+               kernel_neon_begin();
+               /* We need to fill a whole block for __sha1_neon_update() */
+               if (padlen <= 56) {
+                       sctx->count += padlen;
+                       memcpy(sctx->buffer + index, padding, padlen);
+               } else {
+                       __sha1_neon_update(desc, padding, padlen, index);
+               }
+               __sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
+               kernel_neon_end();
+       }
+
+       /* Store state in digest */
+       for (i = 0; i < 5; i++)
+               dst[i] = cpu_to_be32(sctx->state[i]);
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha1_neon_export(struct shash_desc *desc, void *out)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha1_neon_import(struct shash_desc *desc, const void *in)
+{
+       struct sha1_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, sizeof(*sctx));
+
+       return 0;
+}
+
+static struct shash_alg alg = {
+       .digestsize     =       SHA1_DIGEST_SIZE,
+       .init           =       sha1_neon_init,
+       .update         =       sha1_neon_update,
+       .final          =       sha1_neon_final,
+       .export         =       sha1_neon_export,
+       .import         =       sha1_neon_import,
+       .descsize       =       sizeof(struct sha1_state),
+       .statesize      =       sizeof(struct sha1_state),
+       .base           =       {
+               .cra_name               = "sha1",
+               .cra_driver_name        = "sha1-neon",
+               .cra_priority           = 250,
+               .cra_flags              = CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize          = SHA1_BLOCK_SIZE,
+               .cra_module             = THIS_MODULE,
+       }
+};
+
+static int __init sha1_neon_mod_init(void)
+{
+       if (!cpu_has_neon())
+               return -ENODEV;
+
+       return crypto_register_shash(&alg);
+}
+
+static void __exit sha1_neon_mod_fini(void)
+{
+       crypto_unregister_shash(&alg);
+}
+
+module_init(sha1_neon_mod_init);
+module_exit(sha1_neon_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
+MODULE_ALIAS("sha1");
diff --git a/arch/arm/crypto/sha512-armv7-neon.S b/arch/arm/crypto/sha512-armv7-neon.S
new file mode 100644 (file)
index 0000000..fe99472
--- /dev/null
@@ -0,0 +1,455 @@
+/* sha512-armv7-neon.S  -  ARM/NEON assembly implementation of SHA-512 transform
+ *
+ * Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/linkage.h>
+
+
+.syntax unified
+.code   32
+.fpu neon
+
+.text
+
+/* structure of SHA512_CONTEXT */
+#define hd_a 0
+#define hd_b ((hd_a) + 8)
+#define hd_c ((hd_b) + 8)
+#define hd_d ((hd_c) + 8)
+#define hd_e ((hd_d) + 8)
+#define hd_f ((hd_e) + 8)
+#define hd_g ((hd_f) + 8)
+
+/* register macros */
+#define RK %r2
+
+#define RA d0
+#define RB d1
+#define RC d2
+#define RD d3
+#define RE d4
+#define RF d5
+#define RG d6
+#define RH d7
+
+#define RT0 d8
+#define RT1 d9
+#define RT2 d10
+#define RT3 d11
+#define RT4 d12
+#define RT5 d13
+#define RT6 d14
+#define RT7 d15
+
+#define RT01q q4
+#define RT23q q5
+#define RT45q q6
+#define RT67q q7
+
+#define RW0 d16
+#define RW1 d17
+#define RW2 d18
+#define RW3 d19
+#define RW4 d20
+#define RW5 d21
+#define RW6 d22
+#define RW7 d23
+#define RW8 d24
+#define RW9 d25
+#define RW10 d26
+#define RW11 d27
+#define RW12 d28
+#define RW13 d29
+#define RW14 d30
+#define RW15 d31
+
+#define RW01q q8
+#define RW23q q9
+#define RW45q q10
+#define RW67q q11
+#define RW89q q12
+#define RW1011q q13
+#define RW1213q q14
+#define RW1415q q15
+
+/***********************************************************************
+ * ARM assembly implementation of sha512 transform
+ ***********************************************************************/
+#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
+                     rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
+       /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
+       vshr.u64 RT2, re, #14; \
+       vshl.u64 RT3, re, #64 - 14; \
+       interleave_op(arg1); \
+       vshr.u64 RT4, re, #18; \
+       vshl.u64 RT5, re, #64 - 18; \
+       vld1.64 {RT0}, [RK]!; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, re, #41; \
+       vshl.u64 RT5, re, #64 - 41; \
+       vadd.u64 RT0, RT0, rw0; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vmov.64 RT7, re; \
+       veor.64 RT1, RT2, RT3; \
+       vbsl.64 RT7, rf, rg; \
+       \
+       vadd.u64 RT1, RT1, rh; \
+       vshr.u64 RT2, ra, #28; \
+       vshl.u64 RT3, ra, #64 - 28; \
+       vadd.u64 RT1, RT1, RT0; \
+       vshr.u64 RT4, ra, #34; \
+       vshl.u64 RT5, ra, #64 - 34; \
+       vadd.u64 RT1, RT1, RT7; \
+       \
+       /* h = Sum0 (a) + Maj (a, b, c); */ \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, ra, #39; \
+       vshl.u64 RT5, ra, #64 - 39; \
+       veor.64 RT0, ra, rb; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vbsl.64 RT0, rc, rb; \
+       vadd.u64 rd, rd, RT1; /* d+=t1; */ \
+       veor.64 rh, RT2, RT3; \
+       \
+       /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
+       vshr.u64 RT2, rd, #14; \
+       vshl.u64 RT3, rd, #64 - 14; \
+       vadd.u64 rh, rh, RT0; \
+       vshr.u64 RT4, rd, #18; \
+       vshl.u64 RT5, rd, #64 - 18; \
+       vadd.u64 rh, rh, RT1; /* h+=t1; */ \
+       vld1.64 {RT0}, [RK]!; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, rd, #41; \
+       vshl.u64 RT5, rd, #64 - 41; \
+       vadd.u64 RT0, RT0, rw1; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vmov.64 RT7, rd; \
+       veor.64 RT1, RT2, RT3; \
+       vbsl.64 RT7, re, rf; \
+       \
+       vadd.u64 RT1, RT1, rg; \
+       vshr.u64 RT2, rh, #28; \
+       vshl.u64 RT3, rh, #64 - 28; \
+       vadd.u64 RT1, RT1, RT0; \
+       vshr.u64 RT4, rh, #34; \
+       vshl.u64 RT5, rh, #64 - 34; \
+       vadd.u64 RT1, RT1, RT7; \
+       \
+       /* g = Sum0 (h) + Maj (h, a, b); */ \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, rh, #39; \
+       vshl.u64 RT5, rh, #64 - 39; \
+       veor.64 RT0, rh, ra; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vbsl.64 RT0, rb, ra; \
+       vadd.u64 rc, rc, RT1; /* c+=t1; */ \
+       veor.64 rg, RT2, RT3; \
+       \
+       /* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
+       /* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
+       \
+       /**** S0(w[1:2]) */ \
+       \
+       /* w[0:1] += w[9:10] */ \
+       /* RT23q = rw1:rw2 */ \
+       vext.u64 RT23q, rw01q, rw23q, #1; \
+       vadd.u64 rw0, rw9; \
+       vadd.u64 rg, rg, RT0; \
+       vadd.u64 rw1, rw10;\
+       vadd.u64 rg, rg, RT1; /* g+=t1; */ \
+       \
+       vshr.u64 RT45q, RT23q, #1; \
+       vshl.u64 RT67q, RT23q, #64 - 1; \
+       vshr.u64 RT01q, RT23q, #8; \
+       veor.u64 RT45q, RT45q, RT67q; \
+       vshl.u64 RT67q, RT23q, #64 - 8; \
+       veor.u64 RT45q, RT45q, RT01q; \
+       vshr.u64 RT01q, RT23q, #7; \
+       veor.u64 RT45q, RT45q, RT67q; \
+       \
+       /**** S1(w[14:15]) */ \
+       vshr.u64 RT23q, rw1415q, #6; \
+       veor.u64 RT01q, RT01q, RT45q; \
+       vshr.u64 RT45q, rw1415q, #19; \
+       vshl.u64 RT67q, rw1415q, #64 - 19; \
+       veor.u64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT45q, rw1415q, #61; \
+       veor.u64 RT23q, RT23q, RT67q; \
+       vshl.u64 RT67q, rw1415q, #64 - 61; \
+       veor.u64 RT23q, RT23q, RT45q; \
+       vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
+       veor.u64 RT01q, RT23q, RT67q;
+#define vadd_RT01q(rw01q) \
+       /* w[0:1] += S(w[14:15]) */ \
+       vadd.u64 rw01q, RT01q;
+
+#define dummy(_) /*_*/
+
+#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
+                     interleave_op1, arg1, interleave_op2, arg2) \
+       /* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
+       vshr.u64 RT2, re, #14; \
+       vshl.u64 RT3, re, #64 - 14; \
+       interleave_op1(arg1); \
+       vshr.u64 RT4, re, #18; \
+       vshl.u64 RT5, re, #64 - 18; \
+       interleave_op2(arg2); \
+       vld1.64 {RT0}, [RK]!; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, re, #41; \
+       vshl.u64 RT5, re, #64 - 41; \
+       vadd.u64 RT0, RT0, rw0; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vmov.64 RT7, re; \
+       veor.64 RT1, RT2, RT3; \
+       vbsl.64 RT7, rf, rg; \
+       \
+       vadd.u64 RT1, RT1, rh; \
+       vshr.u64 RT2, ra, #28; \
+       vshl.u64 RT3, ra, #64 - 28; \
+       vadd.u64 RT1, RT1, RT0; \
+       vshr.u64 RT4, ra, #34; \
+       vshl.u64 RT5, ra, #64 - 34; \
+       vadd.u64 RT1, RT1, RT7; \
+       \
+       /* h = Sum0 (a) + Maj (a, b, c); */ \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, ra, #39; \
+       vshl.u64 RT5, ra, #64 - 39; \
+       veor.64 RT0, ra, rb; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vbsl.64 RT0, rc, rb; \
+       vadd.u64 rd, rd, RT1; /* d+=t1; */ \
+       veor.64 rh, RT2, RT3; \
+       \
+       /* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
+       vshr.u64 RT2, rd, #14; \
+       vshl.u64 RT3, rd, #64 - 14; \
+       vadd.u64 rh, rh, RT0; \
+       vshr.u64 RT4, rd, #18; \
+       vshl.u64 RT5, rd, #64 - 18; \
+       vadd.u64 rh, rh, RT1; /* h+=t1; */ \
+       vld1.64 {RT0}, [RK]!; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, rd, #41; \
+       vshl.u64 RT5, rd, #64 - 41; \
+       vadd.u64 RT0, RT0, rw1; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vmov.64 RT7, rd; \
+       veor.64 RT1, RT2, RT3; \
+       vbsl.64 RT7, re, rf; \
+       \
+       vadd.u64 RT1, RT1, rg; \
+       vshr.u64 RT2, rh, #28; \
+       vshl.u64 RT3, rh, #64 - 28; \
+       vadd.u64 RT1, RT1, RT0; \
+       vshr.u64 RT4, rh, #34; \
+       vshl.u64 RT5, rh, #64 - 34; \
+       vadd.u64 RT1, RT1, RT7; \
+       \
+       /* g = Sum0 (h) + Maj (h, a, b); */ \
+       veor.64 RT23q, RT23q, RT45q; \
+       vshr.u64 RT4, rh, #39; \
+       vshl.u64 RT5, rh, #64 - 39; \
+       veor.64 RT0, rh, ra; \
+       veor.64 RT23q, RT23q, RT45q; \
+       vbsl.64 RT0, rb, ra; \
+       vadd.u64 rc, rc, RT1; /* c+=t1; */ \
+       veor.64 rg, RT2, RT3;
+#define vadd_rg_RT0(rg) \
+       vadd.u64 rg, rg, RT0;
+#define vadd_rg_RT1(rg) \
+       vadd.u64 rg, rg, RT1; /* g+=t1; */
+
+.align 3
+ENTRY(sha512_transform_neon)
+       /* Input:
+        *      %r0: SHA512_CONTEXT
+        *      %r1: data
+        *      %r2: u64 k[] constants
+        *      %r3: nblks
+        */
+       push {%lr};
+
+       mov %lr, #0;
+
+       /* Load context to d0-d7 */
+       vld1.64 {RA-RD}, [%r0]!;
+       vld1.64 {RE-RH}, [%r0];
+       sub %r0, #(4*8);
+
+       /* Load input to w[16], d16-d31 */
+       /* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
+       vld1.64 {RW0-RW3}, [%r1]!;
+       vld1.64 {RW4-RW7}, [%r1]!;
+       vld1.64 {RW8-RW11}, [%r1]!;
+       vld1.64 {RW12-RW15}, [%r1]!;
+#ifdef __ARMEL__
+       /* byteswap */
+       vrev64.8 RW01q, RW01q;
+       vrev64.8 RW23q, RW23q;
+       vrev64.8 RW45q, RW45q;
+       vrev64.8 RW67q, RW67q;
+       vrev64.8 RW89q, RW89q;
+       vrev64.8 RW1011q, RW1011q;
+       vrev64.8 RW1213q, RW1213q;
+       vrev64.8 RW1415q, RW1415q;
+#endif
+
+       /* EABI says that d8-d15 must be preserved by callee. */
+       /*vpush {RT0-RT7};*/
+
+.Loop:
+       rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
+                    RW23q, RW1415q, RW9, RW10, dummy, _);
+       b .Lenter_rounds;
+
+.Loop_rounds:
+       rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
+                    RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
+.Lenter_rounds:
+       rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
+                    RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
+       rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
+                    RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
+       rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
+                    RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
+       rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
+                    RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
+       rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
+                    RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
+       add %lr, #16;
+       rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
+                    RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
+       cmp %lr, #64;
+       rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
+                    RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
+       bne .Loop_rounds;
+
+       subs %r3, #1;
+
+       rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
+                     vadd_RT01q, RW1415q, dummy, _);
+       rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
+                     vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+       beq .Lhandle_tail;
+       vld1.64 {RW0-RW3}, [%r1]!;
+       rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
+                     vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+       rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
+                     vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+#ifdef __ARMEL__
+       vrev64.8 RW01q, RW01q;
+       vrev64.8 RW23q, RW23q;
+#endif
+       vld1.64 {RW4-RW7}, [%r1]!;
+       rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
+                     vadd_rg_RT0, RA, vadd_rg_RT1, RA);
+       rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
+                     vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+#ifdef __ARMEL__
+       vrev64.8 RW45q, RW45q;
+       vrev64.8 RW67q, RW67q;
+#endif
+       vld1.64 {RW8-RW11}, [%r1]!;
+       rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
+                     vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+       rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
+                     vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+#ifdef __ARMEL__
+       vrev64.8 RW89q, RW89q;
+       vrev64.8 RW1011q, RW1011q;
+#endif
+       vld1.64 {RW12-RW15}, [%r1]!;
+       vadd_rg_RT0(RA);
+       vadd_rg_RT1(RA);
+
+       /* Load context */
+       vld1.64 {RT0-RT3}, [%r0]!;
+       vld1.64 {RT4-RT7}, [%r0];
+       sub %r0, #(4*8);
+
+#ifdef __ARMEL__
+       vrev64.8 RW1213q, RW1213q;
+       vrev64.8 RW1415q, RW1415q;
+#endif
+
+       vadd.u64 RA, RT0;
+       vadd.u64 RB, RT1;
+       vadd.u64 RC, RT2;
+       vadd.u64 RD, RT3;
+       vadd.u64 RE, RT4;
+       vadd.u64 RF, RT5;
+       vadd.u64 RG, RT6;
+       vadd.u64 RH, RT7;
+
+       /* Store the first half of context */
+       vst1.64 {RA-RD}, [%r0]!;
+       sub RK, $(8*80);
+       vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
+       mov %lr, #0;
+       sub %r0, #(4*8);
+
+       b .Loop;
+
+.Lhandle_tail:
+       rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
+                     vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+       rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
+                     vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+       rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
+                     vadd_rg_RT0, RA, vadd_rg_RT1, RA);
+       rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
+                     vadd_rg_RT0, RG, vadd_rg_RT1, RG);
+       rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
+                     vadd_rg_RT0, RE, vadd_rg_RT1, RE);
+       rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
+                     vadd_rg_RT0, RC, vadd_rg_RT1, RC);
+
+       /* Load context to d16-d23 */
+       vld1.64 {RW0-RW3}, [%r0]!;
+       vadd_rg_RT0(RA);
+       vld1.64 {RW4-RW7}, [%r0];
+       vadd_rg_RT1(RA);
+       sub %r0, #(4*8);
+
+       vadd.u64 RA, RW0;
+       vadd.u64 RB, RW1;
+       vadd.u64 RC, RW2;
+       vadd.u64 RD, RW3;
+       vadd.u64 RE, RW4;
+       vadd.u64 RF, RW5;
+       vadd.u64 RG, RW6;
+       vadd.u64 RH, RW7;
+
+       /* Store the first half of context */
+       vst1.64 {RA-RD}, [%r0]!;
+
+       /* Clear used registers */
+       /* d16-d31 */
+       veor.u64 RW01q, RW01q;
+       veor.u64 RW23q, RW23q;
+       veor.u64 RW45q, RW45q;
+       veor.u64 RW67q, RW67q;
+       vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
+       veor.u64 RW89q, RW89q;
+       veor.u64 RW1011q, RW1011q;
+       veor.u64 RW1213q, RW1213q;
+       veor.u64 RW1415q, RW1415q;
+       /* d8-d15 */
+       /*vpop {RT0-RT7};*/
+       /* d0-d7 (q0-q3) */
+       veor.u64 %q0, %q0;
+       veor.u64 %q1, %q1;
+       veor.u64 %q2, %q2;
+       veor.u64 %q3, %q3;
+
+       pop {%pc};
+ENDPROC(sha512_transform_neon)
diff --git a/arch/arm/crypto/sha512_neon_glue.c b/arch/arm/crypto/sha512_neon_glue.c
new file mode 100644 (file)
index 0000000..0d2758f
--- /dev/null
@@ -0,0 +1,305 @@
+/*
+ * Glue code for the SHA512 Secure Hash Algorithm assembly implementation
+ * using NEON instructions.
+ *
+ * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
+ *
+ * This file is based on sha512_ssse3_glue.c:
+ *   Copyright (C) 2013 Intel Corporation
+ *   Author: Tim Chen <tim.c.chen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ */
+
+#include <crypto/internal/hash.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/cryptohash.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <crypto/sha.h>
+#include <asm/byteorder.h>
+#include <asm/simd.h>
+#include <asm/neon.h>
+
+
+static const u64 sha512_k[] = {
+       0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
+       0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
+       0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
+       0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
+       0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
+       0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
+       0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
+       0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
+       0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
+       0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
+       0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
+       0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
+       0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
+       0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
+       0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
+       0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
+       0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
+       0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
+       0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
+       0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
+       0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
+       0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
+       0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
+       0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
+       0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
+       0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
+       0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
+       0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
+       0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
+       0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
+       0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
+       0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
+       0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
+       0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
+       0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
+       0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
+       0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
+       0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
+       0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
+       0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
+};
+
+
+asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
+                                     const u64 k[], unsigned int num_blks);
+
+
+static int sha512_neon_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       sctx->state[0] = SHA512_H0;
+       sctx->state[1] = SHA512_H1;
+       sctx->state[2] = SHA512_H2;
+       sctx->state[3] = SHA512_H3;
+       sctx->state[4] = SHA512_H4;
+       sctx->state[5] = SHA512_H5;
+       sctx->state[6] = SHA512_H6;
+       sctx->state[7] = SHA512_H7;
+       sctx->count[0] = sctx->count[1] = 0;
+
+       return 0;
+}
+
+static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
+                               unsigned int len, unsigned int partial)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       unsigned int done = 0;
+
+       sctx->count[0] += len;
+       if (sctx->count[0] < len)
+               sctx->count[1]++;
+
+       if (partial) {
+               done = SHA512_BLOCK_SIZE - partial;
+               memcpy(sctx->buf + partial, data, done);
+               sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
+       }
+
+       if (len - done >= SHA512_BLOCK_SIZE) {
+               const unsigned int rounds = (len - done) / SHA512_BLOCK_SIZE;
+
+               sha512_transform_neon(sctx->state, data + done, sha512_k,
+                                     rounds);
+
+               done += rounds * SHA512_BLOCK_SIZE;
+       }
+
+       memcpy(sctx->buf, data + done, len - done);
+
+       return 0;
+}
+
+static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
+                            unsigned int len)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
+       int res;
+
+       /* Handle the fast case right here */
+       if (partial + len < SHA512_BLOCK_SIZE) {
+               sctx->count[0] += len;
+               if (sctx->count[0] < len)
+                       sctx->count[1]++;
+               memcpy(sctx->buf + partial, data, len);
+
+               return 0;
+       }
+
+       if (!may_use_simd()) {
+               res = crypto_sha512_update(desc, data, len);
+       } else {
+               kernel_neon_begin();
+               res = __sha512_neon_update(desc, data, len, partial);
+               kernel_neon_end();
+       }
+
+       return res;
+}
+
+
+/* Add padding and return the message digest. */
+static int sha512_neon_final(struct shash_desc *desc, u8 *out)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+       unsigned int i, index, padlen;
+       __be64 *dst = (__be64 *)out;
+       __be64 bits[2];
+       static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };
+
+       /* save number of bits */
+       bits[1] = cpu_to_be64(sctx->count[0] << 3);
+       bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
+
+       /* Pad out to 112 mod 128 and append length */
+       index = sctx->count[0] & 0x7f;
+       padlen = (index < 112) ? (112 - index) : ((128+112) - index);
+
+       if (!may_use_simd()) {
+               crypto_sha512_update(desc, padding, padlen);
+               crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
+       } else {
+               kernel_neon_begin();
+               /* We need to fill a whole block for __sha512_neon_update() */
+               if (padlen <= 112) {
+                       sctx->count[0] += padlen;
+                       if (sctx->count[0] < padlen)
+                               sctx->count[1]++;
+                       memcpy(sctx->buf + index, padding, padlen);
+               } else {
+                       __sha512_neon_update(desc, padding, padlen, index);
+               }
+               __sha512_neon_update(desc, (const u8 *)&bits,
+                                       sizeof(bits), 112);
+               kernel_neon_end();
+       }
+
+       /* Store state in digest */
+       for (i = 0; i < 8; i++)
+               dst[i] = cpu_to_be64(sctx->state[i]);
+
+       /* Wipe context */
+       memset(sctx, 0, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha512_neon_export(struct shash_desc *desc, void *out)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(out, sctx, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha512_neon_import(struct shash_desc *desc, const void *in)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       memcpy(sctx, in, sizeof(*sctx));
+
+       return 0;
+}
+
+static int sha384_neon_init(struct shash_desc *desc)
+{
+       struct sha512_state *sctx = shash_desc_ctx(desc);
+
+       sctx->state[0] = SHA384_H0;
+       sctx->state[1] = SHA384_H1;
+       sctx->state[2] = SHA384_H2;
+       sctx->state[3] = SHA384_H3;
+       sctx->state[4] = SHA384_H4;
+       sctx->state[5] = SHA384_H5;
+       sctx->state[6] = SHA384_H6;
+       sctx->state[7] = SHA384_H7;
+
+       sctx->count[0] = sctx->count[1] = 0;
+
+       return 0;
+}
+
+static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
+{
+       u8 D[SHA512_DIGEST_SIZE];
+
+       sha512_neon_final(desc, D);
+
+       memcpy(hash, D, SHA384_DIGEST_SIZE);
+       memset(D, 0, SHA512_DIGEST_SIZE);
+
+       return 0;
+}
+
+static struct shash_alg algs[] = { {
+       .digestsize     =       SHA512_DIGEST_SIZE,
+       .init           =       sha512_neon_init,
+       .update         =       sha512_neon_update,
+       .final          =       sha512_neon_final,
+       .export         =       sha512_neon_export,
+       .import         =       sha512_neon_import,
+       .descsize       =       sizeof(struct sha512_state),
+       .statesize      =       sizeof(struct sha512_state),
+       .base           =       {
+               .cra_name       =       "sha512",
+               .cra_driver_name =      "sha512-neon",
+               .cra_priority   =       250,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA512_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+},  {
+       .digestsize     =       SHA384_DIGEST_SIZE,
+       .init           =       sha384_neon_init,
+       .update         =       sha512_neon_update,
+       .final          =       sha384_neon_final,
+       .export         =       sha512_neon_export,
+       .import         =       sha512_neon_import,
+       .descsize       =       sizeof(struct sha512_state),
+       .statesize      =       sizeof(struct sha512_state),
+       .base           =       {
+               .cra_name       =       "sha384",
+               .cra_driver_name =      "sha384-neon",
+               .cra_priority   =       250,
+               .cra_flags      =       CRYPTO_ALG_TYPE_SHASH,
+               .cra_blocksize  =       SHA384_BLOCK_SIZE,
+               .cra_module     =       THIS_MODULE,
+       }
+} };
+
+static int __init sha512_neon_mod_init(void)
+{
+       if (!cpu_has_neon())
+               return -ENODEV;
+
+       return crypto_register_shashes(algs, ARRAY_SIZE(algs));
+}
+
+static void __exit sha512_neon_mod_fini(void)
+{
+       crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
+}
+
+module_init(sha512_neon_mod_init);
+module_exit(sha512_neon_mod_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
+
+MODULE_ALIAS("sha512");
+MODULE_ALIAS("sha384");
index d3db39860b9cc5eb83053f99f9430b926443609c..6577b8aeb711996cb0318860bece91500a6b2cec 100644 (file)
@@ -24,6 +24,7 @@ generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
 generic-y += siginfo.h
+generic-y += simd.h
 generic-y += sizes.h
 generic-y += socket.h
 generic-y += sockios.h
index 6447a0b7b12721aa1b66ca6b1d92b776d9a321b0..bccea3925f116333a8cb0998cd22fead538ab23e 100644 (file)
@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-       unsigned long oldval, res;
+       int oldval;
+       unsigned long res;
 
        smp_mb();
 
@@ -238,15 +239,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-       u64 __aligned(8) counter;
+       long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-       u64 result;
+       long long result;
 
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrd    %0, %H0, [%1]"
@@ -257,7 +258,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
        __asm__ __volatile__("@ atomic64_set\n"
 "      strd    %2, %H2, [%1]"
@@ -266,9 +267,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
        );
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-       u64 result;
+       long long result;
 
        __asm__ __volatile__("@ atomic64_read\n"
 "      ldrexd  %0, %H0, [%1]"
@@ -279,9 +280,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-       u64 tmp;
+       long long tmp;
 
        __asm__ __volatile__("@ atomic64_set\n"
 "1:    ldrexd  %0, %H0, [%2]\n"
@@ -294,9 +295,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +312,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
        : "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -334,9 +335,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
        return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        __asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +352,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
        : "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -374,9 +375,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
        return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+                                       long long new)
 {
-       u64 oldval;
+       long long oldval;
        unsigned long res;
 
        smp_mb();
@@ -398,9 +400,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
        return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -419,9 +421,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
        return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-       u64 result;
+       long long result;
        unsigned long tmp;
 
        smp_mb();
@@ -445,9 +447,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
        return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-       u64 val;
+       long long val;
        unsigned long tmp;
        int ret = 1;
 
diff --git a/arch/arm/include/asm/crypto/sha1.h b/arch/arm/include/asm/crypto/sha1.h
new file mode 100644 (file)
index 0000000..75e6a41
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef ASM_ARM_CRYPTO_SHA1_H
+#define ASM_ARM_CRYPTO_SHA1_H
+
+#include <linux/crypto.h>
+#include <crypto/sha.h>
+
+extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
+                          unsigned int len);
+
+#endif
index 21b458e6b0b8690bdd54fca1234b5173f0f4af98..d5acecb49af4320ce49bdca347749a42bc143a31 100644 (file)
 #define TASK_UNMAPPED_BASE     UL(0x00000000)
 #endif
 
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET            UL(CONFIG_DRAM_BASE)
-#endif
-
 #ifndef END_MEM
 #define END_MEM                (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
 #endif
 
 #ifndef PAGE_OFFSET
-#define PAGE_OFFSET            (PHYS_OFFSET)
+#define PAGE_OFFSET            PLAT_PHYS_OFFSET
 #endif
 
 /*
  * The module can be at any place in ram in nommu mode.
  */
 #define MODULES_END            (END_MEM)
-#define MODULES_VADDR          (PHYS_OFFSET)
+#define MODULES_VADDR          PAGE_OFFSET
 
 #define XIP_VIRT_ADDR(physaddr)  (physaddr)
 
 #define page_to_phys(page)     (__pfn_to_phys(page_to_pfn(page)))
 #define phys_to_page(phys)     (pfn_to_page(__phys_to_pfn(phys)))
 
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory.  This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h.  Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET       UL(CONFIG_PHYS_OFFSET)
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*
@@ -184,22 +190,15 @@ static inline unsigned long __phys_to_virt(unsigned long x)
        return t;
 }
 #else
+
+#define PHYS_OFFSET    PLAT_PHYS_OFFSET
+
 #define __virt_to_phys(x)      ((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
-#endif
-#endif
-#endif /* __ASSEMBLY__ */
 
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET    PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET    UL(CONFIG_PHYS_OFFSET)
 #endif
 #endif
 
-#ifndef __ASSEMBLY__
-
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
@@ -208,7 +207,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
  * direct-mapped view.  We assume this is the first page
  * of RAM in the mem_map as well.
  */
-#define PHYS_PFN_OFFSET        (PHYS_OFFSET >> PAGE_SHIFT)
+#define PHYS_PFN_OFFSET        ((unsigned long)(PHYS_OFFSET >> PAGE_SHIFT))
 
 /*
  * These are *only* valid on the kernel direct mapped RAM memory.
@@ -291,7 +290,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET                PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)    pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+                                       && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
 
 #endif
 
index 0d3a28dbc8e5db05823e6b83f85ccc5d04b807ac..ed690c49ef93fae344aeaec993c939e336460283 100644 (file)
@@ -12,6 +12,8 @@ enum {
        ARM_SEC_CORE,
        ARM_SEC_EXIT,
        ARM_SEC_DEVEXIT,
+       ARM_SEC_HOT,
+       ARM_SEC_UNLIKELY,
        ARM_SEC_MAX,
 };
 
diff --git a/arch/arm/include/asm/neon.h b/arch/arm/include/asm/neon.h
new file mode 100644 (file)
index 0000000..8f730fe
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * linux/arch/arm/include/asm/neon.h
+ *
+ * Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <asm/hwcap.h>
+
+#define cpu_has_neon()         (!!(elf_hwcap & HWCAP_NEON))
+
+#ifdef __ARM_NEON__
+
+/*
+ * If you are affected by the BUILD_BUG below, it probably means that you are
+ * using NEON code /and/ calling the kernel_neon_begin() function from the same
+ * compilation unit. To prevent issues that may arise from GCC reordering or
+ * generating(1) NEON instructions outside of these begin/end functions, the
+ * only supported way of using NEON code in the kernel is by isolating it in a
+ * separate compilation unit, and calling it from another unit from inside a
+ * kernel_neon_begin/kernel_neon_end pair.
+ *
+ * (1) Current GCC (4.7) might generate NEON instructions at O3 level if
+ *     -mpfu=neon is set.
+ */
+
+#define kernel_neon_begin() \
+       BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
+
+#else
+void kernel_neon_begin(void);
+#endif
+void kernel_neon_end(void);
index cbdc7a21f869fcce44573119edb99c26d710c1a9..4355f0ec44d62e9b5d7c40132f28f0ff6710f387 100644 (file)
@@ -13,7 +13,7 @@
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT             12
 #define PAGE_SIZE              (_AC(1,UL) << PAGE_SHIFT)
-#define PAGE_MASK              (~(PAGE_SIZE-1))
+#define PAGE_MASK              (~((1 << PAGE_SHIFT) - 1))
 
 #ifndef __ASSEMBLY__
 
index f088c864c9926723f615feb125ddb3b207e50c21..5d9f086e18cd17d3afbcbcbc3e471eaa07b21587 100644 (file)
@@ -72,6 +72,7 @@
 #define PTE_TABLE_BIT          (_AT(pteval_t, 1) << 1)
 #define PTE_BUFFERABLE         (_AT(pteval_t, 1) << 2)         /* AttrIndx[0] */
 #define PTE_CACHEABLE          (_AT(pteval_t, 1) << 3)         /* AttrIndx[1] */
+#define PTE_AP2                        (_AT(pteval_t, 1) << 7)         /* AP[2] */
 #define PTE_EXT_SHARED         (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define PTE_EXT_AF             (_AT(pteval_t, 1) << 10)        /* Access Flag */
 #define PTE_EXT_NG             (_AT(pteval_t, 1) << 11)        /* nG */
index c5d94b31bde43757adf163cb346ab0610989a894..0ea7f99f6d76141a2bc8259ecdd4c54284c91e6e 100644 (file)
@@ -33,7 +33,7 @@
 #define PTRS_PER_PMD           512
 #define PTRS_PER_PGD           4
 
-#define PTE_HWTABLE_PTRS       (PTRS_PER_PTE)
+#define PTE_HWTABLE_PTRS       (0)
 #define PTE_HWTABLE_OFF                (0)
 #define PTE_HWTABLE_SIZE       (PTRS_PER_PTE * sizeof(u64))
 
 #define PMD_SHIFT              21
 
 #define PMD_SIZE               (1UL << PMD_SHIFT)
-#define PMD_MASK               (~(PMD_SIZE-1))
+#define PMD_MASK               (~((1 << PMD_SHIFT) - 1))
 #define PGDIR_SIZE             (1UL << PGDIR_SHIFT)
-#define PGDIR_MASK             (~(PGDIR_SIZE-1))
+#define PGDIR_MASK             (~((1 << PGDIR_SHIFT) - 1))
 
 /*
  * section address mask and size definitions.
  */
 #define SECTION_SHIFT          21
 #define SECTION_SIZE           (1UL << SECTION_SHIFT)
-#define SECTION_MASK           (~(SECTION_SIZE-1))
+#define SECTION_MASK           (~((1 << SECTION_SHIFT) - 1))
 
 #define USER_PTRS_PER_PGD      (PAGE_OFFSET / PGDIR_SIZE)
 
 #define L_PTE_PRESENT          (_AT(pteval_t, 3) << 0)         /* Present */
 #define L_PTE_FILE             (_AT(pteval_t, 1) << 2)         /* only when !PRESENT */
 #define L_PTE_USER             (_AT(pteval_t, 1) << 6)         /* AP[1] */
-#define L_PTE_RDONLY           (_AT(pteval_t, 1) << 7)         /* AP[2] */
 #define L_PTE_SHARED           (_AT(pteval_t, 3) << 8)         /* SH[1:0], inner shareable */
 #define L_PTE_YOUNG            (_AT(pteval_t, 1) << 10)        /* AF */
 #define L_PTE_XN               (_AT(pteval_t, 1) << 54)        /* XN */
-#define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)        /* unused */
-#define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)        /* unused */
+#define L_PTE_DIRTY            (_AT(pteval_t, 1) << 55)
+#define L_PTE_SPECIAL          (_AT(pteval_t, 1) << 56)
 #define L_PTE_NONE             (_AT(pteval_t, 1) << 57)        /* PROT_NONE */
+#define L_PTE_RDONLY           (_AT(pteval_t, 1) << 58)        /* READ ONLY */
 
 #define PMD_SECT_VALID         (_AT(pmdval_t, 1) << 0)
 #define PMD_SECT_DIRTY         (_AT(pmdval_t, 1) << 55)
index b1b7a49074891a6caf4932a85b0f18e248149472..c312a72114f4c27e49c3971678ac194b61652fa1 100644 (file)
@@ -214,12 +214,16 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
 
 #define pte_clear(mm,addr,ptep)        set_pte_ext(ptep, __pte(0), 0)
 
+#define pte_isset(pte, val)    ((u32)(val) == (val) ? pte_val(pte) & (val) \
+                                               : !!(pte_val(pte) & (val)))
+#define pte_isclear(pte, val)  (!(pte_val(pte) & (val)))
+
 #define pte_none(pte)          (!pte_val(pte))
-#define pte_present(pte)       (pte_val(pte) & L_PTE_PRESENT)
-#define pte_write(pte)         (!(pte_val(pte) & L_PTE_RDONLY))
-#define pte_dirty(pte)         (pte_val(pte) & L_PTE_DIRTY)
-#define pte_young(pte)         (pte_val(pte) & L_PTE_YOUNG)
-#define pte_exec(pte)          (!(pte_val(pte) & L_PTE_XN))
+#define pte_present(pte)       (pte_isset((pte), L_PTE_PRESENT))
+#define pte_write(pte)         (pte_isclear((pte), L_PTE_RDONLY))
+#define pte_dirty(pte)         (pte_isset((pte), L_PTE_DIRTY))
+#define pte_young(pte)         (pte_isset((pte), L_PTE_YOUNG))
+#define pte_exec(pte)          (pte_isclear((pte), L_PTE_XN))
 #define pte_special(pte)       (0)
 
 #define pte_present_user(pte)  (pte_present(pte) && (pte_val(pte) & L_PTE_USER))
index 11284e744c801a0054502108320a5bd6bb10e2ff..2d1bf8c6353dfe0db8ba9ab6a7e90f6a01ec2f10 100644 (file)
@@ -110,7 +110,7 @@ ENTRY(stext)
        sub     r4, r3, r4                      @ (PHYS_OFFSET - PAGE_OFFSET)
        add     r8, r8, r4                      @ PHYS_OFFSET
 #else
-       ldr     r8, =PHYS_OFFSET                @ always constant in this case
+       ldr     r8, =PLAT_PHYS_OFFSET           @ always constant in this case
 #endif
 
        /*
index 7e137873083d6ef83f032fd9febc6a5ea0e17805..1705ee80d0977efa49ccfc62106ef059ce32e127 100644 (file)
@@ -307,6 +307,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                        maps[ARM_SEC_EXIT].unw_sec = s;
                else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].unw_sec = s;
+               else if (strcmp(".ARM.exidx.text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].unw_sec = s;
                else if (strcmp(".init.text", secname) == 0)
                        maps[ARM_SEC_INIT].txt_sec = s;
                else if (strcmp(".devinit.text", secname) == 0)
@@ -317,6 +321,10 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
                        maps[ARM_SEC_EXIT].txt_sec = s;
                else if (strcmp(".devexit.text", secname) == 0)
                        maps[ARM_SEC_DEVEXIT].txt_sec = s;
+               else if (strcmp(".text.unlikely", secname) == 0)
+                       maps[ARM_SEC_UNLIKELY].txt_sec = s;
+               else if (strcmp(".text.hot", secname) == 0)
+                       maps[ARM_SEC_HOT].txt_sec = s;
        }
 
        for (i = 0; i < ARM_SEC_MAX; i++)
index 0b9e437719bd3040cfc4bf758433653a644c6499..2acaded8025d1487b94dd22eaf30e1158d0360cf 100644 (file)
@@ -301,8 +301,8 @@ int __init mx6q_clocks_init(void)
                post_div_table[1].div = 1;
                post_div_table[2].div = 1;
                video_div_table[1].div = 1;
-               video_div_table[2].div = 1;
-       };
+               video_div_table[3].div = 1;
+       }
 
        /*                   type                               name         parent_name  base     div_mask */
        clk[pll1_sys]      = imx_clk_pllv3(IMX_PLLV3_SYS,       "pll1_sys",     "osc", base,        0x7f);
index f8b23b8040d9aa61a1593d30a55c5ed951cc5f6a..3f3259e74fd35902384c7f0144d76db7373fc51d 100644 (file)
@@ -503,11 +503,11 @@ static void __init realtime_counter_init(void)
        rate = clk_get_rate(sys_clk);
        /* Numerator/denumerator values refer TRM Realtime Counter section */
        switch (rate) {
-       case 1200000:
+       case 12000000:
                num = 64;
                den = 125;
                break;
-       case 1300000:
+       case 13000000:
                num = 768;
                den = 1625;
                break;
@@ -515,11 +515,11 @@ static void __init realtime_counter_init(void)
                num = 8;
                den = 25;
                break;
-       case 2600000:
+       case 26000000:
                num = 384;
                den = 1625;
                break;
-       case 2700000:
+       case 27000000:
                num = 256;
                den = 1125;
                break;
index 9696f36468643956c37466a0e77d5de5292b2e5f..ea788c8e32e808cd2a700bb1c48cad0fe3dea922 100644 (file)
@@ -814,6 +814,7 @@ static struct platform_device ipmmu_device = {
 
 static struct renesas_intc_irqpin_config irqpin0_platform_data = {
        .irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+       .control_parent = true,
 };
 
 static struct resource irqpin0_resources[] = {
@@ -875,6 +876,7 @@ static struct platform_device irqpin1_device = {
 
 static struct renesas_intc_irqpin_config irqpin2_platform_data = {
        .irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+       .control_parent = true,
 };
 
 static struct resource irqpin2_resources[] = {
@@ -905,6 +907,7 @@ static struct platform_device irqpin2_device = {
 
 static struct renesas_intc_irqpin_config irqpin3_platform_data = {
        .irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+       .control_parent = true,
 };
 
 static struct resource irqpin3_resources[] = {
index bb7529dea84193e87d38b2b78ba3ff3a932bef17..1e1842cff808a0e720aa083f05636ba2056ee6f7 100644 (file)
@@ -417,12 +417,21 @@ void __init dma_contiguous_remap(void)
                map.type = MT_MEMORY_DMA_READY;
 
                /*
-                * Clear previous low-memory mapping
+                * Clear previous low-memory mapping to ensure that the
+                * TLB does not see any conflicting entries, then flush
+                * the TLB of the old entries before creating new mappings.
+                *
+                * This ensures that any speculatively loaded TLB entries
+                * (even though they may be rare) can not cause any problems,
+                * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));
 
+               flush_tlb_kernel_range(__phys_to_virt(start),
+                                      __phys_to_virt(end));
+
                iotable_init(&map, 1);
        }
 }
index 95f7a1c903262e6cacea7951e7371a57c13cd301..a4e5cd5c30676446dedd0f8f338206c225086ae9 100644 (file)
@@ -707,8 +707,9 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-       unsigned long end, unsigned long phys, const struct mem_type *type,
-       bool force_pages)
+                                 unsigned long end, phys_addr_t phys,
+                                 const struct mem_type *type,
+                                 bool force_pages)
 {
        pud_t *pud = pud_offset(pgd, addr);
        unsigned long next;
index 6f3b0476b72919f4ac2786f58d7623c51162e60e..110e738bc970f7288d9df72b3d62995fd67b0895 100644 (file)
@@ -78,8 +78,13 @@ ENTRY(cpu_v7_set_pte_ext)
        tst     rh, #1 << (57 - 32)             @ L_PTE_NONE
        bicne   rl, #L_PTE_VALID
        bne     1f
-       tst     rh, #1 << (55 - 32)             @ L_PTE_DIRTY
-       orreq   rl, #L_PTE_RDONLY
+
+       eor     ip, rh, #1 << (55 - 32) @ toggle L_PTE_DIRTY in temp reg to
+                                       @ test for !L_PTE_DIRTY || L_PTE_RDONLY
+       tst     ip, #1 << (55 - 32) | 1 << (58 - 32)
+       orrne   rl, #PTE_AP2
+       biceq   rl, #PTE_AP2
+
 1:     strd    r2, r3, [r0]
        ALT_SMP(W(nop))
        ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
index 5dfbb0b8e7f4484ddeb97974080a51bfaec0dc3e..452fb3ad68aafea3c24236cb4b70d850528df1b8 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
+#include <linux/export.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -648,6 +649,52 @@ static int vfp_hotplug(struct notifier_block *b, unsigned long action,
        return NOTIFY_OK;
 }
 
+#ifdef CONFIG_KERNEL_MODE_NEON
+
+/*
+ * Kernel-side NEON support functions
+ */
+void kernel_neon_begin(void)
+{
+       struct thread_info *thread = current_thread_info();
+       unsigned int cpu;
+       u32 fpexc;
+
+       /*
+        * Kernel mode NEON is only allowed outside of interrupt context
+        * with preemption disabled. This will make sure that the kernel
+        * mode NEON register contents never need to be preserved.
+        */
+       BUG_ON(in_interrupt());
+       cpu = get_cpu();
+
+       fpexc = fmrx(FPEXC) | FPEXC_EN;
+       fmxr(FPEXC, fpexc);
+
+       /*
+        * Save the userland NEON/VFP state. Under UP,
+        * the owner could be a task other than 'current'
+        */
+       if (vfp_state_in_hw(cpu, thread))
+               vfp_save_state(&thread->vfpstate, fpexc);
+#ifndef CONFIG_SMP
+       else if (vfp_current_hw_state[cpu] != NULL)
+               vfp_save_state(vfp_current_hw_state[cpu], fpexc);
+#endif
+       vfp_current_hw_state[cpu] = NULL;
+}
+EXPORT_SYMBOL(kernel_neon_begin);
+
+void kernel_neon_end(void)
+{
+       /* Disable the NEON/VFP unit. */
+       fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
+       put_cpu();
+}
+EXPORT_SYMBOL(kernel_neon_end);
+
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
 /*
  * VFP support code initialisation.
  */
@@ -731,4 +778,4 @@ static int __init vfp_init(void)
        return 0;
 }
 
-late_initcall(vfp_init);
+core_initcall(vfp_init);
index f9e8b9491efc2736afc66a14f65777ed43d976c3..b51da9132744061948e329f0b0bff35be699c883 100644 (file)
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha1-powerpc");
+MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-powerpc");
index fe0c17dcfbd7d0e706a3d5d85630c6255bed11a1..bbfd8b75c77f88303b41af1d02dabd3551a59fa3 100644 (file)
@@ -757,77 +757,7 @@ struct device_node *of_find_next_cache_node(struct device_node *np)
        return NULL;
 }
 
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Fix up the uninitialized fields in a new device node:
- * name, type and pci-specific fields
- */
-
-static int of_finish_dynamic_node(struct device_node *node)
-{
-       struct device_node *parent = of_get_parent(node);
-       int err = 0;
-       const phandle *ibm_phandle;
-
-       node->name = of_get_property(node, "name", NULL);
-       node->type = of_get_property(node, "device_type", NULL);
-
-       if (!node->name)
-               node->name = "<NULL>";
-       if (!node->type)
-               node->type = "<NULL>";
-
-       if (!parent) {
-               err = -ENODEV;
-               goto out;
-       }
-
-       /* We don't support that function on PowerMac, at least
-        * not yet
-        */
-       if (machine_is(powermac))
-               return -ENODEV;
-
-       /* fix up new node's phandle field */
-       if ((ibm_phandle = of_get_property(node, "ibm,phandle", NULL)))
-               node->phandle = *ibm_phandle;
-
-out:
-       of_node_put(parent);
-       return err;
-}
-
-static int prom_reconfig_notifier(struct notifier_block *nb,
-                                 unsigned long action, void *node)
-{
-       int err;
-
-       switch (action) {
-       case OF_RECONFIG_ATTACH_NODE:
-               err = of_finish_dynamic_node(node);
-               if (err < 0)
-                       printk(KERN_ERR "finish_node returned %d\n", err);
-               break;
-       default:
-               err = 0;
-               break;
-       }
-       return notifier_from_errno(err);
-}
-
-static struct notifier_block prom_reconfig_nb = {
-       .notifier_call = prom_reconfig_notifier,
-       .priority = 10, /* This one needs to run first */
-};
-
-static int __init prom_reconfig_setup(void)
-{
-       return of_reconfig_notifier_register(&prom_reconfig_nb);
-}
-__initcall(prom_reconfig_setup);
-#endif
-
-bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
 {
        return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
index 08c6f3185d45b2914c0add10d99c7eaa56a993c4..51663518ea400818f29d9464e4414d2a5b3cde79 100644 (file)
@@ -1633,12 +1633,11 @@ static void stage_topology_update(int core_id)
 static int dt_update_callback(struct notifier_block *nb,
                                unsigned long action, void *data)
 {
-       struct of_prop_reconfig *update;
+       struct of_reconfig_data *update = data;
        int rc = NOTIFY_DONE;
 
        switch (action) {
        case OF_RECONFIG_UPDATE_PROPERTY:
-               update = (struct of_prop_reconfig *)data;
                if (!of_prop_cmp(update->dn->type, "cpu") &&
                    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
                        u32 core_id;
index a1a7b9a67ffde37f588f3fc6c9db88faab3d02ba..3d0b6222536d8aa106fc4af56f61c8ad89c50d82 100644 (file)
@@ -11,7 +11,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kref.h>
 #include <linux/notifier.h>
 #include <linux/spinlock.h>
 #include <linux/cpu.h>
@@ -83,6 +82,8 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
                return NULL;
        }
 
+       of_node_set_flag(dn, OF_DYNAMIC);
+
        return dn;
 }
 
index 2882d614221f7e679777e79a7b08b91825f2d2ca..adb36f2b2b316d1eeb1d588379dbbad6587170f6 100644 (file)
@@ -336,16 +336,17 @@ static void pseries_remove_processor(struct device_node *np)
 }
 
 static int pseries_smp_notifier(struct notifier_block *nb,
-                               unsigned long action, void *node)
+                               unsigned long action, void *data)
 {
+       struct of_reconfig_data *rd = data;
        int err = 0;
 
        switch (action) {
        case OF_RECONFIG_ATTACH_NODE:
-               err = pseries_add_processor(node);
+               err = pseries_add_processor(rd->dn);
                break;
        case OF_RECONFIG_DETACH_NODE:
-               pseries_remove_processor(node);
+               pseries_remove_processor(rd->dn);
                break;
        }
        return notifier_from_errno(err);
index bebe64ed5dc32ee84e2d4f7d280a02f3df54ce54..87aa8124087a521623319ca0f13790abf75f0b29 100644 (file)
@@ -198,7 +198,7 @@ static int pseries_add_memory(struct device_node *np)
        return (ret < 0) ? -EINVAL : 0;
 }
 
-static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
+static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
 {
        struct of_drconf_cell *new_drmem, *old_drmem;
        unsigned long memblock_size;
@@ -210,7 +210,7 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
        if (!memblock_size)
                return -EINVAL;
 
-       p = (u32 *)of_get_property(pr->dn, "ibm,dynamic-memory", NULL);
+       p = (u32 *) pr->old_prop->value;
        if (!p)
                return -EINVAL;
 
@@ -245,9 +245,9 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr)
 }
 
 static int pseries_memory_notifier(struct notifier_block *nb,
-                                  unsigned long action, void *node)
+                                  unsigned long action, void *data)
 {
-       struct of_prop_reconfig *pr;
+       struct of_reconfig_data *rd = data;
        int err = 0;
 
        switch (action) {
@@ -258,9 +258,8 @@ static int pseries_memory_notifier(struct notifier_block *nb,
                err = pseries_remove_memory(node);
                break;
        case OF_RECONFIG_UPDATE_PROPERTY:
-               pr = (struct of_prop_reconfig *)node;
-               if (!strcmp(pr->prop->name, "ibm,dynamic-memory"))
-                       err = pseries_update_drconf_memory(pr);
+               if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
+                       err = pseries_update_drconf_memory(rd);
                break;
        }
        return notifier_from_errno(err);
index 86ae364900d60cbde3000d44481e248e9895609a..ed02eb47ac70c8a3b7ae43119929864a48d54271 100644 (file)
@@ -1325,10 +1325,11 @@ static struct notifier_block iommu_mem_nb = {
        .notifier_call = iommu_mem_notifier,
 };
 
-static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
+static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
        int err = NOTIFY_OK;
-       struct device_node *np = node;
+       struct of_reconfig_data *rd = data;
+       struct device_node *np = rd->dn;
        struct pci_dn *pci = PCI_DN(np);
        struct direct_window *window;
 
index f93cdf55628c13ab8832df5c4693303ba2d20a8f..0435bb65d0aaf616d9cf4257b15e242d18d5d58c 100644 (file)
@@ -12,7 +12,6 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/kref.h>
 #include <linux/notifier.h>
 #include <linux/proc_fs.h>
 #include <linux/slab.h>
@@ -70,7 +69,6 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
        np->properties = proplist;
        of_node_set_flag(np, OF_DYNAMIC);
-       kref_init(&np->kref);
 
        np->parent = derive_parent(path);
        if (IS_ERR(np->parent)) {
index 995cc0457c7615855735502a9913ff734e187622..de3a37d991427c7105975279d613267c0994fba8 100644 (file)
@@ -253,9 +253,10 @@ static void __init pseries_discover_pic(void)
               " interrupt-controller\n");
 }
 
-static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
+static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
 {
-       struct device_node *np = node;
+       struct of_reconfig_data *rd = data;
+       struct device_node *np = rd->dn;
        struct pci_dn *pci = NULL;
        int err = NOTIFY_OK;
 
index 0968b66b4cf96bc25b73c92a2c7f1fb4bdfa3605..8ba60424be953cb724c92b0b984daf79bfe9e559 100644 (file)
@@ -202,7 +202,7 @@ void __init test_of_node(void)
 
        /* There should really be a struct device_node allocator */
        memset(&of_node, 0, sizeof(of_node));
-       kref_init(&of_node.kref);
+       kref_init(&of_node.kobj.kref);
        of_node.full_name = node_name;
 
        check(0 == msi_bitmap_alloc(&bmp, size, &of_node));
index 94599a65cc669c44712de23a00561ff23b275b55..89e57280d2e259f5a7631bae05e1c9e131ea2fb8 100644 (file)
@@ -288,6 +288,7 @@ static inline void disable_surveillance(void)
        args.token = rtas_token("set-indicator");
        if (args.token == RTAS_UNKNOWN_SERVICE)
                return;
+       args.token = cpu_to_be32(args.token);
        args.nargs = cpu_to_be32(3);
        args.nret = cpu_to_be32(1);
        args.rets = &args.args[3];
index fd104db9cea1b6f8610f0e5d064d23ac29fb5de5..92eb4d6ad39dcf1480621f50b22ea95c933a6d5d 100644 (file)
@@ -970,7 +970,7 @@ static void __exit aes_s390_fini(void)
 module_init(aes_s390_init);
 module_exit(aes_s390_fini);
 
-MODULE_ALIAS("aes-all");
+MODULE_ALIAS_CRYPTO("aes-all");
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("GPL");
index f2d6cccddcf890fa843021619dc8db87062e9a09..a89feffb22b5283845ed8beedf0531e86be95796 100644 (file)
@@ -619,8 +619,8 @@ static void __exit des_s390_exit(void)
 module_init(des_s390_init);
 module_exit(des_s390_exit);
 
-MODULE_ALIAS("des");
-MODULE_ALIAS("des3_ede");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des3_ede");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
index d43485d142e911623d30d5abb5bd80d957f752d4..7940dc90e80bc6729371ab565bad743b1087ef72 100644 (file)
@@ -160,7 +160,7 @@ static void __exit ghash_mod_exit(void)
 module_init(ghash_mod_init);
 module_exit(ghash_mod_exit);
 
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm, s390 implementation");
index a1b3a9dc9d8a0f420f6389bd56331890e259ab6b..5b2bee323694b2144c382dd9af85126b81b311a8 100644 (file)
@@ -103,6 +103,6 @@ static void __exit sha1_s390_fini(void)
 module_init(sha1_s390_init);
 module_exit(sha1_s390_fini);
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
index 9b853809a492b68e6781ba6a8c32e5a60fe78932..b74ff158108c9421a25b26a02f3a1f1669d1f69d 100644 (file)
@@ -143,7 +143,7 @@ static void __exit sha256_s390_fini(void)
 module_init(sha256_s390_init);
 module_exit(sha256_s390_fini);
 
-MODULE_ALIAS("sha256");
-MODULE_ALIAS("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 and SHA224 Secure Hash Algorithm");
index 32a81383b69c1b5036c12494802654ad453cbb19..0c36989ba182b1e411b56c3018ab610622cfe6ff 100644 (file)
@@ -86,7 +86,7 @@ static struct shash_alg sha512_alg = {
        }
 };
 
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha512");
 
 static int sha384_init(struct shash_desc *desc)
 {
@@ -126,7 +126,7 @@ static struct shash_alg sha384_alg = {
        }
 };
 
-MODULE_ALIAS("sha384");
+MODULE_ALIAS_CRYPTO("sha384");
 
 static int __init init(void)
 {
index 503e6d96ad4e49963727ef637f2d3fb985e25fb4..ded4cee35318fa63e7b45d345c0b8c040a88e341 100644 (file)
@@ -499,6 +499,6 @@ module_exit(aes_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("AES Secure Hash Algorithm, sparc64 aes opcode accelerated");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
 
 #include "crop_devid.c"
index 888f6260b4ec5169d36c23860a6f0bc70c5f5404..641f55cb61c3a89a9a8eabe12577170a6e62e572 100644 (file)
@@ -322,6 +322,6 @@ module_exit(camellia_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, sparc64 camellia opcode accelerated");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
 
 #include "crop_devid.c"
index 5162fad912ce09faf6fcf5979e2ef60cadc03ba4..d1064e46efe8bea0b83d573e4b6eb0e35aef3596 100644 (file)
@@ -176,6 +176,6 @@ module_exit(crc32c_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CRC32c (Castagnoli), sparc64 crc32c opcode accelerated");
 
-MODULE_ALIAS("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c");
 
 #include "crop_devid.c"
index 3065bc61f9d3bcf2ec96e78a2b3a15d96f981e14..d1150097299479224e99f143e6657422cd4db157 100644 (file)
@@ -532,6 +532,6 @@ module_exit(des_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms, sparc64 des opcode accelerated");
 
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
 
 #include "crop_devid.c"
index 09a9ea1dfb697381a410cdaf043262d2e4e24898..64c7ff5f72a9f68fd19832eb6d4238e05b80c114 100644 (file)
@@ -185,6 +185,6 @@ module_exit(md5_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD5 Secure Hash Algorithm, sparc64 md5 opcode accelerated");
 
-MODULE_ALIAS("md5");
+MODULE_ALIAS_CRYPTO("md5");
 
 #include "crop_devid.c"
index 6cd5f29e1e0d592050602afff567598d1062b713..1b3e47accc7466a90fb5729321df4e48273d7f31 100644 (file)
@@ -180,6 +180,6 @@ module_exit(sha1_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, sparc64 sha1 opcode accelerated");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
 
 #include "crop_devid.c"
index 04f555ab268002d16ca0d1d1bf69e6e8829d4d04..41f27cca2a225ffd2d9d1bcd301e72287c3d42ae 100644 (file)
@@ -237,7 +237,7 @@ module_exit(sha256_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm, sparc64 sha256 opcode accelerated");
 
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha256");
 
 #include "crop_devid.c"
index f04d1994d19aa3acfc9286a82265a4a031b8ea5f..9fff88541b8c0fabedba624d9c135926eff64fb8 100644 (file)
@@ -222,7 +222,7 @@ module_exit(sha512_sparc64_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-384 and SHA-512 Secure Hash Algorithm, sparc64 sha512 opcode accelerated");
 
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha512");
 
 #include "crop_devid.c"
index bceee6623b00134023918cd5b62ec6144e3c66eb..d1ff46c0559bb50c6b42e46a709d6291fabb73f4 100644 (file)
@@ -8,6 +8,7 @@ config UML
        default y
        select HAVE_GENERIC_HARDIRQS
        select HAVE_UID16
+       select HAVE_FUTEX_CMPXCHG if FUTEX
        select GENERIC_IRQ_SHOW
        select GENERIC_CPU_DEVICES
        select GENERIC_IO
index aafe8ce0d65dd4f68b04dab1fecbf33038b4031e..e26984f7ab8d2fa838168be1893c5eacdc375d33 100644 (file)
@@ -66,5 +66,5 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, asm optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
-MODULE_ALIAS("aes-asm");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-asm");
index f80e668785c0bf5f36383e5ab49495c24f93f843..f89e7490d30390d32804a970a4c63c23c5c84984 100644 (file)
@@ -1373,4 +1373,4 @@ module_exit(aesni_exit);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm, Intel AES-NI instructions optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
index 4417e9aea78d16151825f181d804dd6debcc5f85..183395bfc724f2f12f58a762cd579370c950cc53 100644 (file)
@@ -581,5 +581,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm, AVX2 optimized");
-MODULE_ALIAS("blowfish");
-MODULE_ALIAS("blowfish-asm");
+MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-asm");
index 3548d76dbaa922727b291a6c092135c944ace419..9f7cc6bde5c8a226b39a4efa3def61e9ce264a1c 100644 (file)
@@ -465,5 +465,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("blowfish");
-MODULE_ALIAS("blowfish-asm");
+MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-asm");
index 414fe5d7946be077c25ba19160496c4bf910b1ab..da710fcf8631fdbf817ff86b9b3a076feafaf15c 100644 (file)
@@ -582,5 +582,5 @@ module_exit(camellia_aesni_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX2 optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
index 37fd0c0a81ea8861f30a649b01cee8a6c11db4e5..883e1af10dc5e3b0ce62f1deead1c3fd7fe25286 100644 (file)
@@ -574,5 +574,5 @@ module_exit(camellia_aesni_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, AES-NI/AVX optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
index 5cb86ccd4acb9e6e105098525cb8fd7592a0cbd6..16d65b0d28d13229e979226fccd764502e01149f 100644 (file)
@@ -1725,5 +1725,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Camellia Cipher Algorithm, asm optimized");
-MODULE_ALIAS("camellia");
-MODULE_ALIAS("camellia-asm");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-asm");
index c6631813dc115c609e186044790aa5461cb6f0c7..d416069e31846822a603c7192dc1a6a2bd3fad0e 100644 (file)
@@ -494,4 +494,4 @@ module_exit(cast5_exit);
 
 MODULE_DESCRIPTION("Cast5 Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
index 8d0dfb86a5593554e0d536a48c28572ecc92c91d..c19756265d4eb35a76619db2ccb0850851c12218 100644 (file)
@@ -611,4 +611,4 @@ module_exit(cast6_exit);
 
 MODULE_DESCRIPTION("Cast6 Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
index 9d014a74ef969ab91b2d2c61854052f0834c1c3b..1937fc1d876338aa0aa9bb5fddea9e0aa3541707 100644 (file)
@@ -197,5 +197,5 @@ module_exit(crc32_pclmul_mod_fini);
 MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("crc32");
-MODULE_ALIAS("crc32-pclmul");
+MODULE_ALIAS_CRYPTO("crc32");
+MODULE_ALIAS_CRYPTO("crc32-pclmul");
index 6812ad98355c3d0e93be5e07c47e5e3956ebc8a4..28640c3d6af7f6172a8fe39d4553c98019614e24 100644 (file)
@@ -280,5 +280,5 @@ MODULE_AUTHOR("Austin Zhang <austin.zhang@intel.com>, Kent Liu <kent.liu@intel.c
 MODULE_DESCRIPTION("CRC32c (Castagnoli) optimization using Intel Hardware.");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("crc32c");
-MODULE_ALIAS("crc32c-intel");
+MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-intel");
index 98d7a188f46b0744ea53942eeab09d609f7d4847..f368ba261739fa09be28bc02fe34cf3112099fa8 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/crypto.h>
 #include <asm/i387.h>
 
 struct crypto_fpu_ctx {
@@ -159,3 +160,5 @@ void __exit crypto_fpu_exit(void)
 {
        crypto_unregister_template(&crypto_fpu_tmpl);
 }
+
+MODULE_ALIAS_CRYPTO("fpu");
index d785cf2c529c720ae2bdfce7a592aae643aa71cf..a8d6f69f92a3a92dadcb79296afa78bb98da9962 100644 (file)
@@ -341,4 +341,4 @@ module_exit(ghash_pclmulqdqni_mod_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm, "
                   "acclerated by PCLMULQDQ-NI");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
index 5e8e67739bb50a97c690139ff18517a2918d0386..399a29d067d6367603714633fb8c4de6ab77275a 100644 (file)
@@ -119,5 +119,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm (optimized assembly version)");
-MODULE_ALIAS("salsa20");
-MODULE_ALIAS("salsa20-asm");
+MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-asm");
index 23aabc6c20a5376fa81cf49ff9893ec76b6cdf05..cb57caf13ef763a8465d5282dea1e07abbef7d54 100644 (file)
@@ -558,5 +558,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX2 optimized");
-MODULE_ALIAS("serpent");
-MODULE_ALIAS("serpent-asm");
+MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-asm");
index 9ae83cf8d21e987e2e3bf9656a51ecaf61644427..0a86e8b65e604790b63587213c61acadc29f1cad 100644 (file)
@@ -617,4 +617,4 @@ module_exit(serpent_exit);
 
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
index 97a356ece24d2b74d18090760e988c45d2bc914a..279f3899c7799c4eb3e67b417a20ae55a7abb534 100644 (file)
@@ -618,4 +618,4 @@ module_exit(serpent_sse2_exit);
 
 MODULE_DESCRIPTION("Serpent Cipher Algorithm, SSE2 optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("serpent");
index 4a11a9d72451625c747e3111a6e09593f15c40b1..29e1060e900108e1bf9d43f92fface090229144c 100644 (file)
@@ -237,4 +237,4 @@ module_exit(sha1_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
index 597d4da696561aa2458aa2932f6957b095d9d06b..ceafb01885ed27acdc8d231e90b688643fce6ca0 100644 (file)
@@ -272,4 +272,4 @@ module_exit(sha256_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha256");
index 9f5e71f066714e888fdb1b8f70bf0f83db6a4a63..d1ee9f638d1ca1fcbc0425668e120762c71bd188 100644 (file)
@@ -279,4 +279,4 @@ module_exit(sha512_ssse3_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, Supplemental SSE3 accelerated");
 
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha512");
index ce33b5be64ee5c9f1624d6ca0b65baaa7bed8e8d..bb1f0a194d974d93e7ffb5fd0408a54f64049c9e 100644 (file)
@@ -580,5 +580,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX2 optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
index 2047a562f6b3f0f8729d95442e1769a35fa2bad6..4a1f94422fbbb9366419f13180bfd88ade28132f 100644 (file)
@@ -589,4 +589,4 @@ module_exit(twofish_exit);
 
 MODULE_DESCRIPTION("Twofish Cipher Algorithm, AVX optimized");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
index 0a5202303501e29215e5f5bb9e143417ddd35452..77e06c2da83d0ec5a9e468759727c0c25baca5b5 100644 (file)
@@ -96,5 +96,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm, asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
index 13e63b3e1dfb44593ea2274a63adebfbdce7e6ce..56d8a08ee47908d06c80975428655629b5749c24 100644 (file)
@@ -495,5 +495,5 @@ module_exit(fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Twofish Cipher Algorithm, 3-way parallel asm optimized");
-MODULE_ALIAS("twofish");
-MODULE_ALIAS("twofish-asm");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-asm");
index 8bf1c06070d5655b3fcad7727ad53bd5e8ceaf8a..23fb67e6f84524bac316a04da2a4b4f6fc4af0b1 100644 (file)
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
                gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
 }
 
-#define _LDT_empty(info)                               \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info)                                        \
        ((info)->base_addr              == 0    &&      \
         (info)->limit                  == 0    &&      \
         (info)->contents               == 0    &&      \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
         (info)->seg_not_present        == 1    &&      \
         (info)->useable                == 0)
 
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+       return (info->base_addr         == 0 &&
+               info->limit             == 0 &&
+               info->contents          == 0 &&
+               info->read_exec_only    == 0 &&
+               info->seg_32bit         == 0 &&
+               info->limit_in_pages    == 0 &&
+               info->seg_not_present   == 0 &&
+               info->useable           == 0);
+}
 
 static inline void clear_LDT(void)
 {
index 8f4be53ea04b84f4eaa7a78a4bcd6e6e625c9afc..1853659820e0014175d7ec2bd80deaba24be6216 100644 (file)
@@ -60,6 +60,7 @@ static struct clocksource hyperv_cs = {
        .rating         = 400, /* use this when running on Hyperv*/
        .read           = read_hv_clock,
        .mask           = CLOCKSOURCE_MASK(64),
+       .flags          = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
 static void __init ms_hyperv_init_platform(void)
index 211bce445522d541cc1bcc05e44e401b376f8093..0c6c07cea3f7122f837a9c878768855b84feeab9 100644 (file)
@@ -1017,6 +1017,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        regs->flags &= ~X86_EFLAGS_IF;
        trace_hardirqs_off();
        regs->ip = (unsigned long)(jp->entry);
+
+       /*
+        * jprobes use jprobe_return() which skips the normal return
+        * path of the function, and this messes up the accounting of the
+        * function graph tracer to get messed up.
+        *
+        * Pause function graph tracing while performing the jprobe function.
+        */
+       pause_graph_tracing();
        return 1;
 }
 
@@ -1042,24 +1051,25 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
        struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
        u8 *addr = (u8 *) (regs->ip - 1);
        struct jprobe *jp = container_of(p, struct jprobe, kp);
+       void *saved_sp = kcb->jprobe_saved_sp;
 
        if ((addr > (u8 *) jprobe_return) &&
            (addr < (u8 *) jprobe_return_end)) {
-               if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+               if (stack_addr(regs) != saved_sp) {
                        struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
                        printk(KERN_ERR
                               "current sp %p does not match saved sp %p\n",
-                              stack_addr(regs), kcb->jprobe_saved_sp);
+                              stack_addr(regs), saved_sp);
                        printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
                        show_regs(saved_regs);
                        printk(KERN_ERR "Current registers\n");
                        show_regs(regs);
                        BUG();
                }
+               /* It's OK to start function graph tracing again */
+               unpause_graph_tracing();
                *regs = kcb->jprobe_saved_regs;
-               memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
-                      kcb->jprobes_stack,
-                      MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+               memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
                preempt_enable_no_resched();
                return 1;
        }
index 4e942f31b1a7c9401a65fb37af093caab5ad0c2e..7fc5e843f247b358288b23e459eebfefcf6631f0 100644 (file)
@@ -29,7 +29,28 @@ static int get_free_idx(void)
 
 static bool tls_desc_okay(const struct user_desc *info)
 {
-       if (LDT_empty(info))
+       /*
+        * For historical reasons (i.e. no one ever documented how any
+        * of the segmentation APIs work), user programs can and do
+        * assume that a struct user_desc that's all zeros except for
+        * entry_number means "no segment at all".  This never actually
+        * worked.  In fact, up to Linux 3.19, a struct user_desc like
+        * this would create a 16-bit read-write segment with base and
+        * limit both equal to zero.
+        *
+        * That was close enough to "no segment at all" until we
+        * hardened this function to disallow 16-bit TLS segments.  Fix
+        * it up by interpreting these zeroed segments the way that they
+        * were almost certainly intended to be interpreted.
+        *
+        * The correct way to ask for "no segment at all" is to specify
+        * a user_desc that satisfies LDT_empty.  To keep everything
+        * working, we accept both.
+        *
+        * Note that there's a similar kludge in modify_ldt -- look at
+        * the distinction between modes 1 and 0x11.
+        */
+       if (LDT_empty(info) || LDT_zero(info))
                return true;
 
        /*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
        cpu = get_cpu();
 
        while (n-- > 0) {
-               if (LDT_empty(info))
+               if (LDT_empty(info) || LDT_zero(info))
                        desc->a = desc->b = 0;
                else
                        fill_ldt(desc, info);
index 332cafe909ebe1edea606514c5fb04b94136c0c4..0010ed7c3ec2a8e341ff452e9f0c1e97895ea578 100644 (file)
@@ -362,7 +362,7 @@ exit:
  * for scheduling or signal handling. The actual stack switch is done in
  * entry.S
  */
-asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
+asmlinkage notrace __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
 {
        struct pt_regs *regs = eregs;
        /* Did already sync */
@@ -387,7 +387,7 @@ struct bad_iret_stack {
        struct pt_regs regs;
 };
 
-asmlinkage __visible
+asmlinkage __visible notrace __kprobes
 struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 {
        /*
index 4e27ba53c40cde3f8d1c7256d32f2358dd4d8b65..27e3a14fc917bdceb64b55ca85ede59d1d68916d 100644 (file)
@@ -380,7 +380,7 @@ static unsigned long quick_pit_calibrate(void)
                        goto success;
                }
        }
-       pr_err("Fast TSC calibration failed\n");
+       pr_info("Fast TSC calibration failed\n");
        return 0;
 
 success:
index 531d4269e2e3c5303e8b40e6753dd20ddab3c405..bd16d6c370ec9aaeb3328779e277de8129ef61f3 100644 (file)
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
 
 extern asmlinkage void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index f2f0723070caea848a88fe1f96863943fc3df121..95783087f0d3432170d879355d6cb893c33bed51 100644 (file)
@@ -46,7 +46,7 @@ typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
        /*
         * Smells like a compiler bug -- it doesn't work
         * when the & below is removed.
index 65c7a89cfa090a1a3a7bff6ad2f1ab9d40e1701f..b48f4f108c474104d484ceb316099ef78b20f5d3 100644 (file)
@@ -180,3 +180,4 @@ module_exit(nx842_mod_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("842 Compression Algorithm");
+MODULE_ALIAS_CRYPTO("842");
index a1eba1845367ef3cadc68bef9a06c5b2b1528ec1..b7cc3cb3a61304e0c72a41625c202a561c2269ba 100644 (file)
@@ -533,6 +533,17 @@ config CRYPTO_SHA1_PPC
          This is the powerpc hardware accelerated implementation of the
          SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2).
 
+config CRYPTO_SHA1_ARM_NEON
+       tristate "SHA1 digest algorithm (ARM NEON)"
+       depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
+       select CRYPTO_SHA1_ARM
+       select CRYPTO_SHA1
+       select CRYPTO_HASH
+       help
+         SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
+         using optimized ARM NEON assembly, when NEON instructions are
+         available.
+
 config CRYPTO_SHA256
        tristate "SHA224 and SHA256 digest algorithm"
        select CRYPTO_HASH
@@ -566,6 +577,21 @@ config CRYPTO_SHA512
          This code also includes SHA-384, a 384 bit hash with 192 bits
          of security against collision attacks.
 
+config CRYPTO_SHA512_ARM_NEON
+       tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
+       depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
+       select CRYPTO_SHA512
+       select CRYPTO_HASH
+       help
+         SHA-512 secure hash standard (DFIPS 180-2) implemented
+         using ARM NEON instructions, when available.
+
+         This version of SHA implements a 512 bit hash with 256 bits of
+         security against collision attacks.
+
+         This code also includes SHA-384, a 384 bit hash with 192 bits
+         of security against collision attacks.
+
 config CRYPTO_SHA512_SPARC64
        tristate "SHA384 and SHA512 digest algorithm (SPARC64)"
        depends on SPARC64
@@ -761,6 +787,22 @@ config CRYPTO_AES_ARM
 
          See <http://csrc.nist.gov/encryption/aes/> for more information.
 
+config CRYPTO_AES_ARM_BS
+       tristate "Bit sliced AES using NEON instructions"
+       depends on ARM && KERNEL_MODE_NEON
+       select CRYPTO_ALGAPI
+       select CRYPTO_AES_ARM
+       select CRYPTO_ABLK_HELPER
+       help
+         Use a faster and more secure NEON based implementation of AES in CBC,
+         CTR and XTS modes
+
+         Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
+         and for XTS mode encryption, CBC and XTS mode decryption speedup is
+         around 25%. (CBC encryption speed is not affected by this driver.)
+         This implementation does not rely on any lookup tables so it is
+         believed to be invulnerable to cache timing attacks.
+
 config CRYPTO_ANUBIS
        tristate "Anubis cipher algorithm"
        select CRYPTO_ALGAPI
index 47f2e5c717591847ed598db32b209bd6f1bbd254..e138ad85bd834958e64e3d580dde4d08efdedb12 100644 (file)
@@ -1474,4 +1474,5 @@ module_exit(aes_fini);
 
 MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
 MODULE_LICENSE("Dual BSD/GPL");
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-generic");
index 7a1ae87f1683459711c0b0c6badd1d76ddd9c9f1..00d8d939733b23d2591763adf29664241d52337e 100644 (file)
@@ -495,8 +495,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name)
 
 struct crypto_template *crypto_lookup_template(const char *name)
 {
-       return try_then_request_module(__crypto_lookup_template(name), "%s",
-                                      name);
+       return try_then_request_module(__crypto_lookup_template(name),
+                                      "crypto-%s", name);
 }
 EXPORT_SYMBOL_GPL(crypto_lookup_template);
 
index 666f1962a160f5d547579b918b6229de0232607b..6f5bebc9bf01ebea38bca6dd616c6bd2c3ce2111 100644 (file)
@@ -476,4 +476,5 @@ module_param(dbg, int, 0);
 MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
 module_init(prng_mod_init);
 module_exit(prng_mod_fini);
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("ansi_cprng");
index 008c8a4fb67ca77de02bc1c0359b4ef77c2b452d..4bb187c2a9027bab28e82370bd54f128602a25f1 100644 (file)
@@ -704,3 +704,4 @@ module_exit(anubis_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Anubis Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("anubis");
index 37c4c7213de070c9fbda60db25e92f1283cdcb6e..335abea14f19708b2165222ad2fb4f00fcd8f6e5 100644 (file)
@@ -216,11 +216,11 @@ struct crypto_alg *crypto_larval_lookup(const char *name, u32 type, u32 mask)
 
        alg = crypto_alg_lookup(name, type, mask);
        if (!alg) {
-               request_module("%s", name);
+               request_module("crypto-%s", name);
 
                if (!((type ^ CRYPTO_ALG_NEED_FALLBACK) & mask &
                      CRYPTO_ALG_NEED_FALLBACK))
-                       request_module("%s-all", name);
+                       request_module("crypto-%s-all", name);
 
                alg = crypto_alg_lookup(name, type, mask);
        }
index 5a772c3657d58d55c5bb453dbd6cc73a2e2a7bea..f1a81925558fa196650e3973daa44315caa613b0 100644 (file)
@@ -166,3 +166,4 @@ module_exit(arc4_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARC4 Cipher Algorithm");
 MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
+MODULE_ALIAS_CRYPTO("arc4");
index 528b00bc476995cdae1d75372169b123630daa37..a2cfae251dd51f5959b2899ccbdc2cd2c0032396 100644 (file)
@@ -709,3 +709,4 @@ module_exit(crypto_authenc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Simple AEAD wrapper for IPsec");
+MODULE_ALIAS_CRYPTO("authenc");
index ab53762fc309c5db55ccaab948f1862bcc7a237a..16c225cb28c26624039bce403429f060613a25f3 100644 (file)
@@ -832,3 +832,4 @@ module_exit(crypto_authenc_esn_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("AEAD wrapper for IPsec with extended sequence numbers");
+MODULE_ALIAS_CRYPTO("authencesn");
index 8baf5447d35b58f70bc43fa1bfb6f3de896813ae..87b392a77a9395a9e4164b7e9356e739c9f96455 100644 (file)
@@ -138,4 +138,5 @@ module_exit(blowfish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
-MODULE_ALIAS("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-generic");
index 75efa20523053661e35fe209feaab50b418dc364..029587f808f4746ee4c81a1c2cc0a8d9cdebc031 100644 (file)
@@ -1098,4 +1098,5 @@ module_exit(camellia_fini);
 
 MODULE_DESCRIPTION("Camellia Cipher Algorithm");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("camellia");
+MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-generic");
index 5558f630a0ebd254462cf6fc72486ea8d00bccd0..df5c72629383d99b9fa1c030112b8e0bfc3fbd96 100644 (file)
@@ -549,4 +549,5 @@ module_exit(cast5_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
-MODULE_ALIAS("cast5");
+MODULE_ALIAS_CRYPTO("cast5");
+MODULE_ALIAS_CRYPTO("cast5-generic");
index de732528a43042b0445dba87145653b0ce4f313a..058c8d755d0366532a7e824b7e22681a40624429 100644 (file)
@@ -291,4 +291,5 @@ module_exit(cast6_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
-MODULE_ALIAS("cast6");
+MODULE_ALIAS_CRYPTO("cast6");
+MODULE_ALIAS_CRYPTO("cast6-generic");
index 61ac42e1e32bb75816c0c1b0a7dd614cd80f4474..780ee27b2d43d5f3620d32c4df6c00670b6c48c8 100644 (file)
@@ -289,3 +289,4 @@ module_exit(crypto_cbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("cbc");
index ed009b77e67d1e99b771834fe55bb1950e3c529c..c569c9c6afe32b88760b571a77ae0f3774cd84d7 100644 (file)
@@ -879,5 +879,6 @@ module_exit(crypto_ccm_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Counter with CBC MAC");
-MODULE_ALIAS("ccm_base");
-MODULE_ALIAS("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm_base");
+MODULE_ALIAS_CRYPTO("rfc4309");
+MODULE_ALIAS_CRYPTO("ccm");
index 834d8dd3d4fc7abf1c72da93cb24f81632989239..22b7e55b0e1b12f598d89c519da9d43d8b2790a3 100644 (file)
@@ -359,3 +359,4 @@ module_exit(chainiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Chain IV Generator");
+MODULE_ALIAS_CRYPTO("chainiv");
index 50880cf17fad702f9251ac7af4839842fe0ae8d5..7a8bfbd548f60835fbf417ab96e43b749a5d283f 100644 (file)
@@ -313,3 +313,4 @@ module_exit(crypto_cmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CMAC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("cmac");
index 9d1c41569898a96e05b418344545ae6d01e8d00e..187ded28cb0bd76825475dfd3b4684d8043de752 100644 (file)
@@ -156,3 +156,4 @@ module_exit(crc32_mod_fini);
 MODULE_AUTHOR("Alexander Boyko <alexander_boyko@xyratex.com>");
 MODULE_DESCRIPTION("CRC32 calculations wrapper for lib/crc32");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_CRYPTO("crc32");
index 7bdd61b867c899901ed846ed5a229bc6ab31653b..75c415d370869e5ab121e8678ab8caa940e8e1d3 100644 (file)
@@ -955,3 +955,4 @@ module_exit(cryptd_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Software async crypto daemon");
+MODULE_ALIAS_CRYPTO("cryptd");
index fee7265cd35df5aa3d752e3294cb0f50fada64d2..7b39fa3deac2ae0e94af8d4a9aefe3fb8d51c179 100644 (file)
@@ -149,9 +149,9 @@ static struct crypto_alg null_algs[3] = { {
        .coa_decompress         =       null_compress } }
 } };
 
-MODULE_ALIAS("compress_null");
-MODULE_ALIAS("digest_null");
-MODULE_ALIAS("cipher_null");
+MODULE_ALIAS_CRYPTO("compress_null");
+MODULE_ALIAS_CRYPTO("digest_null");
+MODULE_ALIAS_CRYPTO("cipher_null");
 
 static int __init crypto_null_mod_init(void)
 {
index f2b94f27bb2cf9ac2b49ffa88d527f4f8ef98961..2386f731395207a2432d782d10c1421a0577dff3 100644 (file)
@@ -466,4 +466,5 @@ module_exit(crypto_ctr_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("CTR Counter block mode");
-MODULE_ALIAS("rfc3686");
+MODULE_ALIAS_CRYPTO("rfc3686");
+MODULE_ALIAS_CRYPTO("ctr");
index 042223f8e73364529be44a850d0dce174352c858..60b9da3fa7c1cd1fe4ec95c88fb1985c8ebea4e5 100644 (file)
@@ -350,3 +350,4 @@ module_exit(crypto_cts_module_exit);
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("CTS-CBC CipherText Stealing for CBC");
+MODULE_ALIAS_CRYPTO("cts");
index b57d70eb156b8c424b87d217c59f4700a0d3edca..95d8d37c502183b9e426925928aa5c578523cb7d 100644 (file)
@@ -222,4 +222,4 @@ module_exit(deflate_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Deflate Compression Algorithm for IPCOMP");
 MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
-
+MODULE_ALIAS_CRYPTO("deflate");
index f6cf63f8846826506fc0068ba53142a52688dfd8..3ec6071309d963d2cb56ab797c0c8cb157afda54 100644 (file)
@@ -971,8 +971,6 @@ static struct crypto_alg des_algs[2] = { {
        .cia_decrypt            =       des3_ede_decrypt } }
 } };
 
-MODULE_ALIAS("des3_ede");
-
 static int __init des_generic_mod_init(void)
 {
        return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
@@ -989,4 +987,7 @@ module_exit(des_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
 MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-generic");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-generic");
index 935cfef4aa8479c54cd6490db8a9b97da9125ab5..12011aff097136331f5aca539acb487746efcd69 100644 (file)
@@ -185,3 +185,4 @@ module_exit(crypto_ecb_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ECB block cipher algorithm");
+MODULE_ALIAS_CRYPTO("ecb");
index 42ce9f570aeccee6aada40d6742265af2eb8a746..388f582ab0b9438b55e38fefc8289c43554cce6c 100644 (file)
@@ -267,3 +267,4 @@ module_exit(eseqiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Encrypted Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("eseqiv");
index 3b2cf569c684fa5c8a278a4919f9ee8513fb79b3..300f5b80a0740dcb4925badeea16470d7fccacec 100644 (file)
@@ -420,3 +420,4 @@ module_exit(fcrypt_mod_fini);
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_DESCRIPTION("FCrypt Cipher Algorithm");
 MODULE_AUTHOR("David Howells <dhowells@redhat.com>");
+MODULE_ALIAS_CRYPTO("fcrypt");
index 43e1fb05ea54878cbe136231a1a92c847b6d3119..b4c252066f7bf9265e34faa8fceae4f8e12d386b 100644 (file)
@@ -1441,6 +1441,7 @@ module_exit(crypto_gcm_module_exit);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Galois/Counter Mode");
 MODULE_AUTHOR("Mikko Herranen <mh1@iki.fi>");
-MODULE_ALIAS("gcm_base");
-MODULE_ALIAS("rfc4106");
-MODULE_ALIAS("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm_base");
+MODULE_ALIAS_CRYPTO("rfc4106");
+MODULE_ALIAS_CRYPTO("rfc4543");
+MODULE_ALIAS_CRYPTO("gcm");
index 9d3f0c69a86ff2caeb33b7cddccbf8a80c7f7926..bac70995e0640a49fbc56797c4f7b605791ff98b 100644 (file)
@@ -172,4 +172,5 @@ module_exit(ghash_mod_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
-MODULE_ALIAS("ghash");
+MODULE_ALIAS_CRYPTO("ghash");
+MODULE_ALIAS_CRYPTO("ghash-generic");
index 8d9544cf8169fd30d12fdea1d6303cfd3f4e7158..ade790b454e9936f3e23a7af85357686c66bae5d 100644 (file)
@@ -271,3 +271,4 @@ module_exit(hmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("HMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("hmac");
index 60e7cd66facc81c0ae051b4c71e3f220144ef6b8..873eb5ded6d7ae2f24e96221a131d5936d4101bd 100644 (file)
@@ -880,3 +880,4 @@ module_exit(khazad_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Khazad Cryptographic Algorithm");
+MODULE_ALIAS_CRYPTO("khazad");
index a2d2b72fc135b428102141ab181cc802994d1178..0224841b6579aa8a915406f7c3a944385c6fcbd6 100644 (file)
@@ -62,4 +62,5 @@ module_exit(krng_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Kernel Random Number Generator");
-MODULE_ALIAS("stdrng");
+MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("krng");
index ba42acc4deba8059c65dff053faef591e3175517..6f9908a7ebcbe19f76a4ee306f326f291aff8de9 100644 (file)
@@ -400,3 +400,4 @@ module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LRW block cipher mode");
+MODULE_ALIAS_CRYPTO("lrw");
index 1c2aa69c54b8557cce3d7c06934a5cd8a9f575b5..d1ff69404353e5c91c8af25e021f40e297527ecc 100644 (file)
@@ -103,3 +103,4 @@ module_exit(lzo_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("LZO Compression Algorithm");
+MODULE_ALIAS_CRYPTO("lzo");
index 0477a6a01d58258eb07441561cb99640a2c3bd00..3515af425cc917b60669c6dbb1a460f1da1919c4 100644 (file)
@@ -255,4 +255,4 @@ module_exit(md4_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD4 Message Digest Algorithm");
-
+MODULE_ALIAS_CRYPTO("md4");
index 7febeaab923bcd724f607ca536ae6ab202e182ea..36f5e5b103f302dbeda611466fb97ab94b87511f 100644 (file)
@@ -168,3 +168,4 @@ module_exit(md5_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("MD5 Message Digest Algorithm");
+MODULE_ALIAS_CRYPTO("md5");
index 079b761bc70d125b241da7d13d30d7cef8b43846..46195e0d0f4d1d30dd20b3bdc8f987ea14176643 100644 (file)
@@ -184,3 +184,4 @@ module_exit(michael_mic_exit);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Michael MIC");
 MODULE_AUTHOR("Jouni Malinen <j@w1.fi>");
+MODULE_ALIAS_CRYPTO("michael_mic");
index d1b8bdfb58551e5b7844ec423f834f468fe9f3f1..f654965f09338dab066795d8c6ab8618ef263d93 100644 (file)
@@ -295,3 +295,4 @@ module_exit(crypto_pcbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("PCBC block cipher algorithm");
+MODULE_ALIAS_CRYPTO("pcbc");
index b2c99dc1c5e2f244bab9cf8fafcedb0860df67f6..61ff946db7484e17ef9cced2f35d66730a596611 100644 (file)
@@ -565,3 +565,4 @@ module_exit(pcrypt_exit);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
 MODULE_DESCRIPTION("Parallel crypto wrapper");
+MODULE_ALIAS_CRYPTO("pcrypt");
index 8a0f68b7f257fa08cb61e56f5cd778cc5fdafe38..049486ede938faa3ecc5254d91d3da529467d388 100644 (file)
@@ -327,3 +327,4 @@ module_exit(rmd128_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-128 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd128");
index 525d7bb752cf6a7317d728c57dd3e5528bf7d6bb..de585e51d455f9a5070b62a59540b01cf096b8ec 100644 (file)
@@ -371,3 +371,4 @@ module_exit(rmd160_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-160 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd160");
index 69293d9b56e0c4dca0ee4e739386ceb81e1a9173..4ec02a754e0992e5700fe683f89dde754fa1d1b8 100644 (file)
@@ -346,3 +346,4 @@ module_exit(rmd256_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-256 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd256");
index 09f97dfdfbba37b50b4c6a76fde33d844561c21d..770f2cb369f870a74d2c19fdf943be5ecf5ae54a 100644 (file)
@@ -395,3 +395,4 @@ module_exit(rmd320_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Adrian-Ken Rueegsegger <ken@codelabs.ch>");
 MODULE_DESCRIPTION("RIPEMD-320 Message Digest");
+MODULE_ALIAS_CRYPTO("rmd320");
index 9a4770c022841542fc8a2a11e7f5c32fa8ae6ac3..f550b5d9463074b16670129341de59e069f8509c 100644 (file)
@@ -248,4 +248,5 @@ module_exit(salsa20_generic_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
-MODULE_ALIAS("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-generic");
index 9c904d6d215140d38039aacd49922f374aac4ad6..c6ba8438be430f59988e52a158d306dcb832c5fe 100644 (file)
@@ -476,3 +476,4 @@ module_exit(seed_fini);
 MODULE_DESCRIPTION("SEED Cipher Algorithm");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Hye-Shik Chang <perky@FreeBSD.org>, Kim Hyun <hkim@kisa.or.kr>");
+MODULE_ALIAS_CRYPTO("seed");
index f2cba4ed6f256b804c4b9f7360cffd1eb63f5b2d..49a4069ff4532c5004e1eadcb3498fc7a30979f2 100644 (file)
@@ -362,3 +362,4 @@ module_exit(seqiv_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Sequence Number IV Generator");
+MODULE_ALIAS_CRYPTO("seqiv");
index 7ddbd7e888595dfb96ef80423d01e685278445b7..94970a794975ac2148fbc0d84bf2e830719070da 100644 (file)
@@ -665,5 +665,6 @@ module_exit(serpent_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Cipher Algorithm");
 MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
-MODULE_ALIAS("tnepres");
-MODULE_ALIAS("serpent");
+MODULE_ALIAS_CRYPTO("tnepres");
+MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-generic");
index 42794803c480531a60cc465657741a42ea5485dc..fdf7c00de4b0dba9ffb9eac76f67fead9f8f6fcb 100644 (file)
@@ -153,4 +153,5 @@ module_exit(sha1_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-generic");
index 5433667795249ca298059a136998b9c2624f2c97..136381bdd48d2203e71e177f327a1598834f0299 100644 (file)
@@ -384,5 +384,7 @@ module_exit(sha256_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
 
-MODULE_ALIAS("sha224");
-MODULE_ALIAS("sha256");
+MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
+MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
index 4c586209567937e04eb9fe7342809f142b62f67b..fb2d7b8f163fc2a6586a6ea5279b410b94a3b488 100644 (file)
@@ -285,5 +285,7 @@ module_exit(sha512_generic_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
 
-MODULE_ALIAS("sha384");
-MODULE_ALIAS("sha512");
+MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-generic");
+MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-generic");
index 0a572323ee4a9e88cdb2d3e4bc92e56235af8a6a..b70b441c7d1e7e6135f000fa8fa58a3057671b20 100644 (file)
@@ -270,8 +270,9 @@ static void __exit tea_mod_fini(void)
        crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
 }
 
-MODULE_ALIAS("xtea");
-MODULE_ALIAS("xeta");
+MODULE_ALIAS_CRYPTO("tea");
+MODULE_ALIAS_CRYPTO("xtea");
+MODULE_ALIAS_CRYPTO("xeta");
 
 module_init(tea_mod_init);
 module_exit(tea_mod_fini);
index 87403556fd0bfa5c3a4ecd40a5a707b6892fbc04..f7ed2fba396c8b80e018bb367398c8a261c9d3f6 100644 (file)
@@ -676,8 +676,9 @@ static void __exit tgr192_mod_fini(void)
        crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
 }
 
-MODULE_ALIAS("tgr160");
-MODULE_ALIAS("tgr128");
+MODULE_ALIAS_CRYPTO("tgr192");
+MODULE_ALIAS_CRYPTO("tgr160");
+MODULE_ALIAS_CRYPTO("tgr128");
 
 module_init(tgr192_mod_init);
 module_exit(tgr192_mod_fini);
index 2d5000552d0f93b1bacb1e799e50605743c4d969..ebf7a3efb572715750c9529b8f54e9e724b6c5e7 100644 (file)
@@ -211,4 +211,5 @@ module_exit(twofish_mod_fini);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
-MODULE_ALIAS("twofish");
+MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-generic");
index 2eb11a30c29cee93203a1b90a990cc4463368440..bf2d3a89845fb329de8cfe67aed4a8084f2b75b0 100644 (file)
@@ -713,3 +713,4 @@ module_exit(vmac_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("VMAC hash algorithm");
+MODULE_ALIAS_CRYPTO("vmac");
index 180f1d6e03f48fc20ef787061d8866e6af429c68..253db94b54799edc3315b564fda38fad2ee54323 100644 (file)
@@ -1167,8 +1167,9 @@ static void __exit wp512_mod_fini(void)
        crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
 }
 
-MODULE_ALIAS("wp384");
-MODULE_ALIAS("wp256");
+MODULE_ALIAS_CRYPTO("wp512");
+MODULE_ALIAS_CRYPTO("wp384");
+MODULE_ALIAS_CRYPTO("wp256");
 
 module_init(wp512_mod_init);
 module_exit(wp512_mod_fini);
index a5fbdf3738cfd4fa8f0938f53ceabd8fb4d9e2ea..df90b332554cf43fb595623af3e854534a9dc49b 100644 (file)
@@ -286,3 +286,4 @@ module_exit(crypto_xcbc_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XCBC keyed hash algorithm");
+MODULE_ALIAS_CRYPTO("xcbc");
index ca1608f44cb56617d1b9e1d721d4a6dc12bca041..f6fd43f100c8c68c7150cad5224fafc5dc61db2c 100644 (file)
@@ -362,3 +362,4 @@ module_exit(crypto_module_exit);
 
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("XTS block cipher mode");
+MODULE_ALIAS_CRYPTO("xts");
index 06b62e5cdcc72a93281051a7c07b5090be9abeaa..d9807883528177a232c2c291cf7558956d2bd56d 100644 (file)
@@ -378,3 +378,4 @@ module_exit(zlib_mod_fini);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("Zlib Compression Algorithm");
 MODULE_AUTHOR("Sony Corporation");
+MODULE_ALIAS_CRYPTO("zlib");
index 45a3e8015410c4e2ca7f6e6b63a6a207ce4c16bb..27d02dccf21bfc2431a1ce61b3d0dd98eafe7cf8 100755 (executable)
@@ -174,4 +174,6 @@ source "drivers/gator/Kconfig"
 
 source "drivers/headset_observe/Kconfig"
 
+source "drivers/android/Kconfig"
+
 endmenu
index a77fca50d5505cff8689081eabe8a0679bea1905..6f90a3177c700d40a7fcc6b2236fd10a7d75c3c6 100755 (executable)
@@ -159,3 +159,4 @@ obj-$(CONFIG_NTB)           += ntb/
 obj-$(CONFIG_GATOR)            += gator/
 obj-y                          += headset_observe/
 obj-$(CONFIG_CORESIGHT)                += coresight/
+obj-$(CONFIG_ANDROID)          += android/
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
new file mode 100644 (file)
index 0000000..bdfc6c6
--- /dev/null
@@ -0,0 +1,37 @@
+menu "Android"
+
+config ANDROID
+       bool "Android Drivers"
+       ---help---
+         Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+       bool "Android Binder IPC Driver"
+       depends on MMU
+       default n
+       ---help---
+         Binder is used in Android for both communication between processes,
+         and remote method invocation.
+
+         This means one Android process can call a method/routine in another
+         Android process, using Binder to identify, invoke and pass arguments
+         between said processes.
+
+config ANDROID_BINDER_IPC_32BIT
+       bool
+       depends on !64BIT && ANDROID_BINDER_IPC
+       default y
+       ---help---
+         The Binder API has been changed to support both 32 and 64bit
+         applications in a mixed environment.
+
+         Enable this to support an old 32-bit Android user-space (v4.4 and
+         earlier).
+
+         Note that enabling this will break newer Android user-space.
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
new file mode 100644 (file)
index 0000000..3b7e4b0
--- /dev/null
@@ -0,0 +1,3 @@
+ccflags-y += -I$(src)                  # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
new file mode 100644 (file)
index 0000000..c048624
--- /dev/null
@@ -0,0 +1,3708 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/rtmutex.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/pid_namespace.h>
+#include <linux/security.h>
+
+#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
+#define BINDER_IPC_32BIT 1
+#endif
+
+#include <uapi/linux/android/binder.h>
+#include "binder_trace.h"
+
+static DEFINE_RT_MUTEX(binder_main_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static kuid_t binder_context_mgr_uid = INVALID_UID;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+       return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+       .owner = THIS_MODULE, \
+       .open = binder_##name##_open, \
+       .read = seq_read, \
+       .llseek = seq_lseek, \
+       .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K                               0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M                               0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+       BINDER_DEBUG_USER_ERROR             = 1U << 0,
+       BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
+       BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
+       BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
+       BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
+       BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
+       BINDER_DEBUG_READ_WRITE             = 1U << 6,
+       BINDER_DEBUG_USER_REFS              = 1U << 7,
+       BINDER_DEBUG_THREADS                = 1U << 8,
+       BINDER_DEBUG_TRANSACTION            = 1U << 9,
+       BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
+       BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
+       BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
+       BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
+       BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
+       BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+       BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static bool binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+                                        struct kernel_param *kp)
+{
+       int ret;
+
+       ret = param_set_int(val, kp);
+       if (binder_stop_on_user_error < 2)
+               wake_up(&binder_user_error_wait);
+       return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+       param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+       do { \
+               if (binder_debug_mask & mask) \
+                       pr_info(x); \
+       } while (0)
+
+#define binder_user_error(x...) \
+       do { \
+               if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+                       pr_info(x); \
+               if (binder_stop_on_user_error) \
+                       binder_stop_on_user_error = 2; \
+       } while (0)
+
+enum binder_stat_types {
+       BINDER_STAT_PROC,
+       BINDER_STAT_THREAD,
+       BINDER_STAT_NODE,
+       BINDER_STAT_REF,
+       BINDER_STAT_DEATH,
+       BINDER_STAT_TRANSACTION,
+       BINDER_STAT_TRANSACTION_COMPLETE,
+       BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+       int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+       int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+       int obj_created[BINDER_STAT_COUNT];
+       int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+       binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+       binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+       int debug_id;
+       int call_type;
+       int from_proc;
+       int from_thread;
+       int target_handle;
+       int to_proc;
+       int to_thread;
+       int to_node;
+       int data_size;
+       int offsets_size;
+};
+struct binder_transaction_log {
+       int next;
+       int full;
+       struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+       struct binder_transaction_log *log)
+{
+       struct binder_transaction_log_entry *e;
+
+       e = &log->entry[log->next];
+       memset(e, 0, sizeof(*e));
+       log->next++;
+       if (log->next == ARRAY_SIZE(log->entry)) {
+               log->next = 0;
+               log->full = 1;
+       }
+       return e;
+}
+
+struct binder_work {
+       struct list_head entry;
+       enum {
+               BINDER_WORK_TRANSACTION = 1,
+               BINDER_WORK_TRANSACTION_COMPLETE,
+               BINDER_WORK_NODE,
+               BINDER_WORK_DEAD_BINDER,
+               BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+               BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+       } type;
+};
+
+struct binder_node {
+       int debug_id;
+       struct binder_work work;
+       union {
+               struct rb_node rb_node;
+               struct hlist_node dead_node;
+       };
+       struct binder_proc *proc;
+       struct hlist_head refs;
+       int internal_strong_refs;
+       int local_weak_refs;
+       int local_strong_refs;
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
+       unsigned has_strong_ref:1;
+       unsigned pending_strong_ref:1;
+       unsigned has_weak_ref:1;
+       unsigned pending_weak_ref:1;
+       unsigned has_async_transaction:1;
+       unsigned accept_fds:1;
+       unsigned min_priority:8;
+       struct list_head async_todo;
+};
+
+struct binder_ref_death {
+       struct binder_work work;
+       binder_uintptr_t cookie;
+};
+
+struct binder_ref {
+       /* Lookups needed: */
+       /*   node + proc => ref (transaction) */
+       /*   desc + proc => ref (transaction, inc/dec ref) */
+       /*   node => refs + procs (proc exit) */
+       int debug_id;
+       struct rb_node rb_node_desc;
+       struct rb_node rb_node_node;
+       struct hlist_node node_entry;
+       struct binder_proc *proc;
+       struct binder_node *node;
+       uint32_t desc;
+       int strong;
+       int weak;
+       struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+       struct list_head entry; /* free and allocated entries by address */
+       struct rb_node rb_node; /* free entry by size or allocated entry */
+                               /* by address */
+       unsigned free:1;
+       unsigned allow_user_free:1;
+       unsigned async_transaction:1;
+       unsigned debug_id:29;
+
+       struct binder_transaction *transaction;
+
+       struct binder_node *target_node;
+       size_t data_size;
+       size_t offsets_size;
+       uint8_t data[0];
+};
+
+enum binder_deferred_state {
+       BINDER_DEFERRED_PUT_FILES    = 0x01,
+       BINDER_DEFERRED_FLUSH        = 0x02,
+       BINDER_DEFERRED_RELEASE      = 0x04,
+};
+
+struct binder_proc {
+       struct hlist_node proc_node;
+       struct rb_root threads;
+       struct rb_root nodes;
+       struct rb_root refs_by_desc;
+       struct rb_root refs_by_node;
+       int pid;
+       struct vm_area_struct *vma;
+       struct mm_struct *vma_vm_mm;
+       struct task_struct *tsk;
+       struct files_struct *files;
+       struct hlist_node deferred_work_node;
+       int deferred_work;
+       void *buffer;
+       ptrdiff_t user_buffer_offset;
+
+       struct list_head buffers;
+       struct rb_root free_buffers;
+       struct rb_root allocated_buffers;
+       size_t free_async_space;
+
+       struct page **pages;
+       size_t buffer_size;
+       uint32_t buffer_free;
+       struct list_head todo;
+       wait_queue_head_t wait;
+       struct binder_stats stats;
+       struct list_head delivered_death;
+       int max_threads;
+       int requested_threads;
+       int requested_threads_started;
+       int ready_threads;
+       long default_priority;
+       struct dentry *debugfs_entry;
+};
+
+enum {
+       BINDER_LOOPER_STATE_REGISTERED  = 0x01,
+       BINDER_LOOPER_STATE_ENTERED     = 0x02,
+       BINDER_LOOPER_STATE_EXITED      = 0x04,
+       BINDER_LOOPER_STATE_INVALID     = 0x08,
+       BINDER_LOOPER_STATE_WAITING     = 0x10,
+       BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+       struct binder_proc *proc;
+       struct rb_node rb_node;
+       int pid;
+       int looper;
+       struct binder_transaction *transaction_stack;
+       struct list_head todo;
+       uint32_t return_error; /* Write failed, return error code in read buf */
+       uint32_t return_error2; /* Write failed, return error code in read */
+               /* buffer. Used when sending a reply to a dead process that */
+               /* we are also waiting on */
+       wait_queue_head_t wait;
+       struct binder_stats stats;
+};
+
+struct binder_transaction {
+       int debug_id;
+       struct binder_work work;
+       struct binder_thread *from;
+       struct binder_transaction *from_parent;
+       struct binder_proc *to_proc;
+       struct binder_thread *to_thread;
+       struct binder_transaction *to_parent;
+       unsigned need_reply:1;
+       /* unsigned is_dead:1; */       /* not used at the moment */
+
+       struct binder_buffer *buffer;
+       unsigned int    code;
+       unsigned int    flags;
+       long    priority;
+       long    saved_priority;
+       kuid_t  sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+       struct files_struct *files = proc->files;
+       unsigned long rlim_cur;
+       unsigned long irqs;
+
+       if (files == NULL)
+               return -ESRCH;
+
+       if (!lock_task_sighand(proc->tsk, &irqs))
+               return -EMFILE;
+
+       rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
+       unlock_task_sighand(proc->tsk, &irqs);
+
+       return __alloc_fd(files, 0, rlim_cur, flags);
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+       struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+       if (proc->files)
+               __fd_install(proc->files, fd, file);
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+       int retval;
+
+       if (proc->files == NULL)
+               return -ESRCH;
+
+       retval = __close_fd(proc->files, fd);
+       /* can't restart close syscall because file table entry was cleared */
+       if (unlikely(retval == -ERESTARTSYS ||
+                    retval == -ERESTARTNOINTR ||
+                    retval == -ERESTARTNOHAND ||
+                    retval == -ERESTART_RESTARTBLOCK))
+               retval = -EINTR;
+
+       return retval;
+}
+
+static inline void binder_lock(const char *tag)
+{
+       trace_binder_lock(tag);
+       rt_mutex_lock(&binder_main_lock);
+       trace_binder_locked(tag);
+}
+
+static inline void binder_unlock(const char *tag)
+{
+       trace_binder_unlock(tag);
+       rt_mutex_unlock(&binder_main_lock);
+}
+
+static void binder_set_nice(long nice)
+{
+       long min_nice;
+
+       if (can_nice(current, nice)) {
+               set_user_nice(current, nice);
+               return;
+       }
+       min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+       binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+                    "%d: nice value %ld not allowed use %ld instead\n",
+                     current->pid, nice, min_nice);
+       set_user_nice(current, min_nice);
+       if (min_nice < 20)
+               return;
+       binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+}
+
+static size_t binder_buffer_size(struct binder_proc *proc,
+                                struct binder_buffer *buffer)
+{
+       if (list_is_last(&buffer->entry, &proc->buffers))
+               return proc->buffer + proc->buffer_size - (void *)buffer->data;
+       return (size_t)list_entry(buffer->entry.next,
+                         struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+                                     struct binder_buffer *new_buffer)
+{
+       struct rb_node **p = &proc->free_buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_buffer *buffer;
+       size_t buffer_size;
+       size_t new_buffer_size;
+
+       BUG_ON(!new_buffer->free);
+
+       new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: add free buffer, size %zd, at %p\n",
+                     proc->pid, new_buffer_size, new_buffer);
+
+       while (*p) {
+               parent = *p;
+               buffer = rb_entry(parent, struct binder_buffer, rb_node);
+               BUG_ON(!buffer->free);
+
+               buffer_size = binder_buffer_size(proc, buffer);
+
+               if (new_buffer_size < buffer_size)
+                       p = &parent->rb_left;
+               else
+                       p = &parent->rb_right;
+       }
+       rb_link_node(&new_buffer->rb_node, parent, p);
+       rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+                                          struct binder_buffer *new_buffer)
+{
+       struct rb_node **p = &proc->allocated_buffers.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_buffer *buffer;
+
+       BUG_ON(new_buffer->free);
+
+       while (*p) {
+               parent = *p;
+               buffer = rb_entry(parent, struct binder_buffer, rb_node);
+               BUG_ON(buffer->free);
+
+               if (new_buffer < buffer)
+                       p = &parent->rb_left;
+               else if (new_buffer > buffer)
+                       p = &parent->rb_right;
+               else
+                       BUG();
+       }
+       rb_link_node(&new_buffer->rb_node, parent, p);
+       rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+                                                 uintptr_t user_ptr)
+{
+       struct rb_node *n = proc->allocated_buffers.rb_node;
+       struct binder_buffer *buffer;
+       struct binder_buffer *kern_ptr;
+
+       kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
+               - offsetof(struct binder_buffer, data));
+
+       while (n) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               BUG_ON(buffer->free);
+
+               if (kern_ptr < buffer)
+                       n = n->rb_left;
+               else if (kern_ptr > buffer)
+                       n = n->rb_right;
+               else
+                       return buffer;
+       }
+       return NULL;
+}
+
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+                                   void *start, void *end,
+                                   struct vm_area_struct *vma)
+{
+       void *page_addr;
+       unsigned long user_page_addr;
+       struct vm_struct tmp_area;
+       struct page **page;
+       struct mm_struct *mm;
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: %s pages %p-%p\n", proc->pid,
+                    allocate ? "allocate" : "free", start, end);
+
+       if (end <= start)
+               return 0;
+
+       trace_binder_update_page_range(proc, allocate, start, end);
+
+       if (vma)
+               mm = NULL;
+       else
+               mm = get_task_mm(proc->tsk);
+
+       if (mm) {
+               down_write(&mm->mmap_sem);
+               vma = proc->vma;
+               if (vma && mm != proc->vma_vm_mm) {
+                       pr_err("%d: vma mm and task mm mismatch\n",
+                               proc->pid);
+                       vma = NULL;
+               }
+       }
+
+       if (allocate == 0)
+               goto free_range;
+
+       if (vma == NULL) {
+               pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
+                       proc->pid);
+               goto err_no_vma;
+       }
+
+       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+               int ret;
+               struct page **page_array_ptr;
+
+               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+               BUG_ON(*page);
+               *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
+               if (*page == NULL) {
+                       pr_err("%d: binder_alloc_buf failed for page at %p\n",
+                               proc->pid, page_addr);
+                       goto err_alloc_page_failed;
+               }
+               tmp_area.addr = page_addr;
+               tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+               page_array_ptr = page;
+               ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+               if (ret) {
+                       pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
+                              proc->pid, page_addr);
+                       goto err_map_kernel_failed;
+               }
+               user_page_addr =
+                       (uintptr_t)page_addr + proc->user_buffer_offset;
+               ret = vm_insert_page(vma, user_page_addr, page[0]);
+               if (ret) {
+                       pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
+                              proc->pid, user_page_addr);
+                       goto err_vm_insert_page_failed;
+               }
+               /* vm_insert_page does not seem to increment the refcount */
+       }
+       if (mm) {
+               up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return 0;
+
+free_range:
+       for (page_addr = end - PAGE_SIZE; page_addr >= start;
+            page_addr -= PAGE_SIZE) {
+               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+               if (vma)
+                       zap_page_range(vma, (uintptr_t)page_addr +
+                               proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+               unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+               __free_page(*page);
+               *page = NULL;
+err_alloc_page_failed:
+               ;
+       }
+err_no_vma:
+       if (mm) {
+               up_write(&mm->mmap_sem);
+               mmput(mm);
+       }
+       return -ENOMEM;
+}
+
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+                                             size_t data_size,
+                                             size_t offsets_size, int is_async)
+{
+       struct rb_node *n = proc->free_buffers.rb_node;
+       struct binder_buffer *buffer;
+       size_t buffer_size;
+       struct rb_node *best_fit = NULL;
+       void *has_page_addr;
+       void *end_page_addr;
+       size_t size;
+
+       if (proc->vma == NULL) {
+               pr_err("%d: binder_alloc_buf, no vma\n",
+                      proc->pid);
+               return NULL;
+       }
+
+       size = ALIGN(data_size, sizeof(void *)) +
+               ALIGN(offsets_size, sizeof(void *));
+
+       if (size < data_size || size < offsets_size) {
+               binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
+                               proc->pid, data_size, offsets_size);
+               return NULL;
+       }
+
+       if (is_async &&
+           proc->free_async_space < size + sizeof(struct binder_buffer)) {
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "%d: binder_alloc_buf size %zd failed, no async space left\n",
+                             proc->pid, size);
+               return NULL;
+       }
+
+       while (n) {
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+               BUG_ON(!buffer->free);
+               buffer_size = binder_buffer_size(proc, buffer);
+
+               if (size < buffer_size) {
+                       best_fit = n;
+                       n = n->rb_left;
+               } else if (size > buffer_size)
+                       n = n->rb_right;
+               else {
+                       best_fit = n;
+                       break;
+               }
+       }
+       if (best_fit == NULL) {
+               pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
+                       proc->pid, size);
+               return NULL;
+       }
+       if (n == NULL) {
+               buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+               buffer_size = binder_buffer_size(proc, buffer);
+       }
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
+                     proc->pid, size, buffer, buffer_size);
+
+       has_page_addr =
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+       if (n == NULL) {
+               if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+                       buffer_size = size; /* no room for other buffers */
+               else
+                       buffer_size = size + sizeof(struct binder_buffer);
+       }
+       end_page_addr =
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+       if (end_page_addr > has_page_addr)
+               end_page_addr = has_page_addr;
+       if (binder_update_page_range(proc, 1,
+           (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+               return NULL;
+
+       rb_erase(best_fit, &proc->free_buffers);
+       buffer->free = 0;
+       binder_insert_allocated_buffer(proc, buffer);
+       if (buffer_size != size) {
+               struct binder_buffer *new_buffer = (void *)buffer->data + size;
+
+               list_add(&new_buffer->entry, &buffer->entry);
+               new_buffer->free = 1;
+               binder_insert_free_buffer(proc, new_buffer);
+       }
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_alloc_buf size %zd got %p\n",
+                     proc->pid, size, buffer);
+       buffer->data_size = data_size;
+       buffer->offsets_size = offsets_size;
+       buffer->async_transaction = is_async;
+       if (is_async) {
+               proc->free_async_space -= size + sizeof(struct binder_buffer);
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "%d: binder_alloc_buf size %zd async free %zd\n",
+                             proc->pid, size, proc->free_async_space);
+       }
+
+       return buffer;
+}
+
+/* Page-aligned address of the page containing @buffer's header. */
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+       uintptr_t addr = (uintptr_t)buffer;
+
+       return (void *)(addr & PAGE_MASK);
+}
+
+/*
+ * Page-aligned address of the page holding the last byte of @buffer's
+ * header (buffer + 1 is one-past-the-end, so back up one byte first).
+ */
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+       uintptr_t last_byte = (uintptr_t)(buffer + 1) - 1;
+
+       return (void *)(last_byte & PAGE_MASK);
+}
+
+/*
+ * Unlink a free buffer from proc->buffers and release any whole pages
+ * no longer needed for a neighbouring buffer's header.
+ *
+ * A page is kept if the previous or next buffer's header occupies part
+ * of it; only pages used exclusively by @buffer are returned via
+ * binder_update_page_range().
+ */
+static void binder_delete_free_buffer(struct binder_proc *proc,
+                                     struct binder_buffer *buffer)
+{
+       struct binder_buffer *prev, *next = NULL;
+       int free_page_end = 1;
+       int free_page_start = 1;
+
+       BUG_ON(proc->buffers.next == &buffer->entry);
+       prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+       BUG_ON(!prev->free);
+       if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+               /* prev's header ends on our first page: keep that page */
+               free_page_start = 0;
+               if (buffer_end_page(prev) == buffer_end_page(buffer))
+                       free_page_end = 0;
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "%d: merge free, buffer %p share page with %p\n",
+                             proc->pid, buffer, prev);
+       }
+
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               next = list_entry(buffer->entry.next,
+                                 struct binder_buffer, entry);
+               if (buffer_start_page(next) == buffer_end_page(buffer)) {
+                       /* next's header starts on our last page: keep it */
+                       free_page_end = 0;
+                       if (buffer_start_page(next) ==
+                           buffer_start_page(buffer))
+                               free_page_start = 0;
+                       /* the shared page is shared with next, not prev */
+                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                    "%d: merge free, buffer %p share page with %p\n",
+                                     proc->pid, buffer, next);
+               }
+       }
+       list_del(&buffer->entry);
+       if (free_page_start || free_page_end) {
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                            "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
+                            proc->pid, buffer, free_page_start ? "" : " end",
+                            free_page_end ? "" : " start", prev, next);
+               binder_update_page_range(proc, 0, free_page_start ?
+                       buffer_start_page(buffer) : buffer_end_page(buffer),
+                       (free_page_end ? buffer_end_page(buffer) :
+                       buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+       }
+}
+
+/*
+ * Return @buffer to @proc's free list.
+ *
+ * Releases the whole pages spanned by the payload, credits the async
+ * space budget back when this was an async transaction, and coalesces
+ * the buffer with free neighbours before reinserting it into the
+ * free_buffers tree.
+ */
+static void binder_free_buf(struct binder_proc *proc,
+                           struct binder_buffer *buffer)
+{
+       size_t size, buffer_size;
+
+       buffer_size = binder_buffer_size(proc, buffer);
+
+       /* payload size as it was rounded up at allocation time */
+       size = ALIGN(buffer->data_size, sizeof(void *)) +
+               ALIGN(buffer->offsets_size, sizeof(void *));
+
+       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                    "%d: binder_free_buf %p size %zd buffer_size %zd\n",
+                     proc->pid, buffer, size, buffer_size);
+
+       BUG_ON(buffer->free);
+       BUG_ON(size > buffer_size);
+       BUG_ON(buffer->transaction != NULL);
+       BUG_ON((void *)buffer < proc->buffer);
+       BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+       if (buffer->async_transaction) {
+               /* give back the budget binder_alloc_buf charged */
+               proc->free_async_space += size + sizeof(struct binder_buffer);
+
+               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+                            "%d: binder_free_buf size %zd async free %zd\n",
+                             proc->pid, size, proc->free_async_space);
+       }
+
+       /* release only pages fully contained in the payload area */
+       binder_update_page_range(proc, 0,
+               (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+               NULL);
+       rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+       buffer->free = 1;
+       /* merge with a free successor: delete it, absorbing its space */
+       if (!list_is_last(&buffer->entry, &proc->buffers)) {
+               struct binder_buffer *next = list_entry(buffer->entry.next,
+                                               struct binder_buffer, entry);
+
+               if (next->free) {
+                       rb_erase(&next->rb_node, &proc->free_buffers);
+                       binder_delete_free_buffer(proc, next);
+               }
+       }
+       /* merge with a free predecessor: delete ourselves into it and
+        * continue with prev as the buffer to be reinserted */
+       if (proc->buffers.next != &buffer->entry) {
+               struct binder_buffer *prev = list_entry(buffer->entry.prev,
+                                               struct binder_buffer, entry);
+
+               if (prev->free) {
+                       binder_delete_free_buffer(proc, buffer);
+                       rb_erase(&prev->rb_node, &proc->free_buffers);
+                       buffer = prev;
+               }
+       }
+       binder_insert_free_buffer(proc, buffer);
+}
+
+/* Look up the node whose userspace pointer is @ptr, or NULL if absent. */
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+                                          binder_uintptr_t ptr)
+{
+       struct rb_node *pos;
+
+       for (pos = proc->nodes.rb_node; pos != NULL;) {
+               struct binder_node *node;
+
+               node = rb_entry(pos, struct binder_node, rb_node);
+               if (ptr == node->ptr)
+                       return node;
+               pos = (ptr < node->ptr) ? pos->rb_left : pos->rb_right;
+       }
+       return NULL;
+}
+
+/*
+ * Create and insert a node for (@ptr, @cookie) in @proc's node tree.
+ * Returns NULL if a node with the same ptr already exists or if the
+ * allocation fails.
+ */
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+                                          binder_uintptr_t ptr,
+                                          binder_uintptr_t cookie)
+{
+       struct rb_node **link = &proc->nodes.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_node *node;
+
+       while (*link != NULL) {
+               struct binder_node *existing;
+
+               parent = *link;
+               existing = rb_entry(parent, struct binder_node, rb_node);
+               if (ptr == existing->ptr)
+                       return NULL;    /* duplicate ptr */
+               if (ptr < existing->ptr)
+                       link = &parent->rb_left;
+               else
+                       link = &parent->rb_right;
+       }
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return NULL;
+       binder_stats_created(BINDER_STAT_NODE);
+       rb_link_node(&node->rb_node, parent, link);
+       rb_insert_color(&node->rb_node, &proc->nodes);
+       node->debug_id = ++binder_last_id;
+       node->proc = proc;
+       node->ptr = ptr;
+       node->cookie = cookie;
+       node->work.type = BINDER_WORK_NODE;
+       INIT_LIST_HEAD(&node->work.entry);
+       INIT_LIST_HEAD(&node->async_todo);
+       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                    "%d:%d node %d u%016llx c%016llx created\n",
+                    proc->pid, current->pid, node->debug_id,
+                    (u64)node->ptr, (u64)node->cookie);
+       return node;
+}
+
+/*
+ * Take a reference on @node.
+ *
+ * @strong:      take a strong (vs weak) reference
+ * @internal:    reference is held through a binder_ref (vs locally)
+ * @target_list: work list on which ref-count work for userspace may
+ *               be queued, or NULL
+ *
+ * Returns 0 on success, -EINVAL when the increment would need to
+ * queue work for userspace but no @target_list was supplied.
+ */
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+                          struct list_head *target_list)
+{
+       if (strong) {
+               if (internal) {
+                       /* first internal strong ref must be deliverable,
+                        * except for the context manager node once it
+                        * already has a strong ref */
+                       if (target_list == NULL &&
+                           node->internal_strong_refs == 0 &&
+                           !(node == binder_context_mgr_node &&
+                           node->has_strong_ref)) {
+                               pr_err("invalid inc strong node for %d\n",
+                                       node->debug_id);
+                               return -EINVAL;
+                       }
+                       node->internal_strong_refs++;
+               } else
+                       node->local_strong_refs++;
+               if (!node->has_strong_ref && target_list) {
+                       /* requeue node work so BR_ACQUIRE gets delivered */
+                       list_del_init(&node->work.entry);
+                       list_add_tail(&node->work.entry, target_list);
+               }
+       } else {
+               if (!internal)
+                       node->local_weak_refs++;
+               if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+                       if (target_list == NULL) {
+                               pr_err("invalid inc weak node for %d\n",
+                                       node->debug_id);
+                               return -EINVAL;
+                       }
+                       list_add_tail(&node->work.entry, target_list);
+               }
+       }
+       return 0;
+}
+
+/*
+ * Drop a reference on @node (see binder_inc_node for @strong and
+ * @internal).  When the last reference of a kind goes away, queue
+ * BINDER_WORK_NODE so the owning process can be told, or free the
+ * node outright once nothing references it.  Always returns 0.
+ */
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+       if (strong) {
+               if (internal)
+                       node->internal_strong_refs--;
+               else
+                       node->local_strong_refs--;
+               if (node->local_strong_refs || node->internal_strong_refs)
+                       return 0;
+       } else {
+               if (!internal)
+                       node->local_weak_refs--;
+               if (node->local_weak_refs || !hlist_empty(&node->refs))
+                       return 0;
+       }
+       if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+               /* userspace still holds refs: let it know via node work */
+               if (list_empty(&node->work.entry)) {
+                       list_add_tail(&node->work.entry, &node->proc->todo);
+                       wake_up_interruptible(&node->proc->wait);
+               }
+       } else {
+               /* nothing references the node any more: destroy it */
+               if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+                   !node->local_weak_refs) {
+                       list_del_init(&node->work.entry);
+                       if (node->proc) {
+                               rb_erase(&node->rb_node, &node->proc->nodes);
+                               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                            "refless node %d deleted\n",
+                                            node->debug_id);
+                       } else {
+                               /* owner died; node lives on dead-node list */
+                               hlist_del(&node->dead_node);
+                               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                            "dead node %d deleted\n",
+                                            node->debug_id);
+                       }
+                       kfree(node);
+                       binder_stats_deleted(BINDER_STAT_NODE);
+               }
+       }
+
+       return 0;
+}
+
+
+/* Find the ref with descriptor @desc in @proc, or NULL if none. */
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+                                        uint32_t desc)
+{
+       struct rb_node *pos = proc->refs_by_desc.rb_node;
+
+       while (pos != NULL) {
+               struct binder_ref *ref;
+
+               ref = rb_entry(pos, struct binder_ref, rb_node_desc);
+               if (desc == ref->desc)
+                       return ref;
+               pos = (desc < ref->desc) ? pos->rb_left : pos->rb_right;
+       }
+       return NULL;
+}
+
+/*
+ * Return @proc's ref for @node, creating one if needed.
+ *
+ * A new ref is assigned the lowest unused descriptor (0 is reserved
+ * for the context manager node) and inserted into both the by-node
+ * and by-desc trees.  Returns NULL only on allocation failure.
+ */
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+                                                 struct binder_node *node)
+{
+       struct rb_node *n;
+       struct rb_node **p = &proc->refs_by_node.rb_node;
+       struct rb_node *parent = NULL;
+       struct binder_ref *ref, *new_ref;
+
+       /* existing ref for this node? */
+       while (*p) {
+               parent = *p;
+               ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+               if (node < ref->node)
+                       p = &(*p)->rb_left;
+               else if (node > ref->node)
+                       p = &(*p)->rb_right;
+               else
+                       return ref;
+       }
+       new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+       if (new_ref == NULL)
+               return NULL;
+       binder_stats_created(BINDER_STAT_REF);
+       new_ref->debug_id = ++binder_last_id;
+       new_ref->proc = proc;
+       new_ref->node = node;
+       rb_link_node(&new_ref->rb_node_node, parent, p);
+       rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+       /* pick the smallest descriptor not yet in use, scanning the
+        * by-desc tree in ascending order */
+       new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+       for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+               ref = rb_entry(n, struct binder_ref, rb_node_desc);
+               if (ref->desc > new_ref->desc)
+                       break;
+               new_ref->desc = ref->desc + 1;
+       }
+
+       /* insert under the now-unique descriptor */
+       p = &proc->refs_by_desc.rb_node;
+       while (*p) {
+               parent = *p;
+               ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+               if (new_ref->desc < ref->desc)
+                       p = &(*p)->rb_left;
+               else if (new_ref->desc > ref->desc)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+       rb_link_node(&new_ref->rb_node_desc, parent, p);
+       rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+       if (node) {
+               hlist_add_head(&new_ref->node_entry, &node->refs);
+
+               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                            "%d new ref %d desc %d for node %d\n",
+                             proc->pid, new_ref->debug_id, new_ref->desc,
+                             node->debug_id);
+       } else {
+               binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                            "%d new ref %d desc %d for dead node\n",
+                             proc->pid, new_ref->debug_id, new_ref->desc);
+       }
+       return new_ref;
+}
+
+/*
+ * Tear down @ref: remove it from both lookup trees, drop the node
+ * references it held, and cancel any pending death notification.
+ */
+static void binder_delete_ref(struct binder_ref *ref)
+{
+       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                    "%d delete ref %d desc %d for node %d\n",
+                     ref->proc->pid, ref->debug_id, ref->desc,
+                     ref->node->debug_id);
+
+       rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+       rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+       if (ref->strong)
+               binder_dec_node(ref->node, 1, 1);
+       hlist_del(&ref->node_entry);
+       /* every ref holds one internal weak reference on its node */
+       binder_dec_node(ref->node, 0, 1);
+       if (ref->death) {
+               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                            "%d delete ref %d desc %d has death notification\n",
+                             ref->proc->pid, ref->debug_id, ref->desc);
+               list_del(&ref->death->work.entry);
+               kfree(ref->death);
+               binder_stats_deleted(BINDER_STAT_DEATH);
+       }
+       kfree(ref);
+       binder_stats_deleted(BINDER_STAT_REF);
+}
+
+/*
+ * Take a strong or weak reference on @ref, forwarding the first
+ * increment of each kind to the underlying node.  Returns 0 on
+ * success or the error from binder_inc_node().
+ */
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+                         struct list_head *target_list)
+{
+       int *count = strong ? &ref->strong : &ref->weak;
+
+       if (*count == 0) {
+               int ret;
+
+               ret = binder_inc_node(ref->node, strong ? 1 : 0, 1,
+                                     target_list);
+               if (ret)
+                       return ret;
+       }
+       (*count)++;
+       return 0;
+}
+
+
+/*
+ * Drop a strong or weak count on @ref, reporting a user error on
+ * underflow.  The last strong count is forwarded to the node, and
+ * the ref destroys itself once both counts reach zero.
+ */
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+       if (strong) {
+               if (ref->strong == 0) {
+                       binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
+                                         ref->proc->pid, ref->debug_id,
+                                         ref->desc, ref->strong, ref->weak);
+                       return -EINVAL;
+               }
+               ref->strong--;
+               if (ref->strong == 0) {
+                       int ret;
+
+                       ret = binder_dec_node(ref->node, strong, 1);
+                       if (ret)
+                               return ret;
+               }
+       } else {
+               if (ref->weak == 0) {
+                       binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
+                                         ref->proc->pid, ref->debug_id,
+                                         ref->desc, ref->strong, ref->weak);
+                       return -EINVAL;
+               }
+               ref->weak--;
+       }
+       /* no counts left at all: tear the ref down */
+       if (ref->strong == 0 && ref->weak == 0)
+               binder_delete_ref(ref);
+       return 0;
+}
+
+/*
+ * Remove transaction @t from @target_thread's stack (if a thread is
+ * given) and free it, detaching its buffer first.
+ */
+static void binder_pop_transaction(struct binder_thread *target_thread,
+                                  struct binder_transaction *t)
+{
+       if (target_thread != NULL) {
+               BUG_ON(target_thread->transaction_stack != t);
+               /* stack top is t, so t->from must be this thread */
+               BUG_ON(t->from != target_thread);
+               target_thread->transaction_stack = t->from_parent;
+               t->from = NULL;
+       }
+       t->need_reply = 0;
+       if (t->buffer != NULL)
+               t->buffer->transaction = NULL;
+       kfree(t);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
+/*
+ * Deliver @error_code for a failed transaction.
+ *
+ * Walks down the from_parent chain until a sender thread is found
+ * that can receive the error; transactions whose senders are gone
+ * are popped and freed along the way.
+ */
+static void binder_send_failed_reply(struct binder_transaction *t,
+                                    uint32_t error_code)
+{
+       struct binder_thread *target_thread;
+       struct binder_transaction *next;
+
+       BUG_ON(t->flags & TF_ONE_WAY);
+       while (1) {
+               target_thread = t->from;
+               if (target_thread) {
+                       /* an undelivered earlier error moves to the
+                        * secondary slot so it is not lost */
+                       if (target_thread->return_error != BR_OK &&
+                          target_thread->return_error2 == BR_OK) {
+                               target_thread->return_error2 =
+                                       target_thread->return_error;
+                               target_thread->return_error = BR_OK;
+                       }
+                       if (target_thread->return_error == BR_OK) {
+                               binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                                            "send failed reply for transaction %d to %d:%d\n",
+                                             t->debug_id,
+                                             target_thread->proc->pid,
+                                             target_thread->pid);
+
+                               binder_pop_transaction(target_thread, t);
+                               target_thread->return_error = error_code;
+                               wake_up_interruptible(&target_thread->wait);
+                       } else {
+                               pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
+                                       target_thread->proc->pid,
+                                       target_thread->pid,
+                                       target_thread->return_error);
+                       }
+                       return;
+               }
+               /* sender thread is gone: discard this transaction and
+                * retry with its parent */
+               next = t->from_parent;
+
+               binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                            "send failed reply for transaction %d, target dead\n",
+                            t->debug_id);
+
+               binder_pop_transaction(target_thread, t);
+               if (next == NULL) {
+                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                    "reply failed, no target thread at root\n");
+                       return;
+               }
+               t = next;
+               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                            "reply failed, no target thread -- retry %d\n",
+                             t->debug_id);
+       }
+}
+
+/*
+ * Release all objects referenced by a transaction buffer.
+ *
+ * Walks the flat_binder_object offsets embedded in @buffer and drops
+ * the node/ref/fd references taken when the transaction was built.
+ * If @failed_at is non-NULL, only objects before that offset are
+ * released (used when binder_transaction() fails part-way through
+ * translating the objects).
+ */
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+                                             struct binder_buffer *buffer,
+                                             binder_size_t *failed_at)
+{
+       binder_size_t *offp, *off_end;
+       int debug_id = buffer->debug_id;
+
+       binder_debug(BINDER_DEBUG_TRANSACTION,
+                    "%d buffer release %d, size %zd-%zd, failed at %p\n",
+                    proc->pid, buffer->debug_id,
+                    buffer->data_size, buffer->offsets_size, failed_at);
+
+       if (buffer->target_node)
+               binder_dec_node(buffer->target_node, 1, 0);
+
+       /* offsets array follows the pointer-aligned data section */
+       offp = (binder_size_t *)(buffer->data +
+                                ALIGN(buffer->data_size, sizeof(void *)));
+       if (failed_at)
+               off_end = failed_at;
+       else
+               off_end = (void *)offp + buffer->offsets_size;
+       for (; offp < off_end; offp++) {
+               struct flat_binder_object *fp;
+
+               /* validate offset before dereferencing into the buffer */
+               if (*offp > buffer->data_size - sizeof(*fp) ||
+                   buffer->data_size < sizeof(*fp) ||
+                   !IS_ALIGNED(*offp, sizeof(u32))) {
+                       pr_err("transaction release %d bad offset %lld, size %zd\n",
+                              debug_id, (u64)*offp, buffer->data_size);
+                       continue;
+               }
+               fp = (struct flat_binder_object *)(buffer->data + *offp);
+               switch (fp->type) {
+               case BINDER_TYPE_BINDER:
+               case BINDER_TYPE_WEAK_BINDER: {
+                       struct binder_node *node = binder_get_node(proc, fp->binder);
+
+                       if (node == NULL) {
+                               pr_err("transaction release %d bad node %016llx\n",
+                                      debug_id, (u64)fp->binder);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        node %d u%016llx\n",
+                                    node->debug_id, (u64)node->ptr);
+                       binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+               } break;
+               case BINDER_TYPE_HANDLE:
+               case BINDER_TYPE_WEAK_HANDLE: {
+                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
+                       if (ref == NULL) {
+                               pr_err("transaction release %d bad handle %d\n",
+                                debug_id, fp->handle);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        ref %d desc %d (node %d)\n",
+                                    ref->debug_id, ref->desc, ref->node->debug_id);
+                       binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+               } break;
+
+               case BINDER_TYPE_FD:
+                       /* installed fds are closed only when unwinding a
+                        * failed transaction; on success the target owns
+                        * them */
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        fd %d\n", fp->handle);
+                       if (failed_at)
+                               task_close_fd(proc, fp->handle);
+                       break;
+
+               default:
+                       pr_err("transaction release %d bad object type %x\n",
+                               debug_id, fp->type);
+                       break;
+               }
+       }
+}
+
+static void binder_transaction(struct binder_proc *proc,
+                              struct binder_thread *thread,
+                              struct binder_transaction_data *tr, int reply)
+{
+       struct binder_transaction *t;
+       struct binder_work *tcomplete;
+       binder_size_t *offp, *off_end;
+       binder_size_t off_min;
+       struct binder_proc *target_proc;
+       struct binder_thread *target_thread = NULL;
+       struct binder_node *target_node = NULL;
+       struct list_head *target_list;
+       wait_queue_head_t *target_wait;
+       struct binder_transaction *in_reply_to = NULL;
+       struct binder_transaction_log_entry *e;
+       uint32_t return_error;
+
+       e = binder_transaction_log_add(&binder_transaction_log);
+       e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+       e->from_proc = proc->pid;
+       e->from_thread = thread->pid;
+       e->target_handle = tr->target.handle;
+       e->data_size = tr->data_size;
+       e->offsets_size = tr->offsets_size;
+
+       if (reply) {
+               in_reply_to = thread->transaction_stack;
+               if (in_reply_to == NULL) {
+                       binder_user_error("%d:%d got reply transaction with no transaction stack\n",
+                                         proc->pid, thread->pid);
+                       return_error = BR_FAILED_REPLY;
+                       goto err_empty_call_stack;
+               }
+               binder_set_nice(in_reply_to->saved_priority);
+               if (in_reply_to->to_thread != thread) {
+                       binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
+                               proc->pid, thread->pid, in_reply_to->debug_id,
+                               in_reply_to->to_proc ?
+                               in_reply_to->to_proc->pid : 0,
+                               in_reply_to->to_thread ?
+                               in_reply_to->to_thread->pid : 0);
+                       return_error = BR_FAILED_REPLY;
+                       in_reply_to = NULL;
+                       goto err_bad_call_stack;
+               }
+               thread->transaction_stack = in_reply_to->to_parent;
+               target_thread = in_reply_to->from;
+               if (target_thread == NULL) {
+                       return_error = BR_DEAD_REPLY;
+                       goto err_dead_binder;
+               }
+               if (target_thread->transaction_stack != in_reply_to) {
+                       binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
+                               proc->pid, thread->pid,
+                               target_thread->transaction_stack ?
+                               target_thread->transaction_stack->debug_id : 0,
+                               in_reply_to->debug_id);
+                       return_error = BR_FAILED_REPLY;
+                       in_reply_to = NULL;
+                       target_thread = NULL;
+                       goto err_dead_binder;
+               }
+               target_proc = target_thread->proc;
+       } else {
+               if (tr->target.handle) {
+                       struct binder_ref *ref;
+
+                       ref = binder_get_ref(proc, tr->target.handle);
+                       if (ref == NULL) {
+                               binder_user_error("%d:%d got transaction to invalid handle\n",
+                                       proc->pid, thread->pid);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_invalid_target_handle;
+                       }
+                       target_node = ref->node;
+               } else {
+                       target_node = binder_context_mgr_node;
+                       if (target_node == NULL) {
+                               return_error = BR_DEAD_REPLY;
+                               goto err_no_context_mgr_node;
+                       }
+               }
+               e->to_node = target_node->debug_id;
+               target_proc = target_node->proc;
+               if (target_proc == NULL) {
+                       return_error = BR_DEAD_REPLY;
+                       goto err_dead_binder;
+               }
+               if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
+                       return_error = BR_FAILED_REPLY;
+                       goto err_invalid_target_handle;
+               }
+               if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+                       struct binder_transaction *tmp;
+
+                       tmp = thread->transaction_stack;
+                       if (tmp->to_thread != thread) {
+                               binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
+                                       proc->pid, thread->pid, tmp->debug_id,
+                                       tmp->to_proc ? tmp->to_proc->pid : 0,
+                                       tmp->to_thread ?
+                                       tmp->to_thread->pid : 0);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_bad_call_stack;
+                       }
+                       while (tmp) {
+                               if (tmp->from && tmp->from->proc == target_proc)
+                                       target_thread = tmp->from;
+                               tmp = tmp->from_parent;
+                       }
+               }
+       }
+       if (target_thread) {
+               e->to_thread = target_thread->pid;
+               target_list = &target_thread->todo;
+               target_wait = &target_thread->wait;
+       } else {
+               target_list = &target_proc->todo;
+               target_wait = &target_proc->wait;
+       }
+       e->to_proc = target_proc->pid;
+
+       /* TODO: reuse incoming transaction for reply */
+       t = kzalloc(sizeof(*t), GFP_KERNEL);
+       if (t == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_alloc_t_failed;
+       }
+       binder_stats_created(BINDER_STAT_TRANSACTION);
+
+       tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+       if (tcomplete == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_alloc_tcomplete_failed;
+       }
+       binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+       t->debug_id = ++binder_last_id;
+       e->debug_id = t->debug_id;
+
+       if (reply)
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
+                            proc->pid, thread->pid, t->debug_id,
+                            target_proc->pid, target_thread->pid,
+                            (u64)tr->data.ptr.buffer,
+                            (u64)tr->data.ptr.offsets,
+                            (u64)tr->data_size, (u64)tr->offsets_size);
+       else
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
+                            proc->pid, thread->pid, t->debug_id,
+                            target_proc->pid, target_node->debug_id,
+                            (u64)tr->data.ptr.buffer,
+                            (u64)tr->data.ptr.offsets,
+                            (u64)tr->data_size, (u64)tr->offsets_size);
+
+       if (!reply && !(tr->flags & TF_ONE_WAY))
+               t->from = thread;
+       else
+               t->from = NULL;
+       t->sender_euid = task_euid(proc->tsk);
+       t->to_proc = target_proc;
+       t->to_thread = target_thread;
+       t->code = tr->code;
+       t->flags = tr->flags;
+       t->priority = task_nice(current);
+
+       trace_binder_transaction(reply, t, target_node);
+
+       t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+               tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+       if (t->buffer == NULL) {
+               return_error = BR_FAILED_REPLY;
+               goto err_binder_alloc_buf_failed;
+       }
+       t->buffer->allow_user_free = 0;
+       t->buffer->debug_id = t->debug_id;
+       t->buffer->transaction = t;
+       t->buffer->target_node = target_node;
+       trace_binder_transaction_alloc_buf(t->buffer);
+       if (target_node)
+               binder_inc_node(target_node, 1, 0, NULL);
+
+       offp = (binder_size_t *)(t->buffer->data +
+                                ALIGN(tr->data_size, sizeof(void *)));
+
+       if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
+                          tr->data.ptr.buffer, tr->data_size)) {
+               binder_user_error("%d:%d got transaction with invalid data ptr\n",
+                               proc->pid, thread->pid);
+               return_error = BR_FAILED_REPLY;
+               goto err_copy_data_failed;
+       }
+       if (copy_from_user(offp, (const void __user *)(uintptr_t)
+                          tr->data.ptr.offsets, tr->offsets_size)) {
+               binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
+                               proc->pid, thread->pid);
+               return_error = BR_FAILED_REPLY;
+               goto err_copy_data_failed;
+       }
+       if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
+               binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
+                               proc->pid, thread->pid, (u64)tr->offsets_size);
+               return_error = BR_FAILED_REPLY;
+               goto err_bad_offset;
+       }
+       off_end = (void *)offp + tr->offsets_size;
+       off_min = 0;
+       for (; offp < off_end; offp++) {
+               struct flat_binder_object *fp;
+
+               if (*offp > t->buffer->data_size - sizeof(*fp) ||
+                   *offp < off_min ||
+                   t->buffer->data_size < sizeof(*fp) ||
+                   !IS_ALIGNED(*offp, sizeof(u32))) {
+                       binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
+                                         proc->pid, thread->pid, (u64)*offp,
+                                         (u64)off_min,
+                                         (u64)(t->buffer->data_size -
+                                         sizeof(*fp)));
+                       return_error = BR_FAILED_REPLY;
+                       goto err_bad_offset;
+               }
+               fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+               off_min = *offp + sizeof(struct flat_binder_object);
+               switch (fp->type) {
+               case BINDER_TYPE_BINDER:
+               case BINDER_TYPE_WEAK_BINDER: {
+                       struct binder_ref *ref;
+                       struct binder_node *node = binder_get_node(proc, fp->binder);
+
+                       if (node == NULL) {
+                               node = binder_new_node(proc, fp->binder, fp->cookie);
+                               if (node == NULL) {
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_binder_new_node_failed;
+                               }
+                               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+                               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+                       }
+                       if (fp->cookie != node->cookie) {
+                               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
+                                       proc->pid, thread->pid,
+                                       (u64)fp->binder, node->debug_id,
+                                       (u64)fp->cookie, (u64)node->cookie);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       ref = binder_get_ref_for_node(target_proc, node);
+                       if (ref == NULL) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_for_node_failed;
+                       }
+                       if (fp->type == BINDER_TYPE_BINDER)
+                               fp->type = BINDER_TYPE_HANDLE;
+                       else
+                               fp->type = BINDER_TYPE_WEAK_HANDLE;
+                       fp->handle = ref->desc;
+                       binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+                                      &thread->todo);
+
+                       trace_binder_transaction_node_to_ref(t, node, ref);
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        node %d u%016llx -> ref %d desc %d\n",
+                                    node->debug_id, (u64)node->ptr,
+                                    ref->debug_id, ref->desc);
+               } break;
+               case BINDER_TYPE_HANDLE:
+               case BINDER_TYPE_WEAK_HANDLE: {
+                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+
+                       if (ref == NULL) {
+                               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
+                                               proc->pid,
+                                               thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_failed;
+                       }
+                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+                               return_error = BR_FAILED_REPLY;
+                               goto err_binder_get_ref_failed;
+                       }
+                       if (ref->node->proc == target_proc) {
+                               if (fp->type == BINDER_TYPE_HANDLE)
+                                       fp->type = BINDER_TYPE_BINDER;
+                               else
+                                       fp->type = BINDER_TYPE_WEAK_BINDER;
+                               fp->binder = ref->node->ptr;
+                               fp->cookie = ref->node->cookie;
+                               binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+                               trace_binder_transaction_ref_to_node(t, ref);
+                               binder_debug(BINDER_DEBUG_TRANSACTION,
+                                            "        ref %d desc %d -> node %d u%016llx\n",
+                                            ref->debug_id, ref->desc, ref->node->debug_id,
+                                            (u64)ref->node->ptr);
+                       } else {
+                               struct binder_ref *new_ref;
+
+                               new_ref = binder_get_ref_for_node(target_proc, ref->node);
+                               if (new_ref == NULL) {
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_binder_get_ref_for_node_failed;
+                               }
+                               fp->handle = new_ref->desc;
+                               binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+                               trace_binder_transaction_ref_to_ref(t, ref,
+                                                                   new_ref);
+                               binder_debug(BINDER_DEBUG_TRANSACTION,
+                                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
+                                            ref->debug_id, ref->desc, new_ref->debug_id,
+                                            new_ref->desc, ref->node->debug_id);
+                       }
+               } break;
+
+               case BINDER_TYPE_FD: {
+                       int target_fd;
+                       struct file *file;
+
+                       if (reply) {
+                               if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+                                       binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
+                                               proc->pid, thread->pid, fp->handle);
+                                       return_error = BR_FAILED_REPLY;
+                                       goto err_fd_not_allowed;
+                               }
+                       } else if (!target_node->accept_fds) {
+                               binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
+                                       proc->pid, thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_fd_not_allowed;
+                       }
+
+                       file = fget(fp->handle);
+                       if (file == NULL) {
+                               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
+                                       proc->pid, thread->pid, fp->handle);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_fget_failed;
+                       }
+                       if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
+                               fput(file);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_get_unused_fd_failed;
+                       }
+                       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+                       if (target_fd < 0) {
+                               fput(file);
+                               return_error = BR_FAILED_REPLY;
+                               goto err_get_unused_fd_failed;
+                       }
+                       task_fd_install(target_proc, target_fd, file);
+                       trace_binder_transaction_fd(t, fp->handle, target_fd);
+                       binder_debug(BINDER_DEBUG_TRANSACTION,
+                                    "        fd %d -> %d\n", fp->handle, target_fd);
+                       /* TODO: fput? */
+                       fp->handle = target_fd;
+               } break;
+
+               default:
+                       binder_user_error("%d:%d got transaction with invalid object type, %x\n",
+                               proc->pid, thread->pid, fp->type);
+                       return_error = BR_FAILED_REPLY;
+                       goto err_bad_object_type;
+               }
+       }
+       if (reply) {
+               BUG_ON(t->buffer->async_transaction != 0);
+               binder_pop_transaction(target_thread, in_reply_to);
+       } else if (!(t->flags & TF_ONE_WAY)) {
+               BUG_ON(t->buffer->async_transaction != 0);
+               t->need_reply = 1;
+               t->from_parent = thread->transaction_stack;
+               thread->transaction_stack = t;
+       } else {
+               BUG_ON(target_node == NULL);
+               BUG_ON(t->buffer->async_transaction != 1);
+               if (target_node->has_async_transaction) {
+                       target_list = &target_node->async_todo;
+                       target_wait = NULL;
+               } else
+                       target_node->has_async_transaction = 1;
+       }
+       t->work.type = BINDER_WORK_TRANSACTION;
+       list_add_tail(&t->work.entry, target_list);
+       tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+       list_add_tail(&tcomplete->entry, &thread->todo);
+       if (target_wait)
+               wake_up_interruptible(target_wait);
+       return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+       trace_binder_transaction_failed_buffer_release(t->buffer);
+       binder_transaction_buffer_release(target_proc, t->buffer, offp);
+       t->buffer->transaction = NULL;
+       binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+       kfree(tcomplete);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+       kfree(t);
+       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                    "%d:%d transaction failed %d, size %lld-%lld\n",
+                    proc->pid, thread->pid, return_error,
+                    (u64)tr->data_size, (u64)tr->offsets_size);
+
+       {
+               struct binder_transaction_log_entry *fe;
+
+               fe = binder_transaction_log_add(&binder_transaction_log_failed);
+               *fe = *e;
+       }
+
+       BUG_ON(thread->return_error != BR_OK);
+       if (in_reply_to) {
+               thread->return_error = BR_TRANSACTION_COMPLETE;
+               binder_send_failed_reply(in_reply_to, return_error);
+       } else
+               thread->return_error = return_error;
+}
+
+static int binder_thread_write(struct binder_proc *proc,
+                       struct binder_thread *thread,
+                       binder_uintptr_t binder_buffer, size_t size,
+                       binder_size_t *consumed)
+{
+       uint32_t cmd;
+       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
+       void __user *ptr = buffer + *consumed;
+       void __user *end = buffer + size;
+
+       while (ptr < end && thread->return_error == BR_OK) {
+               if (get_user(cmd, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               trace_binder_command(cmd);
+               if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+                       binder_stats.bc[_IOC_NR(cmd)]++;
+                       proc->stats.bc[_IOC_NR(cmd)]++;
+                       thread->stats.bc[_IOC_NR(cmd)]++;
+               }
+               switch (cmd) {
+               case BC_INCREFS:
+               case BC_ACQUIRE:
+               case BC_RELEASE:
+               case BC_DECREFS: {
+                       uint32_t target;
+                       struct binder_ref *ref;
+                       const char *debug_string;
+
+                       if (get_user(target, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (target == 0 && binder_context_mgr_node &&
+                           (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+                               ref = binder_get_ref_for_node(proc,
+                                              binder_context_mgr_node);
+                               if (ref->desc != target) {
+                                       binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
+                                               proc->pid, thread->pid,
+                                               ref->desc);
+                               }
+                       } else
+                               ref = binder_get_ref(proc, target);
+                       if (ref == NULL) {
+                               binder_user_error("%d:%d refcount change on invalid ref %d\n",
+                                       proc->pid, thread->pid, target);
+                               break;
+                       }
+                       switch (cmd) {
+                       case BC_INCREFS:
+                               debug_string = "IncRefs";
+                               binder_inc_ref(ref, 0, NULL);
+                               break;
+                       case BC_ACQUIRE:
+                               debug_string = "Acquire";
+                               binder_inc_ref(ref, 1, NULL);
+                               break;
+                       case BC_RELEASE:
+                               debug_string = "Release";
+                               binder_dec_ref(ref, 1);
+                               break;
+                       case BC_DECREFS:
+                       default:
+                               debug_string = "DecRefs";
+                               binder_dec_ref(ref, 0);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_USER_REFS,
+                                    "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
+                                    proc->pid, thread->pid, debug_string, ref->debug_id,
+                                    ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+                       break;
+               }
+               case BC_INCREFS_DONE:
+               case BC_ACQUIRE_DONE: {
+                       binder_uintptr_t node_ptr;
+                       binder_uintptr_t cookie;
+                       struct binder_node *node;
+
+                       if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(binder_uintptr_t);
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(binder_uintptr_t);
+                       node = binder_get_node(proc, node_ptr);
+                       if (node == NULL) {
+                               binder_user_error("%d:%d %s u%016llx no match\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_INCREFS_DONE ?
+                                       "BC_INCREFS_DONE" :
+                                       "BC_ACQUIRE_DONE",
+                                       (u64)node_ptr);
+                               break;
+                       }
+                       if (cookie != node->cookie) {
+                               binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_INCREFS_DONE ?
+                                       "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+                                       (u64)node_ptr, node->debug_id,
+                                       (u64)cookie, (u64)node->cookie);
+                               break;
+                       }
+                       if (cmd == BC_ACQUIRE_DONE) {
+                               if (node->pending_strong_ref == 0) {
+                                       binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
+                                               proc->pid, thread->pid,
+                                               node->debug_id);
+                                       break;
+                               }
+                               node->pending_strong_ref = 0;
+                       } else {
+                               if (node->pending_weak_ref == 0) {
+                                       binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
+                                               proc->pid, thread->pid,
+                                               node->debug_id);
+                                       break;
+                               }
+                               node->pending_weak_ref = 0;
+                       }
+                       binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+                       binder_debug(BINDER_DEBUG_USER_REFS,
+                                    "%d:%d %s node %d ls %d lw %d\n",
+                                    proc->pid, thread->pid,
+                                    cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+                                    node->debug_id, node->local_strong_refs, node->local_weak_refs);
+                       break;
+               }
+               case BC_ATTEMPT_ACQUIRE:
+                       pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
+                       return -EINVAL;
+               case BC_ACQUIRE_RESULT:
+                       pr_err("BC_ACQUIRE_RESULT not supported\n");
+                       return -EINVAL;
+
+               case BC_FREE_BUFFER: {
+                       binder_uintptr_t data_ptr;
+                       struct binder_buffer *buffer;
+
+                       if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(binder_uintptr_t);
+
+                       buffer = binder_buffer_lookup(proc, data_ptr);
+                       if (buffer == NULL) {
+                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
+                                       proc->pid, thread->pid, (u64)data_ptr);
+                               break;
+                       }
+                       if (!buffer->allow_user_free) {
+                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
+                                       proc->pid, thread->pid, (u64)data_ptr);
+                               break;
+                       }
+                       binder_debug(BINDER_DEBUG_FREE_BUFFER,
+                                    "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
+                                    proc->pid, thread->pid, (u64)data_ptr,
+                                    buffer->debug_id,
+                                    buffer->transaction ? "active" : "finished");
+
+                       if (buffer->transaction) {
+                               buffer->transaction->buffer = NULL;
+                               buffer->transaction = NULL;
+                       }
+                       if (buffer->async_transaction && buffer->target_node) {
+                               BUG_ON(!buffer->target_node->has_async_transaction);
+                               if (list_empty(&buffer->target_node->async_todo))
+                                       buffer->target_node->has_async_transaction = 0;
+                               else
+                                       list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+                       }
+                       trace_binder_transaction_buffer_release(buffer);
+                       binder_transaction_buffer_release(proc, buffer, NULL);
+                       binder_free_buf(proc, buffer);
+                       break;
+               }
+
+               case BC_TRANSACTION:
+               case BC_REPLY: {
+                       struct binder_transaction_data tr;
+
+                       if (copy_from_user(&tr, ptr, sizeof(tr)))
+                               return -EFAULT;
+                       ptr += sizeof(tr);
+                       binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+                       break;
+               }
+
+               case BC_REGISTER_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "%d:%d BC_REGISTER_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
+                                       proc->pid, thread->pid);
+                       } else if (proc->requested_threads == 0) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
+                                       proc->pid, thread->pid);
+                       } else {
+                               proc->requested_threads--;
+                               proc->requested_threads_started++;
+                       }
+                       thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+                       break;
+               case BC_ENTER_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "%d:%d BC_ENTER_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
+                               binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
+                                       proc->pid, thread->pid);
+                       }
+                       thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+                       break;
+               case BC_EXIT_LOOPER:
+                       binder_debug(BINDER_DEBUG_THREADS,
+                                    "%d:%d BC_EXIT_LOOPER\n",
+                                    proc->pid, thread->pid);
+                       thread->looper |= BINDER_LOOPER_STATE_EXITED;
+                       break;
+
+               case BC_REQUEST_DEATH_NOTIFICATION:
+               case BC_CLEAR_DEATH_NOTIFICATION: {
+                       uint32_t target;
+                       binder_uintptr_t cookie;
+                       struct binder_ref *ref;
+                       struct binder_ref_death *death;
+
+                       if (get_user(target, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(binder_uintptr_t);
+                       ref = binder_get_ref(proc, target);
+                       if (ref == NULL) {
+                               binder_user_error("%d:%d %s invalid ref %d\n",
+                                       proc->pid, thread->pid,
+                                       cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+                                       "BC_REQUEST_DEATH_NOTIFICATION" :
+                                       "BC_CLEAR_DEATH_NOTIFICATION",
+                                       target);
+                               break;
+                       }
+
+                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+                                    "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
+                                    proc->pid, thread->pid,
+                                    cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+                                    "BC_REQUEST_DEATH_NOTIFICATION" :
+                                    "BC_CLEAR_DEATH_NOTIFICATION",
+                                    (u64)cookie, ref->debug_id, ref->desc,
+                                    ref->strong, ref->weak, ref->node->debug_id);
+
+                       if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+                               if (ref->death) {
+                                       binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
+                                               proc->pid, thread->pid);
+                                       break;
+                               }
+                               death = kzalloc(sizeof(*death), GFP_KERNEL);
+                               if (death == NULL) {
+                                       thread->return_error = BR_ERROR;
+                                       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+                                                    "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
+                                                    proc->pid, thread->pid);
+                                       break;
+                               }
+                               binder_stats_created(BINDER_STAT_DEATH);
+                               INIT_LIST_HEAD(&death->work.entry);
+                               death->cookie = cookie;
+                               ref->death = death;
+                               if (ref->node->proc == NULL) {
+                                       ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                               list_add_tail(&ref->death->work.entry, &thread->todo);
+                                       } else {
+                                               list_add_tail(&ref->death->work.entry, &proc->todo);
+                                               wake_up_interruptible(&proc->wait);
+                                       }
+                               }
+                       } else {
+                               if (ref->death == NULL) {
+                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
+                                               proc->pid, thread->pid);
+                                       break;
+                               }
+                               death = ref->death;
+                               if (death->cookie != cookie) {
+                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
+                                               proc->pid, thread->pid,
+                                               (u64)death->cookie,
+                                               (u64)cookie);
+                                       break;
+                               }
+                               ref->death = NULL;
+                               if (list_empty(&death->work.entry)) {
+                                       death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                               list_add_tail(&death->work.entry, &thread->todo);
+                                       } else {
+                                               list_add_tail(&death->work.entry, &proc->todo);
+                                               wake_up_interruptible(&proc->wait);
+                                       }
+                               } else {
+                                       BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+                                       death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+                               }
+                       }
+               } break;
+               case BC_DEAD_BINDER_DONE: {
+                       struct binder_work *w;
+                       binder_uintptr_t cookie;
+                       struct binder_ref_death *death = NULL;
+
+                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+
+                       ptr += sizeof(void *);
+                       list_for_each_entry(w, &proc->delivered_death, entry) {
+                               struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+
+                               if (tmp_death->cookie == cookie) {
+                                       death = tmp_death;
+                                       break;
+                               }
+                       }
+                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                                    "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
+                                    proc->pid, thread->pid, (u64)cookie,
+                                    death);
+                       if (death == NULL) {
+                               binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
+                                       proc->pid, thread->pid, (u64)cookie);
+                               break;
+                       }
+
+                       list_del_init(&death->work.entry);
+                       if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+                               death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+                               if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+                                       list_add_tail(&death->work.entry, &thread->todo);
+                               } else {
+                                       list_add_tail(&death->work.entry, &proc->todo);
+                                       wake_up_interruptible(&proc->wait);
+                               }
+                       }
+               } break;
+
+               default:
+                       pr_err("%d:%d unknown command %d\n",
+                              proc->pid, thread->pid, cmd);
+                       return -EINVAL;
+               }
+               *consumed = ptr - buffer;
+       }
+       return 0;
+}
+
+/*
+ * Account one delivered BR_* return command in the global, per-process
+ * and per-thread statistics, and fire the binder_return tracepoint.
+ * Commands whose _IOC_NR falls outside the stats array are traced but
+ * not counted.
+ */
+static void binder_stat_br(struct binder_proc *proc,
+                          struct binder_thread *thread, uint32_t cmd)
+{
+       unsigned int idx = _IOC_NR(cmd);
+
+       trace_binder_return(cmd);
+       if (idx >= ARRAY_SIZE(binder_stats.br))
+               return;
+       binder_stats.br[idx]++;
+       proc->stats.br[idx]++;
+       thread->stats.br[idx]++;
+}
+
+/*
+ * Wake condition for a thread sleeping on the process work queue:
+ * there is queued process work, or the thread has been asked to
+ * return to user space.
+ */
+static int binder_has_proc_work(struct binder_proc *proc,
+                               struct binder_thread *thread)
+{
+       if (!list_empty(&proc->todo))
+               return 1;
+       return (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN) != 0;
+}
+
+/*
+ * Wake condition for a thread sleeping on its private work queue:
+ * queued thread work, a pending error to report, or a request to
+ * return to user space.
+ */
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+       if (!list_empty(&thread->todo))
+               return 1;
+       if (thread->return_error != BR_OK)
+               return 1;
+       return (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN) != 0;
+}
+
+/*
+ * binder_thread_read() - translate queued binder work into BR_* commands
+ *                        in the user read buffer.
+ * @proc:          process issuing the read
+ * @thread:        calling thread's binder bookkeeping
+ * @binder_buffer: user-space destination buffer
+ * @size:          size of the destination buffer in bytes
+ * @consumed:      in/out; bytes already written, updated before returning
+ * @non_block:     non-zero for O_NONBLOCK semantics (-EAGAIN instead of
+ *                 sleeping when no work is available)
+ *
+ * Returns 0 on success, -EFAULT if a user copy fails, or the error from
+ * an interrupted/failed wait.
+ */
+static int binder_thread_read(struct binder_proc *proc,
+                             struct binder_thread *thread,
+                             binder_uintptr_t binder_buffer, size_t size,
+                             binder_size_t *consumed, int non_block)
+{
+       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
+       void __user *ptr = buffer + *consumed;
+       void __user *end = buffer + size;
+
+       int ret = 0;
+       int wait_for_proc_work;
+
+       /* A fresh read always begins with a BR_NOOP priming word. */
+       if (*consumed == 0) {
+               if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+       }
+
+retry:
+       /*
+        * A thread with no transaction stack and no private work is free
+        * to service the process-wide todo queue.
+        */
+       wait_for_proc_work = thread->transaction_stack == NULL &&
+                               list_empty(&thread->todo);
+
+       /* Pending error codes are delivered before any other work. */
+       if (thread->return_error != BR_OK && ptr < end) {
+               if (thread->return_error2 != BR_OK) {
+                       if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       binder_stat_br(proc, thread, thread->return_error2);
+                       if (ptr == end)
+                               goto done;
+                       thread->return_error2 = BR_OK;
+               }
+               if (put_user(thread->return_error, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               binder_stat_br(proc, thread, thread->return_error);
+               thread->return_error = BR_OK;
+               goto done;
+       }
+
+
+       thread->looper |= BINDER_LOOPER_STATE_WAITING;
+       /* ready_threads counts threads idle on the process work queue. */
+       if (wait_for_proc_work)
+               proc->ready_threads++;
+
+       /* The driver lock is dropped for the (possibly blocking) wait. */
+       binder_unlock(__func__);
+
+       trace_binder_wait_for_work(wait_for_proc_work,
+                                  !!thread->transaction_stack,
+                                  !list_empty(&thread->todo));
+       if (wait_for_proc_work) {
+               if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+                                       BINDER_LOOPER_STATE_ENTERED))) {
+                       binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
+                               proc->pid, thread->pid, thread->looper);
+                       wait_event_interruptible(binder_user_error_wait,
+                                                binder_stop_on_user_error < 2);
+               }
+               binder_set_nice(proc->default_priority);
+               if (non_block) {
+                       if (!binder_has_proc_work(proc, thread))
+                               ret = -EAGAIN;
+               } else
+                       ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+       } else {
+               if (non_block) {
+                       if (!binder_has_thread_work(thread))
+                               ret = -EAGAIN;
+               } else
+                       ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
+       }
+
+       binder_lock(__func__);
+
+       if (wait_for_proc_work)
+               proc->ready_threads--;
+       thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+       if (ret)
+               return ret;
+
+       /*
+        * Drain work items, thread queue first, converting each into a
+        * BR_* command, until the queues empty or the buffer fills.
+        */
+       while (1) {
+               uint32_t cmd;
+               struct binder_transaction_data tr;
+               struct binder_work *w;
+               struct binder_transaction *t = NULL;
+
+               if (!list_empty(&thread->todo)) {
+                       w = list_first_entry(&thread->todo, struct binder_work,
+                                            entry);
+               } else if (!list_empty(&proc->todo) && wait_for_proc_work) {
+                       w = list_first_entry(&proc->todo, struct binder_work,
+                                            entry);
+               } else {
+                       /* no data added */
+                       if (ptr - buffer == 4 &&
+                           !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN))
+                               goto retry;
+                       break;
+               }
+
+               /* Stop once a cmd word plus transaction_data no longer fits. */
+               if (end - ptr < sizeof(tr) + 4)
+                       break;
+
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION: {
+                       t = container_of(w, struct binder_transaction, work);
+               } break;
+               case BINDER_WORK_TRANSACTION_COMPLETE: {
+                       cmd = BR_TRANSACTION_COMPLETE;
+                       if (put_user(cmd, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+
+                       binder_stat_br(proc, thread, cmd);
+                       binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+                                    "%d:%d BR_TRANSACTION_COMPLETE\n",
+                                    proc->pid, thread->pid);
+
+                       list_del(&w->entry);
+                       kfree(w);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+               } break;
+               case BINDER_WORK_NODE: {
+                       /*
+                        * Synchronize user-space ref counts with the node's
+                        * current strong/weak state, emitting at most one
+                        * BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS per pass.
+                        */
+                       struct binder_node *node = container_of(w, struct binder_node, work);
+                       uint32_t cmd = BR_NOOP;
+                       const char *cmd_name;
+                       int strong = node->internal_strong_refs || node->local_strong_refs;
+                       int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+
+                       if (weak && !node->has_weak_ref) {
+                               cmd = BR_INCREFS;
+                               cmd_name = "BR_INCREFS";
+                               node->has_weak_ref = 1;
+                               node->pending_weak_ref = 1;
+                               node->local_weak_refs++;
+                       } else if (strong && !node->has_strong_ref) {
+                               cmd = BR_ACQUIRE;
+                               cmd_name = "BR_ACQUIRE";
+                               node->has_strong_ref = 1;
+                               node->pending_strong_ref = 1;
+                               node->local_strong_refs++;
+                       } else if (!strong && node->has_strong_ref) {
+                               cmd = BR_RELEASE;
+                               cmd_name = "BR_RELEASE";
+                               node->has_strong_ref = 0;
+                       } else if (!weak && node->has_weak_ref) {
+                               cmd = BR_DECREFS;
+                               cmd_name = "BR_DECREFS";
+                               node->has_weak_ref = 0;
+                       }
+                       if (cmd != BR_NOOP) {
+                               if (put_user(cmd, (uint32_t __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(uint32_t);
+                               if (put_user(node->ptr,
+                                            (binder_uintptr_t __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(binder_uintptr_t);
+                               if (put_user(node->cookie,
+                                            (binder_uintptr_t __user *)ptr))
+                                       return -EFAULT;
+                               ptr += sizeof(binder_uintptr_t);
+
+                               binder_stat_br(proc, thread, cmd);
+                               binder_debug(BINDER_DEBUG_USER_REFS,
+                                            "%d:%d %s %d u%016llx c%016llx\n",
+                                            proc->pid, thread->pid, cmd_name,
+                                            node->debug_id,
+                                            (u64)node->ptr, (u64)node->cookie);
+                       } else {
+                               /* No command needed; free the node if unreferenced. */
+                               list_del_init(&w->entry);
+                               if (!weak && !strong) {
+                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                                    "%d:%d node %d u%016llx c%016llx deleted\n",
+                                                    proc->pid, thread->pid,
+                                                    node->debug_id,
+                                                    (u64)node->ptr,
+                                                    (u64)node->cookie);
+                                       rb_erase(&node->rb_node, &proc->nodes);
+                                       kfree(node);
+                                       binder_stats_deleted(BINDER_STAT_NODE);
+                               } else {
+                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+                                                    "%d:%d node %d u%016llx c%016llx state unchanged\n",
+                                                    proc->pid, thread->pid,
+                                                    node->debug_id,
+                                                    (u64)node->ptr,
+                                                    (u64)node->cookie);
+                               }
+                       }
+               } break;
+               case BINDER_WORK_DEAD_BINDER:
+               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+                       struct binder_ref_death *death;
+                       uint32_t cmd;
+
+                       death = container_of(w, struct binder_ref_death, work);
+                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+                               cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+                       else
+                               cmd = BR_DEAD_BINDER;
+                       if (put_user(cmd, (uint32_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(uint32_t);
+                       if (put_user(death->cookie,
+                                    (binder_uintptr_t __user *)ptr))
+                               return -EFAULT;
+                       ptr += sizeof(binder_uintptr_t);
+                       binder_stat_br(proc, thread, cmd);
+                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+                                    "%d:%d %s %016llx\n",
+                                     proc->pid, thread->pid,
+                                     cmd == BR_DEAD_BINDER ?
+                                     "BR_DEAD_BINDER" :
+                                     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+                                     (u64)death->cookie);
+
+                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+                               list_del(&w->entry);
+                               kfree(death);
+                               binder_stats_deleted(BINDER_STAT_DEATH);
+                       } else
+                               /* Parked until BC_DEAD_BINDER_DONE acknowledges it. */
+                               list_move(&w->entry, &proc->delivered_death);
+                       if (cmd == BR_DEAD_BINDER)
+                               goto done; /* DEAD_BINDER notifications can cause transactions */
+               } break;
+               }
+
+               /* Only BINDER_WORK_TRANSACTION produces a transaction to deliver. */
+               if (!t)
+                       continue;
+
+               BUG_ON(t->buffer == NULL);
+               if (t->buffer->target_node) {
+                       struct binder_node *target_node = t->buffer->target_node;
+
+                       tr.target.ptr = target_node->ptr;
+                       tr.cookie =  target_node->cookie;
+                       /* Adjust niceness toward the target node's min_priority. */
+                       t->saved_priority = task_nice(current);
+                       if (t->priority < target_node->min_priority &&
+                           !(t->flags & TF_ONE_WAY))
+                               binder_set_nice(t->priority);
+                       else if (!(t->flags & TF_ONE_WAY) ||
+                                t->saved_priority > target_node->min_priority)
+                               binder_set_nice(target_node->min_priority);
+                       cmd = BR_TRANSACTION;
+               } else {
+                       tr.target.ptr = 0;
+                       tr.cookie = 0;
+                       cmd = BR_REPLY;
+               }
+               tr.code = t->code;
+               tr.flags = t->flags;
+               tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
+
+               if (t->from) {
+                       struct task_struct *sender = t->from->proc->tsk;
+
+                       tr.sender_pid = task_tgid_nr_ns(sender,
+                                                       task_active_pid_ns(current));
+               } else {
+                       tr.sender_pid = 0;
+               }
+
+               /* Expose the kernel buffer at the receiver's mapped offset. */
+               tr.data_size = t->buffer->data_size;
+               tr.offsets_size = t->buffer->offsets_size;
+               tr.data.ptr.buffer = (binder_uintptr_t)(
+                                       (uintptr_t)t->buffer->data +
+                                       proc->user_buffer_offset);
+               tr.data.ptr.offsets = tr.data.ptr.buffer +
+                                       ALIGN(t->buffer->data_size,
+                                           sizeof(void *));
+
+               if (put_user(cmd, (uint32_t __user *)ptr))
+                       return -EFAULT;
+               ptr += sizeof(uint32_t);
+               if (copy_to_user(ptr, &tr, sizeof(tr)))
+                       return -EFAULT;
+               ptr += sizeof(tr);
+
+               trace_binder_transaction_received(t);
+               binder_stat_br(proc, thread, cmd);
+               binder_debug(BINDER_DEBUG_TRANSACTION,
+                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
+                            proc->pid, thread->pid,
+                            (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+                            "BR_REPLY",
+                            t->debug_id, t->from ? t->from->proc->pid : 0,
+                            t->from ? t->from->pid : 0, cmd,
+                            t->buffer->data_size, t->buffer->offsets_size,
+                            (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
+
+               list_del(&t->work.entry);
+               t->buffer->allow_user_free = 1;
+               if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+                       /* Synchronous call: keep t alive for the reply. */
+                       t->to_parent = thread->transaction_stack;
+                       t->to_thread = thread;
+                       thread->transaction_stack = t;
+               } else {
+                       t->buffer->transaction = NULL;
+                       kfree(t);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION);
+               }
+               break;
+       }
+
+done:
+
+       *consumed = ptr - buffer;
+       /* Ask user space to spawn a looper thread when the pool ran dry. */
+       if (proc->requested_threads + proc->ready_threads == 0 &&
+           proc->requested_threads_started < proc->max_threads &&
+           (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+            BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
+            /*spawn a new thread if we leave this out */) {
+               proc->requested_threads++;
+               binder_debug(BINDER_DEBUG_THREADS,
+                            "%d:%d BR_SPAWN_LOOPER\n",
+                            proc->pid, thread->pid);
+               if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+                       return -EFAULT;
+               binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
+       }
+       return 0;
+}
+
+/*
+ * binder_release_work() - discard a queue of undelivered work items.
+ * @list: the todo/delivered list being torn down
+ *
+ * Pops every binder_work item and releases it according to its type.
+ * Synchronous transactions get a BR_DEAD_REPLY sent back to the caller;
+ * everything else is freed.  Work types without an explicit case here
+ * are logged and leaked (see the default branch).
+ */
+static void binder_release_work(struct list_head *list)
+{
+       struct binder_work *w;
+
+       while (!list_empty(list)) {
+               w = list_first_entry(list, struct binder_work, entry);
+               list_del_init(&w->entry);
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION: {
+                       struct binder_transaction *t;
+
+                       t = container_of(w, struct binder_transaction, work);
+                       if (t->buffer->target_node &&
+                           !(t->flags & TF_ONE_WAY)) {
+                               /* Caller is still waiting; tell it the target died. */
+                               binder_send_failed_reply(t, BR_DEAD_REPLY);
+                       } else {
+                               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                                       "undelivered transaction %d\n",
+                                       t->debug_id);
+                               t->buffer->transaction = NULL;
+                               kfree(t);
+                               binder_stats_deleted(BINDER_STAT_TRANSACTION);
+                       }
+               } break;
+               case BINDER_WORK_TRANSACTION_COMPLETE: {
+                       binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                               "undelivered TRANSACTION_COMPLETE\n");
+                       kfree(w);
+                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+               } break;
+               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+                       struct binder_ref_death *death;
+
+                       death = container_of(w, struct binder_ref_death, work);
+                       binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                               "undelivered death notification, %016llx\n",
+                               (u64)death->cookie);
+                       kfree(death);
+                       binder_stats_deleted(BINDER_STAT_DEATH);
+               } break;
+               default:
+                       /* e.g. BINDER_WORK_NODE lands here; item is not freed. */
+                       pr_err("unexpected work type, %d, not freed\n",
+                              w->type);
+                       break;
+               }
+       }
+
+}
+
+/*
+ * binder_get_thread() - look up (or lazily create) the binder_thread
+ * for the current task in @proc's thread rb-tree, which is keyed by pid.
+ *
+ * Returns NULL only when allocating a new binder_thread fails; callers
+ * must handle that case.  A freshly created thread starts with
+ * NEED_RETURN set so it bounces back to user space after the current
+ * ioctl.
+ */
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+       struct binder_thread *thread = NULL;
+       struct rb_node *parent = NULL;
+       struct rb_node **p = &proc->threads.rb_node;
+
+       /* Standard rb-tree descent; on exit *p is the insertion slot on a miss. */
+       while (*p) {
+               parent = *p;
+               thread = rb_entry(parent, struct binder_thread, rb_node);
+
+               if (current->pid < thread->pid)
+                       p = &(*p)->rb_left;
+               else if (current->pid > thread->pid)
+                       p = &(*p)->rb_right;
+               else
+                       break;
+       }
+       if (*p == NULL) {
+               thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+               if (thread == NULL)
+                       return NULL;
+               binder_stats_created(BINDER_STAT_THREAD);
+               thread->proc = proc;
+               thread->pid = current->pid;
+               init_waitqueue_head(&thread->wait);
+               INIT_LIST_HEAD(&thread->todo);
+               rb_link_node(&thread->rb_node, parent, p);
+               rb_insert_color(&thread->rb_node, &proc->threads);
+               thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+               thread->return_error = BR_OK;
+               thread->return_error2 = BR_OK;
+       }
+       return thread;
+}
+
+/*
+ * binder_free_thread() - detach @thread from @proc and release it.
+ *
+ * Walks the thread's transaction stack, severing this thread from each
+ * transaction (either as sender or receiver); a transaction must involve
+ * this thread on exactly one side, otherwise the stack is corrupt (BUG).
+ * If the top of the stack was a call targeted at this thread, the caller
+ * is notified with BR_DEAD_REPLY.  Remaining queued work is discarded
+ * via binder_release_work().
+ *
+ * Returns the number of transactions that were still active.
+ */
+static int binder_free_thread(struct binder_proc *proc,
+                             struct binder_thread *thread)
+{
+       struct binder_transaction *t;
+       struct binder_transaction *send_reply = NULL;
+       int active_transactions = 0;
+
+       rb_erase(&thread->rb_node, &proc->threads);
+       t = thread->transaction_stack;
+       /* Only the top-of-stack incoming call still owes its sender a reply. */
+       if (t && t->to_thread == thread)
+               send_reply = t;
+       while (t) {
+               active_transactions++;
+               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+                            "release %d:%d transaction %d %s, still active\n",
+                             proc->pid, thread->pid,
+                            t->debug_id,
+                            (t->to_thread == thread) ? "in" : "out");
+
+               if (t->to_thread == thread) {
+                       /* Incoming: drop the receiver side and its buffer link. */
+                       t->to_proc = NULL;
+                       t->to_thread = NULL;
+                       if (t->buffer) {
+                               t->buffer->transaction = NULL;
+                               t->buffer = NULL;
+                       }
+                       t = t->to_parent;
+               } else if (t->from == thread) {
+                       /* Outgoing: drop the sender side. */
+                       t->from = NULL;
+                       t = t->from_parent;
+               } else
+                       BUG();
+       }
+       if (send_reply)
+               binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+       binder_release_work(&thread->todo);
+       kfree(thread);
+       binder_stats_deleted(BINDER_STAT_THREAD);
+       return active_transactions;
+}
+
+/*
+ * binder_poll() - poll/select handler for the binder fd.
+ *
+ * Reports POLLIN when the caller's relevant work queue (process-wide if
+ * the thread is idle, otherwise its private queue) has work, registering
+ * on the matching waitqueue first.  Returns POLLERR if per-thread state
+ * cannot be allocated.
+ */
+static unsigned int binder_poll(struct file *filp,
+                               struct poll_table_struct *wait)
+{
+       struct binder_proc *proc = filp->private_data;
+       struct binder_thread *thread = NULL;
+       int wait_for_proc_work;
+
+       binder_lock(__func__);
+
+       thread = binder_get_thread(proc);
+       if (!thread) {
+               /*
+                * binder_get_thread() returns NULL on allocation failure;
+                * the original code dereferenced it unconditionally below,
+                * oopsing on OOM.
+                */
+               binder_unlock(__func__);
+               return POLLERR;
+       }
+
+       wait_for_proc_work = thread->transaction_stack == NULL &&
+               list_empty(&thread->todo) && thread->return_error == BR_OK;
+
+       binder_unlock(__func__);
+
+       if (wait_for_proc_work) {
+               if (binder_has_proc_work(proc, thread))
+                       return POLLIN;
+               poll_wait(filp, &proc->wait, wait);
+               if (binder_has_proc_work(proc, thread))
+                       return POLLIN;
+       } else {
+               if (binder_has_thread_work(thread))
+                       return POLLIN;
+               poll_wait(filp, &thread->wait, wait);
+               if (binder_has_thread_work(thread))
+                       return POLLIN;
+       }
+       return 0;
+}
+
+/*
+ * binder_ioctl_write_read() - handle the BINDER_WRITE_READ ioctl.
+ *
+ * Copies a binder_write_read descriptor from user space, processes the
+ * write buffer (BC_* commands) then the read buffer (BR_* replies), and
+ * copies the descriptor back with updated consumed counts.  On a write
+ * error, read_consumed is zeroed and the descriptor is still copied out
+ * so user space sees how far the write got.
+ */
+static int binder_ioctl_write_read(struct file *filp,
+                               unsigned int cmd, unsigned long arg,
+                               struct binder_thread *thread)
+{
+       int ret = 0;
+       struct binder_proc *proc = filp->private_data;
+       unsigned int size = _IOC_SIZE(cmd);
+       void __user *ubuf = (void __user *)arg;
+       struct binder_write_read bwr;
+
+       if (size != sizeof(struct binder_write_read)) {
+               ret = -EINVAL;
+               goto out;
+       }
+       if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+               ret = -EFAULT;
+               goto out;
+       }
+       binder_debug(BINDER_DEBUG_READ_WRITE,
+                    "%d:%d write %lld at %016llx, read %lld at %016llx\n",
+                    proc->pid, thread->pid,
+                    (u64)bwr.write_size, (u64)bwr.write_buffer,
+                    (u64)bwr.read_size, (u64)bwr.read_buffer);
+
+       if (bwr.write_size > 0) {
+               ret = binder_thread_write(proc, thread,
+                                         bwr.write_buffer,
+                                         bwr.write_size,
+                                         &bwr.write_consumed);
+               trace_binder_write_done(ret);
+               if (ret < 0) {
+                       bwr.read_consumed = 0;
+                       if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                               ret = -EFAULT;
+                       goto out;
+               }
+       }
+       if (bwr.read_size > 0) {
+               ret = binder_thread_read(proc, thread, bwr.read_buffer,
+                                        bwr.read_size,
+                                        &bwr.read_consumed,
+                                        filp->f_flags & O_NONBLOCK);
+               trace_binder_read_done(ret);
+               /* This thread consumed work; wake another if proc work remains. */
+               if (!list_empty(&proc->todo))
+                       wake_up_interruptible(&proc->wait);
+               if (ret < 0) {
+                       if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+                               ret = -EFAULT;
+                       goto out;
+               }
+       }
+       binder_debug(BINDER_DEBUG_READ_WRITE,
+                    "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
+                    proc->pid, thread->pid,
+                    (u64)bwr.write_consumed, (u64)bwr.write_size,
+                    (u64)bwr.read_consumed, (u64)bwr.read_size);
+       if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+               ret = -EFAULT;
+               goto out;
+       }
+out:
+       return ret;
+}
+
+/*
+ * binder_ioctl_set_ctx_mgr() - handle BINDER_SET_CONTEXT_MGR.
+ *
+ * Installs the calling process as the context manager (servicemanager).
+ * Fails with -EBUSY if one is already registered, defers to the LSM via
+ * security_binder_set_context_mgr(), and — if a manager uid was ever
+ * recorded — requires the caller's euid to match it (-EPERM otherwise).
+ * On success creates the global context-manager node and pins it with
+ * local strong and weak references.
+ */
+static int binder_ioctl_set_ctx_mgr(struct file *filp)
+{
+       int ret = 0;
+       struct binder_proc *proc = filp->private_data;
+       kuid_t curr_euid = current_euid();
+
+       if (binder_context_mgr_node != NULL) {
+               pr_err("BINDER_SET_CONTEXT_MGR already set\n");
+               ret = -EBUSY;
+               goto out;
+       }
+       ret = security_binder_set_context_mgr(proc->tsk);
+       if (ret < 0)
+               goto out;
+       if (uid_valid(binder_context_mgr_uid)) {
+               if (!uid_eq(binder_context_mgr_uid, curr_euid)) {
+                       pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
+                              from_kuid(&init_user_ns, curr_euid),
+                              from_kuid(&init_user_ns,
+                                       binder_context_mgr_uid));
+                       ret = -EPERM;
+                       goto out;
+               }
+       } else {
+               /* First registration: remember this euid for future attempts. */
+               binder_context_mgr_uid = curr_euid;
+       }
+       binder_context_mgr_node = binder_new_node(proc, 0, 0);
+       if (binder_context_mgr_node == NULL) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       binder_context_mgr_node->local_weak_refs++;
+       binder_context_mgr_node->local_strong_refs++;
+       binder_context_mgr_node->has_strong_ref = 1;
+       binder_context_mgr_node->has_weak_ref = 1;
+out:
+       return ret;
+}
+
+/*
+ * binder_ioctl() - top-level ioctl dispatcher for the binder fd.
+ *
+ * Pauses while a user-error stop is in force, takes the driver lock,
+ * resolves the per-thread state and dispatches the command.  On exit the
+ * thread's NEED_RETURN flag is cleared (it has now returned to user
+ * space), except after BINDER_THREAD_EXIT where the thread was freed.
+ */
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+       int ret;
+       struct binder_proc *proc = filp->private_data;
+       struct binder_thread *thread;
+       unsigned int size = _IOC_SIZE(cmd);
+       void __user *ubuf = (void __user *)arg;
+
+       /*pr_info("binder_ioctl: %d:%d %x %lx\n",
+                       proc->pid, current->pid, cmd, arg);*/
+
+       trace_binder_ioctl(cmd, arg);
+
+       ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+       if (ret)
+               goto err_unlocked;
+
+       binder_lock(__func__);
+       thread = binder_get_thread(proc);
+       if (thread == NULL) {
+               ret = -ENOMEM;
+               goto err;
+       }
+
+       switch (cmd) {
+       case BINDER_WRITE_READ:
+               ret = binder_ioctl_write_read(filp, cmd, arg, thread);
+               if (ret)
+                       goto err;
+               break;
+       case BINDER_SET_MAX_THREADS:
+               /*
+                * NOTE(review): a copy_from_user failure conventionally
+                * yields -EFAULT; -EINVAL here is userspace-visible, so
+                * confirm ABI impact before changing it.
+                */
+               if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               break;
+       case BINDER_SET_CONTEXT_MGR:
+               ret = binder_ioctl_set_ctx_mgr(filp);
+               if (ret)
+                       goto err;
+               break;
+       case BINDER_THREAD_EXIT:
+               binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
+                            proc->pid, thread->pid);
+               binder_free_thread(proc, thread);
+               /* thread is gone; NULL prevents the looper update below. */
+               thread = NULL;
+               break;
+       case BINDER_VERSION: {
+               struct binder_version __user *ver = ubuf;
+
+               if (size != sizeof(struct binder_version)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
+                            &ver->protocol_version)) {
+                       ret = -EINVAL;
+                       goto err;
+               }
+               break;
+       }
+       default:
+               ret = -EINVAL;
+               goto err;
+       }
+       ret = 0;
+err:
+       if (thread)
+               thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+       binder_unlock(__func__);
+       wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+       if (ret && ret != -ERESTARTSYS)
+               pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+err_unlocked:
+       trace_binder_ioctl_done(ret);
+       return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+       struct binder_proc *proc = vma->vm_private_data;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+       struct binder_proc *proc = vma->vm_private_data;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+       proc->vma = NULL;
+       proc->vma_vm_mm = NULL;
+       binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+       return VM_FAULT_SIGBUS;
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+       .open = binder_vma_open,
+       .close = binder_vma_close,
+       .fault = binder_vm_fault,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+       int ret;
+       struct vm_struct *area;
+       struct binder_proc *proc = filp->private_data;
+       const char *failure_string;
+       struct binder_buffer *buffer;
+
+       if (proc->tsk != current)
+               return -EINVAL;
+
+       if ((vma->vm_end - vma->vm_start) > SZ_4M)
+               vma->vm_end = vma->vm_start + SZ_4M;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+                    proc->pid, vma->vm_start, vma->vm_end,
+                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+                    (unsigned long)pgprot_val(vma->vm_page_prot));
+
+       if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+               ret = -EPERM;
+               failure_string = "bad vm_flags";
+               goto err_bad_arg;
+       }
+       vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+       mutex_lock(&binder_mmap_lock);
+       if (proc->buffer) {
+               ret = -EBUSY;
+               failure_string = "already mapped";
+               goto err_already_mapped;
+       }
+
+       area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+       if (area == NULL) {
+               ret = -ENOMEM;
+               failure_string = "get_vm_area";
+               goto err_get_vm_area_failed;
+       }
+       proc->buffer = area->addr;
+       proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+       mutex_unlock(&binder_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+       if (cache_is_vipt_aliasing()) {
+               while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+                       pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+                       vma->vm_start += PAGE_SIZE;
+               }
+       }
+#endif
+       proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+       if (proc->pages == NULL) {
+               ret = -ENOMEM;
+               failure_string = "alloc page array";
+               goto err_alloc_pages_failed;
+       }
+       proc->buffer_size = vma->vm_end - vma->vm_start;
+
+       vma->vm_ops = &binder_vm_ops;
+       vma->vm_private_data = proc;
+
+       if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+               ret = -ENOMEM;
+               failure_string = "alloc small buf";
+               goto err_alloc_small_buf_failed;
+       }
+       buffer = proc->buffer;
+       INIT_LIST_HEAD(&proc->buffers);
+       list_add(&buffer->entry, &proc->buffers);
+       buffer->free = 1;
+       binder_insert_free_buffer(proc, buffer);
+       proc->free_async_space = proc->buffer_size / 2;
+       barrier();
+       proc->files = get_files_struct(current);
+       proc->vma = vma;
+       proc->vma_vm_mm = vma->vm_mm;
+
+       /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
+                proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+       return 0;
+
+err_alloc_small_buf_failed:
+       kfree(proc->pages);
+       proc->pages = NULL;
+err_alloc_pages_failed:
+       mutex_lock(&binder_mmap_lock);
+       vfree(proc->buffer);
+       proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+       mutex_unlock(&binder_mmap_lock);
+err_bad_arg:
+       pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
+              proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+       return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+       struct binder_proc *proc;
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+                    current->group_leader->pid, current->pid);
+
+       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+       if (proc == NULL)
+               return -ENOMEM;
+       get_task_struct(current);
+       proc->tsk = current;
+       INIT_LIST_HEAD(&proc->todo);
+       init_waitqueue_head(&proc->wait);
+       proc->default_priority = task_nice(current);
+
+       binder_lock(__func__);
+
+       binder_stats_created(BINDER_STAT_PROC);
+       hlist_add_head(&proc->proc_node, &binder_procs);
+       proc->pid = current->group_leader->pid;
+       INIT_LIST_HEAD(&proc->delivered_death);
+       filp->private_data = proc;
+
+       binder_unlock(__func__);
+
+       if (binder_debugfs_dir_entry_proc) {
+               char strbuf[11];
+
+               snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+               proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+                       binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+       }
+
+       return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+       struct binder_proc *proc = filp->private_data;
+
+       binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+       return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+       struct rb_node *n;
+       int wake_count = 0;
+
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+               struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+
+               thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+               if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+                       wake_up_interruptible(&thread->wait);
+                       wake_count++;
+               }
+       }
+       wake_up_interruptible_all(&proc->wait);
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "binder_flush: %d woke %d threads\n", proc->pid,
+                    wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+       struct binder_proc *proc = filp->private_data;
+
+       debugfs_remove(proc->debugfs_entry);
+       binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+       return 0;
+}
+
+static int binder_node_release(struct binder_node *node, int refs)
+{
+       struct binder_ref *ref;
+       int death = 0;
+
+       list_del_init(&node->work.entry);
+       binder_release_work(&node->async_todo);
+
+       if (hlist_empty(&node->refs)) {
+               kfree(node);
+               binder_stats_deleted(BINDER_STAT_NODE);
+
+               return refs;
+       }
+
+       node->proc = NULL;
+       node->local_strong_refs = 0;
+       node->local_weak_refs = 0;
+       hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+       hlist_for_each_entry(ref, &node->refs, node_entry) {
+               refs++;
+
+               if (!ref->death)
+                       continue;
+
+               death++;
+
+               if (list_empty(&ref->death->work.entry)) {
+                       ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+                       list_add_tail(&ref->death->work.entry,
+                                     &ref->proc->todo);
+                       wake_up_interruptible(&ref->proc->wait);
+               } else
+                       BUG();
+       }
+
+       binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                    "node %d now dead, refs %d, death %d\n",
+                    node->debug_id, refs, death);
+
+       return refs;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+       struct binder_transaction *t;
+       struct rb_node *n;
+       int threads, nodes, incoming_refs, outgoing_refs, buffers,
+               active_transactions, page_count;
+
+       BUG_ON(proc->vma);
+       BUG_ON(proc->files);
+
+       hlist_del(&proc->proc_node);
+
+       if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+               binder_debug(BINDER_DEBUG_DEAD_BINDER,
+                            "%s: %d context_mgr_node gone\n",
+                            __func__, proc->pid);
+               binder_context_mgr_node = NULL;
+       }
+
+       threads = 0;
+       active_transactions = 0;
+       while ((n = rb_first(&proc->threads))) {
+               struct binder_thread *thread;
+
+               thread = rb_entry(n, struct binder_thread, rb_node);
+               threads++;
+               active_transactions += binder_free_thread(proc, thread);
+       }
+
+       nodes = 0;
+       incoming_refs = 0;
+       while ((n = rb_first(&proc->nodes))) {
+               struct binder_node *node;
+
+               node = rb_entry(n, struct binder_node, rb_node);
+               nodes++;
+               rb_erase(&node->rb_node, &proc->nodes);
+               incoming_refs = binder_node_release(node, incoming_refs);
+       }
+
+       outgoing_refs = 0;
+       while ((n = rb_first(&proc->refs_by_desc))) {
+               struct binder_ref *ref;
+
+               ref = rb_entry(n, struct binder_ref, rb_node_desc);
+               outgoing_refs++;
+               binder_delete_ref(ref);
+       }
+
+       binder_release_work(&proc->todo);
+       binder_release_work(&proc->delivered_death);
+
+       buffers = 0;
+       while ((n = rb_first(&proc->allocated_buffers))) {
+               struct binder_buffer *buffer;
+
+               buffer = rb_entry(n, struct binder_buffer, rb_node);
+
+               t = buffer->transaction;
+               if (t) {
+                       t->buffer = NULL;
+                       buffer->transaction = NULL;
+                       pr_err("release proc %d, transaction %d, not freed\n",
+                              proc->pid, t->debug_id);
+                       /*BUG();*/
+               }
+
+               binder_free_buf(proc, buffer);
+               buffers++;
+       }
+
+       binder_stats_deleted(BINDER_STAT_PROC);
+
+       page_count = 0;
+       if (proc->pages) {
+               int i;
+
+               for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+                       void *page_addr;
+
+                       if (!proc->pages[i])
+                               continue;
+
+                       page_addr = proc->buffer + i * PAGE_SIZE;
+                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+                                    "%s: %d: page %d at %p not freed\n",
+                                    __func__, proc->pid, i, page_addr);
+                       unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+                       __free_page(proc->pages[i]);
+                       page_count++;
+               }
+               kfree(proc->pages);
+               vfree(proc->buffer);
+       }
+
+       put_task_struct(proc->tsk);
+
+       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+                    "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
+                    __func__, proc->pid, threads, nodes, incoming_refs,
+                    outgoing_refs, active_transactions, buffers, page_count);
+
+       kfree(proc);
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+       struct binder_proc *proc;
+       struct files_struct *files;
+
+       int defer;
+
+       do {
+               binder_lock(__func__);
+               mutex_lock(&binder_deferred_lock);
+               if (!hlist_empty(&binder_deferred_list)) {
+                       proc = hlist_entry(binder_deferred_list.first,
+                                       struct binder_proc, deferred_work_node);
+                       hlist_del_init(&proc->deferred_work_node);
+                       defer = proc->deferred_work;
+                       proc->deferred_work = 0;
+               } else {
+                       proc = NULL;
+                       defer = 0;
+               }
+               mutex_unlock(&binder_deferred_lock);
+
+               files = NULL;
+               if (defer & BINDER_DEFERRED_PUT_FILES) {
+                       files = proc->files;
+                       if (files)
+                               proc->files = NULL;
+               }
+
+               if (defer & BINDER_DEFERRED_FLUSH)
+                       binder_deferred_flush(proc);
+
+               if (defer & BINDER_DEFERRED_RELEASE)
+                       binder_deferred_release(proc); /* frees proc */
+
+               binder_unlock(__func__);
+               if (files)
+                       put_files_struct(files);
+       } while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+       mutex_lock(&binder_deferred_lock);
+       proc->deferred_work |= defer;
+       if (hlist_unhashed(&proc->deferred_work_node)) {
+               hlist_add_head(&proc->deferred_work_node,
+                               &binder_deferred_list);
+               queue_work(binder_deferred_workqueue, &binder_deferred_work);
+       }
+       mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+                                    struct binder_transaction *t)
+{
+       seq_printf(m,
+                  "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+                  prefix, t->debug_id, t,
+                  t->from ? t->from->proc->pid : 0,
+                  t->from ? t->from->pid : 0,
+                  t->to_proc ? t->to_proc->pid : 0,
+                  t->to_thread ? t->to_thread->pid : 0,
+                  t->code, t->flags, t->priority, t->need_reply);
+       if (t->buffer == NULL) {
+               seq_puts(m, " buffer free\n");
+               return;
+       }
+       if (t->buffer->target_node)
+               seq_printf(m, " node %d",
+                          t->buffer->target_node->debug_id);
+       seq_printf(m, " size %zd:%zd data %p\n",
+                  t->buffer->data_size, t->buffer->offsets_size,
+                  t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+                               struct binder_buffer *buffer)
+{
+       seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+                  prefix, buffer->debug_id, buffer->data,
+                  buffer->data_size, buffer->offsets_size,
+                  buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+                             const char *transaction_prefix,
+                             struct binder_work *w)
+{
+       struct binder_node *node;
+       struct binder_transaction *t;
+
+       switch (w->type) {
+       case BINDER_WORK_TRANSACTION:
+               t = container_of(w, struct binder_transaction, work);
+               print_binder_transaction(m, transaction_prefix, t);
+               break;
+       case BINDER_WORK_TRANSACTION_COMPLETE:
+               seq_printf(m, "%stransaction complete\n", prefix);
+               break;
+       case BINDER_WORK_NODE:
+               node = container_of(w, struct binder_node, work);
+               seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
+                          prefix, node->debug_id,
+                          (u64)node->ptr, (u64)node->cookie);
+               break;
+       case BINDER_WORK_DEAD_BINDER:
+               seq_printf(m, "%shas dead binder\n", prefix);
+               break;
+       case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+               seq_printf(m, "%shas cleared dead binder\n", prefix);
+               break;
+       case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+               seq_printf(m, "%shas cleared death notification\n", prefix);
+               break;
+       default:
+               seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+               break;
+       }
+}
+
+static void print_binder_thread(struct seq_file *m,
+                               struct binder_thread *thread,
+                               int print_always)
+{
+       struct binder_transaction *t;
+       struct binder_work *w;
+       size_t start_pos = m->count;
+       size_t header_pos;
+
+       seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
+       header_pos = m->count;
+       t = thread->transaction_stack;
+       while (t) {
+               if (t->from == thread) {
+                       print_binder_transaction(m,
+                                                "    outgoing transaction", t);
+                       t = t->from_parent;
+               } else if (t->to_thread == thread) {
+                       print_binder_transaction(m,
+                                                "    incoming transaction", t);
+                       t = t->to_parent;
+               } else {
+                       print_binder_transaction(m, "    bad transaction", t);
+                       t = NULL;
+               }
+       }
+       list_for_each_entry(w, &thread->todo, entry) {
+               print_binder_work(m, "    ", "    pending transaction", w);
+       }
+       if (!print_always && m->count == header_pos)
+               m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+       struct binder_ref *ref;
+       struct binder_work *w;
+       int count;
+
+       count = 0;
+       hlist_for_each_entry(ref, &node->refs, node_entry)
+               count++;
+
+       seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
+                  node->debug_id, (u64)node->ptr, (u64)node->cookie,
+                  node->has_strong_ref, node->has_weak_ref,
+                  node->local_strong_refs, node->local_weak_refs,
+                  node->internal_strong_refs, count);
+       if (count) {
+               seq_puts(m, " proc");
+               hlist_for_each_entry(ref, &node->refs, node_entry)
+                       seq_printf(m, " %d", ref->proc->pid);
+       }
+       seq_puts(m, "\n");
+       list_for_each_entry(w, &node->async_todo, entry)
+               print_binder_work(m, "    ",
+                                 "    pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+       seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
+                  ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+                  ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+                             struct binder_proc *proc, int print_all)
+{
+       struct binder_work *w;
+       struct rb_node *n;
+       size_t start_pos = m->count;
+       size_t header_pos;
+
+       seq_printf(m, "proc %d\n", proc->pid);
+       header_pos = m->count;
+
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+               print_binder_thread(m, rb_entry(n, struct binder_thread,
+                                               rb_node), print_all);
+       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+               struct binder_node *node = rb_entry(n, struct binder_node,
+                                                   rb_node);
+               if (print_all || node->has_async_transaction)
+                       print_binder_node(m, node);
+       }
+       if (print_all) {
+               for (n = rb_first(&proc->refs_by_desc);
+                    n != NULL;
+                    n = rb_next(n))
+                       print_binder_ref(m, rb_entry(n, struct binder_ref,
+                                                    rb_node_desc));
+       }
+       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+               print_binder_buffer(m, "  buffer",
+                                   rb_entry(n, struct binder_buffer, rb_node));
+       list_for_each_entry(w, &proc->todo, entry)
+               print_binder_work(m, "  ", "  pending transaction", w);
+       list_for_each_entry(w, &proc->delivered_death, entry) {
+               seq_puts(m, "  has delivered dead binder\n");
+               break;
+       }
+       if (!print_all && m->count == header_pos)
+               m->count = start_pos;
+}
+
+static const char * const binder_return_strings[] = {
+       "BR_ERROR",
+       "BR_OK",
+       "BR_TRANSACTION",
+       "BR_REPLY",
+       "BR_ACQUIRE_RESULT",
+       "BR_DEAD_REPLY",
+       "BR_TRANSACTION_COMPLETE",
+       "BR_INCREFS",
+       "BR_ACQUIRE",
+       "BR_RELEASE",
+       "BR_DECREFS",
+       "BR_ATTEMPT_ACQUIRE",
+       "BR_NOOP",
+       "BR_SPAWN_LOOPER",
+       "BR_FINISHED",
+       "BR_DEAD_BINDER",
+       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+       "BR_FAILED_REPLY"
+};
+
+static const char * const binder_command_strings[] = {
+       "BC_TRANSACTION",
+       "BC_REPLY",
+       "BC_ACQUIRE_RESULT",
+       "BC_FREE_BUFFER",
+       "BC_INCREFS",
+       "BC_ACQUIRE",
+       "BC_RELEASE",
+       "BC_DECREFS",
+       "BC_INCREFS_DONE",
+       "BC_ACQUIRE_DONE",
+       "BC_ATTEMPT_ACQUIRE",
+       "BC_REGISTER_LOOPER",
+       "BC_ENTER_LOOPER",
+       "BC_EXIT_LOOPER",
+       "BC_REQUEST_DEATH_NOTIFICATION",
+       "BC_CLEAR_DEATH_NOTIFICATION",
+       "BC_DEAD_BINDER_DONE"
+};
+
+static const char * const binder_objstat_strings[] = {
+       "proc",
+       "thread",
+       "node",
+       "ref",
+       "death",
+       "transaction",
+       "transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+                              struct binder_stats *stats)
+{
+       int i;
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+                    ARRAY_SIZE(binder_command_strings));
+       for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+               if (stats->bc[i])
+                       seq_printf(m, "%s%s: %d\n", prefix,
+                                  binder_command_strings[i], stats->bc[i]);
+       }
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+                    ARRAY_SIZE(binder_return_strings));
+       for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+               if (stats->br[i])
+                       seq_printf(m, "%s%s: %d\n", prefix,
+                                  binder_return_strings[i], stats->br[i]);
+       }
+
+       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+                    ARRAY_SIZE(binder_objstat_strings));
+       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+                    ARRAY_SIZE(stats->obj_deleted));
+       for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+               if (stats->obj_created[i] || stats->obj_deleted[i])
+                       seq_printf(m, "%s%s: active %d total %d\n", prefix,
+                               binder_objstat_strings[i],
+                               stats->obj_created[i] - stats->obj_deleted[i],
+                               stats->obj_created[i]);
+       }
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+                                   struct binder_proc *proc)
+{
+       struct binder_work *w;
+       struct rb_node *n;
+       int count, strong, weak;
+
+       seq_printf(m, "proc %d\n", proc->pid);
+       count = 0;
+       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  threads: %d\n", count);
+       seq_printf(m, "  requested threads: %d+%d/%d\n"
+                       "  ready threads %d\n"
+                       "  free async space %zd\n", proc->requested_threads,
+                       proc->requested_threads_started, proc->max_threads,
+                       proc->ready_threads, proc->free_async_space);
+       count = 0;
+       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  nodes: %d\n", count);
+       count = 0;
+       strong = 0;
+       weak = 0;
+       for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+               struct binder_ref *ref = rb_entry(n, struct binder_ref,
+                                                 rb_node_desc);
+               count++;
+               strong += ref->strong;
+               weak += ref->weak;
+       }
+       seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
+
+       count = 0;
+       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+               count++;
+       seq_printf(m, "  buffers: %d\n", count);
+
+       count = 0;
+       list_for_each_entry(w, &proc->todo, entry) {
+               switch (w->type) {
+               case BINDER_WORK_TRANSACTION:
+                       count++;
+                       break;
+               default:
+                       break;
+               }
+       }
+       seq_printf(m, "  pending transactions: %d\n", count);
+
+       print_binder_stats(m, "  ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       struct binder_node *node;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               binder_lock(__func__);
+
+       seq_puts(m, "binder state:\n");
+
+       if (!hlist_empty(&binder_dead_nodes))
+               seq_puts(m, "dead nodes:\n");
+       hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
+               print_binder_node(m, node);
+
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
+               print_binder_proc(m, proc, 1);
+       if (do_lock)
+               binder_unlock(__func__);
+       return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               binder_lock(__func__);
+
+       seq_puts(m, "binder stats:\n");
+
+       print_binder_stats(m, "", &binder_stats);
+
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
+               print_binder_proc_stats(m, proc);
+       if (do_lock)
+               binder_unlock(__func__);
+       return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               binder_lock(__func__);
+
+       seq_puts(m, "binder transactions:\n");
+       hlist_for_each_entry(proc, &binder_procs, proc_node)
+               print_binder_proc(m, proc, 0);
+       if (do_lock)
+               binder_unlock(__func__);
+       return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+       struct binder_proc *proc = m->private;
+       int do_lock = !binder_debug_no_lock;
+
+       if (do_lock)
+               binder_lock(__func__);
+       seq_puts(m, "binder proc state:\n");
+       print_binder_proc(m, proc, 1);
+       if (do_lock)
+               binder_unlock(__func__);
+       return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+                                       struct binder_transaction_log_entry *e)
+{
+       seq_printf(m,
+                  "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+                  e->debug_id, (e->call_type == 2) ? "reply" :
+                  ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+                  e->from_thread, e->to_proc, e->to_thread, e->to_node,
+                  e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+       struct binder_transaction_log *log = m->private;
+       int i;
+
+       if (log->full) {
+               for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+                       print_binder_transaction_log_entry(m, &log->entry[i]);
+       }
+       for (i = 0; i < log->next; i++)
+               print_binder_transaction_log_entry(m, &log->entry[i]);
+       return 0;
+}
+
+static const struct file_operations binder_fops = {
+       .owner = THIS_MODULE,
+       .poll = binder_poll,
+       .unlocked_ioctl = binder_ioctl,
+       .compat_ioctl = binder_ioctl,
+       .mmap = binder_mmap,
+       .open = binder_open,
+       .flush = binder_flush,
+       .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+       .minor = MISC_DYNAMIC_MINOR,
+       .name = "binder",
+       .fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+       int ret;
+
+       binder_deferred_workqueue = create_singlethread_workqueue("binder");
+       if (!binder_deferred_workqueue)
+               return -ENOMEM;
+
+       binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+       if (binder_debugfs_dir_entry_root)
+               binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+                                                binder_debugfs_dir_entry_root);
+       ret = misc_register(&binder_miscdev);
+       if (binder_debugfs_dir_entry_root) {
+               debugfs_create_file("state",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_state_fops);
+               debugfs_create_file("stats",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_stats_fops);
+               debugfs_create_file("transactions",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   NULL,
+                                   &binder_transactions_fops);
+               debugfs_create_file("transaction_log",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   &binder_transaction_log,
+                                   &binder_transaction_log_fops);
+               debugfs_create_file("failed_transaction_log",
+                                   S_IRUGO,
+                                   binder_debugfs_dir_entry_root,
+                                   &binder_transaction_log_failed,
+                                   &binder_transaction_log_fops);
+       }
+       return ret;
+}
+
+device_initcall(binder_init);
+
+#define CREATE_TRACE_POINTS
+#include "binder_trace.h"
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
new file mode 100644 (file)
index 0000000..7f20f3d
--- /dev/null
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM binder
+
+#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+struct binder_buffer;
+struct binder_node;
+struct binder_proc;
+struct binder_ref;
+struct binder_thread;
+struct binder_transaction;
+
+TRACE_EVENT(binder_ioctl,
+       TP_PROTO(unsigned int cmd, unsigned long arg),
+       TP_ARGS(cmd, arg),
+
+       TP_STRUCT__entry(
+               __field(unsigned int, cmd)
+               __field(unsigned long, arg)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+               __entry->arg = arg;
+       ),
+       TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+DECLARE_EVENT_CLASS(binder_lock_class,
+       TP_PROTO(const char *tag),
+       TP_ARGS(tag),
+       TP_STRUCT__entry(
+               __field(const char *, tag)
+       ),
+       TP_fast_assign(
+               __entry->tag = tag;
+       ),
+       TP_printk("tag=%s", __entry->tag)
+);
+
+#define DEFINE_BINDER_LOCK_EVENT(name) \
+DEFINE_EVENT(binder_lock_class, name,  \
+       TP_PROTO(const char *func), \
+       TP_ARGS(func))
+
+DEFINE_BINDER_LOCK_EVENT(binder_lock);
+DEFINE_BINDER_LOCK_EVENT(binder_locked);
+DEFINE_BINDER_LOCK_EVENT(binder_unlock);
+
+DECLARE_EVENT_CLASS(binder_function_return_class,
+       TP_PROTO(int ret),
+       TP_ARGS(ret),
+       TP_STRUCT__entry(
+               __field(int, ret)
+       ),
+       TP_fast_assign(
+               __entry->ret = ret;
+       ),
+       TP_printk("ret=%d", __entry->ret)
+);
+
+#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name)      \
+DEFINE_EVENT(binder_function_return_class, name,       \
+       TP_PROTO(int ret), \
+       TP_ARGS(ret))
+
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
+DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+
+TRACE_EVENT(binder_wait_for_work,
+       TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
+       TP_ARGS(proc_work, transaction_stack, thread_todo),
+
+       TP_STRUCT__entry(
+               __field(bool, proc_work)
+               __field(bool, transaction_stack)
+               __field(bool, thread_todo)
+       ),
+       TP_fast_assign(
+               __entry->proc_work = proc_work;
+               __entry->transaction_stack = transaction_stack;
+               __entry->thread_todo = thread_todo;
+       ),
+       TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
+                 __entry->proc_work, __entry->transaction_stack,
+                 __entry->thread_todo)
+);
+
+TRACE_EVENT(binder_transaction,
+       TP_PROTO(bool reply, struct binder_transaction *t,
+                struct binder_node *target_node),
+       TP_ARGS(reply, t, target_node),
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(int, target_node)
+               __field(int, to_proc)
+               __field(int, to_thread)
+               __field(int, reply)
+               __field(unsigned int, code)
+               __field(unsigned int, flags)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+               __entry->target_node = target_node ? target_node->debug_id : 0;
+               __entry->to_proc = t->to_proc->pid;
+               __entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
+               __entry->reply = reply;
+               __entry->code = t->code;
+               __entry->flags = t->flags;
+       ),
+       TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
+                 __entry->debug_id, __entry->target_node,
+                 __entry->to_proc, __entry->to_thread,
+                 __entry->reply, __entry->flags, __entry->code)
+);
+
+TRACE_EVENT(binder_transaction_received,
+       TP_PROTO(struct binder_transaction *t),
+       TP_ARGS(t),
+
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+       ),
+       TP_printk("transaction=%d", __entry->debug_id)
+);
+
+TRACE_EVENT(binder_transaction_node_to_ref,
+       TP_PROTO(struct binder_transaction *t, struct binder_node *node,
+                struct binder_ref *ref),
+       TP_ARGS(t, node, ref),
+
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(int, node_debug_id)
+               __field(binder_uintptr_t, node_ptr)
+               __field(int, ref_debug_id)
+               __field(uint32_t, ref_desc)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+               __entry->node_debug_id = node->debug_id;
+               __entry->node_ptr = node->ptr;
+               __entry->ref_debug_id = ref->debug_id;
+               __entry->ref_desc = ref->desc;
+       ),
+       TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
+                 __entry->debug_id, __entry->node_debug_id,
+                 (u64)__entry->node_ptr,
+                 __entry->ref_debug_id, __entry->ref_desc)
+);
+
+TRACE_EVENT(binder_transaction_ref_to_node,
+       TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
+       TP_ARGS(t, ref),
+
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(int, ref_debug_id)
+               __field(uint32_t, ref_desc)
+               __field(int, node_debug_id)
+               __field(binder_uintptr_t, node_ptr)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+               __entry->ref_debug_id = ref->debug_id;
+               __entry->ref_desc = ref->desc;
+               __entry->node_debug_id = ref->node->debug_id;
+               __entry->node_ptr = ref->node->ptr;
+       ),
+       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
+                 __entry->debug_id, __entry->node_debug_id,
+                 __entry->ref_debug_id, __entry->ref_desc,
+                 (u64)__entry->node_ptr)
+);
+
+TRACE_EVENT(binder_transaction_ref_to_ref,
+       TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
+                struct binder_ref *dest_ref),
+       TP_ARGS(t, src_ref, dest_ref),
+
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(int, node_debug_id)
+               __field(int, src_ref_debug_id)
+               __field(uint32_t, src_ref_desc)
+               __field(int, dest_ref_debug_id)
+               __field(uint32_t, dest_ref_desc)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+               __entry->node_debug_id = src_ref->node->debug_id;
+               __entry->src_ref_debug_id = src_ref->debug_id;
+               __entry->src_ref_desc = src_ref->desc;
+               __entry->dest_ref_debug_id = dest_ref->debug_id;
+               __entry->dest_ref_desc = dest_ref->desc;
+       ),
+       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
+                 __entry->debug_id, __entry->node_debug_id,
+                 __entry->src_ref_debug_id, __entry->src_ref_desc,
+                 __entry->dest_ref_debug_id, __entry->dest_ref_desc)
+);
+
+TRACE_EVENT(binder_transaction_fd,
+       TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
+       TP_ARGS(t, src_fd, dest_fd),
+
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(int, src_fd)
+               __field(int, dest_fd)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = t->debug_id;
+               __entry->src_fd = src_fd;
+               __entry->dest_fd = dest_fd;
+       ),
+       TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
+                 __entry->debug_id, __entry->src_fd, __entry->dest_fd)
+);
+
+DECLARE_EVENT_CLASS(binder_buffer_class,
+       TP_PROTO(struct binder_buffer *buf),
+       TP_ARGS(buf),
+       TP_STRUCT__entry(
+               __field(int, debug_id)
+               __field(size_t, data_size)
+               __field(size_t, offsets_size)
+       ),
+       TP_fast_assign(
+               __entry->debug_id = buf->debug_id;
+               __entry->data_size = buf->data_size;
+               __entry->offsets_size = buf->offsets_size;
+       ),
+       TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
+                 __entry->debug_id, __entry->data_size, __entry->offsets_size)
+);
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
+       TP_PROTO(struct binder_buffer *buffer),
+       TP_ARGS(buffer));
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
+       TP_PROTO(struct binder_buffer *buffer),
+       TP_ARGS(buffer));
+
+DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
+       TP_PROTO(struct binder_buffer *buffer),
+       TP_ARGS(buffer));
+
+TRACE_EVENT(binder_update_page_range,
+       TP_PROTO(struct binder_proc *proc, bool allocate,
+                void *start, void *end),
+       TP_ARGS(proc, allocate, start, end),
+       TP_STRUCT__entry(
+               __field(int, proc)
+               __field(bool, allocate)
+               __field(size_t, offset)
+               __field(size_t, size)
+       ),
+       TP_fast_assign(
+               __entry->proc = proc->pid;
+               __entry->allocate = allocate;
+               __entry->offset = start - proc->buffer;
+               __entry->size = end - start;
+       ),
+       TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
+                 __entry->proc, __entry->allocate,
+                 __entry->offset, __entry->size)
+);
+
+TRACE_EVENT(binder_command,
+       TP_PROTO(uint32_t cmd),
+       TP_ARGS(cmd),
+       TP_STRUCT__entry(
+               __field(uint32_t, cmd)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+       ),
+       TP_printk("cmd=0x%x %s",
+                 __entry->cmd,
+                 _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
+                         binder_command_strings[_IOC_NR(__entry->cmd)] :
+                         "unknown")
+);
+
+TRACE_EVENT(binder_return,
+       TP_PROTO(uint32_t cmd),
+       TP_ARGS(cmd),
+       TP_STRUCT__entry(
+               __field(uint32_t, cmd)
+       ),
+       TP_fast_assign(
+               __entry->cmd = cmd;
+       ),
+       TP_printk("cmd=0x%x %s",
+                 __entry->cmd,
+                 _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
+                         binder_return_strings[_IOC_NR(__entry->cmd)] :
+                         "unknown")
+);
+
+#endif /* _BINDER_TRACE_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE binder_trace
+#include <trace/define_trace.h>
index 37acda6fa7e4b4ad192767985692e31118968d24..136803c47cdb06f9e6bb4af7a27d8047c624c4cf 100644 (file)
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
        DPRINTK("ENTER\n");
 
        cancel_delayed_work_sync(&ap->sff_pio_task);
+
+       /*
+        * We wanna reset the HSM state to IDLE.  If we do so without
+        * grabbing the port lock, critical sections protected by it which
+        * expect the HSM state to stay stable may get surprised.  For
+        * example, we may set IDLE in between the time
+        * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+        * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+        */
+       spin_lock_irq(ap->lock);
        ap->hsm_task_state = HSM_ST_IDLE;
+       spin_unlock_irq(ap->lock);
+
        ap->sff_pio_task_link = NULL;
 
        if (ata_msg_ctl(ap))
index 2e391730e8bef0f080d32682538c57db0b941db5..776b59fbe861d60d9776a7a44fc03c3b64328de4 100644 (file)
@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
        if (err) {
                dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
                        " %d\n", __func__, err);
-               goto error_out;
+               return err;
        }
 
        /* Enabe DMA */
@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
                sata_dma_regs);
 
        return 0;
-
-error_out:
-       dma_dwc_exit(hsdev);
-
-       return err;
 }
 
 static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
        char *ver = (char *)&versionr;
        u8 *base = NULL;
        int err = 0;
-       int irq, rc;
+       int irq;
        struct ata_host *host;
        struct ata_port_info pi = sata_dwc_port_info[0];
        const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
        if (irq == NO_IRQ) {
                dev_err(&ofdev->dev, "no SATA DMA irq\n");
                err = -ENODEV;
-               goto error_out;
+               goto error_iomap;
        }
 
        /* Get physical SATA DMA register base address */
@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
                dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
                        " address\n");
                err = -ENODEV;
-               goto error_out;
+               goto error_iomap;
        }
 
        /* Save dev for later use in dev_xxx() routines */
        host_pvt.dwc_dev = &ofdev->dev;
 
        /* Initialize AHB DMAC */
-       dma_dwc_init(hsdev, irq);
+       err = dma_dwc_init(hsdev, irq);
+       if (err)
+               goto error_dma_iomap;
 
        /* Enable SATA Interrupts */
        sata_dwc_enable_interrupts(hsdev);
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
         * device discovery process, invoking our port_start() handler &
         * error_handler() to execute a dummy Softreset EH session
         */
-       rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
-       if (rc != 0)
+       err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+       if (err)
                dev_err(&ofdev->dev, "failed to activate host");
 
        dev_set_drvdata(&ofdev->dev, host);
@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
 error_out:
        /* Free SATA DMA resources */
        dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+       iounmap((void __iomem *)host_pvt.sata_dma_regs);
 error_iomap:
        iounmap(base);
 error_kmalloc:
@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
        /* Free SATA DMA resources */
        dma_dwc_exit(hsdev);
 
+       iounmap((void __iomem *)host_pvt.sata_dma_regs);
        iounmap(hsdev->reg_base);
        kfree(hsdev);
        kfree(host);
index 96a930387ebc749977b3f4ec6007f1478ba95fe3..2a129a0a504cb2c1e10190392a2d8409dd0d25a9 100644 (file)
@@ -912,6 +912,7 @@ int __init platform_bus_init(void)
        error =  bus_register(&platform_bus_type);
        if (error)
                device_unregister(&platform_bus);
+       of_platform_register_reconfig_notifier();
        return error;
 }
 
index c24379ffd4e309cb0344f138854a131e12cc804e..b2ae184a637ce5f42fb78c4506fe22b7bb0ab360 100644 (file)
@@ -1309,6 +1309,7 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
                struct request_queue * const b =
                        mdev->ldev->backing_bdev->bd_disk->queue;
                if (b->merge_bvec_fn) {
+                       bvm->bi_bdev = mdev->ldev->backing_bdev;
                        backing_limit = b->merge_bvec_fn(b, bvm, bvec);
                        limit = min(limit, backing_limit);
                }
index 5dcc8305abd15680e16c517bbdeab1c6ea4bd29c..711dcf4a0313fc059e7991e2a93bf123b98a4693 100644 (file)
@@ -209,12 +209,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
 }
 
 /* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x the MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * isn't currently taking into account this special case, which means
+ * that when window 13 is actually used, the remap registers are left
+ * to 0, making the device using this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow up patch
+ * will correctly handle this window.
+*/
 static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
                                     const int win)
 {
        void __iomem *addr = mbus->mbuswins_base +
                mbus->soc->win_cfg_offset(win);
        u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+       if (win == 13)
+               return false;
+
        return !(ctrl & WIN_CTRL_ENABLE);
 }
 
index b7960185919d08f23991b4aaa207eb146fd136f9..3dfa3e5e3705e7ababc17fdc7de4ce695bd83da8 100644 (file)
@@ -94,8 +94,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
        __raw_writel(value, reg_base + offset);
 
        if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
-               stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
-               switch (offset & EXYNOS4_MCT_L_MASK) {
+               stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+               switch (offset & ~EXYNOS4_MCT_L_MASK) {
                case MCT_L_TCON_OFFSET:
                        mask = 1 << 3;          /* L_TCON write status */
                        break;
index 6c4c000671c50d88885bb39618c5ae9e4ceabe42..01783e46f25afbe96c943a37fb8705f7d65ba811 100644 (file)
@@ -927,28 +927,14 @@ static int nx842_OF_upd(struct property *new_prop)
                goto error_out;
        }
 
-       /* Set ptr to new property if provided */
-       if (new_prop) {
-               /* Single property */
-               if (!strncmp(new_prop->name, "status", new_prop->length)) {
-                       status = new_prop;
-
-               } else if (!strncmp(new_prop->name, "ibm,max-sg-len",
-                                       new_prop->length)) {
-                       maxsglen = new_prop;
-
-               } else if (!strncmp(new_prop->name, "ibm,max-sync-cop",
-                                       new_prop->length)) {
-                       maxsyncop = new_prop;
-
-               } else {
-                       /*
-                        * Skip the update, the property being updated
-                        * has no impact.
-                        */
-                       goto out;
-               }
-       }
+       /*
+        * If this is a property update, there are only certain properties that
+        * we care about. Bail if it isn't in the below list
+        */
+       if (new_prop && (strncmp(new_prop->name, "status", new_prop->length) ||
+                        strncmp(new_prop->name, "ibm,max-sg-len", new_prop->length) ||
+                        strncmp(new_prop->name, "ibm,max-sync-cop", new_prop->length)))
+               goto out;
 
        /* Perform property updates */
        ret = nx842_OF_upd_status(new_devdata, status);
@@ -1014,9 +1000,9 @@ error_out:
  *             notifier_to_errno() to decode this value
  */
 static int nx842_OF_notifier(struct notifier_block *np, unsigned long action,
-                            void *update)
+                            void *data)
 {
-       struct of_prop_reconfig *upd = update;
+       struct of_reconfig_data *upd = data;
        struct nx842_devdata *local_devdata;
        struct device_node *node = NULL;
 
index 633ba945e153da2a61f88f4580afab6caaff2c73..c178ed8c3908d3a92e55432aecb86e0e6e1501ee 100644 (file)
@@ -563,4 +563,4 @@ MODULE_DESCRIPTION("VIA PadLock AES algorithm support");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("aes");
+MODULE_ALIAS_CRYPTO("aes");
index 9266c0e2549297cfd403b85556ac86ae728f6f77..93d7753ab38a3563d9beaa19e7cf54fb85bcc8cc 100644 (file)
@@ -593,7 +593,7 @@ MODULE_DESCRIPTION("VIA PadLock SHA1/SHA256 algorithms support.");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Michal Ludvig");
 
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("sha1-padlock");
-MODULE_ALIAS("sha256-padlock");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-padlock");
+MODULE_ALIAS_CRYPTO("sha256-padlock");
index 3833bd71cc5df4527f44a8ba7a369182060f93a9..e08275de37efbcd8a5d6489914dcc3adf765c16f 100644 (file)
@@ -1775,7 +1775,7 @@ module_exit(ux500_cryp_mod_fini);
 module_param(cryp_mode, int, 0);
 
 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 CRYP crypto engine.");
-MODULE_ALIAS("aes-all");
-MODULE_ALIAS("des-all");
+MODULE_ALIAS_CRYPTO("aes-all");
+MODULE_ALIAS_CRYPTO("des-all");
 
 MODULE_LICENSE("GPL");
index cf55089675398145e4481d810076553465b6031f..6789c165391326a646c167af8df6e725d526b99c 100644 (file)
@@ -1998,7 +1998,7 @@ module_exit(ux500_hash_mod_fini);
 MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
 MODULE_LICENSE("GPL");
 
-MODULE_ALIAS("sha1-all");
-MODULE_ALIAS("sha256-all");
-MODULE_ALIAS("hmac-sha1-all");
-MODULE_ALIAS("hmac-sha256-all");
+MODULE_ALIAS_CRYPTO("sha1-all");
+MODULE_ALIAS_CRYPTO("sha256-all");
+MODULE_ALIAS_CRYPTO("hmac-sha1-all");
+MODULE_ALIAS_CRYPTO("hmac-sha256-all");
index bf8dd3d5bee785b1c8fd7a36d3ad08e6061beff1..fb3a2c1fbd34b34aa939b5d246bf6eac97784f95 100644 (file)
@@ -36,6 +36,12 @@ struct pstore_read_data {
        char **buf;
 };
 
+static inline u64 generic_id(unsigned long timestamp,
+                            unsigned int part, int count)
+{
+       return (timestamp * 100 + part) * 1000 + count;
+}
+
 static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 {
        efi_guid_t vendor = LINUX_EFI_CRASH_GUID;
@@ -54,7 +60,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
 
        if (sscanf(name, "dump-type%u-%u-%d-%lu",
                   cb_data->type, &part, &cnt, &time) == 4) {
-               *cb_data->id = part;
+               *cb_data->id = generic_id(time, part, cnt);
                *cb_data->count = cnt;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
@@ -65,7 +71,7 @@ static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
                 * which doesn't support holding
                 * multiple logs, remains.
                 */
-               *cb_data->id = part;
+               *cb_data->id = generic_id(time, part, 0);
                *cb_data->count = 0;
                cb_data->timespec->tv_sec = time;
                cb_data->timespec->tv_nsec = 0;
@@ -305,14 +311,16 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
        char name[DUMP_NAME_LEN];
        efi_char16_t efi_name[DUMP_NAME_LEN];
        int found, i;
+       unsigned int part;
 
-       sprintf(name, "dump-type%u-%u-%d-%lu", type, (unsigned int)id, count,
-               time.tv_sec);
+       do_div(id, 1000);
+       part = do_div(id, 100);
+       sprintf(name, "dump-type%u-%u-%d-%lu", type, part, count, time.tv_sec);
 
        for (i = 0; i < DUMP_NAME_LEN; i++)
                efi_name[i] = name[i];
 
-       edata.id = id;
+       edata.id = part;
        edata.type = type;
        edata.count = count;
        edata.time = time;
index 665f9530c950f96fd4cf54fb70c03edffbcc2ef2..74769724c94a4b6cdf3a83a8f4dcd3f364a4ff29 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 #include <linux/device.h>
+#include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/io.h>
@@ -42,8 +43,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
                return false;
 
        ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
-       if (ret < 0)
-               return false;
+       if (ret < 0) {
+               /* We've found the gpio chip, but the translation failed.
+                * Return true to stop looking and return the translation
+                * error via out_gpio
+                */
+               gg_data->out_gpio = ret;
+               return true;
+        }
 
        gg_data->out_gpio = ret + gc->base;
        return true;
index a35c5b932eba6c21606e1d8c378f1d0a6fa0a53c..060f24c7c6bad39ac8c9a0d1e75a6022db856402 100644 (file)
@@ -362,7 +362,7 @@ static ssize_t gpio_value_store(struct device *dev,
        return status;
 }
 
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
                gpio_value_show, gpio_value_store);
 
 static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -580,17 +580,17 @@ static ssize_t gpio_active_low_store(struct device *dev,
        return status ? : size;
 }
 
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
                gpio_active_low_show, gpio_active_low_store);
 
-static const struct attribute *gpio_attrs[] = {
+static struct attribute *gpio_attrs[] = {
        &dev_attr_value.attr,
        &dev_attr_active_low.attr,
        NULL,
 };
 
 static const struct attribute_group gpio_attr_group = {
-       .attrs = (struct attribute **) gpio_attrs,
+       .attrs = gpio_attrs,
 };
 
 /*
@@ -627,7 +627,7 @@ static ssize_t chip_ngpio_show(struct device *dev,
 }
 static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
 
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
        &dev_attr_base.attr,
        &dev_attr_label.attr,
        &dev_attr_ngpio.attr,
@@ -635,7 +635,7 @@ static const struct attribute *gpiochip_attrs[] = {
 };
 
 static const struct attribute_group gpiochip_attr_group = {
-       .attrs = (struct attribute **) gpiochip_attrs,
+       .attrs = gpiochip_attrs,
 };
 
 /*
@@ -806,20 +806,24 @@ static int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
        if (direction_may_change) {
                status = device_create_file(dev, &dev_attr_direction);
                if (status)
-                       goto fail_unregister_device;
+                       goto fail_remove_attr_group;
        }
 
        if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
                                       !test_bit(FLAG_IS_OUT, &desc->flags))) {
                status = device_create_file(dev, &dev_attr_edge);
                if (status)
-                       goto fail_unregister_device;
+                       goto fail_remove_attr_direction;
        }
 
        set_bit(FLAG_EXPORT, &desc->flags);
        mutex_unlock(&sysfs_lock);
        return 0;
 
+fail_remove_attr_direction:
+       device_remove_file(dev, &dev_attr_direction);
+fail_remove_attr_group:
+       sysfs_remove_group(&dev->kobj, &gpio_attr_group);
 fail_unregister_device:
        device_unregister(dev);
 fail_unlock:
@@ -971,6 +975,9 @@ static void gpiod_unexport(struct gpio_desc *desc)
        mutex_unlock(&sysfs_lock);
 
        if (dev) {
+               device_remove_file(dev, &dev_attr_edge);
+               device_remove_file(dev, &dev_attr_direction);
+               sysfs_remove_group(&dev->kobj, &gpio_attr_group);
                device_unregister(dev);
                put_device(dev);
        }
@@ -1036,6 +1043,7 @@ static void gpiochip_unexport(struct gpio_chip *chip)
        mutex_lock(&sysfs_lock);
        dev = class_find_device(&gpio_class, NULL, chip, match_export);
        if (dev) {
+               sysfs_remove_group(&dev->kobj, &gpiochip_attr_group);
                put_device(dev);
                device_unregister(dev);
                chip->exported = 0;
index 0a30088178b05c9452d0d1bc63184eb7d8cf51d1..0b71a0aaf4fceea92136e5aac9c700b0f30f7f52 100644 (file)
@@ -4449,7 +4449,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
        if (!mutex_is_locked(mutex))
                return false;
 
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
        return mutex->owner == task;
 #else
        /* Since UP may be pre-empted, we cannot assume that we own the lock */
index 2d90f96c19d021337813bed6b0c5435939d23e7f..7695b5dd9d2d3d95106d2658b175434e691db0d3 100644 (file)
 #define   PIPE_CONTROL_GLOBAL_GTT_IVB                  (1<<24) /* gen7+ */
 #define   PIPE_CONTROL_CS_STALL                                (1<<20)
 #define   PIPE_CONTROL_TLB_INVALIDATE                  (1<<18)
+#define   PIPE_CONTROL_MEDIA_STATE_CLEAR               (1<<16)
 #define   PIPE_CONTROL_QW_WRITE                                (1<<14)
 #define   PIPE_CONTROL_DEPTH_STALL                     (1<<13)
 #define   PIPE_CONTROL_WRITE_FLUSH                     (1<<12)
index 4605c3877c955cc7374583df3b37784e1cd34056..ef4cde15c15c5153effbdf6d5cf5aded1ac808d5 100644 (file)
@@ -314,12 +314,15 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+               flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
+               flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
index 4c05f2b015cfe0013ffb7a4cf1b7db33934042d0..d4a5118911fdbd03aac349621022b27c0d9e138a 100644 (file)
@@ -574,6 +574,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
        struct radeon_connector_atom_dig *dig_connector;
        int dp_clock;
 
+       if ((mode->clock > 340000) &&
+           (!radeon_connector_is_dp12_capable(connector)))
+               return MODE_CLOCK_HIGH;
+
        if (!radeon_connector->con_priv)
                return MODE_CLOCK_HIGH;
        dig_connector = radeon_connector->con_priv;
index 6c0ce8915fac9efc399654a8b25e4bd180285d0a..4a14e113369d6f3de9574e34a7f9aef164544992 100644 (file)
@@ -189,7 +189,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
        rbo = container_of(bo, struct radeon_bo, tbo);
        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
-               if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
+               if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
                else
                        radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
index c62d20e8a6f169cce1173c849b42f160c55a4f39..ee742f14ddc270154bf17a44b247dcf84e5ea14d 100644 (file)
@@ -1049,6 +1049,8 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
        if (ret != 0)
                goto out_no_queue;
 
+       return 0;
+
 out_no_queue:
        event->base.destroy(&event->base);
 out_no_event:
@@ -1123,17 +1125,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
        BUG_ON(fence == NULL);
 
-       if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-       else
-               ret = vmw_event_fence_action_create(file_priv, fence,
-                                                   arg->flags,
-                                                   arg->user_data,
-                                                   true);
-
+       ret = vmw_event_fence_action_create(file_priv, fence,
+                                           arg->flags,
+                                           arg->user_data,
+                                           true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
index c604ce8661b50899a37ddb9e4641fb4de668f9fd..c1f8ccd40b15dbf283772c062f58f3bf9c4bf5b0 100755 (executable)
    SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
    Jean Delvare <khali@linux-fr.org>
    Mux support by Rodolfo Giometti <giometti@enneenne.com> and
-   Michael Lawnick <michael.lawnick.ext@nsn.com> */
+   Michael Lawnick <michael.lawnick.ext@nsn.com>
+   OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
+   (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and
+   (c) 2013  Wolfram Sang <wsa@the-dreams.de>
+ */
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -35,7 +39,9 @@
 #include <linux/init.h>
 #include <linux/idr.h>
 #include <linux/mutex.h>
+#include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/completion.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
@@ -43,6 +49,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/acpi.h>
 #include <asm/uaccess.h>
+#include <linux/err.h>
 
 #include "i2c-core.h"
 
@@ -720,6 +727,10 @@ EXPORT_SYMBOL_GPL(i2c_new_device);
  */
 void i2c_unregister_device(struct i2c_client *client)
 {
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+       if (client->dev.of_node)
+               of_node_clear_flag(client->dev.of_node, OF_POPULATED);
+#endif
        device_unregister(&client->dev);
 }
 EXPORT_SYMBOL_GPL(i2c_unregister_device);
@@ -977,6 +988,113 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter)
        up_read(&__i2c_board_lock);
 }
 
+/* OF support code */
+
+#if IS_ENABLED(CONFIG_OF)
+static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
+                                                struct device_node *node)
+{
+       struct i2c_client *result;
+       struct i2c_board_info info = {};
+       struct dev_archdata dev_ad = {};
+       const __be32 *addr;
+       int len;
+
+       dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
+
+       if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+               dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
+                       node->full_name);
+               return ERR_PTR(-EINVAL);
+       }
+
+       addr = of_get_property(node, "reg", &len);
+       if (!addr || (len < sizeof(int))) {
+               dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
+                       node->full_name);
+               return ERR_PTR(-EINVAL);
+       }
+
+       info.addr = be32_to_cpup(addr);
+       if (info.addr > (1 << 10) - 1) {
+               dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
+                       info.addr, node->full_name);
+               return ERR_PTR(-EINVAL);
+       }
+
+       info.irq = irq_of_parse_and_map(node, 0);
+       info.of_node = of_node_get(node);
+       info.archdata = &dev_ad;
+
+       if (of_get_property(node, "wakeup-source", NULL))
+               info.flags |= I2C_CLIENT_WAKE;
+
+       request_module("%s%s", I2C_MODULE_PREFIX, info.type);
+
+       result = i2c_new_device(adap, &info);
+       if (result == NULL) {
+               dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
+                       node->full_name);
+               of_node_put(node);
+               irq_dispose_mapping(info.irq);
+               return ERR_PTR(-EINVAL);
+       }
+       return result;
+}
+
+static void internal_of_i2c_register_devices(struct i2c_adapter *adap)
+{
+       struct device_node *node;
+
+       /* Only register child devices if the adapter has a node pointer set */
+       if (!adap->dev.of_node)
+               return;
+
+       dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+
+       for_each_available_child_of_node(adap->dev.of_node, node) {
+               if (of_node_test_and_set_flag(node, OF_POPULATED))
+                       continue;
+               of_i2c_register_device(adap, node);
+       }
+}
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&i2c_bus_type, NULL, node,
+                                        of_dev_node_match);
+       if (!dev)
+               return NULL;
+
+       return i2c_verify_client(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_device_by_node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&i2c_bus_type, NULL, node,
+                                        of_dev_node_match);
+       if (!dev)
+               return NULL;
+
+       return i2c_verify_adapter(dev);
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+#else
+static void internal_of_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_OF */
+
 static int i2c_do_add_adapter(struct i2c_driver *driver,
                              struct i2c_adapter *adap)
 {
@@ -1081,6 +1199,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
 
 exit_recovery:
        /* create pre-declared device nodes */
+       internal_of_i2c_register_devices(adap);
+
        if (adap->nr < __i2c_first_dynamic_bus_num)
                i2c_scan_static_board_info(adap);
 
@@ -1305,7 +1425,6 @@ void i2c_del_adapter(struct i2c_adapter *adap)
 }
 EXPORT_SYMBOL(i2c_del_adapter);
 
-
 /* ------------------------------------------------------------------------- */
 
 int i2c_for_each_dev(void *data, int (*fn)(struct device *, void *))
@@ -1476,6 +1595,61 @@ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg)
 }
 EXPORT_SYMBOL(i2c_clients_command);
 
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
+                        void *arg)
+{
+       struct of_reconfig_data *rd = arg;
+       struct i2c_adapter *adap;
+       struct i2c_client *client;
+
+       switch (of_reconfig_get_state_change(action, rd)) {
+       case OF_RECONFIG_CHANGE_ADD:
+               adap = of_find_i2c_adapter_by_node(rd->dn->parent);
+               if (adap == NULL)
+                       return NOTIFY_OK;       /* not for us */
+
+               if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+                       put_device(&adap->dev);
+                       return NOTIFY_OK;
+               }
+
+               client = of_i2c_register_device(adap, rd->dn);
+               put_device(&adap->dev);
+
+               if (IS_ERR(client)) {
+                       pr_err("%s: failed to create for '%s'\n",
+                                       __func__, rd->dn->full_name);
+                       return notifier_from_errno(PTR_ERR(client));
+               }
+               break;
+       case OF_RECONFIG_CHANGE_REMOVE:
+               /* already depopulated? */
+               if (!of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
+               /* find our device by node */
+               client = of_find_i2c_device_by_node(rd->dn);
+               if (client == NULL)
+                       return NOTIFY_OK;       /* no? not meant for us */
+
+               /* unregister takes one ref away */
+               i2c_unregister_device(client);
+
+               /* and put the reference of the find */
+               put_device(&client->dev);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+static struct notifier_block i2c_of_notifier = {
+       .notifier_call = of_i2c_notify,
+};
+#else
+extern struct notifier_block i2c_of_notifier;
+#endif /* CONFIG_OF_DYNAMIC */
+
 static int __init i2c_init(void)
 {
        int retval;
@@ -1493,6 +1667,10 @@ static int __init i2c_init(void)
        retval = i2c_add_driver(&dummy_driver);
        if (retval)
                goto class_err;
+
+       if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+               WARN_ON(of_reconfig_notifier_register(&i2c_of_notifier));
+
        return 0;
 
 class_err:
@@ -1506,6 +1684,8 @@ bus_err:
 
 static void __exit i2c_exit(void)
 {
+       if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+               WARN_ON(of_reconfig_notifier_unregister(&i2c_of_notifier));
        i2c_del_driver(&dummy_driver);
 #ifdef CONFIG_I2C_COMPAT
        class_compat_unregister(i2c_adapter_compat_class);
index 7409ebb33c47df7166087f937d0d5eb91d9d70c9..797e3117bef7437ef2d6f734431a16e43acaed34 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/i2c.h>
 #include <linux/i2c-mux.h>
 #include <linux/of.h>
-#include <linux/of_i2c.h>
 
 /* multiplexer per channel data */
 struct i2c_mux_priv {
@@ -185,8 +184,6 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
        dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
                 i2c_adapter_id(&priv->adap));
 
-       of_i2c_register_devices(&priv->adap);
-
        return &priv->adap;
 }
 EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
index 5d4a4583d2df8b42b5f5fb9ab52c420441c28f31..8019e642d2f51d919619ef9818a534574c4c5744 100644 (file)
@@ -40,8 +40,15 @@ static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
 static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 static struct kmem_cache *isert_cmd_cache;
 
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
@@ -107,9 +114,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
        attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-        * work-around for RDMA_READ..
+        * work-around for RDMA_READs with ConnectX-2.
+        *
+        * Also, still make sure to have at least two SGEs for
+        * outgoing control PDU responses.
         */
-       attr.cap.max_send_sge = devattr.max_sge - 2;
+       attr.cap.max_send_sge = max(2, devattr.max_sge - 2);
        isert_conn->max_sge = attr.cap.max_send_sge;
 
        attr.cap.max_recv_sge = 1;
@@ -124,12 +134,18 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
                pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-               return ret;
+               goto err;
        }
        isert_conn->conn_qp = cma_id->qp;
        pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
        return 0;
+err:
+       mutex_lock(&device_list_mutex);
+       device->cq_active_qps[min_index]--;
+       mutex_unlock(&device_list_mutex);
+
+       return ret;
 }
 
 static void
@@ -212,6 +228,13 @@ isert_create_device_ib_res(struct isert_device *device)
        struct ib_device *ib_dev = device->ib_device;
        struct isert_cq_desc *cq_desc;
        int ret = 0, i, j;
+       int max_rx_cqe, max_tx_cqe;
+       struct ib_device_attr dev_attr;
+
+       memset(&dev_attr, 0, sizeof(struct ib_device_attr));
+       ret = isert_query_device(device->ib_device, &dev_attr);
+       if (ret)
+               return ret;
 
        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
@@ -234,6 +257,9 @@ isert_create_device_ib_res(struct isert_device *device)
                goto out_cq_desc;
        }
 
+       max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr.max_cqe);
+       max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr.max_cqe);
+
        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;
@@ -242,7 +268,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_RX_CQ_LEN, i);
+                                               max_rx_cqe, i);
                if (IS_ERR(device->dev_rx_cq[i])) {
                        ret = PTR_ERR(device->dev_rx_cq[i]);
                        device->dev_rx_cq[i] = NULL;
@@ -253,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
-                                               ISER_MAX_TX_CQ_LEN, i);
+                                               max_tx_cqe, i);
                if (IS_ERR(device->dev_tx_cq[i])) {
                        ret = PTR_ERR(device->dev_tx_cq[i]);
                        device->dev_tx_cq[i] = NULL;
@@ -375,8 +401,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-       struct iscsi_np *np = cma_id->context;
-       struct isert_np *isert_np = np->np_context;
+       struct isert_np *isert_np = cma_id->context;
+       struct iscsi_np *np = isert_np->np;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
@@ -401,12 +427,12 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
+       init_completion(&isert_conn->login_req_comp);
        init_completion(&isert_conn->conn_wait);
        init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
 
-       cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
        isert_conn->responder_resources = event->param.conn.responder_resources;
        isert_conn->initiator_depth = event->param.conn.initiator_depth;
@@ -466,6 +492,14 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        if (ret)
                goto out_conn_dev;
 
+       ret = isert_rdma_post_recvl(isert_conn);
+       if (ret)
+               goto out_conn_dev;
+
+       ret = isert_rdma_accept(isert_conn);
+       if (ret)
+               goto out_conn_dev;
+
        mutex_lock(&isert_np->np_accept_mutex);
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
@@ -486,6 +520,7 @@ out_login_buf:
        kfree(isert_conn->login_buf);
 out:
        kfree(isert_conn);
+       rdma_reject(cma_id, NULL, 0);
        return ret;
 }
 
@@ -498,18 +533,20 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
 
+       isert_free_rx_descriptors(isert_conn);
+       rdma_destroy_id(isert_conn->conn_cm_id);
+
        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
                        isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
                pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
+               mutex_lock(&device_list_mutex);
                isert_conn->conn_device->cq_active_qps[cq_index]--;
+               mutex_unlock(&device_list_mutex);
 
-               rdma_destroy_qp(isert_conn->conn_cm_id);
+               ib_destroy_qp(isert_conn->conn_qp);
        }
 
-       isert_free_rx_descriptors(isert_conn);
-       rdma_destroy_id(isert_conn->conn_cm_id);
-
        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@ -529,9 +566,19 @@ isert_connect_release(struct isert_conn *isert_conn)
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       struct isert_conn *isert_conn = cma_id->context;
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+       pr_info("conn %p\n", isert_conn);
 
-       kref_get(&isert_conn->conn_kref);
+       if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+               pr_warn("conn %p connect_release is running\n", isert_conn);
+               return;
+       }
+
+       mutex_lock(&isert_conn->conn_mutex);
+       if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+               isert_conn->state = ISER_CONN_UP;
+       mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void
@@ -552,65 +599,108 @@ isert_put_conn(struct isert_conn *isert_conn)
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-       struct isert_conn *isert_conn = container_of(work,
-                               struct isert_conn, conn_logout_work);
+       int err;
 
-       pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-       mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->state == ISER_CONN_UP)
+       switch (isert_conn->state) {
+       case ISER_CONN_TERMINATING:
+               break;
+       case ISER_CONN_UP:
+               /*
+                * No flush completions will occur as we didn't
+                * get to ISER_CONN_FULL_FEATURE yet, complete
+                * to allow teardown progress.
+                */
+               complete(&isert_conn->conn_wait_comp_err);
+       case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+               pr_info("Terminating conn %p state %d\n",
+                          isert_conn, isert_conn->state);
                isert_conn->state = ISER_CONN_TERMINATING;
-
-       if (isert_conn->post_recv_buf_count == 0 &&
-           atomic_read(&isert_conn->post_send_buf_count) == 0) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               goto wake_up;
-       }
-       if (!isert_conn->conn_cm_id) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               isert_put_conn(isert_conn);
-               return;
+               err = rdma_disconnect(isert_conn->conn_cm_id);
+               if (err)
+                       pr_warn("Failed rdma_disconnect isert_conn %p\n",
+                                  isert_conn);
+               break;
+       default:
+               pr_warn("conn %p teminating in state %d\n",
+                          isert_conn, isert_conn->state);
        }
+}
 
-       if (isert_conn->disconnect) {
-               /* Send DREQ/DREP towards our initiator */
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+                    enum rdma_cm_event_type event)
+{
+       pr_debug("isert np %p, handling event %d\n", isert_np, event);
 
-       mutex_unlock(&isert_conn->conn_mutex);
+       switch (event) {
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_np->np_cm_id = NULL;
+               break;
+       case RDMA_CM_EVENT_ADDR_CHANGE:
+               isert_np->np_cm_id = isert_setup_id(isert_np);
+               if (IS_ERR(isert_np->np_cm_id)) {
+                       pr_err("isert np %p setup id failed: %ld\n",
+                                isert_np, PTR_ERR(isert_np->np_cm_id));
+                       isert_np->np_cm_id = NULL;
+               }
+               break;
+       default:
+               pr_err("isert np %p Unexpected event %d\n",
+                         isert_np, event);
+       }
 
-wake_up:
-       complete(&isert_conn->conn_wait);
+       return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+                          enum rdma_cm_event_type event)
 {
+       struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;
 
-       if (!cma_id->qp) {
-               struct isert_np *isert_np = cma_id->context;
+       if (isert_np->np_cm_id == cma_id)
+               return isert_np_cma_handler(cma_id->context, event);
 
-               isert_np->np_cm_id = NULL;
-               return -1;
-       }
+       isert_conn = cma_id->qp->qp_context;
 
-       isert_conn = (struct isert_conn *)cma_id->context;
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
 
-       isert_conn->disconnect = disconnect;
-       INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-       schedule_work(&isert_conn->conn_logout_work);
+       pr_info("conn %p completing conn_wait\n", isert_conn);
+       complete(&isert_conn->conn_wait);
 
        return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+       isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        int ret = 0;
-       bool disconnect = false;
 
        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);
@@ -628,11 +718,14 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
        case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-               disconnect = true;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-               ret = isert_disconnected_handler(cma_id, disconnect);
+               ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+       case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
+               isert_connect_error(cma_id);
+               break;
        default:
                pr_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
@@ -834,7 +927,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                        if (ret)
                                return ret;
 
-                       isert_conn->state = ISER_CONN_UP;
+                       /* Now we are in FULL_FEATURE phase */
+                       mutex_lock(&isert_conn->conn_mutex);
+                       isert_conn->state = ISER_CONN_FULL_FEATURE;
+                       mutex_unlock(&isert_conn->conn_mutex);
                        goto post_send;
                }
 
@@ -851,18 +947,17 @@ post_send:
 }
 
 static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
-                  struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
 {
+       struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+       int rx_buflen = isert_conn->login_req_len;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;
 
-       if (!login) {
-               pr_err("conn->conn_login is NULL\n");
-               dump_stack();
-               return;
-       }
+       pr_info("conn %p\n", isert_conn);
+
+       WARN_ON_ONCE(!login);
 
        if (login->first_request) {
                struct iscsi_login_req *login_req =
@@ -892,7 +987,8 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
                 size, rx_buflen, MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);
 
-       complete(&isert_conn->conn_login_comp);
+       if (login->first_request)
+               complete(&isert_conn->conn_login_comp);
 }
 
 static void
@@ -1169,11 +1265,20 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));
 
-       if ((char *)desc == isert_conn->login_req_buf)
-               isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
-                                  isert_conn);
-       else
+       if ((char *)desc == isert_conn->login_req_buf) {
+               isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+               if (isert_conn->conn) {
+                       struct iscsi_login *login = isert_conn->conn->conn_login;
+
+                       if (login && !login->first_request)
+                               isert_rx_login_req(isert_conn);
+               }
+               mutex_lock(&isert_conn->conn_mutex);
+               complete(&isert_conn->login_req_comp);
+               mutex_unlock(&isert_conn->conn_mutex);
+       } else {
                isert_rx_do_work(desc, isert_conn);
+       }
 
        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);
@@ -1483,7 +1588,7 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
                msleep(3000);
 
        mutex_lock(&isert_conn->conn_mutex);
-       isert_conn->state = ISER_CONN_DOWN;
+       isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);
 
        iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
@@ -2044,13 +2149,51 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
        return ret;
 }
 
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+       struct iscsi_np *np = isert_np->np;
+       struct rdma_cm_id *id;
+       struct sockaddr *sa;
+       int ret;
+
+       sa = (struct sockaddr *)&np->np_sockaddr;
+       pr_debug("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+       id = rdma_create_id(isert_cma_handler, isert_np,
+                           RDMA_PS_TCP, IB_QPT_RC);
+       if (IS_ERR(id)) {
+               pr_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+               ret = PTR_ERR(id);
+               goto out;
+       }
+       pr_debug("id %p context %p\n", id, id->context);
+
+       ret = rdma_bind_addr(id, sa);
+       if (ret) {
+               pr_err("rdma_bind_addr() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+       if (ret) {
+               pr_err("rdma_listen() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       return id;
+out_id:
+       rdma_destroy_id(id);
+out:
+       return ERR_PTR(ret);
+}
+
 static int
 isert_setup_np(struct iscsi_np *np,
               struct __kernel_sockaddr_storage *ksockaddr)
 {
        struct isert_np *isert_np;
        struct rdma_cm_id *isert_lid;
-       struct sockaddr *sa;
        int ret;
 
        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
@@ -2062,9 +2205,8 @@ isert_setup_np(struct iscsi_np *np,
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
+       isert_np->np = np;
 
-       sa = (struct sockaddr *)ksockaddr;
-       pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
        /*
         * Setup the np->np_sockaddr from the passed sockaddr setup
         * in iscsi_target_configfs.c code..
@@ -2072,37 +2214,20 @@ isert_setup_np(struct iscsi_np *np,
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct __kernel_sockaddr_storage));
 
-       isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
-                               IB_QPT_RC);
+       isert_lid = isert_setup_id(isert_np);
        if (IS_ERR(isert_lid)) {
-               pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
-                      PTR_ERR(isert_lid));
                ret = PTR_ERR(isert_lid);
                goto out;
        }
 
-       ret = rdma_bind_addr(isert_lid, sa);
-       if (ret) {
-               pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
-       ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
-       if (ret) {
-               pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
        isert_np->np_cm_id = isert_lid;
        np->np_context = isert_np;
-       pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
 
        return 0;
 
-out_lid:
-       rdma_destroy_id(isert_lid);
 out:
        kfree(isert_np);
+
        return ret;
 }
 
@@ -2138,13 +2263,27 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        int ret;
 
-       pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+       pr_info("before login_req comp conn: %p\n", isert_conn);
+       ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+       if (ret) {
+               pr_err("isert_conn %p interrupted before got login req\n",
+                       isert_conn);
+               return ret;
+       }
+       isert_conn->login_req_comp.done = 0;
+
+       if (!login->first_request)
+               return 0;
+
+       isert_rx_login_req(isert_conn);
+
+       pr_info("before conn_login_comp conn: %p\n", conn);
 
        ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
        if (ret)
                return ret;
 
-       pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+       pr_info("processing login->req: %p\n", login->req);
        return 0;
 }
 
@@ -2222,17 +2361,10 @@ accept_wait:
        isert_conn->conn = conn;
        max_accept = 0;
 
-       ret = isert_rdma_post_recvl(isert_conn);
-       if (ret)
-               return ret;
-
-       ret = isert_rdma_accept(isert_conn);
-       if (ret)
-               return ret;
-
        isert_set_conn_info(np, conn, isert_conn);
 
-       pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+       pr_debug("Processing isert_conn: %p\n", isert_conn);
+
        return 0;
 }
 
@@ -2248,6 +2380,24 @@ isert_free_np(struct iscsi_np *np)
        kfree(isert_np);
 }
 
+static void isert_release_work(struct work_struct *work)
+{
+       struct isert_conn *isert_conn = container_of(work,
+                                                    struct isert_conn,
+                                                    release_work);
+
+       pr_info("Starting release conn %p\n", isert_conn);
+
+       wait_for_completion(&isert_conn->conn_wait);
+
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn->state = ISER_CONN_DOWN;
+       mutex_unlock(&isert_conn->conn_mutex);
+
+       pr_info("Destroying conn %p\n", isert_conn);
+       isert_put_conn(isert_conn);
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
@@ -2255,10 +2405,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        pr_debug("isert_wait_conn: Starting \n");
 
        mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->conn_cm_id) {
-               pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase..
@@ -2267,14 +2413,13 @@ static void isert_wait_conn(struct iscsi_conn *conn)
                mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
-       if (isert_conn->state == ISER_CONN_UP)
-               isert_conn->state = ISER_CONN_TERMINATING;
+       isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);
 
        wait_for_completion(&isert_conn->conn_wait_comp_err);
 
-       wait_for_completion(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
+       INIT_WORK(&isert_conn->release_work, isert_release_work);
+       queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
@@ -2320,20 +2465,30 @@ static int __init isert_init(void)
                goto destroy_rx_wq;
        }
 
+       isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+                                       WQ_UNBOUND_MAX_ACTIVE);
+       if (!isert_release_wq) {
+               pr_err("Unable to allocate isert_release_wq\n");
+               ret = -ENOMEM;
+               goto destroy_comp_wq;
+       }
+
        isert_cmd_cache = kmem_cache_create("isert_cmd_cache",
                        sizeof(struct isert_cmd), __alignof__(struct isert_cmd),
                        0, NULL);
        if (!isert_cmd_cache) {
                pr_err("Unable to create isert_cmd_cache\n");
                ret = -ENOMEM;
-               goto destroy_tx_cq;
+               goto destroy_release_wq;
        }
 
        iscsit_register_transport(&iser_target_transport);
-       pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+       pr_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
        return 0;
 
-destroy_tx_cq:
+destroy_release_wq:
+       destroy_workqueue(isert_release_wq);
+destroy_comp_wq:
        destroy_workqueue(isert_comp_wq);
 destroy_rx_wq:
        destroy_workqueue(isert_rx_wq);
@@ -2344,6 +2499,7 @@ static void __exit isert_exit(void)
 {
        flush_scheduled_work();
        kmem_cache_destroy(isert_cmd_cache);
+       destroy_workqueue(isert_release_wq);
        destroy_workqueue(isert_comp_wq);
        destroy_workqueue(isert_rx_wq);
        iscsit_unregister_transport(&iser_target_transport);
index 032f65abee3694ec56122dd0bd9091eb5e84caa6..b233ee5e46b0ac67cb735449a7a9ccc87e9d7478 100644 (file)
@@ -21,6 +21,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
        ISER_CONN_INIT,
        ISER_CONN_UP,
+       ISER_CONN_FULL_FEATURE,
        ISER_CONN_TERMINATING,
        ISER_CONN_DOWN,
 };
@@ -87,6 +88,7 @@ struct isert_conn {
        char                    *login_req_buf;
        char                    *login_rsp_buf;
        u64                     login_req_dma;
+       int                     login_req_len;
        u64                     login_rsp_dma;
        unsigned int            conn_rx_desc_head;
        struct iser_rx_desc     *conn_rx_descs;
@@ -94,18 +96,18 @@ struct isert_conn {
        struct iscsi_conn       *conn;
        struct list_head        conn_accept_node;
        struct completion       conn_login_comp;
+       struct completion       login_req_comp;
        struct iser_tx_desc     conn_login_tx_desc;
        struct rdma_cm_id       *conn_cm_id;
        struct ib_pd            *conn_pd;
        struct ib_mr            *conn_mr;
        struct ib_qp            *conn_qp;
        struct isert_device     *conn_device;
-       struct work_struct      conn_logout_work;
        struct mutex            conn_mutex;
        struct completion       conn_wait;
        struct completion       conn_wait_comp_err;
        struct kref             conn_kref;
-       bool                    disconnect;
+       struct work_struct      release_work;
 };
 
 #define ISERT_MAX_CQ 64
@@ -131,6 +133,7 @@ struct isert_device {
 };
 
 struct isert_np {
+       struct iscsi_np         *np;
        struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
index ce715b1bee46880bdb34b5360522367bc390b835..4de2571938b8b97870e91a04b7fac12a836dbaf9 100644 (file)
@@ -151,6 +151,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
                        DMI_MATCH(DMI_PRODUCT_VERSION, "5a"),
                },
        },
+       {
+               /* Medion Akoya E7225 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+                       DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+               },
+       },
        {
                /* Blue FB5601 */
                .matches = {
@@ -407,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
                },
        },
+       {
+               /* Acer Aspire 7738 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
@@ -721,6 +736,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
        { }
 };
 
+/*
+ * Some laptops need keyboard reset before probing for the trackpad to get
+ * it detected, initialised & finally work.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+       {
+               /* Gigabyte P35 v2 - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+               },
+       },
+               {
+               /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+               },
+       },
+       {
+               /* Gigabyte P34 - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+               },
+       },
+       { }
+};
+
 #endif /* CONFIG_X86 */
 
 #ifdef CONFIG_PNP
@@ -1001,6 +1045,9 @@ static int __init i8042_platform_init(void)
        if (dmi_check_system(i8042_dmi_dritek_table))
                i8042_dritek = true;
 
+       if (dmi_check_system(i8042_dmi_kbdreset_table))
+               i8042_kbdreset = true;
+
        /*
         * A20 was already enabled during early kernel init. But some buggy
         * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
index 78e4de42efaacec53c29f740fafe490dead2af84..9870c540e6fb1e9a82ccb06e164c9dc3a6c2c047 100644 (file)
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
 module_param_named(notimeout, i8042_notimeout, bool, 0);
 MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
 
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
 #ifdef CONFIG_X86
 static bool i8042_dritek;
 module_param_named(dritek, i8042_dritek, bool, 0);
@@ -782,6 +786,16 @@ static int __init i8042_check_aux(void)
        if (i8042_toggle_aux(true))
                return -1;
 
+/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+       if (i8042_kbdreset) {
+               pr_warn("Attempting to reset device connected to KBD port\n");
+               i8042_kbd_write(NULL, (unsigned char) 0xff);
+       }
+
 /*
  * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
  * used it for a PCI card or somethig else.
index de737ba1d3519126da16f464dba34dd6fa4b9ce6..4a10c1562d0fee8c9a433d99c3b68ae405049f73 100644 (file)
@@ -88,6 +88,9 @@ struct cache_disk_superblock {
 } __packed;
 
 struct dm_cache_metadata {
+       atomic_t ref_count;
+       struct list_head list;
+
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *metadata_sm;
@@ -634,10 +637,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
 
 /*----------------------------------------------------------------*/
 
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
-                                                sector_t data_block_size,
-                                                bool may_format_device,
-                                                size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+                                              sector_t data_block_size,
+                                              bool may_format_device,
+                                              size_t policy_hint_size)
 {
        int r;
        struct dm_cache_metadata *cmd;
@@ -645,9 +648,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd) {
                DMERR("could not allocate metadata struct");
-               return NULL;
+               return ERR_PTR(-ENOMEM);
        }
 
+       atomic_set(&cmd->ref_count, 1);
        init_rwsem(&cmd->root_lock);
        cmd->bdev = bdev;
        cmd->data_block_size = data_block_size;
@@ -670,10 +674,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
        return cmd;
 }
 
+/*
+ * We keep a little list of ref counted metadata objects to prevent two
+ * different target instances creating separate bufio instances.  This is
+ * an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+       struct dm_cache_metadata *cmd;
+
+       list_for_each_entry(cmd, &table, list)
+               if (cmd->bdev == bdev) {
+                       atomic_inc(&cmd->ref_count);
+                       return cmd;
+               }
+
+       return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+                                               sector_t data_block_size,
+                                               bool may_format_device,
+                                               size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd, *cmd2;
+
+       mutex_lock(&table_lock);
+       cmd = lookup(bdev);
+       mutex_unlock(&table_lock);
+
+       if (cmd)
+               return cmd;
+
+       cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+       if (!IS_ERR(cmd)) {
+               mutex_lock(&table_lock);
+               cmd2 = lookup(bdev);
+               if (cmd2) {
+                       mutex_unlock(&table_lock);
+                       __destroy_persistent_data_objects(cmd);
+                       kfree(cmd);
+                       return cmd2;
+               }
+               list_add(&cmd->list, &table);
+               mutex_unlock(&table_lock);
+       }
+
+       return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+       if (cmd->data_block_size != data_block_size) {
+               DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+                     (unsigned long long) data_block_size,
+                     (unsigned long long) cmd->data_block_size);
+               return false;
+       }
+
+       return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+                                                sector_t data_block_size,
+                                                bool may_format_device,
+                                                size_t policy_hint_size)
+{
+       struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+                                                      may_format_device, policy_hint_size);
+
+       if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+               dm_cache_metadata_close(cmd);
+               return ERR_PTR(-EINVAL);
+       }
+
+       return cmd;
+}
+
 void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
 {
-       __destroy_persistent_data_objects(cmd);
-       kfree(cmd);
+       if (atomic_dec_and_test(&cmd->ref_count)) {
+               mutex_lock(&table_lock);
+               list_del(&cmd->list);
+               mutex_unlock(&table_lock);
+
+               __destroy_persistent_data_objects(cmd);
+               kfree(cmd);
+       }
 }
 
 int dm_cache_resize(struct dm_cache_metadata *cmd, dm_cblock_t new_cache_size)
index 86a2a5e3b26bacdc0c773076cf7283cacd188701..39996ca58ce60a0d228fc4cca271d79c5caa6e16 100644 (file)
@@ -2457,6 +2457,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
 
+       if (get_pool_mode(pool) >= PM_READ_ONLY) {
+               DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+                     dm_device_name(pool->pool_md));
+               return -EINVAL;
+       }
+
        if (!strcasecmp(argv[0], "create_thin"))
                r = process_create_thin_mesg(argc, argv, pool);
 
index 2332b5ced0dd0e9069237779e5edee246352c9aa..4daf5c03b33bfdc626d53e6015ccb7c565060117 100644 (file)
@@ -2678,7 +2678,8 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
             (s->failed >= 2 && fdev[1]->toread) ||
             (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
              !test_bit(R5_OVERWRITE, &fdev[0]->flags)) ||
-            (sh->raid_conf->level == 6 && s->failed && s->to_write))) {
+            ((sh->raid_conf->level == 6 || sh->sector >= sh->raid_conf->mddev->recovery_cp)
+             && s->failed && s->to_write))) {
                /* we would like to get this block, possibly by computing it,
                 * otherwise read it if the backing disk is insync
                 */
index d8d5da7c52dbb50759915ea7c76ca9c56c41a923..942305129e150e5ea80c8c2a91c6bca310674cc8 100644 (file)
@@ -67,7 +67,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
 {
        dev_dbg(dev, "pre_pll_clk_div\t%d\n",  pll->pre_pll_clk_div);
        dev_dbg(dev, "pll_multiplier \t%d\n",  pll->pll_multiplier);
-       if (pll->flags != SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+       if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
                dev_dbg(dev, "op_sys_clk_div \t%d\n", pll->op_sys_clk_div);
                dev_dbg(dev, "op_pix_clk_div \t%d\n", pll->op_pix_clk_div);
        }
@@ -77,7 +77,7 @@ static void print_pll(struct device *dev, struct smiapp_pll *pll)
        dev_dbg(dev, "ext_clk_freq_hz \t%d\n", pll->ext_clk_freq_hz);
        dev_dbg(dev, "pll_ip_clk_freq_hz \t%d\n", pll->pll_ip_clk_freq_hz);
        dev_dbg(dev, "pll_op_clk_freq_hz \t%d\n", pll->pll_op_clk_freq_hz);
-       if (pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS) {
+       if (!(pll->flags & SMIAPP_PLL_FLAG_NO_OP_CLOCKS)) {
                dev_dbg(dev, "op_sys_clk_freq_hz \t%d\n",
                        pll->op_sys_clk_freq_hz);
                dev_dbg(dev, "op_pix_clk_freq_hz \t%d\n",
index b280216de31b6927a1a1e5d499cd6d8dbd17ad0c..de8bc35d8b0bf5c23fa07d1a92ac8fe33577a6c9 100644 (file)
@@ -2629,7 +2629,9 @@ static int smiapp_registered(struct v4l2_subdev *subdev)
                pll->flags |= SMIAPP_PLL_FLAG_OP_PIX_CLOCK_PER_LANE;
        pll->scale_n = sensor->limits[SMIAPP_LIMIT_SCALER_N_MIN];
 
+       mutex_lock(&sensor->mutex);
        rval = smiapp_update_mode(sensor);
+       mutex_unlock(&sensor->mutex);
        if (rval) {
                dev_err(&client->dev, "update mode failed\n");
                goto out_nvm_release;
index af176b6ce738e78d4dc366ae0d56597f833f4e59..e6d3561eea478f4b0baa7536cd23a9499ca9e8bf 100644 (file)
@@ -1081,9 +1081,12 @@ static int __init af9005_usb_module_init(void)
                err("usb_register failed. (%d)", result);
                return result;
        }
+#if IS_MODULE(CONFIG_DVB_USB_AF9005) || defined(CONFIG_DVB_USB_AF9005_REMOTE)
+       /* FIXME: convert to todays kernel IR infrastructure */
        rc_decode = symbol_request(af9005_rc_decode);
        rc_keys = symbol_request(rc_map_af9005_table);
        rc_keys_size = symbol_request(rc_map_af9005_table_size);
+#endif
        if (rc_decode == NULL || rc_keys == NULL || rc_keys_size == NULL) {
                err("af9005_rc_decode function not found, disabling remote");
                af9005_properties.rc.legacy.rc_query = NULL;
index 5dbefa68b1d20f0e0ca14fd725b99aafb9410fcd..363cdbf4ac8d8aa5f57072fcb2ae714dd5d5996b 100644 (file)
@@ -1603,12 +1603,12 @@ static void uvc_delete(struct uvc_device *dev)
 {
        struct list_head *p, *n;
 
-       usb_put_intf(dev->intf);
-       usb_put_dev(dev->udev);
-
        uvc_status_cleanup(dev);
        uvc_ctrl_cleanup_device(dev);
 
+       usb_put_intf(dev->intf);
+       usb_put_dev(dev->udev);
+
        if (dev->vdev.dev)
                v4l2_device_unregister(&dev->vdev);
 #ifdef CONFIG_MEDIA_CONTROLLER
index 2ea429c2771465070cbae9f921995cd8c221fef5..836e2ac36a0df682f00550a8c1d445bc8bd4b2e7 100644 (file)
@@ -1316,6 +1316,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
        sdhci_runtime_pm_get(host);
 
+       present = mmc_gpio_get_cd(host->mmc);
+
        spin_lock_irqsave(&host->lock, flags);
 
        WARN_ON(host->mrq != NULL);
@@ -1344,7 +1346,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
         *     zero: cd-gpio is used, and card is removed
         *     one: cd-gpio is used, and card is present
         */
-       present = mmc_gpio_get_cd(host->mmc);
        if (present < 0) {
                /* If polling, assume that the card is always present. */
                if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
index 9bf47a064cdf61f245fc5eb3ee2f24679eaa5809..a4694aa20a3e02aaa38605613f9cb576fedf2a90 100644 (file)
@@ -643,10 +643,14 @@ static int can_changelink(struct net_device *dev,
                if (dev->flags & IFF_UP)
                        return -EBUSY;
                cm = nla_data(data[IFLA_CAN_CTRLMODE]);
-               if (cm->flags & ~priv->ctrlmode_supported)
+
+               /* check whether changed bits are allowed to be modified */
+               if (cm->mask & ~priv->ctrlmode_supported)
                        return -EOPNOTSUPP;
+
+               /* clear bits to be modified and copy the flag values */
                priv->ctrlmode &= ~cm->mask;
-               priv->ctrlmode |= cm->flags;
+               priv->ctrlmode |= (cm->flags & cm->mask);
        }
 
        if (data[IFLA_CAN_BITTIMING]) {
index cc3df8aebb87391d7ffcf4b70df7c792e611ce0c..a3fb8b51038a17ae5d4ec3989c942afee4644ca9 100644 (file)
@@ -579,7 +579,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
                          usb_sndbulkpipe(dev->udev,
                                          dev->bulk_out->bEndpointAddress),
                          buf, msg->len,
-                         kvaser_usb_simple_msg_callback, priv);
+                         kvaser_usb_simple_msg_callback, netdev);
        usb_anchor_urb(urb, &priv->tx_submitted);
 
        err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -654,11 +654,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
        priv = dev->nets[channel];
        stats = &priv->netdev->stats;
 
-       if (status & M16C_STATE_BUS_RESET) {
-               kvaser_usb_unlink_tx_urbs(priv);
-               return;
-       }
-
        skb = alloc_can_err_skb(priv->netdev, &cf);
        if (!skb) {
                stats->rx_dropped++;
@@ -669,7 +664,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
        netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
 
-       if (status & M16C_STATE_BUS_OFF) {
+       if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
                cf->can_id |= CAN_ERR_BUSOFF;
 
                priv->can.can_stats.bus_off++;
@@ -695,9 +690,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
                }
 
                new_state = CAN_STATE_ERROR_PASSIVE;
-       }
-
-       if (status == M16C_STATE_BUS_ERROR) {
+       } else if (status & M16C_STATE_BUS_ERROR) {
                if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
                    ((txerr >= 96) || (rxerr >= 96))) {
                        cf->can_id |= CAN_ERR_CRTL;
@@ -707,7 +700,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
 
                        priv->can.can_stats.error_warning++;
                        new_state = CAN_STATE_ERROR_WARNING;
-               } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+               } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+                          ((txerr < 96) && (rxerr < 96))) {
                        cf->can_id |= CAN_ERR_PROT;
                        cf->data[2] = CAN_ERR_PROT_ACTIVE;
 
@@ -1238,6 +1232,9 @@ static int kvaser_usb_close(struct net_device *netdev)
        if (err)
                netdev_warn(netdev, "Cannot stop device, error %d\n", err);
 
+       /* reset tx contexts */
+       kvaser_usb_unlink_tx_urbs(priv);
+
        priv->can.state = CAN_STATE_STOPPED;
        close_candev(priv->netdev);
 
@@ -1286,12 +1283,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (!urb) {
                netdev_err(netdev, "No memory left for URBs\n");
                stats->tx_dropped++;
-               goto nourbmem;
+               dev_kfree_skb(skb);
+               return NETDEV_TX_OK;
        }
 
        buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
        if (!buf) {
                stats->tx_dropped++;
+               dev_kfree_skb(skb);
                goto nobufmem;
        }
 
@@ -1326,6 +1325,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
                }
        }
 
+       /* This should never happen; it implies a flow control bug */
        if (!context) {
                netdev_warn(netdev, "cannot find free context\n");
                ret =  NETDEV_TX_BUSY;
@@ -1356,9 +1356,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
        if (unlikely(err)) {
                can_free_echo_skb(netdev, context->echo_index);
 
-               skb = NULL; /* set to NULL to avoid double free in
-                            * dev_kfree_skb(skb) */
-
                atomic_dec(&priv->active_tx_urbs);
                usb_unanchor_urb(urb);
 
@@ -1380,8 +1377,6 @@ releasebuf:
        kfree(buf);
 nobufmem:
        usb_free_urb(urb);
-nourbmem:
-       dev_kfree_skb(skb);
        return ret;
 }
 
@@ -1493,6 +1488,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
        struct kvaser_usb_net_priv *priv;
        int i, err;
 
+       err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+       if (err)
+               return err;
+
        netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
        if (!netdev) {
                dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1578,7 +1577,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 {
        struct kvaser_usb *dev;
        int err = -ENOMEM;
-       int i;
+       int i, retry = 3;
 
        dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
        if (!dev)
@@ -1596,10 +1595,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
 
        usb_set_intfdata(intf, dev);
 
-       for (i = 0; i < MAX_NET_DEVICES; i++)
-               kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+       /* On some x86 laptops, plugging a Kvaser device again after
+        * an unplug makes the firmware always ignore the very first
+        * command. For such a case, provide some room for retries
+        * instead of completely exiting the driver.
+        */
+       do {
+               err = kvaser_usb_get_software_info(dev);
+       } while (--retry && err == -ETIMEDOUT);
 
-       err = kvaser_usb_get_software_info(dev);
        if (err) {
                dev_err(&intf->dev,
                        "Cannot get software infos, error %d\n", err);
index d30085c2b45494727895e6c6f69ab5be1e07f57b..a85a9c2f1385564998929e3a914cafb74d867d9a 100644 (file)
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
        schedule_work(&alx->reset_wk);
 }
 
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
 {
        struct alx_rx_queue *rxq = &alx->rxq;
        struct alx_rrd *rrd;
        struct alx_buffer *rxb;
        struct sk_buff *skb;
        u16 length, rfd_cleaned = 0;
+       int work = 0;
 
-       while (budget > 0) {
+       while (work < budget) {
                rrd = &rxq->rrd[rxq->rrd_read_idx];
                if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
                        break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
                                  RRD_NOR) != 1) {
                        alx_schedule_reset(alx);
-                       return 0;
+                       return work;
                }
 
                rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
                }
 
                napi_gro_receive(&alx->napi, skb);
-               budget--;
+               work++;
 
 next_pkt:
                if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
        if (rfd_cleaned)
                alx_refill_rx_ring(alx, GFP_ATOMIC);
 
-       return budget > 0;
+       return work;
 }
 
 static int alx_poll(struct napi_struct *napi, int budget)
 {
        struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
        struct alx_hw *hw = &alx->hw;
-       bool complete = true;
        unsigned long flags;
+       bool tx_complete;
+       int work;
 
-       complete = alx_clean_tx_irq(alx) &&
-                  alx_clean_rx_irq(alx, budget);
+       tx_complete = alx_clean_tx_irq(alx);
+       work = alx_clean_rx_irq(alx, budget);
 
-       if (!complete)
-               return 1;
+       if (!tx_complete || work == budget)
+               return budget;
 
        napi_complete(&alx->napi);
 
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
 
        alx_post_write(hw);
 
-       return 0;
+       return work;
 }
 
 static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
index 5501cad30cfa94207deaf0b2a5bbcfb01fa7cc89..8c1eab1151b859b23ff01e0e2fb137c1e9b96c14 100644 (file)
@@ -17389,23 +17389,6 @@ static int tg3_init_one(struct pci_dev *pdev,
                goto err_out_apeunmap;
        }
 
-       /*
-        * Reset chip in case UNDI or EFI driver did not shutdown
-        * DMA self test will enable WDMAC and we'll see (spurious)
-        * pending DMA on the PCI bus at that point.
-        */
-       if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
-           (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
-               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
-               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
-       }
-
-       err = tg3_test_dma(tp);
-       if (err) {
-               dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
-               goto err_out_apeunmap;
-       }
-
        intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
        rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
        sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17450,6 +17433,23 @@ static int tg3_init_one(struct pci_dev *pdev,
                        sndmbx += 0xc;
        }
 
+       /*
+        * Reset chip in case UNDI or EFI driver did not shutdown
+        * DMA self test will enable WDMAC and we'll see (spurious)
+        * pending DMA on the PCI bus at that point.
+        */
+       if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+           (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+               tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+               tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+       }
+
+       err = tg3_test_dma(tp);
+       if (err) {
+               dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+               goto err_out_apeunmap;
+       }
+
        tg3_init_coal(tp);
 
        pci_set_drvdata(pdev, dev);
index 635f55992d7e8f2788a5411974e448d7af20f3ec..8cc0eaa9d6f131997e66de43e64a455ba41ab4d5 100644 (file)
@@ -1294,10 +1294,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                skb_put(skb, bytes_written);
                skb->protocol = eth_type_trans(skb, netdev);
 
-               if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
-                       skb->csum = htons(checksum);
-                       skb->ip_summed = CHECKSUM_COMPLETE;
-               }
+               /* Hardware does not provide whole packet checksum. It only
+                * provides pseudo checksum. Since hw validates the packet
+                * checksum but not provide us the checksum value. use
+                * CHECSUM_UNNECESSARY.
+                */
+               if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+                   ipv4_csum_ok)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
index b1ab3a4956a5b83e70097242afd5e3d7d7fe4173..e18240de159c40e66b601323b215a19b6cbcdbb1 100644 (file)
@@ -1293,6 +1293,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               /* In dual EMAC, reserved VLAN id should not be used for
+                * creating VLAN interfaces as this can break the dual
+                * EMAC port separation
+                */
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
        return cpsw_add_vlan_ale_entry(priv, vid);
 }
@@ -1306,6 +1319,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
        if (vid == priv->data.default_vlan)
                return 0;
 
+       if (priv->data.dual_emac) {
+               int i;
+
+               for (i = 0; i < priv->data.slaves; i++) {
+                       if (vid == priv->slaves[i].port_vlan)
+                               return -EINVAL;
+               }
+       }
+
        dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
        ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
        if (ret != 0)
index a7bb5da6a96bfe5d8a38e7ea53faafbc87b77f0e..6adfa3e45f457da05d06c390348b7eab012f39e1 100644 (file)
@@ -7,14 +7,6 @@ config OF
 menu "Device Tree and Open Firmware support"
        depends on OF
 
-config PROC_DEVICETREE
-       bool "Support for device tree in /proc"
-       depends on PROC_FS && !SPARC
-       help
-         This option adds a device-tree directory under /proc which contains
-         an image of the device tree that the kernel copies from Open
-         Firmware or other boot firmware. If unsure, say Y here.
-
 config OF_SELFTEST
        bool "Device Tree Runtime self tests"
        help
@@ -52,12 +44,6 @@ config OF_IRQ
 config OF_DEVICE
        def_bool y
 
-config OF_I2C
-       def_tristate I2C
-       depends on I2C
-       help
-         OpenFirmware I2C accessors
-
 config OF_NET
        depends on NETDEVICES
        def_bool y
@@ -90,4 +76,14 @@ config OF_RESERVED_MEM
        help
          Helpers to allow for reservation of memory regions
 
+config OF_RESOLVE
+       bool
+
+config OF_OVERLAY
+       bool
+       depends on OF
+       select OF_DYNAMIC
+       select OF_DEVICE
+       select OF_RESOLVE
+
 endmenu # OF
index 2aaa7b90fc14ea2d1d3f143d48ff9a74e2e03f5b..4d916160d04e0e33fc38d5729b7342dfacd1427d 100644 (file)
@@ -1,10 +1,10 @@
 obj-y = base.o
+obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
 obj-$(CONFIG_OF_FLATTREE) += fdt.o
 obj-$(CONFIG_OF_PROMTREE) += pdt.o
 obj-$(CONFIG_OF_ADDRESS)  += address.o
 obj-$(CONFIG_OF_IRQ)    += irq.o
 obj-$(CONFIG_OF_DEVICE) += device.o platform.o
-obj-$(CONFIG_OF_I2C)   += of_i2c.o
 obj-$(CONFIG_OF_NET)   += of_net.o
 obj-$(CONFIG_OF_SELFTEST) += selftest.o
 obj-$(CONFIG_OF_MDIO)  += of_mdio.o
@@ -12,5 +12,7 @@ obj-$(CONFIG_OF_PCI)  += of_pci.o
 obj-$(CONFIG_OF_PCI_IRQ)  += of_pci_irq.o
 obj-$(CONFIG_OF_MTD)   += of_mtd.o
 obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
+obj-$(CONFIG_OF_RESOLVE)  += resolver.o
+obj-$(CONFIG_OF_OVERLAY) += overlay.o
 
 CFLAGS_fdt.o = -I$(src)/../../scripts/dtc/libfdt
index 8fb2b576973320621c105a606f231631e77f311c..2704df1ad6813a60dec4b0f61067512426803d5c 100644 (file)
@@ -428,7 +428,7 @@ static u64 __of_translate_address(struct device_node *dev,
        int na, ns, pna, pns;
        u64 result = OF_BAD_ADDR;
 
-       pr_debug("OF: ** translation for device %s **\n", dev->full_name);
+       pr_debug("OF: ** translation for device %s **\n", of_node_full_name(dev));
 
        /* Increase refcount at current level */
        of_node_get(dev);
@@ -443,13 +443,13 @@ static u64 __of_translate_address(struct device_node *dev,
        bus->count_cells(dev, &na, &ns);
        if (!OF_CHECK_COUNTS(na, ns)) {
                printk(KERN_ERR "prom_parse: Bad cell count for %s\n",
-                      dev->full_name);
+                      of_node_full_name(dev));
                goto bail;
        }
        memcpy(addr, in_addr, na * 4);
 
        pr_debug("OF: bus is %s (na=%d, ns=%d) on %s\n",
-           bus->name, na, ns, parent->full_name);
+           bus->name, na, ns, of_node_full_name(parent));
        of_dump_addr("OF: translating address:", addr, na);
 
        /* Translate */
index aa4d7e3a3050fe5534886d0f59f85239dc6cd34d..fb6704ecbf111fb8f623b639f9dafe6c8386a81a 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of_graph.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
+#include <linux/string.h>
 #include <linux/proc_fs.h>
 
 #include "of_private.h"
@@ -36,7 +37,15 @@ struct device_node *of_chosen;
 struct device_node *of_aliases;
 static struct device_node *of_stdout;
 
-DEFINE_MUTEX(of_aliases_mutex);
+struct kset *of_kset;
+
+/*
+ * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
+ * This mutex must be held whenever modifications are being made to the
+ * device tree. The of_{attach,detach}_node() and
+ * of_{add,remove,update}_property() helpers make sure this happens.
+ */
+DEFINE_MUTEX(of_mutex);
 
 /* use when traversing tree through the allnext, child, sibling,
  * or parent members of struct device_node.
@@ -75,80 +84,117 @@ int of_n_size_cells(struct device_node *np)
 }
 EXPORT_SYMBOL(of_n_size_cells);
 
-#if defined(CONFIG_OF_DYNAMIC)
-/**
- *     of_node_get - Increment refcount of a node
- *     @node:  Node to inc refcount, NULL is supported to
- *             simplify writing of callers
- *
- *     Returns node.
- */
-struct device_node *of_node_get(struct device_node *node)
+#ifndef CONFIG_OF_DYNAMIC
+static void of_node_release(struct kobject *kobj)
 {
-       if (node)
-               kref_get(&node->kref);
-       return node;
+       /* Without CONFIG_OF_DYNAMIC, no nodes get freed */
 }
-EXPORT_SYMBOL(of_node_get);
+#endif /* CONFIG_OF_DYNAMIC */
+
+struct kobj_type of_node_ktype = {
+       .release = of_node_release,
+};
 
-static inline struct device_node *kref_to_device_node(struct kref *kref)
+static ssize_t of_node_property_read(struct file *filp, struct kobject *kobj,
+                               struct bin_attribute *bin_attr, char *buf,
+                               loff_t offset, size_t count)
 {
-       return container_of(kref, struct device_node, kref);
+       struct property *pp = container_of(bin_attr, struct property, attr);
+       return memory_read_from_buffer(buf, count, &offset, pp->value, pp->length);
 }
 
-/**
- *     of_node_release - release a dynamically allocated node
- *     @kref:  kref element of the node to be released
- *
- *     In of_node_put() this function is passed to kref_put()
- *     as the destructor.
- */
-static void of_node_release(struct kref *kref)
+static const char *safe_name(struct kobject *kobj, const char *orig_name)
 {
-       struct device_node *node = kref_to_device_node(kref);
-       struct property *prop = node->properties;
+       const char *name = orig_name;
+       struct sysfs_dirent *kn;
+       int i = 0;
 
-       /* We should never be releasing nodes that haven't been detached. */
-       if (!of_node_check_flag(node, OF_DETACHED)) {
-               pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
-               dump_stack();
-               kref_init(&node->kref);
-               return;
+       /* don't be a hero. After 16 tries give up */
+       while (i < 16 && (kn = sysfs_get_dirent(kobj->sd, NULL, name))) {
+               sysfs_put(kn);
+               if (name != orig_name)
+                       kfree(name);
+               name = kasprintf(GFP_KERNEL, "%s#%i", orig_name, ++i);
        }
 
-       if (!of_node_check_flag(node, OF_DYNAMIC))
-               return;
+       if (name != orig_name)
+               pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
+                       kobject_name(kobj), name);
+       return name;
+}
 
-       while (prop) {
-               struct property *next = prop->next;
-               kfree(prop->name);
-               kfree(prop->value);
-               kfree(prop);
-               prop = next;
+int __of_add_property_sysfs(struct device_node *np, struct property *pp)
+{
+       int rc;
 
-               if (!prop) {
-                       prop = node->deadprops;
-                       node->deadprops = NULL;
-               }
+       /* Important: Don't leak passwords */
+       bool secure = strncmp(pp->name, "security-", 9) == 0;
+
+       if (!of_kset || !of_node_is_attached(np))
+               return 0;
+
+       sysfs_bin_attr_init(&pp->attr);
+       pp->attr.attr.name = safe_name(&np->kobj, pp->name);
+       pp->attr.attr.mode = secure ? S_IRUSR : S_IRUGO;
+       pp->attr.size = secure ? 0 : pp->length;
+       pp->attr.read = of_node_property_read;
+
+       rc = sysfs_create_bin_file(&np->kobj, &pp->attr);
+       WARN(rc, "error adding attribute %s to node %s\n", pp->name, np->full_name);
+       return rc;
+}
+
+int __of_attach_node_sysfs(struct device_node *np)
+{
+       const char *name;
+       struct property *pp;
+       int rc;
+
+       if (!of_kset)
+               return 0;
+
+       np->kobj.kset = of_kset;
+       if (!np->parent) {
+               /* Nodes without parents are new top level trees */
+               rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+       } else {
+               name = safe_name(&np->parent->kobj, kbasename(np->full_name));
+               if (!name || !name[0])
+                       return -EINVAL;
+
+               rc = kobject_add(&np->kobj, &np->parent->kobj, "%s", name);
        }
-       kfree(node->full_name);
-       kfree(node->data);
-       kfree(node);
+       if (rc)
+               return rc;
+
+       for_each_property_of_node(np, pp)
+               __of_add_property_sysfs(np, pp);
+
+       return 0;
 }
 
-/**
- *     of_node_put - Decrement refcount of a node
- *     @node:  Node to dec refcount, NULL is supported to
- *             simplify writing of callers
- *
- */
-void of_node_put(struct device_node *node)
+static int __init of_init(void)
 {
-       if (node)
-               kref_put(&node->kref, of_node_release);
+       struct device_node *np;
+
+       /* Create the kset, and register existing nodes */
+       mutex_lock(&of_mutex);
+       of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
+       if (!of_kset) {
+               mutex_unlock(&of_mutex);
+               return -ENOMEM;
+       }
+       for_each_of_allnodes(np)
+               __of_attach_node_sysfs(np);
+       mutex_unlock(&of_mutex);
+
+       /* Symlink in /proc as required by userspace ABI */
+       if (of_allnodes)
+               proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
+
+       return 0;
 }
-EXPORT_SYMBOL(of_node_put);
-#endif /* CONFIG_OF_DYNAMIC */
+core_initcall(of_init);
 
 static struct property *__of_find_property(const struct device_node *np,
                                           const char *name, int *lenp)
@@ -212,8 +258,8 @@ EXPORT_SYMBOL(of_find_all_nodes);
  * Find a property with a given name for a given node
  * and return the value.
  */
-static const void *__of_get_property(const struct device_node *np,
-                                    const char *name, int *lenp)
+const void *__of_get_property(const struct device_node *np,
+                             const char *name, int *lenp)
 {
        struct property *pp = __of_find_property(np, name, lenp);
 
@@ -846,6 +892,38 @@ struct device_node *of_find_node_by_phandle(phandle handle)
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
 
+/**
+ * of_property_count_elems_of_size - Count the number of elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ * @elem_size: size of the individual element
+ *
+ * Search for a property in a device node and count the number of elements of
+ * size elem_size in it. Returns number of elements on success, -EINVAL if the
+ * property does not exist or its length does not match a multiple of elem_size
+ * and -ENODATA if the property does not have a value.
+ */
+int of_property_count_elems_of_size(const struct device_node *np,
+                               const char *propname, int elem_size)
+{
+       struct property *prop = of_find_property(np, propname, NULL);
+
+       if (!prop)
+               return -EINVAL;
+       if (!prop->value)
+               return -ENODATA;
+
+       if (prop->length % elem_size != 0) {
+               pr_err("size of %s in node %s is not a multiple of %d\n",
+                      propname, np->full_name, elem_size);
+               return -EINVAL;
+       }
+
+       return prop->length / elem_size;
+}
+EXPORT_SYMBOL_GPL(of_property_count_elems_of_size);
+
 /**
  * of_find_property_value_of_size
  *
@@ -1151,65 +1229,10 @@ int of_property_read_string_helper(struct device_node *np, const char *propname,
 }
 EXPORT_SYMBOL_GPL(of_property_read_string_helper);
 
-/**
- * of_parse_phandle - Resolve a phandle property to a device_node pointer
- * @np: Pointer to device node holding phandle property
- * @phandle_name: Name of property holding a phandle value
- * @index: For properties holding a table of phandles, this is the index into
- *         the table
- *
- * Returns the device_node pointer with refcount incremented.  Use
- * of_node_put() on it when done.
- */
-struct device_node *of_parse_phandle(const struct device_node *np,
-                                    const char *phandle_name, int index)
-{
-       const __be32 *phandle;
-       int size;
-
-       phandle = of_get_property(np, phandle_name, &size);
-       if ((!phandle) || (size < sizeof(*phandle) * (index + 1)))
-               return NULL;
-
-       return of_find_node_by_phandle(be32_to_cpup(phandle + index));
-}
-EXPORT_SYMBOL(of_parse_phandle);
-
-/**
- * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
- * @np:                pointer to a device tree node containing a list
- * @list_name: property name that contains a list
- * @cells_name:        property name that specifies phandles' arguments count
- * @index:     index of a phandle to parse out
- * @out_args:  optional pointer to output arguments structure (will be filled)
- *
- * This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_args, on error returns appropriate
- * errno value.
- *
- * Caller is responsible to call of_node_put() on the returned out_args->node
- * pointer.
- *
- * Example:
- *
- * phandle1: node1 {
- *     #list-cells = <2>;
- * }
- *
- * phandle2: node2 {
- *     #list-cells = <1>;
- * }
- *
- * node3 {
- *     list = <&phandle1 1 2 &phandle2 3>;
- * }
- *
- * To get a device_node of the `node2' node you may call this:
- * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
- */
 static int __of_parse_phandle_with_args(const struct device_node *np,
                                        const char *list_name,
-                                       const char *cells_name, int index,
+                                       const char *cells_name,
+                                       int cell_count, int index,
                                        struct of_phandle_args *out_args)
 {
        const __be32 *list, *list_end;
@@ -1237,19 +1260,32 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
                if (phandle) {
                        /*
                         * Find the provider node and parse the #*-cells
-                        * property to determine the argument length
+                        * property to determine the argument length.
+                        *
+                        * This is not needed if the cell count is hard-coded
+                        * (i.e. cells_name not set, but cell_count is set),
+                        * except when we're going to return the found node
+                        * below.
                         */
-                       node = of_find_node_by_phandle(phandle);
-                       if (!node) {
-                               pr_err("%s: could not find phandle\n",
-                                        np->full_name);
-                               goto err;
+                       if (cells_name || cur_index == index) {
+                               node = of_find_node_by_phandle(phandle);
+                               if (!node) {
+                                       pr_err("%s: could not find phandle\n",
+                                               np->full_name);
+                                       goto err;
+                               }
                        }
-                       if (of_property_read_u32(node, cells_name, &count)) {
-                               pr_err("%s: could not get %s for %s\n",
-                                        np->full_name, cells_name,
-                                        node->full_name);
-                               goto err;
+
+                       if (cells_name) {
+                               if (of_property_read_u32(node, cells_name,
+                                                        &count)) {
+                                       pr_err("%s: could not get %s for %s\n",
+                                               np->full_name, cells_name,
+                                               node->full_name);
+                                       goto err;
+                               }
+                       } else {
+                               count = cell_count;
                        }
 
                        /*
@@ -1309,16 +1345,116 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
        return rc;
 }
 
+/**
+ * of_parse_phandle - Resolve a phandle property to a device_node pointer
+ * @np: Pointer to device node holding phandle property
+ * @phandle_name: Name of property holding a phandle value
+ * @index: For properties holding a table of phandles, this is the index into
+ *         the table
+ *
+ * Returns the device_node pointer with refcount incremented.  Use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_parse_phandle(const struct device_node *np,
+                                    const char *phandle_name, int index)
+{
+       struct of_phandle_args args;
+
+       if (index < 0)
+               return NULL;
+
+       if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
+                                        index, &args))
+               return NULL;
+
+       return args.np;
+}
+EXPORT_SYMBOL(of_parse_phandle);
+
+/**
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
+ * @np:                pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cells_name:        property name that specifies phandles' arguments count
+ * @index:     index of a phandle to parse out
+ * @out_args:  optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ *     #list-cells = <2>;
+ * }
+ *
+ * phandle2: node2 {
+ *     #list-cells = <1>;
+ * }
+ *
+ * node3 {
+ *     list = <&phandle1 1 2 &phandle2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
+ */
 int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
                                const char *cells_name, int index,
                                struct of_phandle_args *out_args)
 {
        if (index < 0)
                return -EINVAL;
-       return __of_parse_phandle_with_args(np, list_name, cells_name, index, out_args);
+       return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
+                                           index, out_args);
 }
 EXPORT_SYMBOL(of_parse_phandle_with_args);
 
+/**
+ * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
+ * @np:                pointer to a device tree node containing a list
+ * @list_name: property name that contains a list
+ * @cell_count: number of argument cells following the phandle
+ * @index:     index of a phandle to parse out
+ * @out_args:  optional pointer to output arguments structure (will be filled)
+ *
+ * This function is useful to parse lists of phandles and their arguments.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
+ *
+ * Example:
+ *
+ * phandle1: node1 {
+ * }
+ *
+ * phandle2: node2 {
+ * }
+ *
+ * node3 {
+ *     list = <&phandle1 0 2 &phandle2 2 3>;
+ * }
+ *
+ * To get a device_node of the `node2' node you may call this:
+ * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
+ */
+int of_parse_phandle_with_fixed_args(const struct device_node *np,
+                               const char *list_name, int cell_count,
+                               int index, struct of_phandle_args *out_args)
+{
+       if (index < 0)
+               return -EINVAL;
+       return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
+                                          index, out_args);
+}
+EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
+
 /**
  * of_count_phandle_with_args() - Find the number of phandles references in a property
  * @np:                pointer to a device tree node containing a list
@@ -1337,305 +1473,185 @@ EXPORT_SYMBOL(of_parse_phandle_with_args);
 int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
                                const char *cells_name)
 {
-       return __of_parse_phandle_with_args(np, list_name, cells_name, -1, NULL);
+       return __of_parse_phandle_with_args(np, list_name, cells_name, 0, -1,
+                                           NULL);
 }
 EXPORT_SYMBOL(of_count_phandle_with_args);
 
-#if defined(CONFIG_OF_DYNAMIC)
-static int of_property_notify(int action, struct device_node *np,
-                             struct property *prop)
-{
-       struct of_prop_reconfig pr;
-
-       pr.dn = np;
-       pr.prop = prop;
-       return of_reconfig_notify(action, &pr);
-}
-#else
-static int of_property_notify(int action, struct device_node *np,
-                             struct property *prop)
-{
-       return 0;
-}
-#endif
-
 /**
- * of_add_property - Add a property to a node
+ * __of_add_property - Add a property to a node without lock operations
  */
-int of_add_property(struct device_node *np, struct property *prop)
+int __of_add_property(struct device_node *np, struct property *prop)
 {
        struct property **next;
-       unsigned long flags;
-       int rc;
-
-       rc = of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop);
-       if (rc)
-               return rc;
 
        prop->next = NULL;
-       raw_spin_lock_irqsave(&devtree_lock, flags);
        next = &np->properties;
        while (*next) {
-               if (strcmp(prop->name, (*next)->name) == 0) {
+               if (strcmp(prop->name, (*next)->name) == 0)
                        /* duplicate ! don't insert it */
-                       raw_spin_unlock_irqrestore(&devtree_lock, flags);
-                       return -1;
-               }
+                       return -EEXIST;
+
                next = &(*next)->next;
        }
        *next = prop;
-       raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-#ifdef CONFIG_PROC_DEVICETREE
-       /* try to add to proc as well if it was initialized */
-       if (np->pde)
-               proc_device_tree_add_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
 
        return 0;
 }
 
 /**
- * of_remove_property - Remove a property from a node.
- *
- * Note that we don't actually remove it, since we have given out
- * who-knows-how-many pointers to the data using get-property.
- * Instead we just move the property to the "dead properties"
- * list, so it won't be found any more.
+ * of_add_property - Add a property to a node
  */
-int of_remove_property(struct device_node *np, struct property *prop)
+int of_add_property(struct device_node *np, struct property *prop)
 {
-       struct property **next;
        unsigned long flags;
-       int found = 0;
        int rc;
 
-       rc = of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop);
-       if (rc)
-               return rc;
+       mutex_lock(&of_mutex);
 
        raw_spin_lock_irqsave(&devtree_lock, flags);
-       next = &np->properties;
-       while (*next) {
-               if (*next == prop) {
-                       /* found the node */
-                       *next = prop->next;
-                       prop->next = np->deadprops;
-                       np->deadprops = prop;
-                       found = 1;
-                       break;
-               }
-               next = &(*next)->next;
-       }
+       rc = __of_add_property(np, prop);
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
-       if (!found)
-               return -ENODEV;
+       if (!rc)
+               __of_add_property_sysfs(np, prop);
 
-#ifdef CONFIG_PROC_DEVICETREE
-       /* try to remove the proc node as well */
-       if (np->pde)
-               proc_device_tree_remove_prop(np->pde, prop);
-#endif /* CONFIG_PROC_DEVICETREE */
+       mutex_unlock(&of_mutex);
 
-       return 0;
+       if (!rc)
+               of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
+
+       return rc;
 }
 
-/*
- * of_update_property - Update a property in a node, if the property does
- * not exist, add it.
- *
- * Note that we don't actually remove it, since we have given out
- * who-knows-how-many pointers to the data using get-property.
- * Instead we just move the property to the "dead properties" list,
- * and add the new property to the property list
- */
-int of_update_property(struct device_node *np, struct property *newprop)
+int __of_remove_property(struct device_node *np, struct property *prop)
 {
-       struct property **next, *oldprop;
-       unsigned long flags;
-       int rc, found = 0;
-
-       rc = of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop);
-       if (rc)
-               return rc;
-
-       if (!newprop->name)
-               return -EINVAL;
-
-       oldprop = of_find_property(np, newprop->name, NULL);
-       if (!oldprop)
-               return of_add_property(np, newprop);
+       struct property **next;
 
-       raw_spin_lock_irqsave(&devtree_lock, flags);
-       next = &np->properties;
-       while (*next) {
-               if (*next == oldprop) {
-                       /* found the node */
-                       newprop->next = oldprop->next;
-                       *next = newprop;
-                       oldprop->next = np->deadprops;
-                       np->deadprops = oldprop;
-                       found = 1;
+       for (next = &np->properties; *next; next = &(*next)->next) {
+               if (*next == prop)
                        break;
-               }
-               next = &(*next)->next;
        }
-       raw_spin_unlock_irqrestore(&devtree_lock, flags);
-
-       if (!found)
+       if (*next == NULL)
                return -ENODEV;
 
-#ifdef CONFIG_PROC_DEVICETREE
-       /* try to add to proc as well if it was initialized */
-       if (np->pde)
-               proc_device_tree_update_prop(np->pde, newprop, oldprop);
-#endif /* CONFIG_PROC_DEVICETREE */
+       /* found the node */
+       *next = prop->next;
+       prop->next = np->deadprops;
+       np->deadprops = prop;
 
        return 0;
 }
 
-#if defined(CONFIG_OF_DYNAMIC)
-/*
- * Support for dynamic device trees.
- *
- * On some platforms, the device tree can be manipulated at runtime.
- * The routines in this section support adding, removing and changing
- * device tree nodes.
- */
-
-static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
-
-int of_reconfig_notifier_register(struct notifier_block *nb)
-{
-       return blocking_notifier_chain_register(&of_reconfig_chain, nb);
-}
-EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
-
-int of_reconfig_notifier_unregister(struct notifier_block *nb)
-{
-       return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
-}
-EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
-
-int of_reconfig_notify(unsigned long action, void *p)
-{
-       int rc;
-
-       rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
-       return notifier_to_errno(rc);
-}
-
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_add_proc_dt_entry(struct device_node *dn)
-{
-       struct proc_dir_entry *ent;
-
-       ent = proc_mkdir(strrchr(dn->full_name, '/') + 1, dn->parent->pde);
-       if (ent)
-               proc_device_tree_add_node(dn, ent);
-}
-#else
-static void of_add_proc_dt_entry(struct device_node *dn)
+void __of_remove_property_sysfs(struct device_node *np, struct property *prop)
 {
-       return;
+       /* at early boot, bail here and defer setup to of_init() */
+       if (of_kset && of_node_is_attached(np))
+               sysfs_remove_bin_file(&np->kobj, &prop->attr);
 }
-#endif
 
 /**
- * of_attach_node - Plug a device node into the tree and global list.
+ * of_remove_property - Remove a property from a node.
+ *
+ * Note that we don't actually remove it, since we have given out
+ * who-knows-how-many pointers to the data using get-property.
+ * Instead we just move the property to the "dead properties"
+ * list, so it won't be found any more.
  */
-int of_attach_node(struct device_node *np)
+int of_remove_property(struct device_node *np, struct property *prop)
 {
        unsigned long flags;
        int rc;
 
-       rc = of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, np);
-       if (rc)
-               return rc;
+       mutex_lock(&of_mutex);
 
        raw_spin_lock_irqsave(&devtree_lock, flags);
-       np->sibling = np->parent->child;
-       np->allnext = of_allnodes;
-       np->parent->child = np;
-       of_allnodes = np;
+       rc = __of_remove_property(np, prop);
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
-       of_add_proc_dt_entry(np);
-       return 0;
+       if (!rc)
+               __of_remove_property_sysfs(np, prop);
+
+       mutex_unlock(&of_mutex);
+
+       if (!rc)
+               of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
+
+       return rc;
 }
 
-#ifdef CONFIG_PROC_DEVICETREE
-static void of_remove_proc_dt_entry(struct device_node *dn)
+int __of_update_property(struct device_node *np, struct property *newprop,
+               struct property **oldpropp)
 {
-       proc_remove(dn->pde);
+       struct property **next, *oldprop;
+
+       for (next = &np->properties; *next; next = &(*next)->next) {
+               if (of_prop_cmp((*next)->name, newprop->name) == 0)
+                       break;
+       }
+       *oldpropp = oldprop = *next;
+
+       if (oldprop) {
+               /* replace the node */
+               newprop->next = oldprop->next;
+               *next = newprop;
+               oldprop->next = np->deadprops;
+               np->deadprops = oldprop;
+       } else {
+               /* new node */
+               newprop->next = NULL;
+               *next = newprop;
+       }
+
+       return 0;
 }
-#else
-static void of_remove_proc_dt_entry(struct device_node *dn)
+
+void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
+               struct property *oldprop)
 {
-       return;
+       /* At early boot, bail out and defer setup to of_init() */
+       if (!of_kset)
+               return;
+
+       if (oldprop)
+               sysfs_remove_bin_file(&np->kobj, &oldprop->attr);
+       __of_add_property_sysfs(np, newprop);
 }
-#endif
 
-/**
- * of_detach_node - "Unplug" a node from the device tree.
+/*
+ * of_update_property - Update a property in a node, if the property does
+ * not exist, add it.
  *
- * The caller must hold a reference to the node.  The memory associated with
- * the node is not freed until its refcount goes to zero.
+ * Note that we don't actually remove it, since we have given out
+ * who-knows-how-many pointers to the data using get-property.
+ * Instead we just move the property to the "dead properties" list,
+ * and add the new property to the property list
  */
-int of_detach_node(struct device_node *np)
+int of_update_property(struct device_node *np, struct property *newprop)
 {
-       struct device_node *parent;
+       struct property *oldprop;
        unsigned long flags;
-       int rc = 0;
-
-       rc = of_reconfig_notify(OF_RECONFIG_DETACH_NODE, np);
-       if (rc)
-               return rc;
+       int rc;
 
-       raw_spin_lock_irqsave(&devtree_lock, flags);
+       if (!newprop->name)
+               return -EINVAL;
 
-       if (of_node_check_flag(np, OF_DETACHED)) {
-               /* someone already detached it */
-               raw_spin_unlock_irqrestore(&devtree_lock, flags);
-               return rc;
-       }
+       mutex_lock(&of_mutex);
 
-       parent = np->parent;
-       if (!parent) {
-               raw_spin_unlock_irqrestore(&devtree_lock, flags);
-               return rc;
-       }
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+       rc = __of_update_property(np, newprop, &oldprop);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
 
-       if (of_allnodes == np)
-               of_allnodes = np->allnext;
-       else {
-               struct device_node *prev;
-               for (prev = of_allnodes;
-                    prev->allnext != np;
-                    prev = prev->allnext)
-                       ;
-               prev->allnext = np->allnext;
-       }
+       if (!rc)
+               __of_update_property_sysfs(np, newprop, oldprop);
 
-       if (parent->child == np)
-               parent->child = np->sibling;
-       else {
-               struct device_node *prevsib;
-               for (prevsib = np->parent->child;
-                    prevsib->sibling != np;
-                    prevsib = prevsib->sibling)
-                       ;
-               prevsib->sibling = np->sibling;
-       }
+       mutex_unlock(&of_mutex);
 
-       of_node_set_flag(np, OF_DETACHED);
-       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+       if (!rc)
+               of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
 
-       of_remove_proc_dt_entry(np);
        return rc;
 }
-#endif /* defined(CONFIG_OF_DYNAMIC) */
 
 static void of_alias_add(struct alias_prop *ap, struct device_node *np,
                         int id, const char *stem, int stem_len)
@@ -1728,7 +1744,7 @@ int of_alias_get_id(struct device_node *np, const char *stem)
        struct alias_prop *app;
        int id = -ENODEV;
 
-       mutex_lock(&of_aliases_mutex);
+       mutex_lock(&of_mutex);
        list_for_each_entry(app, &aliases_lookup, link) {
                if (strcmp(app->stem, stem) != 0)
                        continue;
@@ -1738,7 +1754,7 @@ int of_alias_get_id(struct device_node *np, const char *stem)
                        break;
                }
        }
-       mutex_unlock(&of_aliases_mutex);
+       mutex_unlock(&of_mutex);
 
        return id;
 }
index f685e55e0717f611bf479f95d790675d6bb77bf5..4f942b56fab95d117eb7c2aff94f3e8528361032 100644 (file)
@@ -157,7 +157,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
        add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen);
 
        seen = 0;
-       mutex_lock(&of_aliases_mutex);
+       mutex_lock(&of_mutex);
        list_for_each_entry(app, &aliases_lookup, link) {
                if (dev->of_node == app->np) {
                        add_uevent_var(env, "OF_ALIAS_%d=%s", seen,
@@ -165,7 +165,7 @@ void of_device_uevent(struct device *dev, struct kobj_uevent_env *env)
                        seen++;
                }
        }
-       mutex_unlock(&of_aliases_mutex);
+       mutex_unlock(&of_mutex);
 }
 
 int of_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
new file mode 100644 (file)
index 0000000..16524c8
--- /dev/null
@@ -0,0 +1,791 @@
+/*
+ * Support for dynamic device trees.
+ *
+ * On some platforms, the device tree can be manipulated at runtime.
+ * The routines in this section support adding, removing and changing
+ * device tree nodes.
+ */
+
+#include <linux/of.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+
+#include "of_private.h"
+
+/**
+ * of_node_get() - Increment refcount of a node
+ * @node:      Node to inc refcount, NULL is supported to simplify writing of
+ *             callers
+ *
+ * Returns node.
+ */
+struct device_node *of_node_get(struct device_node *node)
+{
+       if (node)
+               kobject_get(&node->kobj);
+       return node;
+}
+EXPORT_SYMBOL(of_node_get);
+
+/**
+ * of_node_put() - Decrement refcount of a node
+ * @node:      Node to dec refcount, NULL is supported to simplify writing of
+ *             callers
+ */
+void of_node_put(struct device_node *node)
+{
+       if (node)
+               kobject_put(&node->kobj);
+}
+EXPORT_SYMBOL(of_node_put);
+
+void __of_detach_node_sysfs(struct device_node *np)
+{
+       struct property *pp;
+
+       BUG_ON(!of_node_is_initialized(np));
+       if (!of_kset)
+               return;
+
+       /* only remove properties if on sysfs */
+       if (of_node_is_attached(np)) {
+               for_each_property_of_node(np, pp)
+                       sysfs_remove_bin_file(&np->kobj, &pp->attr);
+               kobject_del(&np->kobj);
+       }
+
+       /* finally remove the kobj_init ref */
+       of_node_put(np);
+}
+
+static BLOCKING_NOTIFIER_HEAD(of_reconfig_chain);
+
+int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_register(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_register);
+
+int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+       return blocking_notifier_chain_unregister(&of_reconfig_chain, nb);
+}
+EXPORT_SYMBOL_GPL(of_reconfig_notifier_unregister);
+
+#ifdef DEBUG
+const char *action_names[] = {
+       [OF_RECONFIG_ATTACH_NODE] = "ATTACH_NODE",
+       [OF_RECONFIG_DETACH_NODE] = "DETACH_NODE",
+       [OF_RECONFIG_ADD_PROPERTY] = "ADD_PROPERTY",
+       [OF_RECONFIG_REMOVE_PROPERTY] = "REMOVE_PROPERTY",
+       [OF_RECONFIG_UPDATE_PROPERTY] = "UPDATE_PROPERTY",
+};
+#endif
+
+int of_reconfig_notify(unsigned long action, struct of_reconfig_data *p)
+{
+       int rc;
+#ifdef DEBUG
+       struct of_reconfig_data *pr = p;
+
+       switch (action) {
+       case OF_RECONFIG_ATTACH_NODE:
+       case OF_RECONFIG_DETACH_NODE:
+               pr_debug("of/notify %-15s %s\n", action_names[action],
+                       pr->dn->full_name);
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+       case OF_RECONFIG_REMOVE_PROPERTY:
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               pr_debug("of/notify %-15s %s:%s\n", action_names[action],
+                       pr->dn->full_name, pr->prop->name);
+               break;
+
+       }
+#endif
+       rc = blocking_notifier_call_chain(&of_reconfig_chain, action, p);
+       return notifier_to_errno(rc);
+}
+
+/*
+ * of_reconfig_get_state_change()      - Returns new state of device
+ * @action     - action of the of notifier
+ * @pr         - of reconfiguration data of the notifier
+ *
+ * Returns the new state of a device based on the notifier used.
+ * Returns 0 on device going from enabled to disabled, 1 on device
+ * going from disabled to enabled and -1 on no change.
+ */
+int of_reconfig_get_state_change(unsigned long action, struct of_reconfig_data *pr)
+{
+       struct property *prop, *old_prop = NULL;
+       int is_status, status_state, old_status_state, prev_state, new_state;
+
+       /* figure out if a device should be created or destroyed */
+       switch (action) {
+       case OF_RECONFIG_ATTACH_NODE:
+       case OF_RECONFIG_DETACH_NODE:
+               prop = of_find_property(pr->dn, "status", NULL);
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+       case OF_RECONFIG_REMOVE_PROPERTY:
+               prop = pr->prop;
+               break;
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               prop = pr->prop;
+               old_prop = pr->old_prop;
+               break;
+       default:
+               return OF_RECONFIG_NO_CHANGE;
+       }
+
+       is_status = 0;
+       status_state = -1;
+       old_status_state = -1;
+       prev_state = -1;
+       new_state = -1;
+
+       if (prop && !strcmp(prop->name, "status")) {
+               is_status = 1;
+               status_state = !strcmp(prop->value, "okay") ||
+                              !strcmp(prop->value, "ok");
+               if (old_prop)
+                       old_status_state = !strcmp(old_prop->value, "okay") ||
+                                          !strcmp(old_prop->value, "ok");
+       }
+
+       switch (action) {
+       case OF_RECONFIG_ATTACH_NODE:
+               prev_state = 0;
+               /* -1 & 0 status either missing or okay */
+               new_state = status_state != 0;
+               break;
+       case OF_RECONFIG_DETACH_NODE:
+               /* -1 & 0 status either missing or okay */
+               prev_state = status_state != 0;
+               new_state = 0;
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+               if (is_status) {
+                       /* no status property -> enabled (legacy) */
+                       prev_state = 1;
+                       new_state = status_state;
+               }
+               break;
+       case OF_RECONFIG_REMOVE_PROPERTY:
+               if (is_status) {
+                       prev_state = status_state;
+                       /* no status property -> enabled (legacy) */
+                       new_state = 1;
+               }
+               break;
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               if (is_status) {
+                       prev_state = old_status_state != 0;
+                       new_state = status_state != 0;
+               }
+               break;
+       }
+
+       if (prev_state == new_state)
+               return OF_RECONFIG_NO_CHANGE;
+
+       return new_state ? OF_RECONFIG_CHANGE_ADD : OF_RECONFIG_CHANGE_REMOVE;
+}
+EXPORT_SYMBOL_GPL(of_reconfig_get_state_change);
+
+int of_property_notify(int action, struct device_node *np,
+                      struct property *prop, struct property *oldprop)
+{
+       struct of_reconfig_data pr;
+
+       /* only call notifiers if the node is attached */
+       if (!of_node_is_attached(np))
+               return 0;
+
+       pr.dn = np;
+       pr.prop = prop;
+       pr.old_prop = oldprop;
+       return of_reconfig_notify(action, &pr);
+}
+
+void __of_attach_node(struct device_node *np)
+{
+       const __be32 *phandle;
+       int sz;
+
+       np->name = __of_get_property(np, "name", NULL) ? : "<NULL>";
+       np->type = __of_get_property(np, "device_type", NULL) ? : "<NULL>";
+
+       phandle = __of_get_property(np, "phandle", &sz);
+       if (!phandle)
+               phandle = __of_get_property(np, "linux,phandle", &sz);
+       if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
+               phandle = __of_get_property(np, "ibm,phandle", &sz);
+       np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
+
+       np->child = NULL;
+       np->sibling = np->parent->child;
+       np->allnext = np->parent->allnext;
+       np->parent->allnext = np;
+       np->parent->child = np;
+       of_node_clear_flag(np, OF_DETACHED);
+}
+
+/**
+ * of_attach_node() - Plug a device node into the tree and global list.
+ */
+int of_attach_node(struct device_node *np)
+{
+       struct of_reconfig_data rd;
+       unsigned long flags;
+
+       memset(&rd, 0, sizeof(rd));
+       rd.dn = np;
+
+       mutex_lock(&of_mutex);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+       __of_attach_node(np);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+       __of_attach_node_sysfs(np);
+       mutex_unlock(&of_mutex);
+
+       of_reconfig_notify(OF_RECONFIG_ATTACH_NODE, &rd);
+
+       return 0;
+}
+
+void __of_detach_node(struct device_node *np)
+{
+       struct device_node *parent;
+
+       if (WARN_ON(of_node_check_flag(np, OF_DETACHED)))
+               return;
+
+       parent = np->parent;
+       if (WARN_ON(!parent))
+               return;
+
+       if (of_allnodes == np)
+               of_allnodes = np->allnext;
+       else {
+               struct device_node *prev;
+               for (prev = of_allnodes;
+                    prev->allnext != np;
+                    prev = prev->allnext)
+                       ;
+               prev->allnext = np->allnext;
+       }
+
+       if (parent->child == np)
+               parent->child = np->sibling;
+       else {
+               struct device_node *prevsib;
+               for (prevsib = np->parent->child;
+                    prevsib->sibling != np;
+                    prevsib = prevsib->sibling)
+                       ;
+               prevsib->sibling = np->sibling;
+       }
+
+       of_node_set_flag(np, OF_DETACHED);
+}
+
+/**
+ * of_detach_node() - "Unplug" a node from the device tree.
+ *
+ * The caller must hold a reference to the node.  The memory associated with
+ * the node is not freed until its refcount goes to zero.
+ */
+int of_detach_node(struct device_node *np)
+{
+       struct of_reconfig_data rd;
+       unsigned long flags;
+       int rc = 0;
+
+       memset(&rd, 0, sizeof(rd));
+       rd.dn = np;
+
+       mutex_lock(&of_mutex);
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+       __of_detach_node(np);
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+       __of_detach_node_sysfs(np);
+       mutex_unlock(&of_mutex);
+
+       of_reconfig_notify(OF_RECONFIG_DETACH_NODE, &rd);
+
+       return rc;
+}
+
+/**
+ * of_node_release() - release a dynamically allocated node
+ * @kobj: kernel object (embedded in the node) being released
+ *
+ * In of_node_put() this function is passed to kobject_put() as the release
+ * callback, invoked when the node's refcount drops to zero.
+ */
+void of_node_release(struct kobject *kobj)
+{
+       struct device_node *node = kobj_to_device_node(kobj);
+       struct property *prop = node->properties;
+
+       /* We should never be releasing nodes that haven't been detached. */
+       if (!of_node_check_flag(node, OF_DETACHED)) {
+               pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name);
+               dump_stack();
+               return;
+       }
+
+       if (!of_node_check_flag(node, OF_DYNAMIC))
+               return;
+
+       while (prop) {
+               struct property *next = prop->next;
+               kfree(prop->name);
+               kfree(prop->value);
+               kfree(prop);
+               prop = next;
+
+               if (!prop) {
+                       prop = node->deadprops;
+                       node->deadprops = NULL;
+               }
+       }
+       kfree(node->full_name);
+       kfree(node->data);
+       kfree(node);
+}
+
+/**
+ * __of_prop_dup - Copy a property dynamically.
+ * @prop:      Property to copy
+ * @allocflags:        Allocation flags (typically pass GFP_KERNEL)
+ *
+ * Copy a property by dynamically allocating the memory of both the
+ * property structure and the property name & contents. The property's
+ * flags have the OF_DYNAMIC bit set so that we can differentiate between
+ * dynamically allocated properties and not.
+ * Returns the newly allocated property or NULL on out of memory error.
+ */
+struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags)
+{
+       struct property *new;
+
+       new = kzalloc(sizeof(*new), allocflags);
+       if (!new)
+               return NULL;
+
+       /*
+        * NOTE: There is no check for zero length value.
+        * In case of a boolean property, this will allocate a value
+        * of zero bytes. We do this to work around the use
+        * of of_get_property() calls on boolean values.
+        */
+       new->name = kstrdup(prop->name, allocflags);
+       new->value = kmemdup(prop->value, prop->length, allocflags);
+       new->length = prop->length;
+       if (!new->name || !new->value)
+               goto err_free;
+
+       /* mark the property as dynamic */
+       of_property_set_flag(new, OF_DYNAMIC);
+
+       return new;
+
+ err_free:
+       kfree(new->name);
+       kfree(new->value);
+       kfree(new);
+       return NULL;
+}
+
+/**
+ * __of_node_dup() - Duplicate or create an empty device node dynamically.
+ * @fmt: Format string (plus vargs) for new full name of the device node
+ *
+ * Create a device tree node, either by duplicating an empty node or by allocating
+ * an empty one suitable for further modification.  The node data are
+ * dynamically allocated and all the node flags have the OF_DYNAMIC &
+ * OF_DETACHED bits set. Returns the newly allocated node or NULL on out of
+ * memory error.
+ */
+struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...)
+{
+       va_list vargs;
+       struct device_node *node;
+
+       node = kzalloc(sizeof(*node), GFP_KERNEL);
+       if (!node)
+               return NULL;
+       va_start(vargs, fmt);
+       node->full_name = kvasprintf(GFP_KERNEL, fmt, vargs);
+       va_end(vargs);
+       if (!node->full_name) {
+               kfree(node);
+               return NULL;
+       }
+
+       of_node_set_flag(node, OF_DYNAMIC);
+       of_node_set_flag(node, OF_DETACHED);
+       of_node_init(node);
+
+       /* Iterate over and duplicate all properties */
+       if (np) {
+               struct property *pp, *new_pp;
+               for_each_property_of_node(np, pp) {
+                       new_pp = __of_prop_dup(pp, GFP_KERNEL);
+                       if (!new_pp)
+                               goto err_prop;
+                       if (__of_add_property(node, new_pp)) {
+                               kfree(new_pp->name);
+                               kfree(new_pp->value);
+                               kfree(new_pp);
+                               goto err_prop;
+                       }
+               }
+       }
+       return node;
+
+ err_prop:
+       of_node_put(node); /* Frees the node and properties */
+       return NULL;
+}
+
+static void __of_changeset_entry_destroy(struct of_changeset_entry *ce)
+{
+       of_node_put(ce->np);
+       list_del(&ce->node);
+       kfree(ce);
+}
+
+#ifdef DEBUG
+static void __of_changeset_entry_dump(struct of_changeset_entry *ce)
+{
+       switch (ce->action) {
+       case OF_RECONFIG_ADD_PROPERTY:
+       case OF_RECONFIG_REMOVE_PROPERTY:
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               pr_debug("of/cset<%p> %-15s %s/%s\n", ce, action_names[ce->action],
+                       ce->np->full_name, ce->prop->name);
+               break;
+       case OF_RECONFIG_ATTACH_NODE:
+       case OF_RECONFIG_DETACH_NODE:
+               pr_debug("of/cset<%p> %-15s %s\n", ce, action_names[ce->action],
+                       ce->np->full_name);
+               break;
+       }
+}
+#else
+static inline void __of_changeset_entry_dump(struct of_changeset_entry *ce)
+{
+       /* empty */
+}
+#endif
+
+static void __of_changeset_entry_invert(struct of_changeset_entry *ce,
+                                         struct of_changeset_entry *rce)
+{
+       memcpy(rce, ce, sizeof(*rce));
+
+       switch (ce->action) {
+       case OF_RECONFIG_ATTACH_NODE:
+               rce->action = OF_RECONFIG_DETACH_NODE;
+               break;
+       case OF_RECONFIG_DETACH_NODE:
+               rce->action = OF_RECONFIG_ATTACH_NODE;
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+               rce->action = OF_RECONFIG_REMOVE_PROPERTY;
+               break;
+       case OF_RECONFIG_REMOVE_PROPERTY:
+               rce->action = OF_RECONFIG_ADD_PROPERTY;
+               break;
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               rce->old_prop = ce->prop;
+               rce->prop = ce->old_prop;
+               break;
+       }
+}
+
+static void __of_changeset_entry_notify(struct of_changeset_entry *ce, bool revert)
+{
+       struct of_reconfig_data rd;
+       struct of_changeset_entry ce_inverted;
+       int ret;
+
+       if (revert) {
+               __of_changeset_entry_invert(ce, &ce_inverted);
+               ce = &ce_inverted;
+       }
+
+       switch (ce->action) {
+       case OF_RECONFIG_ATTACH_NODE:
+       case OF_RECONFIG_DETACH_NODE:
+               memset(&rd, 0, sizeof(rd));
+               rd.dn = ce->np;
+               ret = of_reconfig_notify(ce->action, &rd);
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+       case OF_RECONFIG_REMOVE_PROPERTY:
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               ret = of_property_notify(ce->action, ce->np, ce->prop, ce->old_prop);
+               break;
+       default:
+               pr_err("%s: invalid devicetree changeset action: %i\n", __func__,
+                       (int)ce->action);
+               return;
+       }
+
+       if (ret)
+               pr_err("%s: notifier error @%s\n", __func__, ce->np->full_name);
+}
+
+static int __of_changeset_entry_apply(struct of_changeset_entry *ce)
+{
+       struct property *old_prop, **propp;
+       unsigned long flags;
+       int ret = 0;
+
+       __of_changeset_entry_dump(ce);
+
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+       switch (ce->action) {
+       case OF_RECONFIG_ATTACH_NODE:
+               __of_attach_node(ce->np);
+               break;
+       case OF_RECONFIG_DETACH_NODE:
+               __of_detach_node(ce->np);
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+               /* If the property is in deadprops then it must be removed */
+               for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) {
+                       if (*propp == ce->prop) {
+                               *propp = ce->prop->next;
+                               ce->prop->next = NULL;
+                               break;
+                       }
+               }
+
+               ret = __of_add_property(ce->np, ce->prop);
+               if (ret) {
+                       pr_err("%s: add_property failed @%s/%s\n",
+                               __func__, ce->np->full_name,
+                               ce->prop->name);
+                       break;
+               }
+               break;
+       case OF_RECONFIG_REMOVE_PROPERTY:
+               ret = __of_remove_property(ce->np, ce->prop);
+               if (ret) {
+                       pr_err("%s: remove_property failed @%s/%s\n",
+                               __func__, ce->np->full_name,
+                               ce->prop->name);
+                       break;
+               }
+               break;
+
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               /* If the property is in deadprops then it must be removed */
+               for (propp = &ce->np->deadprops; *propp; propp = &(*propp)->next) {
+                       if (*propp == ce->prop) {
+                               *propp = ce->prop->next;
+                               ce->prop->next = NULL;
+                               break;
+                       }
+               }
+
+               ret = __of_update_property(ce->np, ce->prop, &old_prop);
+               if (ret) {
+                       pr_err("%s: update_property failed @%s/%s\n",
+                               __func__, ce->np->full_name,
+                               ce->prop->name);
+                       break;
+               }
+               break;
+       default:
+               ret = -EINVAL;
+       }
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+       if (ret)
+               return ret;
+
+       switch (ce->action) {
+       case OF_RECONFIG_ATTACH_NODE:
+               __of_attach_node_sysfs(ce->np);
+               break;
+       case OF_RECONFIG_DETACH_NODE:
+               __of_detach_node_sysfs(ce->np);
+               break;
+       case OF_RECONFIG_ADD_PROPERTY:
+               /* ignore duplicate names */
+               __of_add_property_sysfs(ce->np, ce->prop);
+               break;
+       case OF_RECONFIG_REMOVE_PROPERTY:
+               __of_remove_property_sysfs(ce->np, ce->prop);
+               break;
+       case OF_RECONFIG_UPDATE_PROPERTY:
+               __of_update_property_sysfs(ce->np, ce->prop, ce->old_prop);
+               break;
+       }
+
+       return 0;
+}
+
+static inline int __of_changeset_entry_revert(struct of_changeset_entry *ce)
+{
+       struct of_changeset_entry ce_inverted;
+
+       __of_changeset_entry_invert(ce, &ce_inverted);
+       return __of_changeset_entry_apply(&ce_inverted);
+}
+
+/**
+ * of_changeset_init - Initialize a changeset for use
+ *
+ * @ocs:       changeset pointer
+ *
+ * Initialize a changeset structure
+ */
+void of_changeset_init(struct of_changeset *ocs)
+{
+       memset(ocs, 0, sizeof(*ocs));
+       INIT_LIST_HEAD(&ocs->entries);
+}
+
+/**
+ * of_changeset_destroy - Destroy a changeset
+ *
+ * @ocs:       changeset pointer
+ *
+ * Destroys a changeset. Note that if a changeset is applied,
+ * its changes to the tree cannot be reverted.
+ */
+void of_changeset_destroy(struct of_changeset *ocs)
+{
+       struct of_changeset_entry *ce, *cen;
+
+       list_for_each_entry_safe_reverse(ce, cen, &ocs->entries, node)
+               __of_changeset_entry_destroy(ce);
+}
+
+/**
+ * of_changeset_apply - Applies a changeset
+ *
+ * @ocs:       changeset pointer
+ *
+ * Applies a changeset to the live tree.
+ * Any side-effects of live tree state changes are applied here on
+ * success, like creation/destruction of devices and side-effects
+ * like creation of sysfs properties and directories.
+ * Returns 0 on success, a negative error value in case of an error.
+ * On error the partially applied effects are reverted.
+ */
+int of_changeset_apply(struct of_changeset *ocs)
+{
+       struct of_changeset_entry *ce;
+       int ret;
+
+       /* perform the rest of the work */
+       pr_debug("of_changeset: applying...\n");
+       list_for_each_entry(ce, &ocs->entries, node) {
+               ret = __of_changeset_entry_apply(ce);
+               if (ret) {
+                       pr_err("%s: Error applying changeset (%d)\n", __func__, ret);
+                       list_for_each_entry_continue_reverse(ce, &ocs->entries, node)
+                               __of_changeset_entry_revert(ce);
+                       return ret;
+               }
+       }
+       pr_debug("of_changeset: applied, emitting notifiers.\n");
+
+       /* drop the global lock while emitting notifiers */
+       mutex_unlock(&of_mutex);
+       list_for_each_entry(ce, &ocs->entries, node)
+               __of_changeset_entry_notify(ce, 0);
+       mutex_lock(&of_mutex);
+       pr_debug("of_changeset: notifiers sent.\n");
+
+       return 0;
+}
+
+/**
+ * of_changeset_revert - Reverts an applied changeset
+ *
+ * @ocs:       changeset pointer
+ *
+ * Reverts a changeset returning the state of the tree to what it
+ * was before the application.
+ * Any side-effects like creation/destruction of devices and
+ * removal of sysfs properties and directories are applied.
+ * Returns 0 on success, a negative error value in case of an error.
+ */
+int of_changeset_revert(struct of_changeset *ocs)
+{
+       struct of_changeset_entry *ce;
+       int ret;
+
+       pr_debug("of_changeset: reverting...\n");
+       list_for_each_entry_reverse(ce, &ocs->entries, node) {
+               ret = __of_changeset_entry_revert(ce);
+               if (ret) {
+                       pr_err("%s: Error reverting changeset (%d)\n", __func__, ret);
+                       list_for_each_entry_continue(ce, &ocs->entries, node)
+                               __of_changeset_entry_apply(ce);
+                       return ret;
+               }
+       }
+       pr_debug("of_changeset: reverted, emitting notifiers.\n");
+
+       /* drop the global lock while emitting notifiers */
+       mutex_unlock(&of_mutex);
+       list_for_each_entry_reverse(ce, &ocs->entries, node)
+               __of_changeset_entry_notify(ce, 1);
+       mutex_lock(&of_mutex);
+       pr_debug("of_changeset: notifiers sent.\n");
+
+       return 0;
+}
+
+/**
+ * of_changeset_action - Perform a changeset action
+ *
+ * @ocs:       changeset pointer
+ * @action:    action to perform
+ * @np:                Pointer to device node
+ * @prop:      Pointer to property
+ *
+ * On action being one of:
+ * + OF_RECONFIG_ATTACH_NODE
+ * + OF_RECONFIG_DETACH_NODE,
+ * + OF_RECONFIG_ADD_PROPERTY
+ * + OF_RECONFIG_REMOVE_PROPERTY,
+ * + OF_RECONFIG_UPDATE_PROPERTY
+ * Returns 0 on success, a negative error value in case of an error.
+ */
+int of_changeset_action(struct of_changeset *ocs, unsigned long action,
+               struct device_node *np, struct property *prop)
+{
+       struct of_changeset_entry *ce;
+
+       ce = kzalloc(sizeof(*ce), GFP_KERNEL);
+       if (!ce) {
+               pr_err("%s: Failed to allocate\n", __func__);
+               return -ENOMEM;
+       }
+       /* get a reference to the node */
+       ce->action = action;
+       ce->np = of_node_get(np);
+       ce->prop = prop;
+
+       if (action == OF_RECONFIG_UPDATE_PROPERTY && prop)
+               ce->old_prop = of_find_property(np, prop->name, NULL);
+
+       /* add it to the list */
+       list_add_tail(&ce->node, &ocs->entries);
+       return 0;
+}
index 26d7060c17bfb49cd9069871609ad092df3d2e7b..34f41126873ea56cd3192ca746c156baef8ce9ad 100644 (file)
@@ -157,7 +157,7 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
                                __alignof__(struct device_node));
        if (allnextpp) {
                char *fn;
-               memset(np, 0, sizeof(*np));
+               of_node_init(np);
                np->full_name = fn = ((char *)np) + sizeof(*np);
                if (new_format) {
                        /* rebuild full path for new format */
@@ -188,7 +188,6 @@ static void * unflatten_dt_node(struct boot_param_header *blob,
                                dad->next->sibling = np;
                        dad->next = np;
                }
-               kref_init(&np->kref);
        }
        /* process properties */
        for (offset = fdt_first_property_offset(blob, *poffset);
index a3c1c5aae6a9eaa5de0ecd0dd0335771ac8f7fab..5ecb3d83b2126a357c6c84c6c62fc89411f95e5e 100644 (file)
@@ -102,7 +102,7 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
        int imaplen, match, i;
 
        pr_debug("of_irq_map_raw: par=%s,intspec=[0x%08x 0x%08x...],ointsize=%d\n",
-                parent->full_name, be32_to_cpup(intspec),
+                of_node_full_name(parent), be32_to_cpup(intspec),
                 be32_to_cpup(intspec + 1), ointsize);
 
        ipar = of_node_get(parent);
@@ -126,7 +126,7 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
                goto fail;
        }
 
-       pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", ipar->full_name, intsize);
+       pr_debug("of_irq_map_raw: ipar=%s, size=%d\n", of_node_full_name(ipar), intsize);
 
        if (ointsize != intsize)
                return -EINVAL;
@@ -287,7 +287,7 @@ int of_irq_map_one(struct device_node *device, int index, struct of_irq *out_irq
        u32 intsize, intlen;
        int res = -EINVAL;
 
-       pr_debug("of_irq_map_one: dev=%s, index=%d\n", device->full_name, index);
+       pr_debug("of_irq_map_one: dev=%s, index=%d\n", of_node_full_name(device), index);
 
        /* OldWorld mac stuff is "special", handle out of line */
        if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
@@ -345,6 +345,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
        if (r && irq) {
                const char *name = NULL;
 
+               memset(r, 0, sizeof(*r));
                /*
                 * Get optional "interrupts-names" property to add a name
                 * to the resource.
@@ -353,8 +354,8 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
                                              &name);
 
                r->start = r->end = irq;
-               r->flags = IORESOURCE_IRQ;
-               r->name = name ? name : dev->full_name;
+               r->flags = IORESOURCE_IRQ | irqd_get_trigger_type(irq_get_irq_data(irq));
+               r->name = name ? name : of_node_full_name(dev);
        }
 
        return irq;
@@ -482,8 +483,9 @@ void __init of_irq_init(const struct of_device_id *matches)
                }
 
                /* Get the next pending parent that might have children */
-               desc = list_first_entry(&intc_parent_list, typeof(*desc), list);
-               if (list_empty(&intc_parent_list) || !desc) {
+               desc = list_first_entry_or_null(&intc_parent_list,
+                                               typeof(*desc), list);
+               if (!desc) {
                        pr_err("of_irq_init: children remain, but no parents\n");
                        break;
                }
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
deleted file mode 100644 (file)
index b667264..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * OF helpers for the I2C API
- *
- * Copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
- *
- * Based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/i2c.h>
-#include <linux/irq.h>
-#include <linux/of.h>
-#include <linux/of_i2c.h>
-#include <linux/of_irq.h>
-#include <linux/module.h>
-
-void of_i2c_register_devices(struct i2c_adapter *adap)
-{
-       void *result;
-       struct device_node *node;
-
-       /* Only register child devices if the adapter has a node pointer set */
-       if (!adap->dev.of_node)
-               return;
-
-       dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
-
-       for_each_available_child_of_node(adap->dev.of_node, node) {
-               struct i2c_board_info info = {};
-               struct dev_archdata dev_ad = {};
-               const __be32 *addr;
-               int len;
-
-               dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
-
-               if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
-                       dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
-                               node->full_name);
-                       continue;
-               }
-
-               addr = of_get_property(node, "reg", &len);
-               if (!addr || (len < sizeof(int))) {
-                       dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
-                               node->full_name);
-                       continue;
-               }
-
-               info.addr = be32_to_cpup(addr);
-               if (info.addr > (1 << 10) - 1) {
-                       dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
-                               info.addr, node->full_name);
-                       continue;
-               }
-
-               info.irq = irq_of_parse_and_map(node, 0);
-               info.of_node = of_node_get(node);
-               info.archdata = &dev_ad;
-
-               if (of_get_property(node, "wakeup-source", NULL))
-                       info.flags |= I2C_CLIENT_WAKE;
-
-               request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
-               result = i2c_new_device(adap, &info);
-               if (result == NULL) {
-                       dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
-                               node->full_name);
-                       of_node_put(node);
-                       irq_dispose_mapping(info.irq);
-                       continue;
-               }
-       }
-}
-EXPORT_SYMBOL(of_i2c_register_devices);
-
-static int of_dev_node_match(struct device *dev, void *data)
-{
-        return dev->of_node == data;
-}
-
-/* must call put_device() when done with returned i2c_client device */
-struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
-       struct device *dev;
-
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
-       if (!dev)
-               return NULL;
-
-       return i2c_verify_client(dev);
-}
-EXPORT_SYMBOL(of_find_i2c_device_by_node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
-{
-       struct device *dev;
-
-       dev = bus_find_device(&i2c_bus_type, NULL, node,
-                                        of_dev_node_match);
-       if (!dev)
-               return NULL;
-
-       return i2c_verify_adapter(dev);
-}
-EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
-
-MODULE_LICENSE("GPL");
index ff350c8fa7acc4398c088ddc5d346906df3a5efd..8e882e706cd8c6b4edc61f577d7808e0e6e74df7 100644 (file)
@@ -31,6 +31,63 @@ struct alias_prop {
        char stem[0];
 };
 
-extern struct mutex of_aliases_mutex;
+extern struct mutex of_mutex;
 extern struct list_head aliases_lookup;
+extern struct kset *of_kset;
+
+
+static inline struct device_node *kobj_to_device_node(struct kobject *kobj)
+{
+       return container_of(kobj, struct device_node, kobj);
+}
+
+#if defined(CONFIG_OF_DYNAMIC)
+extern int of_property_notify(int action, struct device_node *np,
+                             struct property *prop, struct property *old_prop);
+extern void of_node_release(struct kobject *kobj);
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_property_notify(int action, struct device_node *np,
+                                    struct property *prop, struct property *old_prop)
+{
+       return 0;
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+/**
+ * General utilities for working with live trees.
+ *
+ * All functions with two leading underscores operate
+ * without taking node references, so you either have to
+ * own the devtree lock or work on detached trees only.
+ */
+struct property *__of_prop_dup(const struct property *prop, gfp_t allocflags);
+__printf(2, 3) struct device_node *__of_node_dup(const struct device_node *np, const char *fmt, ...);
+
+extern const void *__of_get_property(const struct device_node *np,
+                                    const char *name, int *lenp);
+extern int __of_add_property(struct device_node *np, struct property *prop);
+extern int __of_add_property_sysfs(struct device_node *np,
+               struct property *prop);
+extern int __of_remove_property(struct device_node *np, struct property *prop);
+extern void __of_remove_property_sysfs(struct device_node *np,
+               struct property *prop);
+extern int __of_update_property(struct device_node *np,
+               struct property *newprop, struct property **oldprop);
+extern void __of_update_property_sysfs(struct device_node *np,
+               struct property *newprop, struct property *oldprop);
+
+extern void __of_attach_node(struct device_node *np);
+extern int __of_attach_node_sysfs(struct device_node *np);
+extern void __of_detach_node(struct device_node *np);
+extern void __of_detach_node_sysfs(struct device_node *np);
+
+/* iterators for transactions, used for overlays */
+/* forward iterator */
+#define for_each_transaction_entry(_oft, _te) \
+       list_for_each_entry(_te, &(_oft)->te_list, node)
+
+/* reverse iterator */
+#define for_each_transaction_entry_reverse(_oft, _te) \
+       list_for_each_entry_reverse(_te, &(_oft)->te_list, node)
+
 #endif /* _LINUX_OF_PRIVATE_H */
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
new file mode 100644 (file)
index 0000000..4066648
--- /dev/null
@@ -0,0 +1,552 @@
+/*
+ * Functions for working with device tree overlays
+ *
+ * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com>
+ * Copyright (C) 2012 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+#undef DEBUG
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/idr.h>
+
+#include "of_private.h"
+
+/**
+ * struct of_overlay_info - Holds a single overlay info
+ * @target:    target of the overlay operation
+ * @overlay:   pointer to the overlay contents node
+ *
+ * Holds a single overlay state, including all the overlay logs &
+ * records.
+ */
+struct of_overlay_info {
+       struct device_node *target;
+       struct device_node *overlay;
+};
+
+/**
+ * struct of_overlay - Holds a complete overlay transaction
+ * @node:      List on which we are located
+ * @count:     Count of ovinfo structures
+ * @ovinfo_tab:        Overlay info table (count sized)
+ * @cset:      Changeset to be used
+ *
+ * Holds a complete overlay transaction
+ */
+struct of_overlay {
+       int id;
+       struct list_head node;
+       int count;
+       struct of_overlay_info *ovinfo_tab;
+       struct of_changeset cset;
+};
+
+static int of_overlay_apply_one(struct of_overlay *ov,
+               struct device_node *target, const struct device_node *overlay);
+
+static int of_overlay_apply_single_property(struct of_overlay *ov,
+               struct device_node *target, struct property *prop)
+{
+       struct property *propn, *tprop;
+
+       /* NOTE: Multiple changes of single properties not supported */
+       tprop = of_find_property(target, prop->name, NULL);
+
+       /* special properties are not meant to be updated (silent NOP) */
+       if (of_prop_cmp(prop->name, "name") == 0 ||
+           of_prop_cmp(prop->name, "phandle") == 0 ||
+           of_prop_cmp(prop->name, "linux,phandle") == 0)
+               return 0;
+
+       propn = __of_prop_dup(prop, GFP_KERNEL);
+       if (propn == NULL)
+               return -ENOMEM;
+
+       /* not found? add */
+       if (tprop == NULL)
+               return of_changeset_add_property(&ov->cset, target, propn);
+
+       /* found? update */
+       return of_changeset_update_property(&ov->cset, target, propn);
+}
+
+static int of_overlay_apply_single_device_node(struct of_overlay *ov,
+               struct device_node *target, struct device_node *child)
+{
+       const char *cname;
+       struct device_node *tchild, *grandchild;
+       int ret = 0;
+
+       cname = kbasename(child->full_name);
+       if (cname == NULL)
+               return -ENOMEM;
+
+       /* NOTE: Multiple mods of created nodes not supported */
+       tchild = of_get_child_by_name(target, cname);
+       if (tchild != NULL) {
+               /* apply overlay recursively */
+               ret = of_overlay_apply_one(ov, tchild, child);
+               of_node_put(tchild);
+       } else {
+               /* create empty tree as a target */
+               tchild = __of_node_dup(child, "%s/%s", target->full_name, cname);
+               if (!tchild)
+                       return -ENOMEM;
+
+               /* point to parent */
+               tchild->parent = target;
+
+               ret = of_changeset_attach_node(&ov->cset, tchild);
+               if (ret)
+                       return ret;
+
+               ret = of_overlay_apply_one(ov, tchild, child);
+               if (ret)
+                       return ret;
+       }
+
+       return ret;
+}
+
+/*
+ * Apply a single overlay node recursively.
+ *
+ * Note that in case of an error the target node is left
+ * in an inconsistent state. Error recovery should be performed
+ * by using the changeset.
+ */
+static int of_overlay_apply_one(struct of_overlay *ov,
+               struct device_node *target, const struct device_node *overlay)
+{
+       struct device_node *child;
+       struct property *prop;
+       int ret;
+
+       for_each_property_of_node(overlay, prop) {
+               ret = of_overlay_apply_single_property(ov, target, prop);
+               if (ret) {
+                       pr_err("%s: Failed to apply prop @%s/%s\n",
+                               __func__, target->full_name, prop->name);
+                       return ret;
+               }
+       }
+
+       for_each_child_of_node(overlay, child) {
+               ret = of_overlay_apply_single_device_node(ov, target, child);
+               if (ret != 0) {
+                       pr_err("%s: Failed to apply single node @%s/%s\n",
+                                       __func__, target->full_name,
+                                       child->name);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+/**
+ * of_overlay_apply() - Apply @count overlays pointed at by @ovinfo_tab
+ * @ov:                Overlay to apply
+ *
+ * Applies the overlays given, while handling all error conditions
+ * appropriately. Either the operation succeeds, or if it fails the
+ * live tree is reverted to the state before the attempt.
+ * Returns 0, or an error if the overlay attempt failed.
+ */
+static int of_overlay_apply(struct of_overlay *ov)
+{
+       int i, err;
+
+       /* first we apply the overlays atomically */
+       for (i = 0; i < ov->count; i++) {
+               struct of_overlay_info *ovinfo = &ov->ovinfo_tab[i];
+
+               err = of_overlay_apply_one(ov, ovinfo->target, ovinfo->overlay);
+               if (err != 0) {
+                       pr_err("%s: overlay failed '%s'\n",
+                               __func__, ovinfo->target->full_name);
+                       return err;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * Find the target node using a number of different strategies
+ * in order of preference
+ *
+ * "target" property containing the phandle of the target
+ * "target-path" property containing the path of the target
+ */
+static struct device_node *find_target_node(struct device_node *info_node)
+{
+       const char *path;
+       u32 val;
+       int ret;
+
+       /* first try to go by using the target as a phandle */
+       ret = of_property_read_u32(info_node, "target", &val);
+       if (ret == 0)
+               return of_find_node_by_phandle(val);
+
+       /* now try to locate by path */
+       ret = of_property_read_string(info_node, "target-path", &path);
+       if (ret == 0)
+               return of_find_node_by_path(path);
+
+       pr_err("%s: Failed to find target for node %p (%s)\n", __func__,
+               info_node, info_node->name);
+
+       return NULL;
+}
+
+/**
+ * of_fill_overlay_info() - Fill an overlay info structure
+ * @ov:                Overlay to fill
+ * @info_node: Device node containing the overlay
+ * @ovinfo:    Pointer to the overlay info structure to fill
+ *
+ * Fills an overlay info structure with the overlay information
+ * from a device node. This device node must have a target property
+ * which contains a phandle of the overlay target node, and an
+ * __overlay__ child node which has the overlay contents.
+ * Both ovinfo->target & ovinfo->overlay have their references taken.
+ *
+ * Returns 0 on success, or a negative error value.
+ */
+static int of_fill_overlay_info(struct of_overlay *ov,
+               struct device_node *info_node, struct of_overlay_info *ovinfo)
+{
+       ovinfo->overlay = of_get_child_by_name(info_node, "__overlay__");
+       if (ovinfo->overlay == NULL)
+               goto err_fail;
+
+       ovinfo->target = find_target_node(info_node);
+       if (ovinfo->target == NULL)
+               goto err_fail;
+
+       return 0;
+
+err_fail:
+       of_node_put(ovinfo->target);
+       of_node_put(ovinfo->overlay);
+
+       memset(ovinfo, 0, sizeof(*ovinfo));
+       return -EINVAL;
+}
+
+/**
+ * of_build_overlay_info() - Build an overlay info array
+ * @ov:                Overlay to build
+ * @tree:      Device node containing all the overlays
+ *
+ * Helper function that given a tree containing overlay information,
+ * allocates and builds an overlay info array containing it, ready
+ * for use using of_overlay_apply.
+ *
+ * Returns 0 on success and fills in @ov->count and @ov->ovinfo_tab,
+ * while on error a negative error value is returned.
+ */
+static int of_build_overlay_info(struct of_overlay *ov,
+               struct device_node *tree)
+{
+       struct device_node *node;
+       struct of_overlay_info *ovinfo;
+       int cnt, err;
+
+       /* worst case; every child is a node */
+       cnt = 0;
+       for_each_child_of_node(tree, node)
+               cnt++;
+
+       ovinfo = kcalloc(cnt, sizeof(*ovinfo), GFP_KERNEL);
+       if (ovinfo == NULL)
+               return -ENOMEM;
+
+       cnt = 0;
+       for_each_child_of_node(tree, node) {
+               memset(&ovinfo[cnt], 0, sizeof(*ovinfo));
+               err = of_fill_overlay_info(ov, node, &ovinfo[cnt]);
+               if (err == 0)
+                       cnt++;
+       }
+
+       /* if nothing filled, return error */
+       if (cnt == 0) {
+               kfree(ovinfo);
+               return -ENODEV;
+       }
+
+       ov->count = cnt;
+       ov->ovinfo_tab = ovinfo;
+
+       return 0;
+}
+
+/**
+ * of_free_overlay_info() - Free an overlay info array
+ * @ov:                Overlay to free the overlay info from
+ * @ovinfo_tab:        Array of overlay_info's to free
+ *
+ * Releases the memory of a previously allocated ovinfo array
+ * by of_build_overlay_info.
+ * Returns 0, or an error if the arguments are bogus.
+ */
+static int of_free_overlay_info(struct of_overlay *ov)
+{
+       struct of_overlay_info *ovinfo;
+       int i;
+
+       /* do it in reverse */
+       for (i = ov->count - 1; i >= 0; i--) {
+               ovinfo = &ov->ovinfo_tab[i];
+
+               of_node_put(ovinfo->target);
+               of_node_put(ovinfo->overlay);
+       }
+       kfree(ov->ovinfo_tab);
+
+       return 0;
+}
+
+static LIST_HEAD(ov_list);
+static DEFINE_IDR(ov_idr);
+
+/**
+ * of_overlay_create() - Create and apply an overlay
+ * @tree:      Device node containing all the overlays
+ *
+ * Creates and applies an overlay while also keeping track
+ * of the overlay in a list. This list can be used to prevent
+ * illegal overlay removals.
+ *
+ * Returns the id of the created overlay, or a negative error number
+ */
+int of_overlay_create(struct device_node *tree)
+{
+       struct of_overlay *ov;
+       int err, id;
+
+       /* allocate the overlay structure */
+       ov = kzalloc(sizeof(*ov), GFP_KERNEL);
+       if (ov == NULL)
+               return -ENOMEM;
+       ov->id = -1;
+
+       INIT_LIST_HEAD(&ov->node);
+
+       of_changeset_init(&ov->cset);
+
+       mutex_lock(&of_mutex);
+
+       id = idr_alloc(&ov_idr, ov, 0, 0, GFP_KERNEL);
+       if (id < 0) {
+               pr_err("%s: idr_alloc() failed for tree@%s\n",
+                               __func__, tree->full_name);
+               err = id;
+               goto err_destroy_trans;
+       }
+       ov->id = id;
+
+       /* build the overlay info structures */
+       err = of_build_overlay_info(ov, tree);
+       if (err) {
+               pr_err("%s: of_build_overlay_info() failed for tree@%s\n",
+                               __func__, tree->full_name);
+               goto err_free_idr;
+       }
+
+       /* apply the overlay */
+       err = of_overlay_apply(ov);
+       if (err) {
+               pr_err("%s: of_overlay_apply() failed for tree@%s\n",
+                               __func__, tree->full_name);
+               goto err_abort_trans;
+       }
+
+       /* apply the changeset */
+       err = of_changeset_apply(&ov->cset);
+       if (err) {
+               pr_err("%s: of_changeset_apply() failed for tree@%s\n",
+                               __func__, tree->full_name);
+               goto err_revert_overlay;
+       }
+
+       /* add to the tail of the overlay list */
+       list_add_tail(&ov->node, &ov_list);
+
+       mutex_unlock(&of_mutex);
+
+       return id;
+
+err_revert_overlay:
+err_abort_trans:
+       of_free_overlay_info(ov);
+err_free_idr:
+       idr_remove(&ov_idr, ov->id);
+err_destroy_trans:
+       of_changeset_destroy(&ov->cset);
+       kfree(ov);
+       mutex_unlock(&of_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(of_overlay_create);
+
+/* check whether the given node, lies under the given tree */
+static int overlay_subtree_check(struct device_node *tree,
+               struct device_node *dn)
+{
+       struct device_node *child;
+
+       /* match? */
+       if (tree == dn)
+               return 1;
+
+       for_each_child_of_node(tree, child) {
+               if (overlay_subtree_check(child, dn))
+                       return 1;
+       }
+
+       return 0;
+}
+
+/* check whether this overlay is the topmost */
+static int overlay_is_topmost(struct of_overlay *ov, struct device_node *dn)
+{
+       struct of_overlay *ovt;
+       struct of_changeset_entry *ce;
+
+       list_for_each_entry_reverse(ovt, &ov_list, node) {
+               /* if we hit ourselves, we're done */
+               if (ovt == ov)
+                       break;
+
+               /* check against each subtree affected by this overlay */
+               list_for_each_entry(ce, &ovt->cset.entries, node) {
+                       if (overlay_subtree_check(ce->np, dn)) {
+                               pr_err("%s: #%d clashes #%d @%s\n",
+                                       __func__, ov->id, ovt->id,
+                                       dn->full_name);
+                               return 0;
+                       }
+               }
+       }
+
+       /* overlay is topmost */
+       return 1;
+}
+
+/*
+ * We can safely remove the overlay only if it's the top-most one.
+ * Newly applied overlays are inserted at the tail of the overlay list,
+ * so a top most overlay is the one that is closest to the tail.
+ *
+ * The topmost check is done by exploiting this property. For each
+ * affected device node in the log list we check if this overlay is
+ * the one closest to the tail. If another overlay has affected this
+ * device node and is closest to the tail, then removal is not permitted.
+ */
+static int overlay_removal_is_ok(struct of_overlay *ov)
+{
+       struct of_changeset_entry *ce;
+
+       list_for_each_entry(ce, &ov->cset.entries, node) {
+               if (!overlay_is_topmost(ov, ce->np)) {
+                       pr_err("%s: overlay #%d is not topmost\n",
+                                       __func__, ov->id);
+                       return 0;
+               }
+       }
+
+       return 1;
+}
+
+/**
+ * of_overlay_destroy() - Removes an overlay
+ * @id:        Overlay id number returned by a previous call to of_overlay_create
+ *
+ * Removes an overlay if it is permissible.
+ *
+ * Returns 0 on success, or a negative error number
+ */
+int of_overlay_destroy(int id)
+{
+       struct of_overlay *ov;
+       int err;
+
+       mutex_lock(&of_mutex);
+
+       ov = idr_find(&ov_idr, id);
+       if (ov == NULL) {
+               err = -ENODEV;
+               pr_err("%s: Could not find overlay #%d\n",
+                               __func__, id);
+               goto out;
+       }
+
+       /* check whether the overlay is safe to remove */
+       if (!overlay_removal_is_ok(ov)) {
+               err = -EBUSY;
+               pr_err("%s: removal check failed for overlay #%d\n",
+                               __func__, id);
+               goto out;
+       }
+
+
+       list_del(&ov->node);
+       of_changeset_revert(&ov->cset);
+       of_free_overlay_info(ov);
+       idr_remove(&ov_idr, id);
+       of_changeset_destroy(&ov->cset);
+       kfree(ov);
+
+       err = 0;
+
+out:
+       mutex_unlock(&of_mutex);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(of_overlay_destroy);
+
+/**
+ * of_overlay_destroy_all() - Removes all overlays from the system
+ *
+ * Removes all overlays from the system in the correct order.
+ *
+ * Returns 0 on success, or a negative error number
+ */
+int of_overlay_destroy_all(void)
+{
+       struct of_overlay *ov, *ovn;
+
+       mutex_lock(&of_mutex);
+
+       /* the tail of list is guaranteed to be safe to remove */
+       list_for_each_entry_safe_reverse(ov, ovn, &ov_list, node) {
+               list_del(&ov->node);
+               of_changeset_revert(&ov->cset);
+               of_free_overlay_info(ov);
+               idr_remove(&ov_idr, ov->id);
+               kfree(ov);
+       }
+
+       mutex_unlock(&of_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(of_overlay_destroy_all);
index 37b56fd716e6683e97cfcc4c381e7246d86d77ee..37961b853b4136e1862728a652b4b9dbbd2d53ec 100644 (file)
@@ -177,11 +177,10 @@ static struct device_node * __init of_pdt_create_node(phandle node,
                return NULL;
 
        dp = prom_early_alloc(sizeof(*dp));
+       of_node_init(dp);
        of_pdt_incr_unique_id(dp);
        dp->parent = parent;
 
-       kref_init(&dp->kref);
-
        dp->name = of_pdt_get_one_property(node, "name");
        dp->type = of_pdt_get_one_property(node, "device_type");
        dp->phandle = node;
index e0a6514ab46c20eb902453c3b321774d1e787ba0..41bff21a08293757d0254fa54f7951b0e89308dc 100644 (file)
@@ -204,12 +204,13 @@ struct platform_device *of_platform_device_create_pdata(
 {
        struct platform_device *dev;
 
-       if (!of_device_is_available(np))
+       if (!of_device_is_available(np) ||
+           of_node_test_and_set_flag(np, OF_POPULATED))
                return NULL;
 
        dev = of_device_alloc(np, bus_id, parent);
        if (!dev)
-               return NULL;
+               goto err_clear_flag;
 
 #if defined(CONFIG_MICROBLAZE)
        dev->archdata.dma_mask = 0xffffffffUL;
@@ -225,10 +226,14 @@ struct platform_device *of_platform_device_create_pdata(
 
        if (of_device_add(dev) != 0) {
                platform_device_put(dev);
-               return NULL;
+               goto err_clear_flag;
        }
 
        return dev;
+
+err_clear_flag:
+       of_node_clear_flag(np, OF_POPULATED);
+       return NULL;
 }
 
 /**
@@ -260,12 +265,16 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
 
        pr_debug("Creating amba device %s\n", node->full_name);
 
-       if (!of_device_is_available(node))
+       if (!of_device_is_available(node) ||
+           of_node_test_and_set_flag(node, OF_POPULATED))
                return NULL;
 
        dev = amba_device_alloc(NULL, 0, 0);
-       if (!dev)
-               return NULL;
+       if (!dev) {
+               pr_err("%s(): amba_device_alloc() failed for %s\n",
+                      __func__, node->full_name);
+               goto err_clear_flag;
+       }
 
        /* setup generic device info */
        dev->dev.coherent_dma_mask = ~0;
@@ -301,6 +310,8 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
 
 err_free:
        amba_device_put(dev);
+err_clear_flag:
+       of_node_clear_flag(node, OF_POPULATED);
        return NULL;
 }
 #else /* CONFIG_ARM_AMBA */
@@ -390,6 +401,7 @@ static int of_platform_bus_create(struct device_node *bus,
                        break;
                }
        }
+       of_node_set_flag(bus, OF_POPULATED_BUS);
        return rc;
 }
 
@@ -473,4 +485,109 @@ int of_platform_populate(struct device_node *root,
        return rc;
 }
 EXPORT_SYMBOL_GPL(of_platform_populate);
+
+static int of_platform_device_destroy(struct device *dev, void *data)
+{
+       /* Do not touch devices not populated from the device tree */
+       if (!dev->of_node || !of_node_check_flag(dev->of_node, OF_POPULATED))
+               return 0;
+
+       /* Recurse for any nodes that were treated as busses */
+       if (of_node_check_flag(dev->of_node, OF_POPULATED_BUS))
+               device_for_each_child(dev, NULL, of_platform_device_destroy);
+
+       if (dev->bus == &platform_bus_type)
+               platform_device_unregister(to_platform_device(dev));
+#ifdef CONFIG_ARM_AMBA
+       else if (dev->bus == &amba_bustype)
+               amba_device_unregister(to_amba_device(dev));
+#endif
+
+       of_node_clear_flag(dev->of_node, OF_POPULATED);
+       of_node_clear_flag(dev->of_node, OF_POPULATED_BUS);
+       return 0;
+}
+
+/**
+ * of_platform_depopulate() - Remove devices populated from device tree
+ * @parent: device which children will be removed
+ *
+ * Complementary to of_platform_populate(), this function removes children
+ * of the given device (and, recursively, their children) that have been
+ * created from their respective device tree nodes (and only those,
+ * leaving others - eg. manually created - unharmed).
+ *
+ * This function returns no value; children that could not be
+ * removed are left in place.
+ */
+void of_platform_depopulate(struct device *parent)
+{
+       device_for_each_child(parent, NULL, of_platform_device_destroy);
+}
+EXPORT_SYMBOL_GPL(of_platform_depopulate);
+
+#ifdef CONFIG_OF_DYNAMIC
+static int of_platform_notify(struct notifier_block *nb,
+                               unsigned long action, void *arg)
+{
+       struct of_reconfig_data *rd = arg;
+       struct platform_device *pdev_parent, *pdev;
+       bool children_left;
+
+       switch (of_reconfig_get_state_change(action, rd)) {
+       case OF_RECONFIG_CHANGE_ADD:
+               /* verify that the parent is a bus */
+               if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
+                       return NOTIFY_OK;       /* not for us */
+
+               /* already populated? (driver using of_populate manually) */
+               if (of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
+               /* pdev_parent may be NULL when no bus platform device */
+               pdev_parent = of_find_device_by_node(rd->dn->parent);
+               pdev = of_platform_device_create(rd->dn, NULL,
+                               pdev_parent ? &pdev_parent->dev : NULL);
+               of_dev_put(pdev_parent);
+
+               if (pdev == NULL) {
+                       pr_err("%s: failed to create for '%s'\n",
+                                       __func__, rd->dn->full_name);
+                       /* of_platform_device_create tosses the error code */
+                       return notifier_from_errno(-EINVAL);
+               }
+               break;
+
+       case OF_RECONFIG_CHANGE_REMOVE:
+
+               /* already depopulated? */
+               if (!of_node_check_flag(rd->dn, OF_POPULATED))
+                       return NOTIFY_OK;
+
+               /* find our device by node */
+               pdev = of_find_device_by_node(rd->dn);
+               if (pdev == NULL)
+                       return NOTIFY_OK;       /* no? not meant for us */
+
+               /* unregister takes one ref away */
+               of_platform_device_destroy(&pdev->dev, &children_left);
+
+               /* and put the reference of the find */
+               of_dev_put(pdev);
+               break;
+       }
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block platform_of_notifier = {
+       .notifier_call = of_platform_notify,
+};
+
+void of_platform_register_reconfig_notifier(void)
+{
+       WARN_ON(of_reconfig_notifier_register(&platform_of_notifier));
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
 #endif /* CONFIG_OF_ADDRESS */
diff --git a/drivers/of/resolver.c b/drivers/of/resolver.c
new file mode 100644 (file)
index 0000000..640eb4c
--- /dev/null
@@ -0,0 +1,412 @@
+/*
+ * Functions for dealing with DT resolution
+ *
+ * Copyright (C) 2012 Pantelis Antoniou <panto@antoniou-consulting.com>
+ * Copyright (C) 2012 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+/* illegal phandle value (set when unresolved) */
+#define OF_PHANDLE_ILLEGAL     0xdeadbeef
+
+/**
+ * Find a node with the given full name by recursively following any of
+ * the child node links.
+ */
+static struct device_node *__of_find_node_by_full_name(struct device_node *node,
+               const char *full_name)
+{
+       struct device_node *child, *found;
+
+       if (node == NULL)
+               return NULL;
+
+       /* check */
+       if (of_node_cmp(node->full_name, full_name) == 0)
+               return node;
+
+       for_each_child_of_node(node, child) {
+               found = __of_find_node_by_full_name(child, full_name);
+               if (found != NULL)
+                       return found;
+       }
+
+       return NULL;
+}
+
+/*
+ * Find live tree's maximum phandle value.
+ */
+static phandle of_get_tree_max_phandle(void)
+{
+       struct device_node *node;
+       phandle phandle;
+       unsigned long flags;
+
+       /* now search recursively */
+       raw_spin_lock_irqsave(&devtree_lock, flags);
+       phandle = 0;
+       for_each_of_allnodes(node) {
+               if (node->phandle != OF_PHANDLE_ILLEGAL &&
+                               node->phandle > phandle)
+                       phandle = node->phandle;
+       }
+       raw_spin_unlock_irqrestore(&devtree_lock, flags);
+
+       return phandle;
+}
+
+/*
+ * Adjust a subtree's phandle values by a given delta.
+ * Makes sure not to just adjust the device node's phandle value,
+ * but modify the phandle properties values as well.
+ */
+static void __of_adjust_tree_phandles(struct device_node *node,
+               int phandle_delta)
+{
+       struct device_node *child;
+       struct property *prop;
+       phandle phandle;
+
+       /* first adjust the node's phandle direct value */
+       if (node->phandle != 0 && node->phandle != OF_PHANDLE_ILLEGAL)
+               node->phandle += phandle_delta;
+
+       /* now adjust phandle & linux,phandle values */
+       for_each_property_of_node(node, prop) {
+
+               /* only look for these two */
+               if (of_prop_cmp(prop->name, "phandle") != 0 &&
+                   of_prop_cmp(prop->name, "linux,phandle") != 0)
+                       continue;
+
+               /* must be big enough */
+               if (prop->length < 4)
+                       continue;
+
+               /* read phandle value */
+               phandle = be32_to_cpup(prop->value);
+               if (phandle == OF_PHANDLE_ILLEGAL)      /* unresolved */
+                       continue;
+
+               /* adjust */
+               *(uint32_t *)prop->value = cpu_to_be32(node->phandle);
+       }
+
+       /* now do the children recursively */
+       for_each_child_of_node(node, child)
+               __of_adjust_tree_phandles(child, phandle_delta);
+}
+
+static int __of_adjust_phandle_ref(struct device_node *node,
+               struct property *rprop, int value)
+{
+       phandle phandle;
+       struct device_node *refnode;
+       struct property *sprop;
+       char *propval, *propcur, *propend, *nodestr, *propstr, *s;
+       int offset, propcurlen;
+       int err = 0;
+
+       /* make a copy */
+       propval = kmalloc(rprop->length, GFP_KERNEL);
+       if (!propval) {
+               pr_err("%s: Could not copy value of '%s'\n",
+                               __func__, rprop->name);
+               return -ENOMEM;
+       }
+       memcpy(propval, rprop->value, rprop->length);
+
+       propend = propval + rprop->length;
+       for (propcur = propval; propcur < propend; propcur += propcurlen + 1) {
+               propcurlen = strlen(propcur);
+
+               nodestr = propcur;
+               s = strchr(propcur, ':');
+               if (!s) {
+                       pr_err("%s: Illegal symbol entry '%s' (1)\n",
+                               __func__, propcur);
+                       err = -EINVAL;
+                       goto err_fail;
+               }
+               *s++ = '\0';
+
+               propstr = s;
+               s = strchr(s, ':');
+               if (!s) {
+                       pr_err("%s: Illegal symbol entry '%s' (2)\n",
+                               __func__, (char *)rprop->value);
+                       err = -EINVAL;
+                       goto err_fail;
+               }
+
+               *s++ = '\0';
+               err = kstrtoint(s, 10, &offset);
+               if (err != 0) {
+                       pr_err("%s: Could get offset '%s'\n",
+                               __func__, (char *)rprop->value);
+                       goto err_fail;
+               }
+
+               /* look into the resolve node for the full path */
+               refnode = __of_find_node_by_full_name(node, nodestr);
+               if (!refnode) {
+                       pr_warn("%s: Could not find refnode '%s'\n",
+                               __func__, (char *)rprop->value);
+                       continue;
+               }
+
+               /* now find the property */
+               for_each_property_of_node(refnode, sprop) {
+                       if (of_prop_cmp(sprop->name, propstr) == 0)
+                               break;
+               }
+
+               if (!sprop) {
+                       pr_err("%s: Could not find property '%s'\n",
+                               __func__, (char *)rprop->value);
+                       err = -ENOENT;
+                       goto err_fail;
+               }
+
+               phandle = value;
+               *(__be32 *)(sprop->value + offset) = cpu_to_be32(phandle);
+       }
+
+err_fail:
+       kfree(propval);
+       return err;
+}
+
+/* compare nodes taking into account that 'name' strips out the @ part */
+static int __of_node_name_cmp(const struct device_node *dn1,
+               const struct device_node *dn2)
+{
+       const char *n1 = strrchr(dn1->full_name, '/') ? : "/";
+       const char *n2 = strrchr(dn2->full_name, '/') ? : "/";
+
+       return of_node_cmp(n1, n2);
+}
+
+/*
+ * Adjust the local phandle references by the given phandle delta.
+ * Assumes the existence of a __local_fixups__ node at the root.
+ * Assumes that __of_verify_tree_phandle_references has been called.
+ * Does not take any devtree locks so make sure you call this on a tree
+ * which is at the detached state.
+ */
+static int __of_adjust_tree_phandle_references(struct device_node *node,
+               struct device_node *target, int phandle_delta)
+{
+       struct device_node *child, *childtarget;
+       struct property *rprop, *sprop;
+       int err, i, count;
+       unsigned int off;
+       phandle phandle;
+
+       if (node == NULL)
+               return 0;
+
+       for_each_property_of_node(node, rprop) {
+
+               /* skip properties added automatically */
+               if (of_prop_cmp(rprop->name, "name") == 0 ||
+                   of_prop_cmp(rprop->name, "phandle") == 0 ||
+                   of_prop_cmp(rprop->name, "linux,phandle") == 0)
+                       continue;
+
+               if ((rprop->length % 4) != 0 || rprop->length == 0) {
+                       pr_err("%s: Illegal property (size) '%s' @%s\n",
+                                       __func__, rprop->name, node->full_name);
+                       return -EINVAL;
+               }
+               count = rprop->length / sizeof(__be32);
+
+               /* now find the target property */
+               for_each_property_of_node(target, sprop) {
+                       if (of_prop_cmp(sprop->name, rprop->name) == 0)
+                               break;
+               }
+
+               if (sprop == NULL) {
+                       pr_err("%s: Could not find target property '%s' @%s\n",
+                                       __func__, rprop->name, node->full_name);
+                       return -EINVAL;
+               }
+
+               for (i = 0; i < count; i++) {
+                       off = be32_to_cpu(((__be32 *)rprop->value)[i]);
+                       /* make sure the offset doesn't overstep (even wrap) */
+                       if (off >= sprop->length ||
+                                       (off + 4) > sprop->length) {
+                               pr_err("%s: Illegal property '%s' @%s\n",
+                                               __func__, rprop->name,
+                                               node->full_name);
+                               return -EINVAL;
+                       }
+
+                       if (phandle_delta) {
+                               /* adjust */
+                               phandle = be32_to_cpu(*(__be32 *)(sprop->value + off));
+                               phandle += phandle_delta;
+                               *(__be32 *)(sprop->value + off) = cpu_to_be32(phandle);
+                       }
+               }
+       }
+
+       for_each_child_of_node(node, child) {
+
+               for_each_child_of_node(target, childtarget)
+                       if (__of_node_name_cmp(child, childtarget) == 0)
+                               break;
+
+               if (!childtarget) {
+                       pr_err("%s: Could not find target child '%s' @%s\n",
+                                       __func__, child->name, node->full_name);
+                       return -EINVAL;
+               }
+
+               err = __of_adjust_tree_phandle_references(child, childtarget,
+                               phandle_delta);
+               if (err != 0)
+                       return err;
+       }
+
+       return 0;
+}
+
+/**
+ * of_resolve_phandles - Resolve the given node against the live tree.
+ *
+ * @resolve:   Node to resolve
+ *
+ * Perform dynamic Device Tree resolution against the live tree
+ * to the given node to resolve. This depends on the live tree
+ * having a __symbols__ node, and the resolve node the __fixups__ &
+ * __local_fixups__ nodes (if needed).
+ * The result of the operation is a resolve node whose contents
+ * are fit to be inserted into or operate upon the live tree.
+ * Returns 0 on success or a negative error value on error.
+ */
+int of_resolve_phandles(struct device_node *resolve)
+{
+       struct device_node *child, *childroot, *refnode;
+       struct device_node *root_sym, *resolve_sym, *resolve_fix;
+       struct property *rprop;
+       const char *refpath;
+       phandle phandle, phandle_delta;
+       int err;
+
+       /* the resolve node must exist, and be detached */
+       if (!resolve || !of_node_check_flag(resolve, OF_DETACHED))
+               return -EINVAL;
+
+       /* first we need to adjust the phandles */
+       phandle_delta = of_get_tree_max_phandle() + 1;
+       __of_adjust_tree_phandles(resolve, phandle_delta);
+
+       /* locate the local fixups */
+       childroot = NULL;
+       for_each_child_of_node(resolve, childroot)
+               if (of_node_cmp(childroot->name, "__local_fixups__") == 0)
+                       break;
+
+       if (childroot != NULL) {
+               /* resolve root is guaranteed to be the '/' */
+               err = __of_adjust_tree_phandle_references(childroot,
+                               resolve, 0);
+               if (err != 0)
+                       return err;
+
+               BUG_ON(__of_adjust_tree_phandle_references(childroot,
+                               resolve, phandle_delta));
+       }
+
+       root_sym = NULL;
+       resolve_sym = NULL;
+       resolve_fix = NULL;
+
+       /* this may fail (if no fixups are required) */
+       root_sym = of_find_node_by_path("/__symbols__");
+
+       /* locate the symbols & fixups nodes on resolve */
+       for_each_child_of_node(resolve, child) {
+
+               if (!resolve_sym &&
+                               of_node_cmp(child->name, "__symbols__") == 0)
+                       resolve_sym = child;
+
+               if (!resolve_fix &&
+                               of_node_cmp(child->name, "__fixups__") == 0)
+                       resolve_fix = child;
+
+               /* both found, don't bother anymore */
+               if (resolve_sym && resolve_fix)
+                       break;
+       }
+
+       /* we do allow for the case where no fixups are needed */
+       if (!resolve_fix) {
+               err = 0;        /* no error */
+               goto out;
+       }
+
+       /* we need to fixup, but no root symbols... */
+       if (!root_sym) {
+               err = -EINVAL;
+               goto out;
+       }
+
+       for_each_property_of_node(resolve_fix, rprop) {
+
+               /* skip properties added automatically */
+               if (of_prop_cmp(rprop->name, "name") == 0)
+                       continue;
+
+               err = of_property_read_string(root_sym,
+                               rprop->name, &refpath);
+               if (err != 0) {
+                       pr_err("%s: Could not find symbol '%s'\n",
+                                       __func__, rprop->name);
+                       goto out;
+               }
+
+               refnode = of_find_node_by_path(refpath);
+               if (!refnode) {
+                       pr_err("%s: Could not find node by path '%s'\n",
+                                       __func__, refpath);
+                       err = -ENOENT;
+                       goto out;
+               }
+
+               phandle = refnode->phandle;
+               of_node_put(refnode);
+
+               pr_debug("%s: %s phandle is 0x%08x\n",
+                               __func__, rprop->name, phandle);
+
+               err = __of_adjust_phandle_ref(resolve, rprop, phandle);
+               if (err)
+                       break;
+       }
+
+out:
+       /* NULL is handled by of_node_put as NOP */
+       of_node_put(root_sym);
+
+       return err;
+}
+EXPORT_SYMBOL_GPL(of_resolve_phandles);
index f5e8dc7a725c201cd8c9a13b79ad8257c1428f14..37f57ad3bdbf1e91f1ddd26808e30e81b636c5b8 100644 (file)
 #include <linux/slab.h>
 #include <linux/device.h>
 
-static bool selftest_passed = true;
+#include "of_private.h"
+
+static struct selftest_results {
+       int passed;
+       int failed;
+} selftest_results;
+
 #define selftest(result, fmt, ...) { \
        if (!(result)) { \
                pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
-               selftest_passed = false; \
+               selftest_results.passed = false; \
        } else { \
                pr_info("pass %s:%i\n", __FILE__, __LINE__); \
        } \
@@ -208,6 +214,81 @@ static void __init of_selftest_property_string(void)
        selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
 }
 
+#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
+                       (p1)->value && (p2)->value && \
+                       !memcmp((p1)->value, (p2)->value, (p1)->length) && \
+                       !strcmp((p1)->name, (p2)->name))
+static void __init of_selftest_property_copy(void)
+{
+#ifdef CONFIG_OF_DYNAMIC
+       struct property p1 = { .name = "p1", .length = 0, .value = "" };
+       struct property p2 = { .name = "p2", .length = 5, .value = "abcd" };
+       struct property *new;
+
+       new = __of_prop_dup(&p1, GFP_KERNEL);
+       selftest(new && propcmp(&p1, new), "empty property didn't copy correctly\n");
+       kfree(new->value);
+       kfree(new->name);
+       kfree(new);
+
+       new = __of_prop_dup(&p2, GFP_KERNEL);
+       selftest(new && propcmp(&p2, new), "non-empty property didn't copy correctly\n");
+       kfree(new->value);
+       kfree(new->name);
+       kfree(new);
+#endif
+}
+
+static void __init of_selftest_changeset(void)
+{
+#ifdef CONFIG_OF_DYNAMIC
+       struct property *ppadd, padd = { .name = "prop-add", .length = 0, .value = "" };
+       struct property *ppupdate, pupdate = { .name = "prop-update", .length = 5, .value = "abcd" };
+       struct property *ppremove;
+       struct device_node *n1, *n2, *n21, *nremove, *parent;
+       struct of_changeset chgset;
+
+       of_changeset_init(&chgset);
+       n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1");
+       selftest(n1, "testcase setup failure\n");
+       n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2");
+       selftest(n2, "testcase setup failure\n");
+       n21 = __of_node_dup(NULL, "%s/%s", "/testcase-data/changeset/n2", "n21");
+       selftest(n21, "testcase setup failure %p\n", n21);
+       nremove = of_find_node_by_path("/testcase-data/changeset/node-remove");
+       selftest(nremove, "testcase setup failure\n");
+       ppadd = __of_prop_dup(&padd, GFP_KERNEL);
+       selftest(ppadd, "testcase setup failure\n");
+       ppupdate = __of_prop_dup(&pupdate, GFP_KERNEL);
+       selftest(ppupdate, "testcase setup failure\n");
+       parent = nremove->parent;
+       n1->parent = parent;
+       n2->parent = parent;
+       n21->parent = n2;
+       n2->child = n21;
+       ppremove = of_find_property(parent, "prop-remove", NULL);
+       selftest(ppremove, "failed to find removal prop");
+
+       of_changeset_init(&chgset);
+       selftest(!of_changeset_attach_node(&chgset, n1), "fail attach n1\n");
+       selftest(!of_changeset_attach_node(&chgset, n2), "fail attach n2\n");
+       selftest(!of_changeset_detach_node(&chgset, nremove), "fail remove node\n");
+       selftest(!of_changeset_attach_node(&chgset, n21), "fail attach n21\n");
+       selftest(!of_changeset_add_property(&chgset, parent, ppadd), "fail add prop\n");
+       selftest(!of_changeset_update_property(&chgset, parent, ppupdate), "fail update prop\n");
+       selftest(!of_changeset_remove_property(&chgset, parent, ppremove), "fail remove prop\n");
+       mutex_lock(&of_mutex);
+       selftest(!of_changeset_apply(&chgset), "apply failed\n");
+       mutex_unlock(&of_mutex);
+
+       mutex_lock(&of_mutex);
+       selftest(!of_changeset_revert(&chgset), "revert failed\n");
+       mutex_unlock(&of_mutex);
+
+       of_changeset_destroy(&chgset);
+#endif
+}
+
 static int __init of_selftest(void)
 {
        struct device_node *np;
@@ -221,8 +302,12 @@ static int __init of_selftest(void)
 
        pr_info("start of selftest - you will see error messages\n");
        of_selftest_parse_phandle_with_args();
+       of_selftest_property_match_string();
+       of_selftest_property_copy();
+       of_selftest_changeset();
        of_selftest_property_string();
-       pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+       pr_info("end of selftest - %i passed, %i failed\n",
+               selftest_results.passed, selftest_results.failed);
        return 0;
 }
 late_initcall(of_selftest);
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644 (file)
index 0000000..669bb07
--- /dev/null
@@ -0,0 +1,14 @@
+/ {
+       testcase-data {
+               changeset {
+                       prop-update = "hello";
+                       prop-remove = "world";
+                       node-remove {
+                       };
+               };
+       };
+};
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
+#include "tests-platform.dtsi"
index ede9034ea783d4965601465104c00a89af6e3274..f962d98bd775222de9beb345638aadc86813b4b6 100644 (file)
@@ -1755,14 +1755,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
        if (pctldev == NULL)
                return;
 
-       mutex_lock(&pinctrldev_list_mutex);
        mutex_lock(&pctldev->mutex);
-
        pinctrl_remove_device_debugfs(pctldev);
+       mutex_unlock(&pctldev->mutex);
 
        if (!IS_ERR(pctldev->p))
                pinctrl_put(pctldev->p);
 
+       mutex_lock(&pinctrldev_list_mutex);
+       mutex_lock(&pctldev->mutex);
        /* TODO: check that no pinmuxes are still active? */
        list_del(&pctldev->node);
        /* Destroy descriptor tree */
index 0ed96df20162c761680f3d8333d6e95d83304353..3458eb6fd491a1df7cb6f245239568d9874cc7db 100644 (file)
@@ -237,6 +237,7 @@ static struct dmi_system_id lis3lv02d_dmi_ids[] = {
        AXIS_DMI_MATCH("HPB64xx", "HP ProBook 64", xy_swap),
        AXIS_DMI_MATCH("HPB64xx", "HP EliteBook 84", xy_swap),
        AXIS_DMI_MATCH("HPB65xx", "HP ProBook 65", x_inverted),
+       AXIS_DMI_MATCH("HPZBook15", "HP ZBook 15", x_inverted),
        { NULL, }
 /* Laptop models without axis info (yet):
  * "NC6910" "HP Compaq 6910"
index d7b9b4dc8a3db12ef7094e54e80b959289fc142c..bc9aed9b1ec7a2a549e4f9bd0ff60803c49df06e 100644 (file)
@@ -1392,7 +1392,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
 }
 EXPORT_SYMBOL_GPL(regulator_get_optional);
 
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
 static void _regulator_put(struct regulator *regulator)
 {
        struct regulator_dev *rdev;
@@ -1407,12 +1407,14 @@ static void _regulator_put(struct regulator *regulator)
        /* remove any sysfs entries */
        if (regulator->dev)
                sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+       mutex_lock(&rdev->mutex);
        kfree(regulator->supply_name);
        list_del(&regulator->list);
        kfree(regulator);
 
        rdev->open_count--;
        rdev->exclusive = 0;
+       mutex_unlock(&rdev->mutex);
 
        module_put(rdev->owner);
 }
index bb86494e2b7b7878aecddf25b9df4b53fa443260..19915c5b256f79b3f58ca402c5edc70ebf2f1878 100644 (file)
@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
        unsigned long flags;
 
        spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
-       if (raw->flags & RAW3215_TIMER_RUNS) {
-               del_timer(&raw->timer);
-               raw->flags &= ~RAW3215_TIMER_RUNS;
-               if (!(raw->port.flags & ASYNC_SUSPENDED)) {
-                       raw3215_mk_write_req(raw);
-                       raw3215_start_io(raw);
+       raw->flags &= ~RAW3215_TIMER_RUNS;
+       if (!(raw->port.flags & ASYNC_SUSPENDED)) {
+               raw3215_mk_write_req(raw);
+               raw3215_start_io(raw);
+               if ((raw->queued_read || raw->queued_write) &&
+                   !(raw->flags & RAW3215_WORKING) &&
+                   !(raw->flags & RAW3215_TIMER_RUNS)) {
+                       raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+                       add_timer(&raw->timer);
+                       raw->flags |= RAW3215_TIMER_RUNS;
                }
        }
        spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
                    (raw->flags & RAW3215_FLUSHING)) {
                        /* execute write requests bigger than minimum size */
                        raw3215_start_io(raw);
-                       if (raw->flags & RAW3215_TIMER_RUNS) {
-                               del_timer(&raw->timer);
-                               raw->flags &= ~RAW3215_TIMER_RUNS;
-                       }
-               } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
-                       /* delay small writes */
-                       raw->timer.expires = RAW3215_TIMEOUT + jiffies;
-                       add_timer(&raw->timer);
-                       raw->flags |= RAW3215_TIMER_RUNS;
                }
        }
+       if ((raw->queued_read || raw->queued_write) &&
+           !(raw->flags & RAW3215_WORKING) &&
+           !(raw->flags & RAW3215_TIMER_RUNS)) {
+               raw->timer.expires = RAW3215_TIMEOUT + jiffies;
+               add_timer(&raw->timer);
+               raw->flags |= RAW3215_TIMER_RUNS;
+       }
 }
 
 /*
@@ -1027,12 +1029,26 @@ static int tty3215_write(struct tty_struct * tty,
                         const unsigned char *buf, int count)
 {
        struct raw3215_info *raw;
+       int i, written;
 
        if (!tty)
                return 0;
        raw = (struct raw3215_info *) tty->driver_data;
-       raw3215_write(raw, buf, count);
-       return count;
+       written = count;
+       while (count > 0) {
+               for (i = 0; i < count; i++)
+                       if (buf[i] == '\t' || buf[i] == '\n')
+                               break;
+               raw3215_write(raw, buf, i);
+               count -= i;
+               buf += i;
+               if (count > 0) {
+                       raw3215_putchar(raw, *buf);
+                       count--;
+                       buf++;
+               }
+       }
+       return written;
 }
 
 /*
@@ -1180,7 +1196,7 @@ static int __init tty3215_init(void)
        driver->subtype = SYSTEM_TYPE_TTY;
        driver->init_termios = tty_std_termios;
        driver->init_termios.c_iflag = IGNBRK | IGNPAR;
-       driver->init_termios.c_oflag = ONLCR | XTABS;
+       driver->init_termios.c_oflag = ONLCR;
        driver->init_termios.c_lflag = ISIG;
        driver->flags = TTY_DRIVER_REAL_RAW;
        tty_set_operations(driver, &tty3215_ops);
index 9de41aa148965088c693815fe6ce165d5689414e..6f512fa4fa033eec9c4d7bd11565a362e601a01a 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/hrtimer.h>
 #include <linux/ktime.h>
 #include <asm/facility.h>
+#include <linux/crypto.h>
 
 #include "ap_bus.h"
 
index 0ff37a5e286cfa60bfb378ef758bbc8983eb953f..f7732f3b98042fd4f476d3159c11d6b397d2350d 100644 (file)
@@ -645,6 +645,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
        ipr_reinit_ipr_cmnd(ipr_cmd);
        ipr_cmd->u.scratch = 0;
        ipr_cmd->sibling = NULL;
+       ipr_cmd->eh_comp = NULL;
        ipr_cmd->fast_done = fast_done;
        init_timer(&ipr_cmd->timer);
 }
@@ -810,6 +811,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
 
        scsi_dma_unmap(ipr_cmd->scsi_cmd);
        scsi_cmd->scsi_done(scsi_cmd);
+       if (ipr_cmd->eh_comp)
+               complete(ipr_cmd->eh_comp);
        list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 }
 
@@ -4767,6 +4770,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
        return rc;
 }
 
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd:   ipr command struct
+ * @device:            device to match (sdev)
+ *
+ * Returns:
+ *     1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+       if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+               return 1;
+       return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ioa_cfg:   ioa config struct
+ * @device:            device to match (sdev)
+ * @match:             match function to use
+ *
+ * Returns:
+ *     SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+                           int (*match)(struct ipr_cmnd *, void *))
+{
+       struct ipr_cmnd *ipr_cmd;
+       int wait;
+       unsigned long flags;
+       struct ipr_hrr_queue *hrrq;
+       signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+       DECLARE_COMPLETION_ONSTACK(comp);
+
+       ENTER;
+       do {
+               wait = 0;
+
+               for_each_hrrq(hrrq, ioa_cfg) {
+                       spin_lock_irqsave(hrrq->lock, flags);
+                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                               if (match(ipr_cmd, device)) {
+                                       ipr_cmd->eh_comp = &comp;
+                                       wait++;
+                               }
+                       }
+                       spin_unlock_irqrestore(hrrq->lock, flags);
+               }
+
+               if (wait) {
+                       timeout = wait_for_completion_timeout(&comp, timeout);
+
+                       if (!timeout) {
+                               wait = 0;
+
+                               for_each_hrrq(hrrq, ioa_cfg) {
+                                       spin_lock_irqsave(hrrq->lock, flags);
+                                       list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+                                               if (match(ipr_cmd, device)) {
+                                                       ipr_cmd->eh_comp = NULL;
+                                                       wait++;
+                                               }
+                                       }
+                                       spin_unlock_irqrestore(hrrq->lock, flags);
+                               }
+
+                               if (wait)
+                                       dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+                               LEAVE;
+                               return wait ? FAILED : SUCCESS;
+                       }
+               }
+       } while (wait);
+
+       LEAVE;
+       return SUCCESS;
+}
+
 static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
 {
        struct ipr_ioa_cfg *ioa_cfg;
@@ -4985,11 +5066,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
 static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
 {
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
+
+       ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
 
        spin_lock_irq(cmd->device->host->host_lock);
        rc = __ipr_eh_dev_reset(cmd);
        spin_unlock_irq(cmd->device->host->host_lock);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
        return rc;
 }
 
@@ -5167,13 +5254,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
 {
        unsigned long flags;
        int rc;
+       struct ipr_ioa_cfg *ioa_cfg;
 
        ENTER;
 
+       ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
        spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
        rc = ipr_cancel_op(scsi_cmd);
        spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
 
+       if (rc == SUCCESS)
+               rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
        LEAVE;
        return rc;
 }
index 07a85ce4178287417648847c360034a89b431415..535f57328a72d182948c3bb8b34bdb4a64c01886 100644 (file)
@@ -1578,6 +1578,7 @@ struct ipr_cmnd {
                struct scsi_device *sdev;
        } u;
 
+       struct completion *eh_comp;
        struct ipr_hrr_queue *hrrq;
        struct ipr_ioa_cfg *ioa_cfg;
 };
index 87ca72d36d5b31ffb21ec9330580d2dbf97f713b..a8990783ba66bad4038210f8a2d6d7372bbcd261 100644 (file)
@@ -1439,13 +1439,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        if (ret == -EAGAIN) {
                /* no more space */
 
-               if (cmd_request->bounce_sgl_count) {
+               if (cmd_request->bounce_sgl_count)
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);
 
-                       ret = SCSI_MLQUEUE_DEVICE_BUSY;
-                       goto queue_error;
-               }
+               ret = SCSI_MLQUEUE_DEVICE_BUSY;
+               goto queue_error;
        }
 
        return 0;
index 0791c92e8c505cc9a35158bcdcdf0a7612ae92de..1389fefe8814105a857c0dcb24d5a64485080ca5 100644 (file)
@@ -222,7 +222,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
        iounmap(clk_reg);
 
        dws->num_cs = 16;
-       dws->fifo_len = 40;     /* FIFO has 40 words buffer */
 
 #ifdef CONFIG_SPI_DW_MID_DMA
        dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
index d26a2d195d217ca5f5c5181e68ec732ebae4f767..cc42ee5e19fba4a54ff41d3a7472de494f22b0e9 100644 (file)
@@ -393,8 +393,8 @@ static void giveback(struct driver_data *drv_data)
                        cs_deassert(drv_data);
        }
 
-       spi_finalize_current_message(drv_data->master);
        drv_data->cur_chip = NULL;
+       spi_finalize_current_message(drv_data->master);
 }
 
 static void reset_sccr1(struct driver_data *drv_data)
index f07d9f2a06f955759614bfa29cff41cca9f6142b..a817875bbf850bb2044b40f302ac6cf5ae9007f0 100644 (file)
@@ -8,25 +8,6 @@ config ANDROID
 
 if ANDROID
 
-config ANDROID_BINDER_IPC
-       bool "Android Binder IPC Driver"
-       default n
-       ---help---
-         Binder is used in Android for both communication between processes,
-         and remote method invocation.
-
-         This means one Android process can call a method/routine in another
-         Android process, using Binder to identify, invoke and pass arguments
-         between said processes.
-
-config ANDROID_BINDER_IPC_32BIT
-       bool
-        default y
-       depends on !64BIT && ANDROID_BINDER_IPC
-       ---help---
-         Enable to support an old 32-bit Android user-space. Breaks the new
-         Android user-space.
-
 config ASHMEM
        bool "Enable the Anonymous Shared Memory Subsystem"
        default n
index 907b62f562035affd91f4a27bead87f8cff6571e..c223ef72bb389e8df4303a731dc35e18af99c874 100644 (file)
@@ -3,7 +3,6 @@ ccflags-y += -I$(src)                   # needed for trace events
 obj-y                                  += ion/
 obj-$(CONFIG_FIQ_DEBUGGER)             += fiq_debugger/
 
-obj-$(CONFIG_ANDROID_BINDER_IPC)       += binder.o
 obj-$(CONFIG_ASHMEM)                   += ashmem.o
 obj-$(CONFIG_ANDROID_LOGGER)           += logger.o
 obj-$(CONFIG_ANDROID_TIMED_OUTPUT)     += timed_output.o
index 6dc27dac679d9133052b7aca2ad2ea06cf1d3486..053233c5dec17498dc5d1fac453b0b99f42525da 100644 (file)
@@ -326,6 +326,7 @@ static int alarm_release(struct inode *inode, struct file *file)
        if (file->private_data) {
                for (i = 0; i < ANDROID_ALARM_TYPE_COUNT; i++) {
                        uint32_t alarm_type_mask = 1U << i;
+
                        if (alarm_enabled & alarm_type_mask) {
                                alarm_dbg(INFO,
                                          "%s: clear alarm, pending %d\n",
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
deleted file mode 100644 (file)
index 9c821f3..0000000
+++ /dev/null
@@ -1,3611 +0,0 @@
-/* binder.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2007-2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <asm/cacheflush.h>
-#include <linux/fdtable.h>
-#include <linux/file.h>
-#include <linux/freezer.h>
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/rtmutex.h>
-#include <linux/mutex.h>
-#include <linux/nsproxy.h>
-#include <linux/poll.h>
-#include <linux/debugfs.h>
-#include <linux/rbtree.h>
-#include <linux/sched.h>
-#include <linux/seq_file.h>
-#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
-#include <linux/slab.h>
-#include <linux/pid_namespace.h>
-#include <linux/security.h>
-
-#include "binder.h"
-#include "binder_trace.h"
-
-static DEFINE_RT_MUTEX(binder_main_lock);
-static DEFINE_MUTEX(binder_deferred_lock);
-static DEFINE_MUTEX(binder_mmap_lock);
-
-static HLIST_HEAD(binder_procs);
-static HLIST_HEAD(binder_deferred_list);
-static HLIST_HEAD(binder_dead_nodes);
-
-static struct dentry *binder_debugfs_dir_entry_root;
-static struct dentry *binder_debugfs_dir_entry_proc;
-static struct binder_node *binder_context_mgr_node;
-static kuid_t binder_context_mgr_uid = INVALID_UID;
-static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;
-
-#define BINDER_DEBUG_ENTRY(name) \
-static int binder_##name##_open(struct inode *inode, struct file *file) \
-{ \
-       return single_open(file, binder_##name##_show, inode->i_private); \
-} \
-\
-static const struct file_operations binder_##name##_fops = { \
-       .owner = THIS_MODULE, \
-       .open = binder_##name##_open, \
-       .read = seq_read, \
-       .llseek = seq_lseek, \
-       .release = single_release, \
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused);
-BINDER_DEBUG_ENTRY(proc);
-
-/* This is only defined in include/asm-arm/sizes.h */
-#ifndef SZ_1K
-#define SZ_1K                               0x400
-#endif
-
-#ifndef SZ_4M
-#define SZ_4M                               0x400000
-#endif
-
-#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)
-
-#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
-
-enum {
-       BINDER_DEBUG_USER_ERROR             = 1U << 0,
-       BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
-       BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
-       BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
-       BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
-       BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
-       BINDER_DEBUG_READ_WRITE             = 1U << 6,
-       BINDER_DEBUG_USER_REFS              = 1U << 7,
-       BINDER_DEBUG_THREADS                = 1U << 8,
-       BINDER_DEBUG_TRANSACTION            = 1U << 9,
-       BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
-       BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
-       BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
-       BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
-       BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
-       BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
-};
-static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
-       BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
-module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
-
-static bool binder_debug_no_lock;
-module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
-
-static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
-static int binder_stop_on_user_error;
-
-static int binder_set_stop_on_user_error(const char *val,
-                                        struct kernel_param *kp)
-{
-       int ret;
-       ret = param_set_int(val, kp);
-       if (binder_stop_on_user_error < 2)
-               wake_up(&binder_user_error_wait);
-       return ret;
-}
-module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
-       param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
-
-#define binder_debug(mask, x...) \
-       do { \
-               if (binder_debug_mask & mask) \
-                       pr_info(x); \
-       } while (0)
-
-#define binder_user_error(x...) \
-       do { \
-               if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
-                       pr_info(x); \
-               if (binder_stop_on_user_error) \
-                       binder_stop_on_user_error = 2; \
-       } while (0)
-
-enum binder_stat_types {
-       BINDER_STAT_PROC,
-       BINDER_STAT_THREAD,
-       BINDER_STAT_NODE,
-       BINDER_STAT_REF,
-       BINDER_STAT_DEATH,
-       BINDER_STAT_TRANSACTION,
-       BINDER_STAT_TRANSACTION_COMPLETE,
-       BINDER_STAT_COUNT
-};
-
-struct binder_stats {
-       int br[_IOC_NR(BR_FAILED_REPLY) + 1];
-       int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
-       int obj_created[BINDER_STAT_COUNT];
-       int obj_deleted[BINDER_STAT_COUNT];
-};
-
-static struct binder_stats binder_stats;
-
-static inline void binder_stats_deleted(enum binder_stat_types type)
-{
-       binder_stats.obj_deleted[type]++;
-}
-
-static inline void binder_stats_created(enum binder_stat_types type)
-{
-       binder_stats.obj_created[type]++;
-}
-
-struct binder_transaction_log_entry {
-       int debug_id;
-       int call_type;
-       int from_proc;
-       int from_thread;
-       int target_handle;
-       int to_proc;
-       int to_thread;
-       int to_node;
-       int data_size;
-       int offsets_size;
-};
-struct binder_transaction_log {
-       int next;
-       int full;
-       struct binder_transaction_log_entry entry[32];
-};
-static struct binder_transaction_log binder_transaction_log;
-static struct binder_transaction_log binder_transaction_log_failed;
-
-static struct binder_transaction_log_entry *binder_transaction_log_add(
-       struct binder_transaction_log *log)
-{
-       struct binder_transaction_log_entry *e;
-       e = &log->entry[log->next];
-       memset(e, 0, sizeof(*e));
-       log->next++;
-       if (log->next == ARRAY_SIZE(log->entry)) {
-               log->next = 0;
-               log->full = 1;
-       }
-       return e;
-}
-
-struct binder_work {
-       struct list_head entry;
-       enum {
-               BINDER_WORK_TRANSACTION = 1,
-               BINDER_WORK_TRANSACTION_COMPLETE,
-               BINDER_WORK_NODE,
-               BINDER_WORK_DEAD_BINDER,
-               BINDER_WORK_DEAD_BINDER_AND_CLEAR,
-               BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
-       } type;
-};
-
-struct binder_node {
-       int debug_id;
-       struct binder_work work;
-       union {
-               struct rb_node rb_node;
-               struct hlist_node dead_node;
-       };
-       struct binder_proc *proc;
-       struct hlist_head refs;
-       int internal_strong_refs;
-       int local_weak_refs;
-       int local_strong_refs;
-       binder_uintptr_t ptr;
-       binder_uintptr_t cookie;
-       unsigned has_strong_ref:1;
-       unsigned pending_strong_ref:1;
-       unsigned has_weak_ref:1;
-       unsigned pending_weak_ref:1;
-       unsigned has_async_transaction:1;
-       unsigned accept_fds:1;
-       unsigned min_priority:8;
-       struct list_head async_todo;
-};
-
-struct binder_ref_death {
-       struct binder_work work;
-       binder_uintptr_t cookie;
-};
-
-struct binder_ref {
-       /* Lookups needed: */
-       /*   node + proc => ref (transaction) */
-       /*   desc + proc => ref (transaction, inc/dec ref) */
-       /*   node => refs + procs (proc exit) */
-       int debug_id;
-       struct rb_node rb_node_desc;
-       struct rb_node rb_node_node;
-       struct hlist_node node_entry;
-       struct binder_proc *proc;
-       struct binder_node *node;
-       uint32_t desc;
-       int strong;
-       int weak;
-       struct binder_ref_death *death;
-};
-
-struct binder_buffer {
-       struct list_head entry; /* free and allocated entries by address */
-       struct rb_node rb_node; /* free entry by size or allocated entry */
-                               /* by address */
-       unsigned free:1;
-       unsigned allow_user_free:1;
-       unsigned async_transaction:1;
-       unsigned debug_id:29;
-
-       struct binder_transaction *transaction;
-
-       struct binder_node *target_node;
-       size_t data_size;
-       size_t offsets_size;
-       uint8_t data[0];
-};
-
-enum binder_deferred_state {
-       BINDER_DEFERRED_PUT_FILES    = 0x01,
-       BINDER_DEFERRED_FLUSH        = 0x02,
-       BINDER_DEFERRED_RELEASE      = 0x04,
-};
-
-struct binder_proc {
-       struct hlist_node proc_node;
-       struct rb_root threads;
-       struct rb_root nodes;
-       struct rb_root refs_by_desc;
-       struct rb_root refs_by_node;
-       int pid;
-       struct vm_area_struct *vma;
-       struct mm_struct *vma_vm_mm;
-       struct task_struct *tsk;
-       struct files_struct *files;
-       struct hlist_node deferred_work_node;
-       int deferred_work;
-       void *buffer;
-       ptrdiff_t user_buffer_offset;
-
-       struct list_head buffers;
-       struct rb_root free_buffers;
-       struct rb_root allocated_buffers;
-       size_t free_async_space;
-
-       struct page **pages;
-       size_t buffer_size;
-       uint32_t buffer_free;
-       struct list_head todo;
-       wait_queue_head_t wait;
-       struct binder_stats stats;
-       struct list_head delivered_death;
-       int max_threads;
-       int requested_threads;
-       int requested_threads_started;
-       int ready_threads;
-       long default_priority;
-       struct dentry *debugfs_entry;
-};
-
-enum {
-       BINDER_LOOPER_STATE_REGISTERED  = 0x01,
-       BINDER_LOOPER_STATE_ENTERED     = 0x02,
-       BINDER_LOOPER_STATE_EXITED      = 0x04,
-       BINDER_LOOPER_STATE_INVALID     = 0x08,
-       BINDER_LOOPER_STATE_WAITING     = 0x10,
-       BINDER_LOOPER_STATE_NEED_RETURN = 0x20
-};
-
-struct binder_thread {
-       struct binder_proc *proc;
-       struct rb_node rb_node;
-       int pid;
-       int looper;
-       struct binder_transaction *transaction_stack;
-       struct list_head todo;
-       uint32_t return_error; /* Write failed, return error code in read buf */
-       uint32_t return_error2; /* Write failed, return error code in read */
-               /* buffer. Used when sending a reply to a dead process that */
-               /* we are also waiting on */
-       wait_queue_head_t wait;
-       struct binder_stats stats;
-};
-
-struct binder_transaction {
-       int debug_id;
-       struct binder_work work;
-       struct binder_thread *from;
-       struct binder_transaction *from_parent;
-       struct binder_proc *to_proc;
-       struct binder_thread *to_thread;
-       struct binder_transaction *to_parent;
-       unsigned need_reply:1;
-       /* unsigned is_dead:1; */       /* not used at the moment */
-
-       struct binder_buffer *buffer;
-       unsigned int    code;
-       unsigned int    flags;
-       long    priority;
-       long    saved_priority;
-       kuid_t  sender_euid;
-};
-
-static void
-binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
-
-static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
-{
-       struct files_struct *files = proc->files;
-       unsigned long rlim_cur;
-       unsigned long irqs;
-
-       if (files == NULL)
-               return -ESRCH;
-
-       if (!lock_task_sighand(proc->tsk, &irqs))
-               return -EMFILE;
-
-       rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
-       unlock_task_sighand(proc->tsk, &irqs);
-
-       return __alloc_fd(files, 0, rlim_cur, flags);
-}
-
-/*
- * copied from fd_install
- */
-static void task_fd_install(
-       struct binder_proc *proc, unsigned int fd, struct file *file)
-{
-       if (proc->files)
-               __fd_install(proc->files, fd, file);
-}
-
-/*
- * copied from sys_close
- */
-static long task_close_fd(struct binder_proc *proc, unsigned int fd)
-{
-       int retval;
-
-       if (proc->files == NULL)
-               return -ESRCH;
-
-       retval = __close_fd(proc->files, fd);
-       /* can't restart close syscall because file table entry was cleared */
-       if (unlikely(retval == -ERESTARTSYS ||
-                    retval == -ERESTARTNOINTR ||
-                    retval == -ERESTARTNOHAND ||
-                    retval == -ERESTART_RESTARTBLOCK))
-               retval = -EINTR;
-
-       return retval;
-}
-
-static inline void binder_lock(const char *tag)
-{
-       trace_binder_lock(tag);
-       rt_mutex_lock(&binder_main_lock);
-       trace_binder_locked(tag);
-}
-
-static inline void binder_unlock(const char *tag)
-{
-       trace_binder_unlock(tag);
-       rt_mutex_unlock(&binder_main_lock);
-}
-
-static void binder_set_nice(long nice)
-{
-       long min_nice;
-       if (can_nice(current, nice)) {
-               set_user_nice(current, nice);
-               return;
-       }
-       min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
-       binder_debug(BINDER_DEBUG_PRIORITY_CAP,
-                    "%d: nice value %ld not allowed use %ld instead\n",
-                     current->pid, nice, min_nice);
-       set_user_nice(current, min_nice);
-       if (min_nice < 20)
-               return;
-       binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
-}
-
-static size_t binder_buffer_size(struct binder_proc *proc,
-                                struct binder_buffer *buffer)
-{
-       if (list_is_last(&buffer->entry, &proc->buffers))
-               return proc->buffer + proc->buffer_size - (void *)buffer->data;
-       else
-               return (size_t)list_entry(buffer->entry.next,
-                       struct binder_buffer, entry) - (size_t)buffer->data;
-}
-
-static void binder_insert_free_buffer(struct binder_proc *proc,
-                                     struct binder_buffer *new_buffer)
-{
-       struct rb_node **p = &proc->free_buffers.rb_node;
-       struct rb_node *parent = NULL;
-       struct binder_buffer *buffer;
-       size_t buffer_size;
-       size_t new_buffer_size;
-
-       BUG_ON(!new_buffer->free);
-
-       new_buffer_size = binder_buffer_size(proc, new_buffer);
-
-       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: add free buffer, size %zd, at %p\n",
-                     proc->pid, new_buffer_size, new_buffer);
-
-       while (*p) {
-               parent = *p;
-               buffer = rb_entry(parent, struct binder_buffer, rb_node);
-               BUG_ON(!buffer->free);
-
-               buffer_size = binder_buffer_size(proc, buffer);
-
-               if (new_buffer_size < buffer_size)
-                       p = &parent->rb_left;
-               else
-                       p = &parent->rb_right;
-       }
-       rb_link_node(&new_buffer->rb_node, parent, p);
-       rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
-}
-
-static void binder_insert_allocated_buffer(struct binder_proc *proc,
-                                          struct binder_buffer *new_buffer)
-{
-       struct rb_node **p = &proc->allocated_buffers.rb_node;
-       struct rb_node *parent = NULL;
-       struct binder_buffer *buffer;
-
-       BUG_ON(new_buffer->free);
-
-       while (*p) {
-               parent = *p;
-               buffer = rb_entry(parent, struct binder_buffer, rb_node);
-               BUG_ON(buffer->free);
-
-               if (new_buffer < buffer)
-                       p = &parent->rb_left;
-               else if (new_buffer > buffer)
-                       p = &parent->rb_right;
-               else
-                       BUG();
-       }
-       rb_link_node(&new_buffer->rb_node, parent, p);
-       rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
-}
-
-static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
-                                                 uintptr_t user_ptr)
-{
-       struct rb_node *n = proc->allocated_buffers.rb_node;
-       struct binder_buffer *buffer;
-       struct binder_buffer *kern_ptr;
-
-       kern_ptr = (struct binder_buffer *)(user_ptr - proc->user_buffer_offset
-               - offsetof(struct binder_buffer, data));
-
-       while (n) {
-               buffer = rb_entry(n, struct binder_buffer, rb_node);
-               BUG_ON(buffer->free);
-
-               if (kern_ptr < buffer)
-                       n = n->rb_left;
-               else if (kern_ptr > buffer)
-                       n = n->rb_right;
-               else
-                       return buffer;
-       }
-       return NULL;
-}
-
-static int binder_update_page_range(struct binder_proc *proc, int allocate,
-                                   void *start, void *end,
-                                   struct vm_area_struct *vma)
-{
-       void *page_addr;
-       unsigned long user_page_addr;
-       struct vm_struct tmp_area;
-       struct page **page;
-       struct mm_struct *mm;
-
-       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: %s pages %p-%p\n", proc->pid,
-                    allocate ? "allocate" : "free", start, end);
-
-       if (end <= start)
-               return 0;
-
-       trace_binder_update_page_range(proc, allocate, start, end);
-
-       if (vma)
-               mm = NULL;
-       else
-               mm = get_task_mm(proc->tsk);
-
-       if (mm) {
-               down_write(&mm->mmap_sem);
-               vma = proc->vma;
-               if (vma && mm != proc->vma_vm_mm) {
-                       pr_err("%d: vma mm and task mm mismatch\n",
-                               proc->pid);
-                       vma = NULL;
-               }
-       }
-
-       if (allocate == 0)
-               goto free_range;
-
-       if (vma == NULL) {
-               pr_err("%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
-                       proc->pid);
-               goto err_no_vma;
-       }
-
-       for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
-               int ret;
-               struct page **page_array_ptr;
-               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-
-               BUG_ON(*page);
-               *page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
-               if (*page == NULL) {
-                       pr_err("%d: binder_alloc_buf failed for page at %p\n",
-                               proc->pid, page_addr);
-                       goto err_alloc_page_failed;
-               }
-               tmp_area.addr = page_addr;
-               tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
-               page_array_ptr = page;
-               ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
-               if (ret) {
-                       pr_err("%d: binder_alloc_buf failed to map page at %p in kernel\n",
-                              proc->pid, page_addr);
-                       goto err_map_kernel_failed;
-               }
-               user_page_addr =
-                       (uintptr_t)page_addr + proc->user_buffer_offset;
-               ret = vm_insert_page(vma, user_page_addr, page[0]);
-               if (ret) {
-                       pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
-                              proc->pid, user_page_addr);
-                       goto err_vm_insert_page_failed;
-               }
-               /* vm_insert_page does not seem to increment the refcount */
-       }
-       if (mm) {
-               up_write(&mm->mmap_sem);
-               mmput(mm);
-       }
-       return 0;
-
-free_range:
-       for (page_addr = end - PAGE_SIZE; page_addr >= start;
-            page_addr -= PAGE_SIZE) {
-               page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
-               if (vma)
-                       zap_page_range(vma, (uintptr_t)page_addr +
-                               proc->user_buffer_offset, PAGE_SIZE, NULL);
-err_vm_insert_page_failed:
-               unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-err_map_kernel_failed:
-               __free_page(*page);
-               *page = NULL;
-err_alloc_page_failed:
-               ;
-       }
-err_no_vma:
-       if (mm) {
-               up_write(&mm->mmap_sem);
-               mmput(mm);
-       }
-       return -ENOMEM;
-}
-
-static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
-                                             size_t data_size,
-                                             size_t offsets_size, int is_async)
-{
-       struct rb_node *n = proc->free_buffers.rb_node;
-       struct binder_buffer *buffer;
-       size_t buffer_size;
-       struct rb_node *best_fit = NULL;
-       void *has_page_addr;
-       void *end_page_addr;
-       size_t size;
-
-       if (proc->vma == NULL) {
-               pr_err("%d: binder_alloc_buf, no vma\n",
-                      proc->pid);
-               return NULL;
-       }
-
-       size = ALIGN(data_size, sizeof(void *)) +
-               ALIGN(offsets_size, sizeof(void *));
-
-       if (size < data_size || size < offsets_size) {
-               binder_user_error("%d: got transaction with invalid size %zd-%zd\n",
-                               proc->pid, data_size, offsets_size);
-               return NULL;
-       }
-
-       if (is_async &&
-           proc->free_async_space < size + sizeof(struct binder_buffer)) {
-               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                            "%d: binder_alloc_buf size %zd failed, no async space left\n",
-                             proc->pid, size);
-               return NULL;
-       }
-
-       while (n) {
-               buffer = rb_entry(n, struct binder_buffer, rb_node);
-               BUG_ON(!buffer->free);
-               buffer_size = binder_buffer_size(proc, buffer);
-
-               if (size < buffer_size) {
-                       best_fit = n;
-                       n = n->rb_left;
-               } else if (size > buffer_size)
-                       n = n->rb_right;
-               else {
-                       best_fit = n;
-                       break;
-               }
-       }
-       if (best_fit == NULL) {
-               pr_err("%d: binder_alloc_buf size %zd failed, no address space\n",
-                       proc->pid, size);
-               return NULL;
-       }
-       if (n == NULL) {
-               buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
-               buffer_size = binder_buffer_size(proc, buffer);
-       }
-
-       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: binder_alloc_buf size %zd got buffer %p size %zd\n",
-                     proc->pid, size, buffer, buffer_size);
-
-       has_page_addr =
-               (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
-       if (n == NULL) {
-               if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
-                       buffer_size = size; /* no room for other buffers */
-               else
-                       buffer_size = size + sizeof(struct binder_buffer);
-       }
-       end_page_addr =
-               (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
-       if (end_page_addr > has_page_addr)
-               end_page_addr = has_page_addr;
-       if (binder_update_page_range(proc, 1,
-           (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
-               return NULL;
-
-       rb_erase(best_fit, &proc->free_buffers);
-       buffer->free = 0;
-       binder_insert_allocated_buffer(proc, buffer);
-       if (buffer_size != size) {
-               struct binder_buffer *new_buffer = (void *)buffer->data + size;
-               list_add(&new_buffer->entry, &buffer->entry);
-               new_buffer->free = 1;
-               binder_insert_free_buffer(proc, new_buffer);
-       }
-       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                    "%d: binder_alloc_buf size %zd got %p\n",
-                     proc->pid, size, buffer);
-       buffer->data_size = data_size;
-       buffer->offsets_size = offsets_size;
-       buffer->async_transaction = is_async;
-       if (is_async) {
-               proc->free_async_space -= size + sizeof(struct binder_buffer);
-               binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
-                            "%d: binder_alloc_buf size %zd async free %zd\n",
-                             proc->pid, size, proc->free_async_space);
-       }
-
-       return buffer;
-}
-
-static void *buffer_start_page(struct binder_buffer *buffer)
-{
-       return (void *)((uintptr_t)buffer & PAGE_MASK);
-}
-
-static void *buffer_end_page(struct binder_buffer *buffer)
-{
-       return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
-}
-
-static void binder_delete_free_buffer(struct binder_proc *proc,
-                                     struct binder_buffer *buffer)
-{
-       struct binder_buffer *prev, *next = NULL;
-       int free_page_end = 1;
-       int free_page_start = 1;
-
-       BUG_ON(proc->buffers.next == &buffer->entry);
-       prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
-       BUG_ON(!prev->free);
-       if (buffer_end_page(prev) == buffer_start_page(buffer)) {
-               free_page_start = 0;
-               if (buffer_end_page(prev) == buffer_end_page(buffer))
-                       free_page_end = 0;
-               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                            "%d: merge free, buffer %p share page with %p\n",
-                             proc->pid, buffer, prev);
-       }
-
-       if (!list_is_last(&buffer->entry, &proc->buffers)) {
-               next = list_entry(buffer->entry.next,
-                                 struct binder_buffer, entry);
-               if (buffer_start_page(next) == buffer_end_page(buffer)) {
-                       free_page_end = 0;
-                       if (buffer_start_page(next) ==
-                           buffer_start_page(buffer))
-                               free_page_start = 0;
-                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                    "%d: merge free, buffer %p share page with %p\n",
-                                     proc->pid, buffer, prev);
-               }
-       }
-       list_del(&buffer->entry);
-       if (free_page_start || free_page_end) {
-               binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                            "%d: merge free, buffer %p do not share page%s%s with %p or %p\n",
-                            proc->pid, buffer, free_page_start ? "" : " end",
-                            free_page_end ? "" : " start", prev, next);
-               binder_update_page_range(proc, 0, free_page_start ?
-                       buffer_start_page(buffer) : buffer_end_page(buffer),
-                       (free_page_end ? buffer_end_page(buffer) :
-                       buffer_start_page(buffer)) + PAGE_SIZE, NULL);
-       }
-}
-
/*
 * binder_free_buf() - return @buffer to @proc's free pool.
 *
 * Releases the physical pages that lie entirely inside the buffer,
 * moves the buffer from the allocated rb-tree to the free rb-tree and
 * merges it with a free neighbour on either side so the allocator can
 * hand out larger contiguous buffers again.
 */
static void binder_free_buf(struct binder_proc *proc,
			    struct binder_buffer *buffer)
{
	size_t size, buffer_size;

	buffer_size = binder_buffer_size(proc, buffer);

	/* payload actually consumed: data + offsets, pointer-aligned */
	size = ALIGN(buffer->data_size, sizeof(void *)) +
		ALIGN(buffer->offsets_size, sizeof(void *));

	binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
		     "%d: binder_free_buf %p size %zd buffer_size %zd\n",
		      proc->pid, buffer, size, buffer_size);

	/* sanity: must be an in-use buffer inside this proc's mmap area */
	BUG_ON(buffer->free);
	BUG_ON(size > buffer_size);
	BUG_ON(buffer->transaction != NULL);
	BUG_ON((void *)buffer < proc->buffer);
	BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);

	if (buffer->async_transaction) {
		/* async buffers are charged to a separate quota */
		proc->free_async_space += size + sizeof(struct binder_buffer);

		binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
			     "%d: binder_free_buf size %zd async free %zd\n",
			      proc->pid, size, proc->free_async_space);
	}

	/* free only the pages fully covered by this buffer's data area */
	binder_update_page_range(proc, 0,
		(void *)PAGE_ALIGN((uintptr_t)buffer->data),
		(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
		NULL);
	rb_erase(&buffer->rb_node, &proc->allocated_buffers);
	buffer->free = 1;
	/* coalesce with the following buffer if it is free */
	if (!list_is_last(&buffer->entry, &proc->buffers)) {
		struct binder_buffer *next = list_entry(buffer->entry.next,
						struct binder_buffer, entry);
		if (next->free) {
			rb_erase(&next->rb_node, &proc->free_buffers);
			binder_delete_free_buffer(proc, next);
		}
	}
	/* coalesce with the preceding buffer; @prev then absorbs @buffer */
	if (proc->buffers.next != &buffer->entry) {
		struct binder_buffer *prev = list_entry(buffer->entry.prev,
						struct binder_buffer, entry);
		if (prev->free) {
			binder_delete_free_buffer(proc, buffer);
			rb_erase(&prev->rb_node, &proc->free_buffers);
			buffer = prev;
		}
	}
	binder_insert_free_buffer(proc, buffer);
}
-
-static struct binder_node *binder_get_node(struct binder_proc *proc,
-                                          binder_uintptr_t ptr)
-{
-       struct rb_node *n = proc->nodes.rb_node;
-       struct binder_node *node;
-
-       while (n) {
-               node = rb_entry(n, struct binder_node, rb_node);
-
-               if (ptr < node->ptr)
-                       n = n->rb_left;
-               else if (ptr > node->ptr)
-                       n = n->rb_right;
-               else
-                       return node;
-       }
-       return NULL;
-}
-
-static struct binder_node *binder_new_node(struct binder_proc *proc,
-                                          binder_uintptr_t ptr,
-                                          binder_uintptr_t cookie)
-{
-       struct rb_node **p = &proc->nodes.rb_node;
-       struct rb_node *parent = NULL;
-       struct binder_node *node;
-
-       while (*p) {
-               parent = *p;
-               node = rb_entry(parent, struct binder_node, rb_node);
-
-               if (ptr < node->ptr)
-                       p = &(*p)->rb_left;
-               else if (ptr > node->ptr)
-                       p = &(*p)->rb_right;
-               else
-                       return NULL;
-       }
-
-       node = kzalloc(sizeof(*node), GFP_KERNEL);
-       if (node == NULL)
-               return NULL;
-       binder_stats_created(BINDER_STAT_NODE);
-       rb_link_node(&node->rb_node, parent, p);
-       rb_insert_color(&node->rb_node, &proc->nodes);
-       node->debug_id = ++binder_last_id;
-       node->proc = proc;
-       node->ptr = ptr;
-       node->cookie = cookie;
-       node->work.type = BINDER_WORK_NODE;
-       INIT_LIST_HEAD(&node->work.entry);
-       INIT_LIST_HEAD(&node->async_todo);
-       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                    "%d:%d node %d u%016llx c%016llx created\n",
-                    proc->pid, current->pid, node->debug_id,
-                    (u64)node->ptr, (u64)node->cookie);
-       return node;
-}
-
/*
 * Take one reference on @node.
 *
 * @strong:      non-zero for a strong ref, zero for a weak ref
 * @internal:    non-zero when the ref is held on behalf of a remote
 *               binder_ref, zero for a ref held locally by the owner
 * @target_list: work list to queue the node's work item on when the
 *               owner must be notified of the new ref state
 *
 * Returns 0 on success or -EINVAL when a ref is requested in a state
 * that requires a target list but none was supplied.
 */
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	if (strong) {
		if (internal) {
			/*
			 * The first internal strong ref must be announced
			 * via @target_list, unless the node already holds
			 * a strong ref -- the context manager node is the
			 * pre-acknowledged exception.
			 */
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node == binder_context_mgr_node &&
			    node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			/* requeue the node work so the owner sees the ref */
			list_del_init(&node->work.entry);
			list_add_tail(&node->work.entry, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			/* first weak ref must be delivered somewhere */
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
					node->debug_id);
				return -EINVAL;
			}
			list_add_tail(&node->work.entry, target_list);
		}
	}
	return 0;
}
-
/*
 * Drop one strong or weak reference on @node (see binder_inc_node() for
 * the meaning of @strong/@internal).  When the last reference of a
 * class goes away, either queue work on the owning process so the drop
 * can be delivered, or -- once nothing references the node at all --
 * unlink and free it.  Always returns 0.
 */
static int binder_dec_node(struct binder_node *node, int strong, int internal)
{
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		/* still strongly referenced: nothing more to do */
		if (node->local_strong_refs || node->internal_strong_refs)
			return 0;
	} else {
		if (!internal)
			node->local_weak_refs--;
		/* still weakly referenced or tracked by refs: keep it */
		if (node->local_weak_refs || !hlist_empty(&node->refs))
			return 0;
	}
	if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
		/*
		 * The owner still holds the node: queue it on the owning
		 * proc's todo list so the ref drop can be delivered.
		 */
		if (list_empty(&node->work.entry)) {
			list_add_tail(&node->work.entry, &node->proc->todo);
			wake_up_interruptible(&node->proc->wait);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs) {
			/* completely unreferenced: unlink and free */
			list_del_init(&node->work.entry);
			if (node->proc) {
				rb_erase(&node->rb_node, &node->proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				/* owner already gone; node was on dead list */
				hlist_del(&node->dead_node);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			kfree(node);
			binder_stats_deleted(BINDER_STAT_NODE);
		}
	}

	return 0;
}
-
-
-static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-                                        uint32_t desc)
-{
-       struct rb_node *n = proc->refs_by_desc.rb_node;
-       struct binder_ref *ref;
-
-       while (n) {
-               ref = rb_entry(n, struct binder_ref, rb_node_desc);
-
-               if (desc < ref->desc)
-                       n = n->rb_left;
-               else if (desc > ref->desc)
-                       n = n->rb_right;
-               else
-                       return ref;
-       }
-       return NULL;
-}
-
/*
 * Look up -- or create -- the binder_ref that @proc holds on @node.
 *
 * Refs live in two rb-trees: refs_by_node (keyed by node pointer) and
 * refs_by_desc (keyed by the user-visible descriptor).  A new ref gets
 * the lowest unused descriptor; descriptor 0 is reserved for the
 * context manager node.  Returns NULL on allocation failure.
 */
static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
						  struct binder_node *node)
{
	struct rb_node *n;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref, *new_ref;

	/* fast path: an existing ref on this node */
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (new_ref == NULL)
		return NULL;
	binder_stats_created(BINDER_STAT_REF);
	new_ref->debug_id = ++binder_last_id;
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	/*
	 * Pick the smallest free descriptor: walk refs_by_desc in
	 * ascending order and stop at the first gap.
	 */
	new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->desc > new_ref->desc)
			break;
		new_ref->desc = ref->desc + 1;
	}

	/* insert into the desc tree; desc is unique by construction */
	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->desc < ref->desc)
			p = &(*p)->rb_left;
		else if (new_ref->desc > ref->desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
	if (node) {
		/* let the node track who references it */
		hlist_add_head(&new_ref->node_entry, &node->refs);

		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for node %d\n",
			      proc->pid, new_ref->debug_id, new_ref->desc,
			      node->debug_id);
	} else {
		binder_debug(BINDER_DEBUG_INTERNAL_REFS,
			     "%d new ref %d desc %d for dead node\n",
			      proc->pid, new_ref->debug_id, new_ref->desc);
	}
	return new_ref;
}
-
/*
 * Tear down @ref: unlink it from both rb-trees and from its node's ref
 * list, release the node references it held, cancel a pending death
 * notification, and free the memory.
 */
static void binder_delete_ref(struct binder_ref *ref)
{
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->debug_id, ref->desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
	/* drop the strong node ref this ref was holding, if any */
	if (ref->strong)
		binder_dec_node(ref->node, 1, 1);
	hlist_del(&ref->node_entry);
	/* weak/internal dec: may free the node once nothing references it */
	binder_dec_node(ref->node, 0, 1);
	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->debug_id, ref->desc);
		list_del(&ref->death->work.entry);
		kfree(ref->death);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	kfree(ref);
	binder_stats_deleted(BINDER_STAT_REF);
}
-
-static int binder_inc_ref(struct binder_ref *ref, int strong,
-                         struct list_head *target_list)
-{
-       int ret;
-       if (strong) {
-               if (ref->strong == 0) {
-                       ret = binder_inc_node(ref->node, 1, 1, target_list);
-                       if (ret)
-                               return ret;
-               }
-               ref->strong++;
-       } else {
-               if (ref->weak == 0) {
-                       ret = binder_inc_node(ref->node, 0, 1, target_list);
-                       if (ret)
-                               return ret;
-               }
-               ref->weak++;
-       }
-       return 0;
-}
-
-
-static int binder_dec_ref(struct binder_ref *ref, int strong)
-{
-       if (strong) {
-               if (ref->strong == 0) {
-                       binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
-                                         ref->proc->pid, ref->debug_id,
-                                         ref->desc, ref->strong, ref->weak);
-                       return -EINVAL;
-               }
-               ref->strong--;
-               if (ref->strong == 0) {
-                       int ret;
-                       ret = binder_dec_node(ref->node, strong, 1);
-                       if (ret)
-                               return ret;
-               }
-       } else {
-               if (ref->weak == 0) {
-                       binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
-                                         ref->proc->pid, ref->debug_id,
-                                         ref->desc, ref->strong, ref->weak);
-                       return -EINVAL;
-               }
-               ref->weak--;
-       }
-       if (ref->strong == 0 && ref->weak == 0)
-               binder_delete_ref(ref);
-       return 0;
-}
-
-static void binder_pop_transaction(struct binder_thread *target_thread,
-                                  struct binder_transaction *t)
-{
-       if (target_thread) {
-               BUG_ON(target_thread->transaction_stack != t);
-               BUG_ON(target_thread->transaction_stack->from != target_thread);
-               target_thread->transaction_stack =
-                       target_thread->transaction_stack->from_parent;
-               t->from = NULL;
-       }
-       t->need_reply = 0;
-       if (t->buffer)
-               t->buffer->transaction = NULL;
-       kfree(t);
-       binder_stats_deleted(BINDER_STAT_TRANSACTION);
-}
-
/*
 * Deliver @error_code to whoever is waiting for a reply to @t.
 *
 * Walks up the from_parent chain: if the originating thread is still
 * alive its return_error is set and it is woken; otherwise the failed
 * transaction is popped and the walk continues with its parent until a
 * live sender or the root of the chain is reached.
 */
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = t->from;
		if (target_thread) {
			/*
			 * Preserve an already-pending error by shifting
			 * it into the secondary slot before storing ours.
			 */
			if (target_thread->return_error != BR_OK &&
			   target_thread->return_error2 == BR_OK) {
				target_thread->return_error2 =
					target_thread->return_error;
				target_thread->return_error = BR_OK;
			}
			if (target_thread->return_error == BR_OK) {
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
					     "send failed reply for transaction %d to %d:%d\n",
					      t->debug_id, target_thread->proc->pid,
					      target_thread->pid);

				binder_pop_transaction(target_thread, t);
				target_thread->return_error = error_code;
				wake_up_interruptible(&target_thread->wait);
			} else {
				/* both error slots busy: drop this report */
				pr_err("reply failed, target thread, %d:%d, has error code %d already\n",
					target_thread->proc->pid,
					target_thread->pid,
					target_thread->return_error);
			}
			return;
		} else {
			/* sender is gone: discard @t and retry its parent */
			struct binder_transaction *next = t->from_parent;

			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d, target dead\n",
				     t->debug_id);

			binder_pop_transaction(target_thread, t);
			if (next == NULL) {
				binder_debug(BINDER_DEBUG_DEAD_BINDER,
					     "reply failed, no target thread at root\n");
				return;
			}
			t = next;
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread -- retry %d\n",
				      t->debug_id);
		}
	}
}
-
/*
 * Undo the reference/fd bookkeeping carried by a transaction buffer.
 *
 * Walks the flat_binder_object offsets in @buffer and drops the node
 * or ref counts taken when the transaction was built.  @failed_at
 * limits the walk when a transaction was only partially translated
 * (NULL means release everything); it also gates closing of fds.
 */
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_buffer *buffer,
					      binder_size_t *failed_at)
{
	binder_size_t *offp, *off_end;
	int debug_id = buffer->debug_id;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %p\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size, failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	/* the offsets array lives right after the (aligned) data payload */
	offp = (binder_size_t *)(buffer->data +
				 ALIGN(buffer->data_size, sizeof(void *)));
	if (failed_at)
		off_end = failed_at;
	else
		off_end = (void *)offp + buffer->offsets_size;
	for (; offp < off_end; offp++) {
		struct flat_binder_object *fp;
		/* validate the offset before trusting the object there */
		if (*offp > buffer->data_size - sizeof(*fp) ||
		    buffer->data_size < sizeof(*fp) ||
		    !IS_ALIGNED(*offp, sizeof(u32))) {
			pr_err("transaction release %d bad offset %lld, size %zd\n",
			       debug_id, (u64)*offp, buffer->data_size);
			continue;
		}
		fp = (struct flat_binder_object *)(buffer->data + *offp);
		switch (fp->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct binder_node *node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
			if (ref == NULL) {
				pr_err("transaction release %d bad handle %d\n",
				 debug_id, fp->handle);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d (node %d)\n",
				     ref->debug_id, ref->desc, ref->node->debug_id);
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
		} break;

		case BINDER_TYPE_FD:
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        fd %d\n", fp->handle);
			/* fds are only closed when unwinding a failure */
			if (failed_at)
				task_close_fd(proc, fp->handle);
			break;

		default:
			pr_err("transaction release %d bad object type %x\n",
				debug_id, fp->type);
			break;
		}
	}
}
-
-static void binder_transaction(struct binder_proc *proc,
-                              struct binder_thread *thread,
-                              struct binder_transaction_data *tr, int reply)
-{
-       struct binder_transaction *t;
-       struct binder_work *tcomplete;
-       binder_size_t *offp, *off_end;
-       binder_size_t off_min;
-       struct binder_proc *target_proc;
-       struct binder_thread *target_thread = NULL;
-       struct binder_node *target_node = NULL;
-       struct list_head *target_list;
-       wait_queue_head_t *target_wait;
-       struct binder_transaction *in_reply_to = NULL;
-       struct binder_transaction_log_entry *e;
-       uint32_t return_error;
-
-       e = binder_transaction_log_add(&binder_transaction_log);
-       e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
-       e->from_proc = proc->pid;
-       e->from_thread = thread->pid;
-       e->target_handle = tr->target.handle;
-       e->data_size = tr->data_size;
-       e->offsets_size = tr->offsets_size;
-
-       if (reply) {
-               in_reply_to = thread->transaction_stack;
-               if (in_reply_to == NULL) {
-                       binder_user_error("%d:%d got reply transaction with no transaction stack\n",
-                                         proc->pid, thread->pid);
-                       return_error = BR_FAILED_REPLY;
-                       goto err_empty_call_stack;
-               }
-               binder_set_nice(in_reply_to->saved_priority);
-               if (in_reply_to->to_thread != thread) {
-                       binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
-                               proc->pid, thread->pid, in_reply_to->debug_id,
-                               in_reply_to->to_proc ?
-                               in_reply_to->to_proc->pid : 0,
-                               in_reply_to->to_thread ?
-                               in_reply_to->to_thread->pid : 0);
-                       return_error = BR_FAILED_REPLY;
-                       in_reply_to = NULL;
-                       goto err_bad_call_stack;
-               }
-               thread->transaction_stack = in_reply_to->to_parent;
-               target_thread = in_reply_to->from;
-               if (target_thread == NULL) {
-                       return_error = BR_DEAD_REPLY;
-                       goto err_dead_binder;
-               }
-               if (target_thread->transaction_stack != in_reply_to) {
-                       binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
-                               proc->pid, thread->pid,
-                               target_thread->transaction_stack ?
-                               target_thread->transaction_stack->debug_id : 0,
-                               in_reply_to->debug_id);
-                       return_error = BR_FAILED_REPLY;
-                       in_reply_to = NULL;
-                       target_thread = NULL;
-                       goto err_dead_binder;
-               }
-               target_proc = target_thread->proc;
-       } else {
-               if (tr->target.handle) {
-                       struct binder_ref *ref;
-                       ref = binder_get_ref(proc, tr->target.handle);
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d got transaction to invalid handle\n",
-                                       proc->pid, thread->pid);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_invalid_target_handle;
-                       }
-                       target_node = ref->node;
-               } else {
-                       target_node = binder_context_mgr_node;
-                       if (target_node == NULL) {
-                               return_error = BR_DEAD_REPLY;
-                               goto err_no_context_mgr_node;
-                       }
-               }
-               e->to_node = target_node->debug_id;
-               target_proc = target_node->proc;
-               if (target_proc == NULL) {
-                       return_error = BR_DEAD_REPLY;
-                       goto err_dead_binder;
-               }
-               if (security_binder_transaction(proc->tsk, target_proc->tsk) < 0) {
-                       return_error = BR_FAILED_REPLY;
-                       goto err_invalid_target_handle;
-               }
-               if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
-                       struct binder_transaction *tmp;
-                       tmp = thread->transaction_stack;
-                       if (tmp->to_thread != thread) {
-                               binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
-                                       proc->pid, thread->pid, tmp->debug_id,
-                                       tmp->to_proc ? tmp->to_proc->pid : 0,
-                                       tmp->to_thread ?
-                                       tmp->to_thread->pid : 0);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_bad_call_stack;
-                       }
-                       while (tmp) {
-                               if (tmp->from && tmp->from->proc == target_proc)
-                                       target_thread = tmp->from;
-                               tmp = tmp->from_parent;
-                       }
-               }
-       }
-       if (target_thread) {
-               e->to_thread = target_thread->pid;
-               target_list = &target_thread->todo;
-               target_wait = &target_thread->wait;
-       } else {
-               target_list = &target_proc->todo;
-               target_wait = &target_proc->wait;
-       }
-       e->to_proc = target_proc->pid;
-
-       /* TODO: reuse incoming transaction for reply */
-       t = kzalloc(sizeof(*t), GFP_KERNEL);
-       if (t == NULL) {
-               return_error = BR_FAILED_REPLY;
-               goto err_alloc_t_failed;
-       }
-       binder_stats_created(BINDER_STAT_TRANSACTION);
-
-       tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
-       if (tcomplete == NULL) {
-               return_error = BR_FAILED_REPLY;
-               goto err_alloc_tcomplete_failed;
-       }
-       binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
-
-       t->debug_id = ++binder_last_id;
-       e->debug_id = t->debug_id;
-
-       if (reply)
-               binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld\n",
-                            proc->pid, thread->pid, t->debug_id,
-                            target_proc->pid, target_thread->pid,
-                            (u64)tr->data.ptr.buffer,
-                            (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
-       else
-               binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld\n",
-                            proc->pid, thread->pid, t->debug_id,
-                            target_proc->pid, target_node->debug_id,
-                            (u64)tr->data.ptr.buffer,
-                            (u64)tr->data.ptr.offsets,
-                            (u64)tr->data_size, (u64)tr->offsets_size);
-
-       if (!reply && !(tr->flags & TF_ONE_WAY))
-               t->from = thread;
-       else
-               t->from = NULL;
-       t->sender_euid = proc->tsk->cred->euid;
-       t->to_proc = target_proc;
-       t->to_thread = target_thread;
-       t->code = tr->code;
-       t->flags = tr->flags;
-       t->priority = task_nice(current);
-
-       trace_binder_transaction(reply, t, target_node);
-
-       t->buffer = binder_alloc_buf(target_proc, tr->data_size,
-               tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
-       if (t->buffer == NULL) {
-               return_error = BR_FAILED_REPLY;
-               goto err_binder_alloc_buf_failed;
-       }
-       t->buffer->allow_user_free = 0;
-       t->buffer->debug_id = t->debug_id;
-       t->buffer->transaction = t;
-       t->buffer->target_node = target_node;
-       trace_binder_transaction_alloc_buf(t->buffer);
-       if (target_node)
-               binder_inc_node(target_node, 1, 0, NULL);
-
-       offp = (binder_size_t *)(t->buffer->data +
-                                ALIGN(tr->data_size, sizeof(void *)));
-
-       if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
-                          tr->data.ptr.buffer, tr->data_size)) {
-               binder_user_error("%d:%d got transaction with invalid data ptr\n",
-                               proc->pid, thread->pid);
-               return_error = BR_FAILED_REPLY;
-               goto err_copy_data_failed;
-       }
-       if (copy_from_user(offp, (const void __user *)(uintptr_t)
-                          tr->data.ptr.offsets, tr->offsets_size)) {
-               binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
-                               proc->pid, thread->pid);
-               return_error = BR_FAILED_REPLY;
-               goto err_copy_data_failed;
-       }
-       if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
-               binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
-                               proc->pid, thread->pid, (u64)tr->offsets_size);
-               return_error = BR_FAILED_REPLY;
-               goto err_bad_offset;
-       }
-       off_end = (void *)offp + tr->offsets_size;
-       off_min = 0;
-       for (; offp < off_end; offp++) {
-               struct flat_binder_object *fp;
-               if (*offp > t->buffer->data_size - sizeof(*fp) ||
-                   *offp < off_min ||
-                   t->buffer->data_size < sizeof(*fp) ||
-                   !IS_ALIGNED(*offp, sizeof(u32))) {
-                       binder_user_error("%d:%d got transaction with invalid offset, %lld (min %lld, max %lld)\n",
-                                         proc->pid, thread->pid, (u64)*offp,
-                                         (u64)off_min,
-                                         (u64)(t->buffer->data_size -
-                                         sizeof(*fp)));
-                       return_error = BR_FAILED_REPLY;
-                       goto err_bad_offset;
-               }
-               fp = (struct flat_binder_object *)(t->buffer->data + *offp);
-               off_min = *offp + sizeof(struct flat_binder_object);
-               switch (fp->type) {
-               case BINDER_TYPE_BINDER:
-               case BINDER_TYPE_WEAK_BINDER: {
-                       struct binder_ref *ref;
-                       struct binder_node *node = binder_get_node(proc, fp->binder);
-                       if (node == NULL) {
-                               node = binder_new_node(proc, fp->binder, fp->cookie);
-                               if (node == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_new_node_failed;
-                               }
-                               node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
-                               node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
-                       }
-                       if (fp->cookie != node->cookie) {
-                               binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
-                                       proc->pid, thread->pid,
-                                       (u64)fp->binder, node->debug_id,
-                                       (u64)fp->cookie, (u64)node->cookie);
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       ref = binder_get_ref_for_node(target_proc, node);
-                       if (ref == NULL) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_for_node_failed;
-                       }
-                       if (fp->type == BINDER_TYPE_BINDER)
-                               fp->type = BINDER_TYPE_HANDLE;
-                       else
-                               fp->type = BINDER_TYPE_WEAK_HANDLE;
-                       fp->handle = ref->desc;
-                       binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
-                                      &thread->todo);
-
-                       trace_binder_transaction_node_to_ref(t, node, ref);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        node %d u%016llx -> ref %d desc %d\n",
-                                    node->debug_id, (u64)node->ptr,
-                                    ref->debug_id, ref->desc);
-               } break;
-               case BINDER_TYPE_HANDLE:
-               case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle);
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid handle, %d\n",
-                                               proc->pid,
-                                               thread->pid, fp->handle);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
-                       }
-                       if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
-                               return_error = BR_FAILED_REPLY;
-                               goto err_binder_get_ref_failed;
-                       }
-                       if (ref->node->proc == target_proc) {
-                               if (fp->type == BINDER_TYPE_HANDLE)
-                                       fp->type = BINDER_TYPE_BINDER;
-                               else
-                                       fp->type = BINDER_TYPE_WEAK_BINDER;
-                               fp->binder = ref->node->ptr;
-                               fp->cookie = ref->node->cookie;
-                               binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
-                               trace_binder_transaction_ref_to_node(t, ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> node %d u%016llx\n",
-                                            ref->debug_id, ref->desc, ref->node->debug_id,
-                                            (u64)ref->node->ptr);
-                       } else {
-                               struct binder_ref *new_ref;
-                               new_ref = binder_get_ref_for_node(target_proc, ref->node);
-                               if (new_ref == NULL) {
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_binder_get_ref_for_node_failed;
-                               }
-                               fp->handle = new_ref->desc;
-                               binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
-                               trace_binder_transaction_ref_to_ref(t, ref,
-                                                                   new_ref);
-                               binder_debug(BINDER_DEBUG_TRANSACTION,
-                                            "        ref %d desc %d -> ref %d desc %d (node %d)\n",
-                                            ref->debug_id, ref->desc, new_ref->debug_id,
-                                            new_ref->desc, ref->node->debug_id);
-                       }
-               } break;
-
-               case BINDER_TYPE_FD: {
-                       int target_fd;
-                       struct file *file;
-
-                       if (reply) {
-                               if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
-                                       binder_user_error("%d:%d got reply with fd, %d, but target does not allow fds\n",
-                                               proc->pid, thread->pid, fp->handle);
-                                       return_error = BR_FAILED_REPLY;
-                                       goto err_fd_not_allowed;
-                               }
-                       } else if (!target_node->accept_fds) {
-                               binder_user_error("%d:%d got transaction with fd, %d, but target does not allow fds\n",
-                                       proc->pid, thread->pid, fp->handle);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_fd_not_allowed;
-                       }
-
-                       file = fget(fp->handle);
-                       if (file == NULL) {
-                               binder_user_error("%d:%d got transaction with invalid fd, %d\n",
-                                       proc->pid, thread->pid, fp->handle);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_fget_failed;
-                       }
-                       if (security_binder_transfer_file(proc->tsk, target_proc->tsk, file) < 0) {
-                               fput(file);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
-                       }
-                       target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
-                       if (target_fd < 0) {
-                               fput(file);
-                               return_error = BR_FAILED_REPLY;
-                               goto err_get_unused_fd_failed;
-                       }
-                       task_fd_install(target_proc, target_fd, file);
-                       trace_binder_transaction_fd(t, fp->handle, target_fd);
-                       binder_debug(BINDER_DEBUG_TRANSACTION,
-                                    "        fd %d -> %d\n", fp->handle, target_fd);
-                       /* TODO: fput? */
-                       fp->handle = target_fd;
-               } break;
-
-               default:
-                       binder_user_error("%d:%d got transaction with invalid object type, %x\n",
-                               proc->pid, thread->pid, fp->type);
-                       return_error = BR_FAILED_REPLY;
-                       goto err_bad_object_type;
-               }
-       }
-       if (reply) {
-               BUG_ON(t->buffer->async_transaction != 0);
-               binder_pop_transaction(target_thread, in_reply_to);
-       } else if (!(t->flags & TF_ONE_WAY)) {
-               BUG_ON(t->buffer->async_transaction != 0);
-               t->need_reply = 1;
-               t->from_parent = thread->transaction_stack;
-               thread->transaction_stack = t;
-       } else {
-               BUG_ON(target_node == NULL);
-               BUG_ON(t->buffer->async_transaction != 1);
-               if (target_node->has_async_transaction) {
-                       target_list = &target_node->async_todo;
-                       target_wait = NULL;
-               } else
-                       target_node->has_async_transaction = 1;
-       }
-       t->work.type = BINDER_WORK_TRANSACTION;
-       list_add_tail(&t->work.entry, target_list);
-       tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
-       list_add_tail(&tcomplete->entry, &thread->todo);
-       if (target_wait)
-               wake_up_interruptible(target_wait);
-       return;
-
-err_get_unused_fd_failed:
-err_fget_failed:
-err_fd_not_allowed:
-err_binder_get_ref_for_node_failed:
-err_binder_get_ref_failed:
-err_binder_new_node_failed:
-err_bad_object_type:
-err_bad_offset:
-err_copy_data_failed:
-       trace_binder_transaction_failed_buffer_release(t->buffer);
-       binder_transaction_buffer_release(target_proc, t->buffer, offp);
-       t->buffer->transaction = NULL;
-       binder_free_buf(target_proc, t->buffer);
-err_binder_alloc_buf_failed:
-       kfree(tcomplete);
-       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-err_alloc_tcomplete_failed:
-       kfree(t);
-       binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
-err_bad_call_stack:
-err_empty_call_stack:
-err_dead_binder:
-err_invalid_target_handle:
-err_no_context_mgr_node:
-       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-                    "%d:%d transaction failed %d, size %lld-%lld\n",
-                    proc->pid, thread->pid, return_error,
-                    (u64)tr->data_size, (u64)tr->offsets_size);
-
-       {
-               struct binder_transaction_log_entry *fe;
-               fe = binder_transaction_log_add(&binder_transaction_log_failed);
-               *fe = *e;
-       }
-
-       BUG_ON(thread->return_error != BR_OK);
-       if (in_reply_to) {
-               thread->return_error = BR_TRANSACTION_COMPLETE;
-               binder_send_failed_reply(in_reply_to, return_error);
-       } else
-               thread->return_error = return_error;
-}
-
-int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
-                       binder_uintptr_t binder_buffer, size_t size,
-                       binder_size_t *consumed)
-{
-       uint32_t cmd;
-       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
-       void __user *ptr = buffer + *consumed;
-       void __user *end = buffer + size;
-
-       while (ptr < end && thread->return_error == BR_OK) {
-               if (get_user(cmd, (uint32_t __user *)ptr))
-                       return -EFAULT;
-               ptr += sizeof(uint32_t);
-               trace_binder_command(cmd);
-               if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
-                       binder_stats.bc[_IOC_NR(cmd)]++;
-                       proc->stats.bc[_IOC_NR(cmd)]++;
-                       thread->stats.bc[_IOC_NR(cmd)]++;
-               }
-               switch (cmd) {
-               case BC_INCREFS:
-               case BC_ACQUIRE:
-               case BC_RELEASE:
-               case BC_DECREFS: {
-                       uint32_t target;
-                       struct binder_ref *ref;
-                       const char *debug_string;
-
-                       if (get_user(target, (uint32_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(uint32_t);
-                       if (target == 0 && binder_context_mgr_node &&
-                           (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
-                               ref = binder_get_ref_for_node(proc,
-                                              binder_context_mgr_node);
-                               if (ref->desc != target) {
-                                       binder_user_error("%d:%d tried to acquire reference to desc 0, got %d instead\n",
-                                               proc->pid, thread->pid,
-                                               ref->desc);
-                               }
-                       } else
-                               ref = binder_get_ref(proc, target);
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d refcount change on invalid ref %d\n",
-                                       proc->pid, thread->pid, target);
-                               break;
-                       }
-                       switch (cmd) {
-                       case BC_INCREFS:
-                               debug_string = "IncRefs";
-                               binder_inc_ref(ref, 0, NULL);
-                               break;
-                       case BC_ACQUIRE:
-                               debug_string = "Acquire";
-                               binder_inc_ref(ref, 1, NULL);
-                               break;
-                       case BC_RELEASE:
-                               debug_string = "Release";
-                               binder_dec_ref(ref, 1);
-                               break;
-                       case BC_DECREFS:
-                       default:
-                               debug_string = "DecRefs";
-                               binder_dec_ref(ref, 0);
-                               break;
-                       }
-                       binder_debug(BINDER_DEBUG_USER_REFS,
-                                    "%d:%d %s ref %d desc %d s %d w %d for node %d\n",
-                                    proc->pid, thread->pid, debug_string, ref->debug_id,
-                                    ref->desc, ref->strong, ref->weak, ref->node->debug_id);
-                       break;
-               }
-               case BC_INCREFS_DONE:
-               case BC_ACQUIRE_DONE: {
-                       binder_uintptr_t node_ptr;
-                       binder_uintptr_t cookie;
-                       struct binder_node *node;
-
-                       if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(binder_uintptr_t);
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(binder_uintptr_t);
-                       node = binder_get_node(proc, node_ptr);
-                       if (node == NULL) {
-                               binder_user_error("%d:%d %s u%016llx no match\n",
-                                       proc->pid, thread->pid,
-                                       cmd == BC_INCREFS_DONE ?
-                                       "BC_INCREFS_DONE" :
-                                       "BC_ACQUIRE_DONE",
-                                       (u64)node_ptr);
-                               break;
-                       }
-                       if (cookie != node->cookie) {
-                               binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
-                                       proc->pid, thread->pid,
-                                       cmd == BC_INCREFS_DONE ?
-                                       "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-                                       (u64)node_ptr, node->debug_id,
-                                       (u64)cookie, (u64)node->cookie);
-                               break;
-                       }
-                       if (cmd == BC_ACQUIRE_DONE) {
-                               if (node->pending_strong_ref == 0) {
-                                       binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
-                                               proc->pid, thread->pid,
-                                               node->debug_id);
-                                       break;
-                               }
-                               node->pending_strong_ref = 0;
-                       } else {
-                               if (node->pending_weak_ref == 0) {
-                                       binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
-                                               proc->pid, thread->pid,
-                                               node->debug_id);
-                                       break;
-                               }
-                               node->pending_weak_ref = 0;
-                       }
-                       binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
-                       binder_debug(BINDER_DEBUG_USER_REFS,
-                                    "%d:%d %s node %d ls %d lw %d\n",
-                                    proc->pid, thread->pid,
-                                    cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
-                                    node->debug_id, node->local_strong_refs, node->local_weak_refs);
-                       break;
-               }
-               case BC_ATTEMPT_ACQUIRE:
-                       pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
-                       return -EINVAL;
-               case BC_ACQUIRE_RESULT:
-                       pr_err("BC_ACQUIRE_RESULT not supported\n");
-                       return -EINVAL;
-
-               case BC_FREE_BUFFER: {
-                       binder_uintptr_t data_ptr;
-                       struct binder_buffer *buffer;
-
-                       if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(binder_uintptr_t);
-
-                       buffer = binder_buffer_lookup(proc, data_ptr);
-                       if (buffer == NULL) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx no match\n",
-                                       proc->pid, thread->pid, (u64)data_ptr);
-                               break;
-                       }
-                       if (!buffer->allow_user_free) {
-                               binder_user_error("%d:%d BC_FREE_BUFFER u%016llx matched unreturned buffer\n",
-                                       proc->pid, thread->pid, (u64)data_ptr);
-                               break;
-                       }
-                       binder_debug(BINDER_DEBUG_FREE_BUFFER,
-                                    "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
-                                    proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id,
-                                    buffer->transaction ? "active" : "finished");
-
-                       if (buffer->transaction) {
-                               buffer->transaction->buffer = NULL;
-                               buffer->transaction = NULL;
-                       }
-                       if (buffer->async_transaction && buffer->target_node) {
-                               BUG_ON(!buffer->target_node->has_async_transaction);
-                               if (list_empty(&buffer->target_node->async_todo))
-                                       buffer->target_node->has_async_transaction = 0;
-                               else
-                                       list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
-                       }
-                       trace_binder_transaction_buffer_release(buffer);
-                       binder_transaction_buffer_release(proc, buffer, NULL);
-                       binder_free_buf(proc, buffer);
-                       break;
-               }
-
-               case BC_TRANSACTION:
-               case BC_REPLY: {
-                       struct binder_transaction_data tr;
-
-                       if (copy_from_user(&tr, ptr, sizeof(tr)))
-                               return -EFAULT;
-                       ptr += sizeof(tr);
-                       binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
-                       break;
-               }
-
-               case BC_REGISTER_LOOPER:
-                       binder_debug(BINDER_DEBUG_THREADS,
-                                    "%d:%d BC_REGISTER_LOOPER\n",
-                                    proc->pid, thread->pid);
-                       if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
-                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
-                               binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
-                                       proc->pid, thread->pid);
-                       } else if (proc->requested_threads == 0) {
-                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
-                               binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
-                                       proc->pid, thread->pid);
-                       } else {
-                               proc->requested_threads--;
-                               proc->requested_threads_started++;
-                       }
-                       thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
-                       break;
-               case BC_ENTER_LOOPER:
-                       binder_debug(BINDER_DEBUG_THREADS,
-                                    "%d:%d BC_ENTER_LOOPER\n",
-                                    proc->pid, thread->pid);
-                       if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
-                               thread->looper |= BINDER_LOOPER_STATE_INVALID;
-                               binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
-                                       proc->pid, thread->pid);
-                       }
-                       thread->looper |= BINDER_LOOPER_STATE_ENTERED;
-                       break;
-               case BC_EXIT_LOOPER:
-                       binder_debug(BINDER_DEBUG_THREADS,
-                                    "%d:%d BC_EXIT_LOOPER\n",
-                                    proc->pid, thread->pid);
-                       thread->looper |= BINDER_LOOPER_STATE_EXITED;
-                       break;
-
-               case BC_REQUEST_DEATH_NOTIFICATION:
-               case BC_CLEAR_DEATH_NOTIFICATION: {
-                       uint32_t target;
-                       binder_uintptr_t cookie;
-                       struct binder_ref *ref;
-                       struct binder_ref_death *death;
-
-                       if (get_user(target, (uint32_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(uint32_t);
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(binder_uintptr_t);
-                       ref = binder_get_ref(proc, target);
-                       if (ref == NULL) {
-                               binder_user_error("%d:%d %s invalid ref %d\n",
-                                       proc->pid, thread->pid,
-                                       cmd == BC_REQUEST_DEATH_NOTIFICATION ?
-                                       "BC_REQUEST_DEATH_NOTIFICATION" :
-                                       "BC_CLEAR_DEATH_NOTIFICATION",
-                                       target);
-                               break;
-                       }
-
-                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-                                    "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
-                                    proc->pid, thread->pid,
-                                    cmd == BC_REQUEST_DEATH_NOTIFICATION ?
-                                    "BC_REQUEST_DEATH_NOTIFICATION" :
-                                    "BC_CLEAR_DEATH_NOTIFICATION",
-                                    (u64)cookie, ref->debug_id, ref->desc,
-                                    ref->strong, ref->weak, ref->node->debug_id);
-
-                       if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
-                               if (ref->death) {
-                                       binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
-                                               proc->pid, thread->pid);
-                                       break;
-                               }
-                               death = kzalloc(sizeof(*death), GFP_KERNEL);
-                               if (death == NULL) {
-                                       thread->return_error = BR_ERROR;
-                                       binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
-                                                    "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
-                                                    proc->pid, thread->pid);
-                                       break;
-                               }
-                               binder_stats_created(BINDER_STAT_DEATH);
-                               INIT_LIST_HEAD(&death->work.entry);
-                               death->cookie = cookie;
-                               ref->death = death;
-                               if (ref->node->proc == NULL) {
-                                       ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-                                               list_add_tail(&ref->death->work.entry, &thread->todo);
-                                       } else {
-                                               list_add_tail(&ref->death->work.entry, &proc->todo);
-                                               wake_up_interruptible(&proc->wait);
-                                       }
-                               }
-                       } else {
-                               if (ref->death == NULL) {
-                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
-                                               proc->pid, thread->pid);
-                                       break;
-                               }
-                               death = ref->death;
-                               if (death->cookie != cookie) {
-                                       binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
-                                               proc->pid, thread->pid,
-                                               (u64)death->cookie, (u64)cookie);
-                                       break;
-                               }
-                               ref->death = NULL;
-                               if (list_empty(&death->work.entry)) {
-                                       death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-                                       if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-                                               list_add_tail(&death->work.entry, &thread->todo);
-                                       } else {
-                                               list_add_tail(&death->work.entry, &proc->todo);
-                                               wake_up_interruptible(&proc->wait);
-                                       }
-                               } else {
-                                       BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
-                                       death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
-                               }
-                       }
-               } break;
-               case BC_DEAD_BINDER_DONE: {
-                       struct binder_work *w;
-                       binder_uintptr_t cookie;
-                       struct binder_ref_death *death = NULL;
-                       if (get_user(cookie, (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-
-                       ptr += sizeof(void *);
-                       list_for_each_entry(w, &proc->delivered_death, entry) {
-                               struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
-                               if (tmp_death->cookie == cookie) {
-                                       death = tmp_death;
-                                       break;
-                               }
-                       }
-                       binder_debug(BINDER_DEBUG_DEAD_BINDER,
-                                    "%d:%d BC_DEAD_BINDER_DONE %016llx found %p\n",
-                                    proc->pid, thread->pid, (u64)cookie, death);
-                       if (death == NULL) {
-                               binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
-                                       proc->pid, thread->pid, (u64)cookie);
-                               break;
-                       }
-
-                       list_del_init(&death->work.entry);
-                       if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
-                               death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
-                               if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
-                                       list_add_tail(&death->work.entry, &thread->todo);
-                               } else {
-                                       list_add_tail(&death->work.entry, &proc->todo);
-                                       wake_up_interruptible(&proc->wait);
-                               }
-                       }
-               } break;
-
-               default:
-                       pr_err("%d:%d unknown command %d\n",
-                              proc->pid, thread->pid, cmd);
-                       return -EINVAL;
-               }
-               *consumed = ptr - buffer;
-       }
-       return 0;
-}
-
-void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
-                   uint32_t cmd)
-{
-       trace_binder_return(cmd);
-       if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
-               binder_stats.br[_IOC_NR(cmd)]++;
-               proc->stats.br[_IOC_NR(cmd)]++;
-               thread->stats.br[_IOC_NR(cmd)]++;
-       }
-}
-
-static int binder_has_proc_work(struct binder_proc *proc,
-                               struct binder_thread *thread)
-{
-       return !list_empty(&proc->todo) ||
-               (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
-}
-
-static int binder_has_thread_work(struct binder_thread *thread)
-{
-       return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
-               (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
-}
-
-static int binder_thread_read(struct binder_proc *proc,
-                             struct binder_thread *thread,
-                             binder_uintptr_t binder_buffer, size_t size,
-                             binder_size_t *consumed, int non_block)
-{
-       void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
-       void __user *ptr = buffer + *consumed;
-       void __user *end = buffer + size;
-
-       int ret = 0;
-       int wait_for_proc_work;
-
-       if (*consumed == 0) {
-               if (put_user(BR_NOOP, (uint32_t __user *)ptr))
-                       return -EFAULT;
-               ptr += sizeof(uint32_t);
-       }
-
-retry:
-       wait_for_proc_work = thread->transaction_stack == NULL &&
-                               list_empty(&thread->todo);
-
-       if (thread->return_error != BR_OK && ptr < end) {
-               if (thread->return_error2 != BR_OK) {
-                       if (put_user(thread->return_error2, (uint32_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(uint32_t);
-                       binder_stat_br(proc, thread, thread->return_error2);
-                       if (ptr == end)
-                               goto done;
-                       thread->return_error2 = BR_OK;
-               }
-               if (put_user(thread->return_error, (uint32_t __user *)ptr))
-                       return -EFAULT;
-               ptr += sizeof(uint32_t);
-               binder_stat_br(proc, thread, thread->return_error);
-               thread->return_error = BR_OK;
-               goto done;
-       }
-
-
-       thread->looper |= BINDER_LOOPER_STATE_WAITING;
-       if (wait_for_proc_work)
-               proc->ready_threads++;
-
-       binder_unlock(__func__);
-
-       trace_binder_wait_for_work(wait_for_proc_work,
-                                  !!thread->transaction_stack,
-                                  !list_empty(&thread->todo));
-       if (wait_for_proc_work) {
-               if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
-                                       BINDER_LOOPER_STATE_ENTERED))) {
-                       binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
-                               proc->pid, thread->pid, thread->looper);
-                       wait_event_interruptible(binder_user_error_wait,
-                                                binder_stop_on_user_error < 2);
-               }
-               binder_set_nice(proc->default_priority);
-               if (non_block) {
-                       if (!binder_has_proc_work(proc, thread))
-                               ret = -EAGAIN;
-               } else
-                       ret = wait_event_freezable_exclusive(proc->wait, binder_has_proc_work(proc, thread));
-       } else {
-               if (non_block) {
-                       if (!binder_has_thread_work(thread))
-                               ret = -EAGAIN;
-               } else
-                       ret = wait_event_freezable(thread->wait, binder_has_thread_work(thread));
-       }
-
-       binder_lock(__func__);
-
-       if (wait_for_proc_work)
-               proc->ready_threads--;
-       thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
-
-       if (ret)
-               return ret;
-
-       while (1) {
-               uint32_t cmd;
-               struct binder_transaction_data tr;
-               struct binder_work *w;
-               struct binder_transaction *t = NULL;
-
-               if (!list_empty(&thread->todo))
-                       w = list_first_entry(&thread->todo, struct binder_work, entry);
-               else if (!list_empty(&proc->todo) && wait_for_proc_work)
-                       w = list_first_entry(&proc->todo, struct binder_work, entry);
-               else {
-                       if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
-                               goto retry;
-                       break;
-               }
-
-               if (end - ptr < sizeof(tr) + 4)
-                       break;
-
-               switch (w->type) {
-               case BINDER_WORK_TRANSACTION: {
-                       t = container_of(w, struct binder_transaction, work);
-               } break;
-               case BINDER_WORK_TRANSACTION_COMPLETE: {
-                       cmd = BR_TRANSACTION_COMPLETE;
-                       if (put_user(cmd, (uint32_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(uint32_t);
-
-                       binder_stat_br(proc, thread, cmd);
-                       binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
-                                    "%d:%d BR_TRANSACTION_COMPLETE\n",
-                                    proc->pid, thread->pid);
-
-                       list_del(&w->entry);
-                       kfree(w);
-                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-               } break;
-               case BINDER_WORK_NODE: {
-                       struct binder_node *node = container_of(w, struct binder_node, work);
-                       uint32_t cmd = BR_NOOP;
-                       const char *cmd_name;
-                       int strong = node->internal_strong_refs || node->local_strong_refs;
-                       int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
-                       if (weak && !node->has_weak_ref) {
-                               cmd = BR_INCREFS;
-                               cmd_name = "BR_INCREFS";
-                               node->has_weak_ref = 1;
-                               node->pending_weak_ref = 1;
-                               node->local_weak_refs++;
-                       } else if (strong && !node->has_strong_ref) {
-                               cmd = BR_ACQUIRE;
-                               cmd_name = "BR_ACQUIRE";
-                               node->has_strong_ref = 1;
-                               node->pending_strong_ref = 1;
-                               node->local_strong_refs++;
-                       } else if (!strong && node->has_strong_ref) {
-                               cmd = BR_RELEASE;
-                               cmd_name = "BR_RELEASE";
-                               node->has_strong_ref = 0;
-                       } else if (!weak && node->has_weak_ref) {
-                               cmd = BR_DECREFS;
-                               cmd_name = "BR_DECREFS";
-                               node->has_weak_ref = 0;
-                       }
-                       if (cmd != BR_NOOP) {
-                               if (put_user(cmd, (uint32_t __user *)ptr))
-                                       return -EFAULT;
-                               ptr += sizeof(uint32_t);
-                               if (put_user(node->ptr,
-                                            (binder_uintptr_t __user *)ptr))
-                                       return -EFAULT;
-                               ptr += sizeof(binder_uintptr_t);
-                               if (put_user(node->cookie,
-                                            (binder_uintptr_t __user *)ptr))
-                                       return -EFAULT;
-                               ptr += sizeof(binder_uintptr_t);
-
-                               binder_stat_br(proc, thread, cmd);
-                               binder_debug(BINDER_DEBUG_USER_REFS,
-                                            "%d:%d %s %d u%016llx c%016llx\n",
-                                            proc->pid, thread->pid, cmd_name,
-                                            node->debug_id,
-                                            (u64)node->ptr, (u64)node->cookie);
-                       } else {
-                               list_del_init(&w->entry);
-                               if (!weak && !strong) {
-                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                                                    "%d:%d node %d u%016llx c%016llx deleted\n",
-                                                    proc->pid, thread->pid, node->debug_id,
-                                                    (u64)node->ptr, (u64)node->cookie);
-                                       rb_erase(&node->rb_node, &proc->nodes);
-                                       kfree(node);
-                                       binder_stats_deleted(BINDER_STAT_NODE);
-                               } else {
-                                       binder_debug(BINDER_DEBUG_INTERNAL_REFS,
-                                                    "%d:%d node %d u%016llx c%016llx state unchanged\n",
-                                                    proc->pid, thread->pid, node->debug_id,
-                                                    (u64)node->ptr, (u64)node->cookie);
-                               }
-                       }
-               } break;
-               case BINDER_WORK_DEAD_BINDER:
-               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
-                       struct binder_ref_death *death;
-                       uint32_t cmd;
-
-                       death = container_of(w, struct binder_ref_death, work);
-                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
-                               cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
-                       else
-                               cmd = BR_DEAD_BINDER;
-                       if (put_user(cmd, (uint32_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(uint32_t);
-                       if (put_user(death->cookie,
-                                    (binder_uintptr_t __user *)ptr))
-                               return -EFAULT;
-                       ptr += sizeof(binder_uintptr_t);
-                       binder_stat_br(proc, thread, cmd);
-                       binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
-                                    "%d:%d %s %016llx\n",
-                                     proc->pid, thread->pid,
-                                     cmd == BR_DEAD_BINDER ?
-                                     "BR_DEAD_BINDER" :
-                                     "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-                                     (u64)death->cookie);
-
-                       if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
-                               list_del(&w->entry);
-                               kfree(death);
-                               binder_stats_deleted(BINDER_STAT_DEATH);
-                       } else
-                               list_move(&w->entry, &proc->delivered_death);
-                       if (cmd == BR_DEAD_BINDER)
-                               goto done; /* DEAD_BINDER notifications can cause transactions */
-               } break;
-               }
-
-               if (!t)
-                       continue;
-
-               BUG_ON(t->buffer == NULL);
-               if (t->buffer->target_node) {
-                       struct binder_node *target_node = t->buffer->target_node;
-                       tr.target.ptr = target_node->ptr;
-                       tr.cookie =  target_node->cookie;
-                       t->saved_priority = task_nice(current);
-                       if (t->priority < target_node->min_priority &&
-                           !(t->flags & TF_ONE_WAY))
-                               binder_set_nice(t->priority);
-                       else if (!(t->flags & TF_ONE_WAY) ||
-                                t->saved_priority > target_node->min_priority)
-                               binder_set_nice(target_node->min_priority);
-                       cmd = BR_TRANSACTION;
-               } else {
-                       tr.target.ptr = 0;
-                       tr.cookie = 0;
-                       cmd = BR_REPLY;
-               }
-               tr.code = t->code;
-               tr.flags = t->flags;
-               tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
-
-               if (t->from) {
-                       struct task_struct *sender = t->from->proc->tsk;
-                       tr.sender_pid = task_tgid_nr_ns(sender,
-                                                       task_active_pid_ns(current));
-               } else {
-                       tr.sender_pid = 0;
-               }
-
-               tr.data_size = t->buffer->data_size;
-               tr.offsets_size = t->buffer->offsets_size;
-               tr.data.ptr.buffer = (binder_uintptr_t)(
-                                       (uintptr_t)t->buffer->data +
-                                       proc->user_buffer_offset);
-               tr.data.ptr.offsets = tr.data.ptr.buffer +
-                                       ALIGN(t->buffer->data_size,
-                                           sizeof(void *));
-
-               if (put_user(cmd, (uint32_t __user *)ptr))
-                       return -EFAULT;
-               ptr += sizeof(uint32_t);
-               if (copy_to_user(ptr, &tr, sizeof(tr)))
-                       return -EFAULT;
-               ptr += sizeof(tr);
-
-               trace_binder_transaction_received(t);
-               binder_stat_br(proc, thread, cmd);
-               binder_debug(BINDER_DEBUG_TRANSACTION,
-                            "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
-                            proc->pid, thread->pid,
-                            (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
-                            "BR_REPLY",
-                            t->debug_id, t->from ? t->from->proc->pid : 0,
-                            t->from ? t->from->pid : 0, cmd,
-                            t->buffer->data_size, t->buffer->offsets_size,
-                            (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
-
-               list_del(&t->work.entry);
-               t->buffer->allow_user_free = 1;
-               if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
-                       t->to_parent = thread->transaction_stack;
-                       t->to_thread = thread;
-                       thread->transaction_stack = t;
-               } else {
-                       t->buffer->transaction = NULL;
-                       kfree(t);
-                       binder_stats_deleted(BINDER_STAT_TRANSACTION);
-               }
-               break;
-       }
-
-done:
-
-       *consumed = ptr - buffer;
-       if (proc->requested_threads + proc->ready_threads == 0 &&
-           proc->requested_threads_started < proc->max_threads &&
-           (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
-            BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to */
-            /*spawn a new thread if we leave this out */) {
-               proc->requested_threads++;
-               binder_debug(BINDER_DEBUG_THREADS,
-                            "%d:%d BR_SPAWN_LOOPER\n",
-                            proc->pid, thread->pid);
-               if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
-                       return -EFAULT;
-               binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
-       }
-       return 0;
-}
-
-static void binder_release_work(struct list_head *list)
-{
-       struct binder_work *w;
-       while (!list_empty(list)) {
-               w = list_first_entry(list, struct binder_work, entry);
-               list_del_init(&w->entry);
-               switch (w->type) {
-               case BINDER_WORK_TRANSACTION: {
-                       struct binder_transaction *t;
-
-                       t = container_of(w, struct binder_transaction, work);
-                       if (t->buffer->target_node &&
-                           !(t->flags & TF_ONE_WAY)) {
-                               binder_send_failed_reply(t, BR_DEAD_REPLY);
-                       } else {
-                               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                                       "undelivered transaction %d\n",
-                                       t->debug_id);
-                               t->buffer->transaction = NULL;
-                               kfree(t);
-                               binder_stats_deleted(BINDER_STAT_TRANSACTION);
-                       }
-               } break;
-               case BINDER_WORK_TRANSACTION_COMPLETE: {
-                       binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                               "undelivered TRANSACTION_COMPLETE\n");
-                       kfree(w);
-                       binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
-               } break;
-               case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-               case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
-                       struct binder_ref_death *death;
-
-                       death = container_of(w, struct binder_ref_death, work);
-                       binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                               "undelivered death notification, %016llx\n",
-                               (u64)death->cookie);
-                       kfree(death);
-                       binder_stats_deleted(BINDER_STAT_DEATH);
-               } break;
-               default:
-                       pr_err("unexpected work type, %d, not freed\n",
-                              w->type);
-                       break;
-               }
-       }
-
-}
-
-static struct binder_thread *binder_get_thread(struct binder_proc *proc)
-{
-       struct binder_thread *thread = NULL;
-       struct rb_node *parent = NULL;
-       struct rb_node **p = &proc->threads.rb_node;
-
-       while (*p) {
-               parent = *p;
-               thread = rb_entry(parent, struct binder_thread, rb_node);
-
-               if (current->pid < thread->pid)
-                       p = &(*p)->rb_left;
-               else if (current->pid > thread->pid)
-                       p = &(*p)->rb_right;
-               else
-                       break;
-       }
-       if (*p == NULL) {
-               thread = kzalloc(sizeof(*thread), GFP_KERNEL);
-               if (thread == NULL)
-                       return NULL;
-               binder_stats_created(BINDER_STAT_THREAD);
-               thread->proc = proc;
-               thread->pid = current->pid;
-               init_waitqueue_head(&thread->wait);
-               INIT_LIST_HEAD(&thread->todo);
-               rb_link_node(&thread->rb_node, parent, p);
-               rb_insert_color(&thread->rb_node, &proc->threads);
-               thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
-               thread->return_error = BR_OK;
-               thread->return_error2 = BR_OK;
-       }
-       return thread;
-}
-
-static int binder_free_thread(struct binder_proc *proc,
-                             struct binder_thread *thread)
-{
-       struct binder_transaction *t;
-       struct binder_transaction *send_reply = NULL;
-       int active_transactions = 0;
-
-       rb_erase(&thread->rb_node, &proc->threads);
-       t = thread->transaction_stack;
-       if (t && t->to_thread == thread)
-               send_reply = t;
-       while (t) {
-               active_transactions++;
-               binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
-                            "release %d:%d transaction %d %s, still active\n",
-                             proc->pid, thread->pid,
-                            t->debug_id,
-                            (t->to_thread == thread) ? "in" : "out");
-
-               if (t->to_thread == thread) {
-                       t->to_proc = NULL;
-                       t->to_thread = NULL;
-                       if (t->buffer) {
-                               t->buffer->transaction = NULL;
-                               t->buffer = NULL;
-                       }
-                       t = t->to_parent;
-               } else if (t->from == thread) {
-                       t->from = NULL;
-                       t = t->from_parent;
-               } else
-                       BUG();
-       }
-       if (send_reply)
-               binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
-       binder_release_work(&thread->todo);
-       kfree(thread);
-       binder_stats_deleted(BINDER_STAT_THREAD);
-       return active_transactions;
-}
-
-static unsigned int binder_poll(struct file *filp,
-                               struct poll_table_struct *wait)
-{
-       struct binder_proc *proc = filp->private_data;
-       struct binder_thread *thread = NULL;
-       int wait_for_proc_work;
-
-       binder_lock(__func__);
-
-       thread = binder_get_thread(proc);
-
-       wait_for_proc_work = thread->transaction_stack == NULL &&
-               list_empty(&thread->todo) && thread->return_error == BR_OK;
-
-       binder_unlock(__func__);
-
-       if (wait_for_proc_work) {
-               if (binder_has_proc_work(proc, thread))
-                       return POLLIN;
-               poll_wait(filp, &proc->wait, wait);
-               if (binder_has_proc_work(proc, thread))
-                       return POLLIN;
-       } else {
-               if (binder_has_thread_work(thread))
-                       return POLLIN;
-               poll_wait(filp, &thread->wait, wait);
-               if (binder_has_thread_work(thread))
-                       return POLLIN;
-       }
-       return 0;
-}
-
-static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       int ret;
-       struct binder_proc *proc = filp->private_data;
-       struct binder_thread *thread;
-       unsigned int size = _IOC_SIZE(cmd);
-       void __user *ubuf = (void __user *)arg;
-
-       /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
-
-       trace_binder_ioctl(cmd, arg);
-
-       ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-       if (ret)
-               goto err_unlocked;
-
-       binder_lock(__func__);
-       thread = binder_get_thread(proc);
-       if (thread == NULL) {
-               ret = -ENOMEM;
-               goto err;
-       }
-
-       switch (cmd) {
-       case BINDER_WRITE_READ: {
-               struct binder_write_read bwr;
-               if (size != sizeof(struct binder_write_read)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-               if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
-                       ret = -EFAULT;
-                       goto err;
-               }
-               binder_debug(BINDER_DEBUG_READ_WRITE,
-                            "%d:%d write %lld at %016llx, read %lld at %016llx\n",
-                            proc->pid, thread->pid,
-                            (u64)bwr.write_size, (u64)bwr.write_buffer,
-                            (u64)bwr.read_size, (u64)bwr.read_buffer);
-
-               if (bwr.write_size > 0) {
-                       ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
-                       trace_binder_write_done(ret);
-                       if (ret < 0) {
-                               bwr.read_consumed = 0;
-                               if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-                                       ret = -EFAULT;
-                               goto err;
-                       }
-               }
-               if (bwr.read_size > 0) {
-                       ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
-                       trace_binder_read_done(ret);
-                       if (!list_empty(&proc->todo))
-                               wake_up_interruptible(&proc->wait);
-                       if (ret < 0) {
-                               if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
-                                       ret = -EFAULT;
-                               goto err;
-                       }
-               }
-               binder_debug(BINDER_DEBUG_READ_WRITE,
-                            "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
-                            proc->pid, thread->pid,
-                            (u64)bwr.write_consumed, (u64)bwr.write_size,
-                            (u64)bwr.read_consumed, (u64)bwr.read_size);
-               if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
-                       ret = -EFAULT;
-                       goto err;
-               }
-               break;
-       }
-       case BINDER_SET_MAX_THREADS:
-               if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-               break;
-       case BINDER_SET_CONTEXT_MGR:
-               if (binder_context_mgr_node != NULL) {
-                       pr_err("BINDER_SET_CONTEXT_MGR already set\n");
-                       ret = -EBUSY;
-                       goto err;
-               }
-               ret = security_binder_set_context_mgr(proc->tsk);
-               if (ret < 0)
-                       goto err;
-               if (uid_valid(binder_context_mgr_uid)) {
-                       if (!uid_eq(binder_context_mgr_uid, current->cred->euid)) {
-                               pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
-                                      from_kuid(&init_user_ns, current->cred->euid),
-                                      from_kuid(&init_user_ns, binder_context_mgr_uid));
-                               ret = -EPERM;
-                               goto err;
-                       }
-               } else
-                       binder_context_mgr_uid = current->cred->euid;
-               binder_context_mgr_node = binder_new_node(proc, 0, 0);
-               if (binder_context_mgr_node == NULL) {
-                       ret = -ENOMEM;
-                       goto err;
-               }
-               binder_context_mgr_node->local_weak_refs++;
-               binder_context_mgr_node->local_strong_refs++;
-               binder_context_mgr_node->has_strong_ref = 1;
-               binder_context_mgr_node->has_weak_ref = 1;
-               break;
-       case BINDER_THREAD_EXIT:
-               binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
-                            proc->pid, thread->pid);
-               binder_free_thread(proc, thread);
-               thread = NULL;
-               break;
-       case BINDER_VERSION:
-               if (size != sizeof(struct binder_version)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-               if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
-                       ret = -EINVAL;
-                       goto err;
-               }
-               break;
-       default:
-               ret = -EINVAL;
-               goto err;
-       }
-       ret = 0;
-err:
-       if (thread)
-               thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
-       binder_unlock(__func__);
-       wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
-       if (ret && ret != -ERESTARTSYS)
-               pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
-err_unlocked:
-       trace_binder_ioctl_done(ret);
-       return ret;
-}
-
-static void binder_vma_open(struct vm_area_struct *vma)
-{
-       struct binder_proc *proc = vma->vm_private_data;
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-                    "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
-                    proc->pid, vma->vm_start, vma->vm_end,
-                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-                    (unsigned long)pgprot_val(vma->vm_page_prot));
-}
-
-static void binder_vma_close(struct vm_area_struct *vma)
-{
-       struct binder_proc *proc = vma->vm_private_data;
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-                    "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
-                    proc->pid, vma->vm_start, vma->vm_end,
-                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-                    (unsigned long)pgprot_val(vma->vm_page_prot));
-       proc->vma = NULL;
-       proc->vma_vm_mm = NULL;
-       binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
-}
-
-static struct vm_operations_struct binder_vm_ops = {
-       .open = binder_vma_open,
-       .close = binder_vma_close,
-};
-
-static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-       int ret;
-       struct vm_struct *area;
-       struct binder_proc *proc = filp->private_data;
-       const char *failure_string;
-       struct binder_buffer *buffer;
-
-       if (proc->tsk != current)
-               return -EINVAL;
-
-       if ((vma->vm_end - vma->vm_start) > SZ_4M)
-               vma->vm_end = vma->vm_start + SZ_4M;
-
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-                    "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
-                    proc->pid, vma->vm_start, vma->vm_end,
-                    (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
-                    (unsigned long)pgprot_val(vma->vm_page_prot));
-
-       if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
-               ret = -EPERM;
-               failure_string = "bad vm_flags";
-               goto err_bad_arg;
-       }
-       vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
-
-       mutex_lock(&binder_mmap_lock);
-       if (proc->buffer) {
-               ret = -EBUSY;
-               failure_string = "already mapped";
-               goto err_already_mapped;
-       }
-
-       area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
-       if (area == NULL) {
-               ret = -ENOMEM;
-               failure_string = "get_vm_area";
-               goto err_get_vm_area_failed;
-       }
-       proc->buffer = area->addr;
-       proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
-       mutex_unlock(&binder_mmap_lock);
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-       if (cache_is_vipt_aliasing()) {
-               while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
-                       pr_info("binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
-                       vma->vm_start += PAGE_SIZE;
-               }
-       }
-#endif
-       proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
-       if (proc->pages == NULL) {
-               ret = -ENOMEM;
-               failure_string = "alloc page array";
-               goto err_alloc_pages_failed;
-       }
-       proc->buffer_size = vma->vm_end - vma->vm_start;
-
-       vma->vm_ops = &binder_vm_ops;
-       vma->vm_private_data = proc;
-
-       if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
-               ret = -ENOMEM;
-               failure_string = "alloc small buf";
-               goto err_alloc_small_buf_failed;
-       }
-       buffer = proc->buffer;
-       INIT_LIST_HEAD(&proc->buffers);
-       list_add(&buffer->entry, &proc->buffers);
-       buffer->free = 1;
-       binder_insert_free_buffer(proc, buffer);
-       proc->free_async_space = proc->buffer_size / 2;
-       barrier();
-       proc->files = get_files_struct(current);
-       proc->vma = vma;
-       proc->vma_vm_mm = vma->vm_mm;
-
-       /*pr_info("binder_mmap: %d %lx-%lx maps %p\n",
-                proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
-       return 0;
-
-err_alloc_small_buf_failed:
-       kfree(proc->pages);
-       proc->pages = NULL;
-err_alloc_pages_failed:
-       mutex_lock(&binder_mmap_lock);
-       vfree(proc->buffer);
-       proc->buffer = NULL;
-err_get_vm_area_failed:
-err_already_mapped:
-       mutex_unlock(&binder_mmap_lock);
-err_bad_arg:
-       pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
-              proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
-       return ret;
-}
-
-static int binder_open(struct inode *nodp, struct file *filp)
-{
-       struct binder_proc *proc;
-
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
-                    current->group_leader->pid, current->pid);
-
-       proc = kzalloc(sizeof(*proc), GFP_KERNEL);
-       if (proc == NULL)
-               return -ENOMEM;
-       get_task_struct(current);
-       proc->tsk = current;
-       INIT_LIST_HEAD(&proc->todo);
-       init_waitqueue_head(&proc->wait);
-       proc->default_priority = task_nice(current);
-
-       binder_lock(__func__);
-
-       binder_stats_created(BINDER_STAT_PROC);
-       hlist_add_head(&proc->proc_node, &binder_procs);
-       proc->pid = current->group_leader->pid;
-       INIT_LIST_HEAD(&proc->delivered_death);
-       filp->private_data = proc;
-
-       binder_unlock(__func__);
-
-       if (binder_debugfs_dir_entry_proc) {
-               char strbuf[11];
-               snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
-               proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
-                       binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
-       }
-
-       return 0;
-}
-
-static int binder_flush(struct file *filp, fl_owner_t id)
-{
-       struct binder_proc *proc = filp->private_data;
-
-       binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
-
-       return 0;
-}
-
-static void binder_deferred_flush(struct binder_proc *proc)
-{
-       struct rb_node *n;
-       int wake_count = 0;
-       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
-               struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
-               thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
-               if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
-                       wake_up_interruptible(&thread->wait);
-                       wake_count++;
-               }
-       }
-       wake_up_interruptible_all(&proc->wait);
-
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-                    "binder_flush: %d woke %d threads\n", proc->pid,
-                    wake_count);
-}
-
-static int binder_release(struct inode *nodp, struct file *filp)
-{
-       struct binder_proc *proc = filp->private_data;
-       debugfs_remove(proc->debugfs_entry);
-       binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
-
-       return 0;
-}
-
-static int binder_node_release(struct binder_node *node, int refs)
-{
-       struct binder_ref *ref;
-       int death = 0;
-
-       list_del_init(&node->work.entry);
-       binder_release_work(&node->async_todo);
-
-       if (hlist_empty(&node->refs)) {
-               kfree(node);
-               binder_stats_deleted(BINDER_STAT_NODE);
-
-               return refs;
-       }
-
-       node->proc = NULL;
-       node->local_strong_refs = 0;
-       node->local_weak_refs = 0;
-       hlist_add_head(&node->dead_node, &binder_dead_nodes);
-
-       hlist_for_each_entry(ref, &node->refs, node_entry) {
-               refs++;
-
-               if (!ref->death)
-                       continue;
-
-               death++;
-
-               if (list_empty(&ref->death->work.entry)) {
-                       ref->death->work.type = BINDER_WORK_DEAD_BINDER;
-                       list_add_tail(&ref->death->work.entry,
-                                     &ref->proc->todo);
-                       wake_up_interruptible(&ref->proc->wait);
-               } else
-                       BUG();
-       }
-
-       binder_debug(BINDER_DEBUG_DEAD_BINDER,
-                    "node %d now dead, refs %d, death %d\n",
-                    node->debug_id, refs, death);
-
-       return refs;
-}
-
-static void binder_deferred_release(struct binder_proc *proc)
-{
-       struct binder_transaction *t;
-       struct rb_node *n;
-       int threads, nodes, incoming_refs, outgoing_refs, buffers,
-               active_transactions, page_count;
-
-       BUG_ON(proc->vma);
-       BUG_ON(proc->files);
-
-       hlist_del(&proc->proc_node);
-
-       if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
-               binder_debug(BINDER_DEBUG_DEAD_BINDER,
-                            "%s: %d context_mgr_node gone\n",
-                            __func__, proc->pid);
-               binder_context_mgr_node = NULL;
-       }
-
-       threads = 0;
-       active_transactions = 0;
-       while ((n = rb_first(&proc->threads))) {
-               struct binder_thread *thread;
-
-               thread = rb_entry(n, struct binder_thread, rb_node);
-               threads++;
-               active_transactions += binder_free_thread(proc, thread);
-       }
-
-       nodes = 0;
-       incoming_refs = 0;
-       while ((n = rb_first(&proc->nodes))) {
-               struct binder_node *node;
-
-               node = rb_entry(n, struct binder_node, rb_node);
-               nodes++;
-               rb_erase(&node->rb_node, &proc->nodes);
-               incoming_refs = binder_node_release(node, incoming_refs);
-       }
-
-       outgoing_refs = 0;
-       while ((n = rb_first(&proc->refs_by_desc))) {
-               struct binder_ref *ref;
-
-               ref = rb_entry(n, struct binder_ref, rb_node_desc);
-               outgoing_refs++;
-               binder_delete_ref(ref);
-       }
-
-       binder_release_work(&proc->todo);
-       binder_release_work(&proc->delivered_death);
-
-       buffers = 0;
-       while ((n = rb_first(&proc->allocated_buffers))) {
-               struct binder_buffer *buffer;
-
-               buffer = rb_entry(n, struct binder_buffer, rb_node);
-
-               t = buffer->transaction;
-               if (t) {
-                       t->buffer = NULL;
-                       buffer->transaction = NULL;
-                       pr_err("release proc %d, transaction %d, not freed\n",
-                              proc->pid, t->debug_id);
-                       /*BUG();*/
-               }
-
-               binder_free_buf(proc, buffer);
-               buffers++;
-       }
-
-       binder_stats_deleted(BINDER_STAT_PROC);
-
-       page_count = 0;
-       if (proc->pages) {
-               int i;
-
-               for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
-                       void *page_addr;
-
-                       if (!proc->pages[i])
-                               continue;
-
-                       page_addr = proc->buffer + i * PAGE_SIZE;
-                       binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
-                                    "%s: %d: page %d at %p not freed\n",
-                                    __func__, proc->pid, i, page_addr);
-                       unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
-                       __free_page(proc->pages[i]);
-                       page_count++;
-               }
-               kfree(proc->pages);
-               vfree(proc->buffer);
-       }
-
-       put_task_struct(proc->tsk);
-
-       binder_debug(BINDER_DEBUG_OPEN_CLOSE,
-                    "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d, buffers %d, pages %d\n",
-                    __func__, proc->pid, threads, nodes, incoming_refs,
-                    outgoing_refs, active_transactions, buffers, page_count);
-
-       kfree(proc);
-}
-
-static void binder_deferred_func(struct work_struct *work)
-{
-       struct binder_proc *proc;
-       struct files_struct *files;
-
-       int defer;
-       do {
-               binder_lock(__func__);
-               mutex_lock(&binder_deferred_lock);
-               if (!hlist_empty(&binder_deferred_list)) {
-                       proc = hlist_entry(binder_deferred_list.first,
-                                       struct binder_proc, deferred_work_node);
-                       hlist_del_init(&proc->deferred_work_node);
-                       defer = proc->deferred_work;
-                       proc->deferred_work = 0;
-               } else {
-                       proc = NULL;
-                       defer = 0;
-               }
-               mutex_unlock(&binder_deferred_lock);
-
-               files = NULL;
-               if (defer & BINDER_DEFERRED_PUT_FILES) {
-                       files = proc->files;
-                       if (files)
-                               proc->files = NULL;
-               }
-
-               if (defer & BINDER_DEFERRED_FLUSH)
-                       binder_deferred_flush(proc);
-
-               if (defer & BINDER_DEFERRED_RELEASE)
-                       binder_deferred_release(proc); /* frees proc */
-
-               binder_unlock(__func__);
-               if (files)
-                       put_files_struct(files);
-       } while (proc);
-}
-static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
-
-static void
-binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
-{
-       mutex_lock(&binder_deferred_lock);
-       proc->deferred_work |= defer;
-       if (hlist_unhashed(&proc->deferred_work_node)) {
-               hlist_add_head(&proc->deferred_work_node,
-                               &binder_deferred_list);
-               queue_work(binder_deferred_workqueue, &binder_deferred_work);
-       }
-       mutex_unlock(&binder_deferred_lock);
-}
-
-static void print_binder_transaction(struct seq_file *m, const char *prefix,
-                                    struct binder_transaction *t)
-{
-       seq_printf(m,
-                  "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
-                  prefix, t->debug_id, t,
-                  t->from ? t->from->proc->pid : 0,
-                  t->from ? t->from->pid : 0,
-                  t->to_proc ? t->to_proc->pid : 0,
-                  t->to_thread ? t->to_thread->pid : 0,
-                  t->code, t->flags, t->priority, t->need_reply);
-       if (t->buffer == NULL) {
-               seq_puts(m, " buffer free\n");
-               return;
-       }
-       if (t->buffer->target_node)
-               seq_printf(m, " node %d",
-                          t->buffer->target_node->debug_id);
-       seq_printf(m, " size %zd:%zd data %p\n",
-                  t->buffer->data_size, t->buffer->offsets_size,
-                  t->buffer->data);
-}
-
-static void print_binder_buffer(struct seq_file *m, const char *prefix,
-                               struct binder_buffer *buffer)
-{
-       seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
-                  prefix, buffer->debug_id, buffer->data,
-                  buffer->data_size, buffer->offsets_size,
-                  buffer->transaction ? "active" : "delivered");
-}
-
-static void print_binder_work(struct seq_file *m, const char *prefix,
-                             const char *transaction_prefix,
-                             struct binder_work *w)
-{
-       struct binder_node *node;
-       struct binder_transaction *t;
-
-       switch (w->type) {
-       case BINDER_WORK_TRANSACTION:
-               t = container_of(w, struct binder_transaction, work);
-               print_binder_transaction(m, transaction_prefix, t);
-               break;
-       case BINDER_WORK_TRANSACTION_COMPLETE:
-               seq_printf(m, "%stransaction complete\n", prefix);
-               break;
-       case BINDER_WORK_NODE:
-               node = container_of(w, struct binder_node, work);
-               seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
-                          prefix, node->debug_id,
-                          (u64)node->ptr, (u64)node->cookie);
-               break;
-       case BINDER_WORK_DEAD_BINDER:
-               seq_printf(m, "%shas dead binder\n", prefix);
-               break;
-       case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
-               seq_printf(m, "%shas cleared dead binder\n", prefix);
-               break;
-       case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
-               seq_printf(m, "%shas cleared death notification\n", prefix);
-               break;
-       default:
-               seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
-               break;
-       }
-}
-
-static void print_binder_thread(struct seq_file *m,
-                               struct binder_thread *thread,
-                               int print_always)
-{
-       struct binder_transaction *t;
-       struct binder_work *w;
-       size_t start_pos = m->count;
-       size_t header_pos;
-
-       seq_printf(m, "  thread %d: l %02x\n", thread->pid, thread->looper);
-       header_pos = m->count;
-       t = thread->transaction_stack;
-       while (t) {
-               if (t->from == thread) {
-                       print_binder_transaction(m,
-                                                "    outgoing transaction", t);
-                       t = t->from_parent;
-               } else if (t->to_thread == thread) {
-                       print_binder_transaction(m,
-                                                "    incoming transaction", t);
-                       t = t->to_parent;
-               } else {
-                       print_binder_transaction(m, "    bad transaction", t);
-                       t = NULL;
-               }
-       }
-       list_for_each_entry(w, &thread->todo, entry) {
-               print_binder_work(m, "    ", "    pending transaction", w);
-       }
-       if (!print_always && m->count == header_pos)
-               m->count = start_pos;
-}
-
-static void print_binder_node(struct seq_file *m, struct binder_node *node)
-{
-       struct binder_ref *ref;
-       struct binder_work *w;
-       int count;
-
-       count = 0;
-       hlist_for_each_entry(ref, &node->refs, node_entry)
-               count++;
-
-       seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d",
-                  node->debug_id, (u64)node->ptr, (u64)node->cookie,
-                  node->has_strong_ref, node->has_weak_ref,
-                  node->local_strong_refs, node->local_weak_refs,
-                  node->internal_strong_refs, count);
-       if (count) {
-               seq_puts(m, " proc");
-               hlist_for_each_entry(ref, &node->refs, node_entry)
-                       seq_printf(m, " %d", ref->proc->pid);
-       }
-       seq_puts(m, "\n");
-       list_for_each_entry(w, &node->async_todo, entry)
-               print_binder_work(m, "    ",
-                                 "    pending async transaction", w);
-}
-
-static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
-{
-       seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %p\n",
-                  ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
-                  ref->node->debug_id, ref->strong, ref->weak, ref->death);
-}
-
-static void print_binder_proc(struct seq_file *m,
-                             struct binder_proc *proc, int print_all)
-{
-       struct binder_work *w;
-       struct rb_node *n;
-       size_t start_pos = m->count;
-       size_t header_pos;
-
-       seq_printf(m, "proc %d\n", proc->pid);
-       header_pos = m->count;
-
-       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
-               print_binder_thread(m, rb_entry(n, struct binder_thread,
-                                               rb_node), print_all);
-       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
-               struct binder_node *node = rb_entry(n, struct binder_node,
-                                                   rb_node);
-               if (print_all || node->has_async_transaction)
-                       print_binder_node(m, node);
-       }
-       if (print_all) {
-               for (n = rb_first(&proc->refs_by_desc);
-                    n != NULL;
-                    n = rb_next(n))
-                       print_binder_ref(m, rb_entry(n, struct binder_ref,
-                                                    rb_node_desc));
-       }
-       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-               print_binder_buffer(m, "  buffer",
-                                   rb_entry(n, struct binder_buffer, rb_node));
-       list_for_each_entry(w, &proc->todo, entry)
-               print_binder_work(m, "  ", "  pending transaction", w);
-       list_for_each_entry(w, &proc->delivered_death, entry) {
-               seq_puts(m, "  has delivered dead binder\n");
-               break;
-       }
-       if (!print_all && m->count == header_pos)
-               m->count = start_pos;
-}
-
-static const char * const binder_return_strings[] = {
-       "BR_ERROR",
-       "BR_OK",
-       "BR_TRANSACTION",
-       "BR_REPLY",
-       "BR_ACQUIRE_RESULT",
-       "BR_DEAD_REPLY",
-       "BR_TRANSACTION_COMPLETE",
-       "BR_INCREFS",
-       "BR_ACQUIRE",
-       "BR_RELEASE",
-       "BR_DECREFS",
-       "BR_ATTEMPT_ACQUIRE",
-       "BR_NOOP",
-       "BR_SPAWN_LOOPER",
-       "BR_FINISHED",
-       "BR_DEAD_BINDER",
-       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
-       "BR_FAILED_REPLY"
-};
-
-static const char * const binder_command_strings[] = {
-       "BC_TRANSACTION",
-       "BC_REPLY",
-       "BC_ACQUIRE_RESULT",
-       "BC_FREE_BUFFER",
-       "BC_INCREFS",
-       "BC_ACQUIRE",
-       "BC_RELEASE",
-       "BC_DECREFS",
-       "BC_INCREFS_DONE",
-       "BC_ACQUIRE_DONE",
-       "BC_ATTEMPT_ACQUIRE",
-       "BC_REGISTER_LOOPER",
-       "BC_ENTER_LOOPER",
-       "BC_EXIT_LOOPER",
-       "BC_REQUEST_DEATH_NOTIFICATION",
-       "BC_CLEAR_DEATH_NOTIFICATION",
-       "BC_DEAD_BINDER_DONE"
-};
-
-static const char * const binder_objstat_strings[] = {
-       "proc",
-       "thread",
-       "node",
-       "ref",
-       "death",
-       "transaction",
-       "transaction_complete"
-};
-
-static void print_binder_stats(struct seq_file *m, const char *prefix,
-                              struct binder_stats *stats)
-{
-       int i;
-
-       BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
-                    ARRAY_SIZE(binder_command_strings));
-       for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
-               if (stats->bc[i])
-                       seq_printf(m, "%s%s: %d\n", prefix,
-                                  binder_command_strings[i], stats->bc[i]);
-       }
-
-       BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
-                    ARRAY_SIZE(binder_return_strings));
-       for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
-               if (stats->br[i])
-                       seq_printf(m, "%s%s: %d\n", prefix,
-                                  binder_return_strings[i], stats->br[i]);
-       }
-
-       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
-                    ARRAY_SIZE(binder_objstat_strings));
-       BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
-                    ARRAY_SIZE(stats->obj_deleted));
-       for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
-               if (stats->obj_created[i] || stats->obj_deleted[i])
-                       seq_printf(m, "%s%s: active %d total %d\n", prefix,
-                               binder_objstat_strings[i],
-                               stats->obj_created[i] - stats->obj_deleted[i],
-                               stats->obj_created[i]);
-       }
-}
-
-static void print_binder_proc_stats(struct seq_file *m,
-                                   struct binder_proc *proc)
-{
-       struct binder_work *w;
-       struct rb_node *n;
-       int count, strong, weak;
-
-       seq_printf(m, "proc %d\n", proc->pid);
-       count = 0;
-       for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
-               count++;
-       seq_printf(m, "  threads: %d\n", count);
-       seq_printf(m, "  requested threads: %d+%d/%d\n"
-                       "  ready threads %d\n"
-                       "  free async space %zd\n", proc->requested_threads,
-                       proc->requested_threads_started, proc->max_threads,
-                       proc->ready_threads, proc->free_async_space);
-       count = 0;
-       for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
-               count++;
-       seq_printf(m, "  nodes: %d\n", count);
-       count = 0;
-       strong = 0;
-       weak = 0;
-       for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
-               struct binder_ref *ref = rb_entry(n, struct binder_ref,
-                                                 rb_node_desc);
-               count++;
-               strong += ref->strong;
-               weak += ref->weak;
-       }
-       seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
-
-       count = 0;
-       for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
-               count++;
-       seq_printf(m, "  buffers: %d\n", count);
-
-       count = 0;
-       list_for_each_entry(w, &proc->todo, entry) {
-               switch (w->type) {
-               case BINDER_WORK_TRANSACTION:
-                       count++;
-                       break;
-               default:
-                       break;
-               }
-       }
-       seq_printf(m, "  pending transactions: %d\n", count);
-
-       print_binder_stats(m, "  ", &proc->stats);
-}
-
-
-static int binder_state_show(struct seq_file *m, void *unused)
-{
-       struct binder_proc *proc;
-       struct binder_node *node;
-       int do_lock = !binder_debug_no_lock;
-
-       if (do_lock)
-               binder_lock(__func__);
-
-       seq_puts(m, "binder state:\n");
-
-       if (!hlist_empty(&binder_dead_nodes))
-               seq_puts(m, "dead nodes:\n");
-       hlist_for_each_entry(node, &binder_dead_nodes, dead_node)
-               print_binder_node(m, node);
-
-       hlist_for_each_entry(proc, &binder_procs, proc_node)
-               print_binder_proc(m, proc, 1);
-       if (do_lock)
-               binder_unlock(__func__);
-       return 0;
-}
-
-static int binder_stats_show(struct seq_file *m, void *unused)
-{
-       struct binder_proc *proc;
-       int do_lock = !binder_debug_no_lock;
-
-       if (do_lock)
-               binder_lock(__func__);
-
-       seq_puts(m, "binder stats:\n");
-
-       print_binder_stats(m, "", &binder_stats);
-
-       hlist_for_each_entry(proc, &binder_procs, proc_node)
-               print_binder_proc_stats(m, proc);
-       if (do_lock)
-               binder_unlock(__func__);
-       return 0;
-}
-
-static int binder_transactions_show(struct seq_file *m, void *unused)
-{
-       struct binder_proc *proc;
-       int do_lock = !binder_debug_no_lock;
-
-       if (do_lock)
-               binder_lock(__func__);
-
-       seq_puts(m, "binder transactions:\n");
-       hlist_for_each_entry(proc, &binder_procs, proc_node)
-               print_binder_proc(m, proc, 0);
-       if (do_lock)
-               binder_unlock(__func__);
-       return 0;
-}
-
-static int binder_proc_show(struct seq_file *m, void *unused)
-{
-       struct binder_proc *proc = m->private;
-       int do_lock = !binder_debug_no_lock;
-
-       if (do_lock)
-               binder_lock(__func__);
-       seq_puts(m, "binder proc state:\n");
-       print_binder_proc(m, proc, 1);
-       if (do_lock)
-               binder_unlock(__func__);
-       return 0;
-}
-
-static void print_binder_transaction_log_entry(struct seq_file *m,
-                                       struct binder_transaction_log_entry *e)
-{
-       seq_printf(m,
-                  "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
-                  e->debug_id, (e->call_type == 2) ? "reply" :
-                  ((e->call_type == 1) ? "async" : "call "), e->from_proc,
-                  e->from_thread, e->to_proc, e->to_thread, e->to_node,
-                  e->target_handle, e->data_size, e->offsets_size);
-}
-
-static int binder_transaction_log_show(struct seq_file *m, void *unused)
-{
-       struct binder_transaction_log *log = m->private;
-       int i;
-
-       if (log->full) {
-               for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
-                       print_binder_transaction_log_entry(m, &log->entry[i]);
-       }
-       for (i = 0; i < log->next; i++)
-               print_binder_transaction_log_entry(m, &log->entry[i]);
-       return 0;
-}
-
-static const struct file_operations binder_fops = {
-       .owner = THIS_MODULE,
-       .poll = binder_poll,
-       .unlocked_ioctl = binder_ioctl,
-       .compat_ioctl = binder_ioctl,
-       .mmap = binder_mmap,
-       .open = binder_open,
-       .flush = binder_flush,
-       .release = binder_release,
-};
-
-static struct miscdevice binder_miscdev = {
-       .minor = MISC_DYNAMIC_MINOR,
-       .name = "binder",
-       .fops = &binder_fops
-};
-
-BINDER_DEBUG_ENTRY(state);
-BINDER_DEBUG_ENTRY(stats);
-BINDER_DEBUG_ENTRY(transactions);
-BINDER_DEBUG_ENTRY(transaction_log);
-
-static int __init binder_init(void)
-{
-       int ret;
-
-       binder_deferred_workqueue = create_singlethread_workqueue("binder");
-       if (!binder_deferred_workqueue)
-               return -ENOMEM;
-
-       binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
-       if (binder_debugfs_dir_entry_root)
-               binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
-                                                binder_debugfs_dir_entry_root);
-       ret = misc_register(&binder_miscdev);
-       if (binder_debugfs_dir_entry_root) {
-               debugfs_create_file("state",
-                                   S_IRUGO,
-                                   binder_debugfs_dir_entry_root,
-                                   NULL,
-                                   &binder_state_fops);
-               debugfs_create_file("stats",
-                                   S_IRUGO,
-                                   binder_debugfs_dir_entry_root,
-                                   NULL,
-                                   &binder_stats_fops);
-               debugfs_create_file("transactions",
-                                   S_IRUGO,
-                                   binder_debugfs_dir_entry_root,
-                                   NULL,
-                                   &binder_transactions_fops);
-               debugfs_create_file("transaction_log",
-                                   S_IRUGO,
-                                   binder_debugfs_dir_entry_root,
-                                   &binder_transaction_log,
-                                   &binder_transaction_log_fops);
-               debugfs_create_file("failed_transaction_log",
-                                   S_IRUGO,
-                                   binder_debugfs_dir_entry_root,
-                                   &binder_transaction_log_failed,
-                                   &binder_transaction_log_fops);
-       }
-       return ret;
-}
-
-device_initcall(binder_init);
-
-#define CREATE_TRACE_POINTS
-#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
deleted file mode 100644 (file)
index eb08346..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_BINDER_H
-#define _LINUX_BINDER_H
-
-#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
-#define BINDER_IPC_32BIT 1
-#endif
-
-#include "uapi/binder.h"
-
-#endif /* _LINUX_BINDER_H */
-
diff --git a/drivers/staging/android/binder_trace.h b/drivers/staging/android/binder_trace.h
deleted file mode 100644 (file)
index 7f20f3d..0000000
+++ /dev/null
@@ -1,329 +0,0 @@
-/*
- * Copyright (C) 2012 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM binder
-
-#if !defined(_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _BINDER_TRACE_H
-
-#include <linux/tracepoint.h>
-
-struct binder_buffer;
-struct binder_node;
-struct binder_proc;
-struct binder_ref;
-struct binder_thread;
-struct binder_transaction;
-
-TRACE_EVENT(binder_ioctl,
-       TP_PROTO(unsigned int cmd, unsigned long arg),
-       TP_ARGS(cmd, arg),
-
-       TP_STRUCT__entry(
-               __field(unsigned int, cmd)
-               __field(unsigned long, arg)
-       ),
-       TP_fast_assign(
-               __entry->cmd = cmd;
-               __entry->arg = arg;
-       ),
-       TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
-);
-
-DECLARE_EVENT_CLASS(binder_lock_class,
-       TP_PROTO(const char *tag),
-       TP_ARGS(tag),
-       TP_STRUCT__entry(
-               __field(const char *, tag)
-       ),
-       TP_fast_assign(
-               __entry->tag = tag;
-       ),
-       TP_printk("tag=%s", __entry->tag)
-);
-
-#define DEFINE_BINDER_LOCK_EVENT(name) \
-DEFINE_EVENT(binder_lock_class, name,  \
-       TP_PROTO(const char *func), \
-       TP_ARGS(func))
-
-DEFINE_BINDER_LOCK_EVENT(binder_lock);
-DEFINE_BINDER_LOCK_EVENT(binder_locked);
-DEFINE_BINDER_LOCK_EVENT(binder_unlock);
-
-DECLARE_EVENT_CLASS(binder_function_return_class,
-       TP_PROTO(int ret),
-       TP_ARGS(ret),
-       TP_STRUCT__entry(
-               __field(int, ret)
-       ),
-       TP_fast_assign(
-               __entry->ret = ret;
-       ),
-       TP_printk("ret=%d", __entry->ret)
-);
-
-#define DEFINE_BINDER_FUNCTION_RETURN_EVENT(name)      \
-DEFINE_EVENT(binder_function_return_class, name,       \
-       TP_PROTO(int ret), \
-       TP_ARGS(ret))
-
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
-DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
-
-TRACE_EVENT(binder_wait_for_work,
-       TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
-       TP_ARGS(proc_work, transaction_stack, thread_todo),
-
-       TP_STRUCT__entry(
-               __field(bool, proc_work)
-               __field(bool, transaction_stack)
-               __field(bool, thread_todo)
-       ),
-       TP_fast_assign(
-               __entry->proc_work = proc_work;
-               __entry->transaction_stack = transaction_stack;
-               __entry->thread_todo = thread_todo;
-       ),
-       TP_printk("proc_work=%d transaction_stack=%d thread_todo=%d",
-                 __entry->proc_work, __entry->transaction_stack,
-                 __entry->thread_todo)
-);
-
-TRACE_EVENT(binder_transaction,
-       TP_PROTO(bool reply, struct binder_transaction *t,
-                struct binder_node *target_node),
-       TP_ARGS(reply, t, target_node),
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(int, target_node)
-               __field(int, to_proc)
-               __field(int, to_thread)
-               __field(int, reply)
-               __field(unsigned int, code)
-               __field(unsigned int, flags)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-               __entry->target_node = target_node ? target_node->debug_id : 0;
-               __entry->to_proc = t->to_proc->pid;
-               __entry->to_thread = t->to_thread ? t->to_thread->pid : 0;
-               __entry->reply = reply;
-               __entry->code = t->code;
-               __entry->flags = t->flags;
-       ),
-       TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
-                 __entry->debug_id, __entry->target_node,
-                 __entry->to_proc, __entry->to_thread,
-                 __entry->reply, __entry->flags, __entry->code)
-);
-
-TRACE_EVENT(binder_transaction_received,
-       TP_PROTO(struct binder_transaction *t),
-       TP_ARGS(t),
-
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-       ),
-       TP_printk("transaction=%d", __entry->debug_id)
-);
-
-TRACE_EVENT(binder_transaction_node_to_ref,
-       TP_PROTO(struct binder_transaction *t, struct binder_node *node,
-                struct binder_ref *ref),
-       TP_ARGS(t, node, ref),
-
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(int, node_debug_id)
-               __field(binder_uintptr_t, node_ptr)
-               __field(int, ref_debug_id)
-               __field(uint32_t, ref_desc)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-               __entry->node_debug_id = node->debug_id;
-               __entry->node_ptr = node->ptr;
-               __entry->ref_debug_id = ref->debug_id;
-               __entry->ref_desc = ref->desc;
-       ),
-       TP_printk("transaction=%d node=%d src_ptr=0x%016llx ==> dest_ref=%d dest_desc=%d",
-                 __entry->debug_id, __entry->node_debug_id,
-                 (u64)__entry->node_ptr,
-                 __entry->ref_debug_id, __entry->ref_desc)
-);
-
-TRACE_EVENT(binder_transaction_ref_to_node,
-       TP_PROTO(struct binder_transaction *t, struct binder_ref *ref),
-       TP_ARGS(t, ref),
-
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(int, ref_debug_id)
-               __field(uint32_t, ref_desc)
-               __field(int, node_debug_id)
-               __field(binder_uintptr_t, node_ptr)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-               __entry->ref_debug_id = ref->debug_id;
-               __entry->ref_desc = ref->desc;
-               __entry->node_debug_id = ref->node->debug_id;
-               __entry->node_ptr = ref->node->ptr;
-       ),
-       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ptr=0x%016llx",
-                 __entry->debug_id, __entry->node_debug_id,
-                 __entry->ref_debug_id, __entry->ref_desc,
-                 (u64)__entry->node_ptr)
-);
-
-TRACE_EVENT(binder_transaction_ref_to_ref,
-       TP_PROTO(struct binder_transaction *t, struct binder_ref *src_ref,
-                struct binder_ref *dest_ref),
-       TP_ARGS(t, src_ref, dest_ref),
-
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(int, node_debug_id)
-               __field(int, src_ref_debug_id)
-               __field(uint32_t, src_ref_desc)
-               __field(int, dest_ref_debug_id)
-               __field(uint32_t, dest_ref_desc)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-               __entry->node_debug_id = src_ref->node->debug_id;
-               __entry->src_ref_debug_id = src_ref->debug_id;
-               __entry->src_ref_desc = src_ref->desc;
-               __entry->dest_ref_debug_id = dest_ref->debug_id;
-               __entry->dest_ref_desc = dest_ref->desc;
-       ),
-       TP_printk("transaction=%d node=%d src_ref=%d src_desc=%d ==> dest_ref=%d dest_desc=%d",
-                 __entry->debug_id, __entry->node_debug_id,
-                 __entry->src_ref_debug_id, __entry->src_ref_desc,
-                 __entry->dest_ref_debug_id, __entry->dest_ref_desc)
-);
-
-TRACE_EVENT(binder_transaction_fd,
-       TP_PROTO(struct binder_transaction *t, int src_fd, int dest_fd),
-       TP_ARGS(t, src_fd, dest_fd),
-
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(int, src_fd)
-               __field(int, dest_fd)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = t->debug_id;
-               __entry->src_fd = src_fd;
-               __entry->dest_fd = dest_fd;
-       ),
-       TP_printk("transaction=%d src_fd=%d ==> dest_fd=%d",
-                 __entry->debug_id, __entry->src_fd, __entry->dest_fd)
-);
-
-DECLARE_EVENT_CLASS(binder_buffer_class,
-       TP_PROTO(struct binder_buffer *buf),
-       TP_ARGS(buf),
-       TP_STRUCT__entry(
-               __field(int, debug_id)
-               __field(size_t, data_size)
-               __field(size_t, offsets_size)
-       ),
-       TP_fast_assign(
-               __entry->debug_id = buf->debug_id;
-               __entry->data_size = buf->data_size;
-               __entry->offsets_size = buf->offsets_size;
-       ),
-       TP_printk("transaction=%d data_size=%zd offsets_size=%zd",
-                 __entry->debug_id, __entry->data_size, __entry->offsets_size)
-);
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_alloc_buf,
-       TP_PROTO(struct binder_buffer *buffer),
-       TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_buffer_release,
-       TP_PROTO(struct binder_buffer *buffer),
-       TP_ARGS(buffer));
-
-DEFINE_EVENT(binder_buffer_class, binder_transaction_failed_buffer_release,
-       TP_PROTO(struct binder_buffer *buffer),
-       TP_ARGS(buffer));
-
-TRACE_EVENT(binder_update_page_range,
-       TP_PROTO(struct binder_proc *proc, bool allocate,
-                void *start, void *end),
-       TP_ARGS(proc, allocate, start, end),
-       TP_STRUCT__entry(
-               __field(int, proc)
-               __field(bool, allocate)
-               __field(size_t, offset)
-               __field(size_t, size)
-       ),
-       TP_fast_assign(
-               __entry->proc = proc->pid;
-               __entry->allocate = allocate;
-               __entry->offset = start - proc->buffer;
-               __entry->size = end - start;
-       ),
-       TP_printk("proc=%d allocate=%d offset=%zu size=%zu",
-                 __entry->proc, __entry->allocate,
-                 __entry->offset, __entry->size)
-);
-
-TRACE_EVENT(binder_command,
-       TP_PROTO(uint32_t cmd),
-       TP_ARGS(cmd),
-       TP_STRUCT__entry(
-               __field(uint32_t, cmd)
-       ),
-       TP_fast_assign(
-               __entry->cmd = cmd;
-       ),
-       TP_printk("cmd=0x%x %s",
-                 __entry->cmd,
-                 _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_command_strings) ?
-                         binder_command_strings[_IOC_NR(__entry->cmd)] :
-                         "unknown")
-);
-
-TRACE_EVENT(binder_return,
-       TP_PROTO(uint32_t cmd),
-       TP_ARGS(cmd),
-       TP_STRUCT__entry(
-               __field(uint32_t, cmd)
-       ),
-       TP_fast_assign(
-               __entry->cmd = cmd;
-       ),
-       TP_printk("cmd=0x%x %s",
-                 __entry->cmd,
-                 _IOC_NR(__entry->cmd) < ARRAY_SIZE(binder_return_strings) ?
-                         binder_return_strings[_IOC_NR(__entry->cmd)] :
-                         "unknown")
-);
-
-#endif /* _BINDER_TRACE_H */
-
-#undef TRACE_INCLUDE_PATH
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE binder_trace
-#include <trace/define_trace.h>
index 44178ca69a7522329a96acf0592fabb1a9bfdf84..f497979b6c4051ef317d116634661b6cd58c3239 100755 (executable)
@@ -430,6 +430,7 @@ static struct ion_handle *ion_handle_lookup(struct ion_client *client,
 
        while (n) {
                struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
                if (buffer < entry->buffer)
                        n = n->rb_left;
                else if (buffer > entry->buffer)
@@ -1047,9 +1048,11 @@ static int ion_get_client_serial(const struct rb_root *root,
 {
        int serial = -1;
        struct rb_node *node;
+
        for (node = rb_first(root); node; node = rb_next(node)) {
                struct ion_client *client = rb_entry(node, struct ion_client,
                                                node);
+
                if (strcmp(client->name, name))
                        continue;
                serial = max(serial, client->display_serial);
@@ -1379,12 +1382,14 @@ int ion_munmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 static void ion_dma_buf_release(struct dma_buf *dmabuf)
 {
        struct ion_buffer *buffer = dmabuf->priv;
+
        ion_buffer_put(buffer);
 }
 
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
 {
        struct ion_buffer *buffer = dmabuf->priv;
+
        return buffer->vaddr + offset * PAGE_SIZE;
 }
 
@@ -1642,6 +1647,7 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
        case ION_IOC_IMPORT:
        {
                struct ion_handle *handle;
+
                handle = ion_import_dma_buf(client, data.fd.fd);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
@@ -1743,6 +1749,7 @@ static int ion_debug_heap_show(struct seq_file *s, void *unused)
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                size_t size = ion_debug_heap_total(client, heap->id);
+
                if (!size)
                        continue;
                if (client->task) {
@@ -1926,6 +1933,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 
        if (!debug_file) {
                char buf[256], *path;
+
                path = dentry_path(dev->heaps_debug_root, buf, 256);
                pr_err("Failed to create heap debugfs at %s/%s\n",
                        path, heap->name);
@@ -1941,6 +1949,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
                        &debug_shrink_fops);
                if (!debug_file) {
                        char buf[256], *path;
+
                        path = dentry_path(dev->heaps_debug_root, buf, 256);
                        pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
                                path, debug_name);
@@ -2048,6 +2057,7 @@ void __init ion_reserve(struct ion_platform_data *data)
                        data->heaps[i].base = PFN_PHYS(dev_get_cma_area(dev)->base_pfn);
                } else if (data->heaps[i].base == 0) {
                        phys_addr_t paddr;
+
                        paddr = memblock_alloc_base(data->heaps[i].size,
                                                    data->heaps[i].align,
                                                    MEMBLOCK_ALLOC_ANYWHERE);
index 5c838ee384f9a7c0ea390dcf984b431b4a8ca428..7201bc55b3213b2d5c56809f376a716708f23f52 100755 (executable)
@@ -48,6 +48,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
        for_each_sg(table->sgl, sg, table->nents, i) {
                int npages_this_entry = PAGE_ALIGN(sg->length) / PAGE_SIZE;
                struct page *page = sg_page(sg);
+
                BUG_ON(i >= npages);
                for (j = 0; j < npages_this_entry; j++)
                        *(tmp++) = page++;
@@ -105,6 +106,7 @@ int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
 static int ion_heap_clear_pages(struct page **pages, int num, pgprot_t pgprot)
 {
        void *addr = vm_map_ram(pages, num, -1, pgprot);
+
        if (!addr)
                return -ENOMEM;
        memset(addr, 0, PAGE_SIZE * num);
index 671b05248edd0077c51c626e5454df411e211169..aaac72219b24d446de34d7a155a4100fc515f8a9 100755 (executable)
@@ -210,6 +210,7 @@ struct ion_heap {
        spinlock_t free_lock;
        wait_queue_head_t waitqueue;
        struct task_struct *task;
+
        int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
 };
 
index e53e0456787eada34ee292fea146b3b0a679cde4..2476b0358d11bf735c9763b027047ca4e88ca0ed 100755 (executable)
@@ -35,6 +35,7 @@ static const int num_orders = ARRAY_SIZE(orders);
 static int order_to_index(unsigned int order)
 {
        int i;
+
        for (i = 0; i < num_orders; i++)
                if (order == orders[i])
                        return i;
@@ -93,6 +94,7 @@ static void free_buffer_page(struct ion_system_heap *heap,
 
        if (!cached && !(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE)) {
                struct ion_page_pool *pool = heap->pools[order_to_index(order)];
+
                ion_page_pool_free(pool, page);
        } else {
                __free_pages(page, order);
@@ -243,6 +245,7 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];
+
                nr_total += ion_page_pool_shrink(pool, gfp_mask, nr_to_scan);
        }
 
@@ -306,8 +309,10 @@ static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s,
                                                        struct ion_system_heap,
                                                        heap);
        int i;
+
        for (i = 0; i < num_orders; i++) {
                struct ion_page_pool *pool = sys_heap->pools[i];
+
                seq_printf(s, "%d order %u highmem pages in pool = %lu total\n",
                           pool->high_count, pool->order,
                           (1 << pool->order) * PAGE_SIZE * pool->high_count);
index 34519ea14b5429e0e3ac91125d472dbe0f1d234c..5f563acd34cc0929a7b4c06164e06fffd1805fcf 100644 (file)
@@ -108,6 +108,7 @@ static inline struct logger_log *file_get_log(struct file *file)
 {
        if (file->f_mode & FMODE_READ) {
                struct logger_reader *reader = file->private_data;
+
                return reader->log;
        } else
                return file->private_data;
@@ -124,6 +125,7 @@ static struct logger_entry *get_entry_header(struct logger_log *log,
                size_t off, struct logger_entry *scratch)
 {
        size_t len = min(sizeof(struct logger_entry), log->size - off);
+
        if (len != sizeof(struct logger_entry)) {
                memcpy(((void *) scratch), log->buffer + off, len);
                memcpy(((void *) scratch) + len, log->buffer,
@@ -642,6 +644,7 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
 static long logger_set_version(struct logger_reader *reader, void __user *arg)
 {
        int version;
+
        if (copy_from_user(&version, arg, sizeof(int)))
                return -EFAULT;
 
index 4928f93bdf3d6a0a8525b00e985a651586bd1ffb..820af5cfb830acb1cc962a4eef0553a4515a22a2 100644 (file)
@@ -97,6 +97,7 @@ static void sw_sync_pt_value_str(struct sync_pt *sync_pt,
                                       char *str, int size)
 {
        struct sw_sync_pt *pt = (struct sw_sync_pt *)sync_pt;
+
        snprintf(str, size, "%d", pt->value);
 }
 
@@ -156,6 +157,7 @@ static int sw_sync_open(struct inode *inode, struct file *file)
 static int sw_sync_release(struct inode *inode, struct file *file)
 {
        struct sw_sync_timeline *obj = file->private_data;
+
        sync_timeline_destroy(&obj->obj);
        return 0;
 }
index d38305b409306a95046023ba77af7bb7f3f7e5bf..61e624905916e1c00e78a2863082902767291791 100644 (file)
@@ -384,6 +384,7 @@ static void sync_fence_detach_pts(struct sync_fence *fence)
 
        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
                sync_timeline_remove_pt(pt);
        }
 }
@@ -394,6 +395,7 @@ static void sync_fence_free_pts(struct sync_fence *fence)
 
        list_for_each_safe(pos, n, &fence->pt_list_head) {
                struct sync_pt *pt = container_of(pos, struct sync_pt, pt_list);
+
                sync_pt_free(pt);
        }
 }
@@ -827,6 +829,7 @@ static long sync_fence_ioctl(struct file *file, unsigned int cmd,
                             unsigned long arg)
 {
        struct sync_fence *fence = file->private_data;
+
        switch (cmd) {
        case SYNC_IOC_WAIT:
                return sync_fence_ioctl_wait(fence, arg);
@@ -856,18 +859,21 @@ static const char *sync_status_str(int status)
 static void sync_print_pt(struct seq_file *s, struct sync_pt *pt, bool fence)
 {
        int status = pt->status;
+
        seq_printf(s, "  %s%spt %s",
                   fence ? pt->parent->name : "",
                   fence ? "_" : "",
                   sync_status_str(status));
        if (pt->status) {
                struct timeval tv = ktime_to_timeval(pt->timestamp);
+
                seq_printf(s, "@%ld.%06ld", tv.tv_sec, tv.tv_usec);
        }
 
        if (pt->parent->ops->timeline_value_str &&
            pt->parent->ops->pt_value_str) {
                char value[64];
+
                pt->parent->ops->pt_value_str(pt, value, sizeof(value));
                seq_printf(s, ": %s", value);
                if (fence) {
@@ -892,6 +898,7 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
 
        if (obj->ops->timeline_value_str) {
                char value[64];
+
                obj->ops->timeline_value_str(obj, value, sizeof(value));
                seq_printf(s, ": %s", value);
        } else if (obj->ops->print_obj) {
@@ -1001,6 +1008,7 @@ void sync_dump(void)
        for (i = 0; i < s.count; i += DUMP_CHUNK) {
                if ((s.count - i) > DUMP_CHUNK) {
                        char c = s.buf[i + DUMP_CHUNK];
+
                        s.buf[i + DUMP_CHUNK] = 0;
                        pr_cont("%s", s.buf + i);
                        s.buf[i + DUMP_CHUNK] = c;
index e81451425c012c42b7e032cfeb5455a17b6535b0..ae9966d1f7cce5366dbbfa880b4ce1221dc7e2bd 100644 (file)
@@ -51,6 +51,7 @@ static int gpio_get_time(struct timed_output_dev *dev)
        if (hrtimer_active(&data->timer)) {
                ktime_t r = hrtimer_get_remaining(&data->timer);
                struct timeval t = ktime_to_timeval(r);
+
                return t.tv_sec * 1000 + t.tv_usec / 1000;
        } else
                return 0;
index ba4743c71d6b45d6d42c0c9711609bbd773a34ba..13df42d200b7cc003e46cebd0e28b1997341fa87 100644 (file)
@@ -13,6 +13,7 @@
 #define _UAPI_LINUX_ASHMEM_H
 
 #include <linux/ioctl.h>
+#include <linux/types.h>
 
 #define ASHMEM_NAME_LEN                256
 
diff --git a/drivers/staging/android/uapi/binder.h b/drivers/staging/android/uapi/binder.h
deleted file mode 100644 (file)
index 4098c50..0000000
+++ /dev/null
@@ -1,347 +0,0 @@
-/*
- * Copyright (C) 2008 Google, Inc.
- *
- * Based on, but no longer compatible with, the original
- * OpenBinder.org binder driver interface, which is:
- *
- * Copyright (c) 2005 Palmsource, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _UAPI_LINUX_BINDER_H
-#define _UAPI_LINUX_BINDER_H
-
-#include <linux/ioctl.h>
-
-#define B_PACK_CHARS(c1, c2, c3, c4) \
-       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
-#define B_TYPE_LARGE 0x85
-
-enum {
-       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
-       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
-       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
-};
-
-enum {
-       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
-       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
-};
-
-#ifdef BINDER_IPC_32BIT
-typedef __u32 binder_size_t;
-typedef __u32 binder_uintptr_t;
-#else
-typedef __u64 binder_size_t;
-typedef __u64 binder_uintptr_t;
-#endif
-
-/*
- * This is the flattened representation of a Binder object for transfer
- * between processes.  The 'offsets' supplied as part of a binder transaction
- * contains offsets into the data where these structures occur.  The Binder
- * driver takes care of re-writing the structure type and data as it moves
- * between processes.
- */
-struct flat_binder_object {
-       /* 8 bytes for large_flat_header. */
-       __u32   type;
-       __u32   flags;
-
-       /* 8 bytes of data. */
-       union {
-               binder_uintptr_t        binder; /* local object */
-               __u32                   handle; /* remote object */
-       };
-
-       /* extra data associated with local object */
-       binder_uintptr_t        cookie;
-};
-
-/*
- * On 64-bit platforms where user code may run in 32-bits the driver must
- * translate the buffer (and local binder) addresses appropriately.
- */
-
-struct binder_write_read {
-       binder_size_t           write_size;     /* bytes to write */
-       binder_size_t           write_consumed; /* bytes consumed by driver */
-       binder_uintptr_t        write_buffer;
-       binder_size_t           read_size;      /* bytes to read */
-       binder_size_t           read_consumed;  /* bytes consumed by driver */
-       binder_uintptr_t        read_buffer;
-};
-
-/* Use with BINDER_VERSION, driver fills in fields. */
-struct binder_version {
-       /* driver protocol version -- increment with incompatible change */
-       __s32   protocol_version;
-};
-
-/* This is the current protocol version. */
-#ifdef BINDER_IPC_32BIT
-#define BINDER_CURRENT_PROTOCOL_VERSION 7
-#else
-#define BINDER_CURRENT_PROTOCOL_VERSION 8
-#endif
-
-#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
-#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
-#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, __u32)
-#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
-#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
-#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
-#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
-
-/*
- * NOTE: Two special error codes you should check for when calling
- * in to the driver are:
- *
- * EINTR -- The operation has been interupted.  This should be
- * handled by retrying the ioctl() until a different error code
- * is returned.
- *
- * ECONNREFUSED -- The driver is no longer accepting operations
- * from your process.  That is, the process is being destroyed.
- * You should handle this by exiting from your process.  Note
- * that once this error code is returned, all further calls to
- * the driver from any thread will return this same code.
- */
-
-enum transaction_flags {
-       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
-       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
-       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
-       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
-};
-
-struct binder_transaction_data {
-       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
-        * identifying the target and contents of the transaction.
-        */
-       union {
-               __u32   handle; /* target descriptor of command transaction */
-               binder_uintptr_t ptr;   /* target descriptor of return transaction */
-       } target;
-       binder_uintptr_t        cookie; /* target object cookie */
-       __u32           code;           /* transaction command */
-
-       /* General information about the transaction. */
-       __u32           flags;
-       pid_t           sender_pid;
-       uid_t           sender_euid;
-       binder_size_t   data_size;      /* number of bytes of data */
-       binder_size_t   offsets_size;   /* number of bytes of offsets */
-
-       /* If this transaction is inline, the data immediately
-        * follows here; otherwise, it ends with a pointer to
-        * the data buffer.
-        */
-       union {
-               struct {
-                       /* transaction data */
-                       binder_uintptr_t        buffer;
-                       /* offsets from buffer to flat_binder_object structs */
-                       binder_uintptr_t        offsets;
-               } ptr;
-               __u8    buf[8];
-       } data;
-};
-
-struct binder_ptr_cookie {
-       binder_uintptr_t ptr;
-       binder_uintptr_t cookie;
-};
-
-struct binder_handle_cookie {
-       __u32 handle;
-       binder_uintptr_t cookie;
-} __attribute__((packed));
-
-struct binder_pri_desc {
-       __s32 priority;
-       __u32 desc;
-};
-
-struct binder_pri_ptr_cookie {
-       __s32 priority;
-       binder_uintptr_t ptr;
-       binder_uintptr_t cookie;
-};
-
-enum binder_driver_return_protocol {
-       BR_ERROR = _IOR('r', 0, __s32),
-       /*
-        * int: error code
-        */
-
-       BR_OK = _IO('r', 1),
-       /* No parameters! */
-
-       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
-       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the received command.
-        */
-
-       BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
-       /*
-        * not currently supported
-        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
-        * Else the remote object has acquired a primary reference.
-        */
-
-       BR_DEAD_REPLY = _IO('r', 5),
-       /*
-        * The target of the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
-        */
-
-       BR_TRANSACTION_COMPLETE = _IO('r', 6),
-       /*
-        * No parameters... always refers to the last transaction requested
-        * (including replies).  Note that this will be sent even for
-        * asynchronous transactions.
-        */
-
-       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
-       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
-       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
-       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
-       /*
-        * void *:      ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
-       /*
-        * not currently supported
-        * int: priority
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BR_NOOP = _IO('r', 12),
-       /*
-        * No parameters.  Do nothing and examine the next command.  It exists
-        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
-        */
-
-       BR_SPAWN_LOOPER = _IO('r', 13),
-       /*
-        * No parameters.  The driver has determined that a process has no
-        * threads waiting to service incoming transactions.  When a process
-        * receives this command, it must spawn a new service thread and
-        * register it via bcENTER_LOOPER.
-        */
-
-       BR_FINISHED = _IO('r', 14),
-       /*
-        * not currently supported
-        * stop threadpool thread
-        */
-
-       BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
-       /*
-        * void *: cookie
-        */
-       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
-       /*
-        * void *: cookie
-        */
-
-       BR_FAILED_REPLY = _IO('r', 17),
-       /*
-        * The the last transaction (either a bcTRANSACTION or
-        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
-        */
-};
-
-enum binder_driver_command_protocol {
-       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
-       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
-       /*
-        * binder_transaction_data: the sent command.
-        */
-
-       BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
-       /*
-        * not currently supported
-        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
-        * Else you have acquired a primary reference on the object.
-        */
-
-       BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
-       /*
-        * void *: ptr to transaction data received on a read
-        */
-
-       BC_INCREFS = _IOW('c', 4, __u32),
-       BC_ACQUIRE = _IOW('c', 5, __u32),
-       BC_RELEASE = _IOW('c', 6, __u32),
-       BC_DECREFS = _IOW('c', 7, __u32),
-       /*
-        * int: descriptor
-        */
-
-       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
-       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
-       /*
-        * void *: ptr to binder
-        * void *: cookie for binder
-        */
-
-       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
-       /*
-        * not currently supported
-        * int: priority
-        * int: descriptor
-        */
-
-       BC_REGISTER_LOOPER = _IO('c', 11),
-       /*
-        * No parameters.
-        * Register a spawned looper thread with the device.
-        */
-
-       BC_ENTER_LOOPER = _IO('c', 12),
-       BC_EXIT_LOOPER = _IO('c', 13),
-       /*
-        * No parameters.
-        * These two commands are sent as an application-level thread
-        * enters and exits the binder loop, respectively.  They are
-        * used so the binder can have an accurate count of the number
-        * of looping threads it has available.
-        */
-
-       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
-       /*
-        * int: handle
-        * void *: cookie
-        */
-
-       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
-       /*
-        * int: handle
-        * void *: cookie
-        */
-
-       BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
-       /*
-        * void *: cookie
-        */
-};
-
-#endif /* _UAPI_LINUX_BINDER_H */
-
index e14e105acff8f8bd1d7b5498621ab644a817b2b9..0493e8b1ba8fdd813ef28f23523b26dd3af2f6b2 100644 (file)
@@ -1360,6 +1360,9 @@ old_sess_out:
                conn->sock = NULL;
        }
 
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
index c9790f6fdd890dcccc375afef494d73e687962d9..016e882356d66796ceb670f6a524ba44c2c7c0ca 100644 (file)
@@ -1349,15 +1349,15 @@ static int iscsit_do_tx_data(
        struct iscsi_conn *conn,
        struct iscsi_data_count *count)
 {
-       int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+       int ret, iov_len;
        struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;
 
-       if (data <= 0) {
-               pr_err("Data length is: %d\n", data);
+       if (count->data_length <= 0) {
+               pr_err("Data length is: %d\n", count->data_length);
                return -1;
        }
 
@@ -1366,20 +1366,16 @@ static int iscsit_do_tx_data(
        iov_p = count->iov;
        iov_len = count->iov_count;
 
-       while (total_tx < data) {
-               tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
-                                       (data - total_tx));
-               if (tx_loop <= 0) {
-                       pr_debug("tx_loop: %d total_tx %d\n",
-                               tx_loop, total_tx);
-                       return tx_loop;
-               }
-               total_tx += tx_loop;
-               pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
-                                       tx_loop, total_tx, data);
+       ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+                            count->data_length);
+       if (ret != count->data_length) {
+               pr_err("Unexpected ret: %d send data %d\n",
+                      ret, count->data_length);
+               return -EPIPE;
        }
+       pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
 
-       return total_tx;
+       return ret;
 }
 
 int rx_data(
index 7c908141cc8a7980d29f151960369c063e06a122..9c02eb41ea90f830bf948b9072efb61f1004aa3c 100644 (file)
@@ -179,7 +179,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
                goto out_done;
        }
 
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
                                " does not exist\n");
@@ -257,21 +257,21 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
+       /*
+        * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
+        */
+       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
+       se_tpg = &tl_tpg->tl_se_tpg;
        /*
         * Locate the tl_nexus and se_sess pointers
         */
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                pr_err("Unable to perform device reset without"
                                " active I_T Nexus\n");
                return FAILED;
        }
        se_sess = tl_nexus->se_sess;
-       /*
-        * Locate the tl_tpg and se_tpg pointers from TargetID in sc->device->id
-        */
-       tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-       se_tpg = &tl_tpg->tl_se_tpg;
 
        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
        if (!tl_cmd) {
@@ -879,8 +879,8 @@ static int tcm_loop_make_nexus(
        struct tcm_loop_nexus *tl_nexus;
        int ret = -ENOMEM;
 
-       if (tl_tpg->tl_hba->tl_nexus) {
-               pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+       if (tl_tpg->tl_nexus) {
+               pr_debug("tl_tpg->tl_nexus already exists\n");
                return -EEXIST;
        }
        se_tpg = &tl_tpg->tl_se_tpg;
@@ -915,7 +915,7 @@ static int tcm_loop_make_nexus(
         */
        __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
                        tl_nexus->se_sess, tl_nexus);
-       tl_tpg->tl_hba->tl_nexus = tl_nexus;
+       tl_tpg->tl_nexus = tl_nexus;
        pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                name);
@@ -931,9 +931,8 @@ static int tcm_loop_drop_nexus(
 {
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;
-       struct tcm_loop_hba *tl_hba = tpg->tl_hba;
 
-       tl_nexus = tpg->tl_hba->tl_nexus;
+       tl_nexus = tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
@@ -949,13 +948,13 @@ static int tcm_loop_drop_nexus(
        }
 
        pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
                tl_nexus->se_sess->se_node_acl->initiatorname);
        /*
         * Release the SCSI I_T Nexus to the emulated SAS Target Port
         */
        transport_deregister_session(tl_nexus->se_sess);
-       tpg->tl_hba->tl_nexus = NULL;
+       tpg->tl_nexus = NULL;
        kfree(tl_nexus);
        return 0;
 }
@@ -971,7 +970,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
        struct tcm_loop_nexus *tl_nexus;
        ssize_t ret;
 
-       tl_nexus = tl_tpg->tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
index dd7a84ee78e1129db36e7ab625731666b05125ef..4ed85886a1ee8fc5508c2cfa15bce58a8591aefc 100644 (file)
@@ -25,11 +25,6 @@ struct tcm_loop_tmr {
 };
 
 struct tcm_loop_nexus {
-       int it_nexus_active;
-       /*
-        * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
-        */
-       struct scsi_host *sh;
        /*
         * Pointer to TCM session for I_T Nexus
         */
@@ -45,6 +40,7 @@ struct tcm_loop_tpg {
        atomic_t tl_tpg_port_count;
        struct se_portal_group tl_se_tpg;
        struct tcm_loop_hba *tl_hba;
+       struct tcm_loop_nexus *tl_nexus;
 };
 
 struct tcm_loop_hba {
@@ -53,7 +49,6 @@ struct tcm_loop_hba {
        struct se_hba_s *se_hba;
        struct se_lun *tl_hba_lun;
        struct se_port *tl_hba_lun_sep;
-       struct tcm_loop_nexus *tl_nexus;
        struct device dev;
        struct Scsi_Host *sh;
        struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
index 2be407e22eb499902bd2320e976b0c178fe4bfd3..4deb0c997b1bfa70d2c4de64064f0313a8e847b2 100644 (file)
@@ -1037,10 +1037,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                                " changed for TCM/pSCSI\n", dev);
                return -EINVAL;
        }
-       if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+       if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
-                       " greater than fabric_max_sectors: %u\n", dev,
-                       optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+                       " greater than hw_max_sectors: %u\n", dev,
+                       optimal_sectors, dev->dev_attrib.hw_max_sectors);
                return -EINVAL;
        }
 
@@ -1442,7 +1442,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
                                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
        dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
-       dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
 
        return dev;
 }
@@ -1475,6 +1474,7 @@ int target_configure_device(struct se_device *dev)
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
+       dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
 
        dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
        dev->creation_time = get_jiffies_64();
index 3b2879316b879cebfc8e5fbb44c6a8679555a711..8baaa0a26d70c7cffb24a6b46d325686e2f89c78 100644 (file)
@@ -554,7 +554,16 @@ fd_execute_rw(struct se_cmd *cmd)
        enum dma_data_direction data_direction = cmd->data_direction;
        struct se_device *dev = cmd->se_dev;
        int ret = 0;
-
+       /*
+        * We are currently limited by the number of iovecs (2048) per
+        * single vfs_[writev,readv] call.
+        */
+       if (cmd->data_length > FD_MAX_BYTES) {
+               pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+                      "FD_MAX_BYTES: %u iovec count limitiation\n",
+                       cmd->data_length, FD_MAX_BYTES);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        /*
         * Call vectorized fileio functions to map struct scatterlist
         * physical memory addresses to struct iovec virtual memory.
index aa1620abec6dc0b1ccb5a78305ca2ed41800ac5d..b358b3d6c20169d660ecce7fb73c13ab44906f0c 100644 (file)
@@ -122,7 +122,7 @@ static int iblock_configure_device(struct se_device *dev)
        q = bdev_get_queue(bd);
 
        dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
-       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
        dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
index 0ef75fb0ecbae3cafb85180bd86efbe0c0b75c35..92e6c510e5d0bfa3e7e85e49e9f9e0eabd9caf97 100644 (file)
@@ -561,21 +561,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
        if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
                unsigned long long end_lba;
 
-               if (sectors > dev->dev_attrib.fabric_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds fabric_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.fabric_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-               if (sectors > dev->dev_attrib.hw_max_sectors) {
-                       printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
-                               " big sectors %u exceeds backend hw_max_sectors:"
-                               " %u\n", cdb[0], sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                       return TCM_INVALID_CDB_FIELD;
-               }
-
                end_lba = dev->transport->get_blocks(dev) + 1;
                if (cmd->t_task_lba + sectors > end_lba) {
                        pr_err("cmd exceeds last lba %llu "
index 34254b2ec4668fd0e285f5bb2d5770e4ee274b46..9998ae23cc7c429402a00bfe075fad68c0d8c274 100644 (file)
@@ -444,7 +444,6 @@ static sense_reason_t
 spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
 {
        struct se_device *dev = cmd->se_dev;
-       u32 max_sectors;
        int have_tp = 0;
 
        /*
@@ -469,9 +468,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
        /*
         * Set MAXIMUM TRANSFER LENGTH
         */
-       max_sectors = min(dev->dev_attrib.fabric_max_sectors,
-                         dev->dev_attrib.hw_max_sectors);
-       put_unaligned_be32(max_sectors, &buf[8]);
+       put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
 
        /*
         * Set OPTIMAL TRANSFER LENGTH
index 8f8e75e392de0efc47caf4fb401351269b1bac42..87f8fc63b3e1e0810b14c67e80e9968c94ce1846 100644 (file)
@@ -907,8 +907,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
 
                                if (i == (request->num_mapped_sgs - 1) ||
                                                sg_is_last(s)) {
-                                       if (list_is_last(&req->list,
-                                                       &dep->request_list))
+                                       if (list_empty(&dep->request_list))
                                                last_one = true;
                                        chain = false;
                                }
@@ -926,6 +925,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
                                if (last_one)
                                        break;
                        }
+
+                       if (last_one)
+                               break;
                } else {
                        dma = req->request.dma;
                        length = req->request.length;
index 58c2b3c1c4d1a0ce99546742caf2d7ddad41cdd1..8027ec940f96a571a0806a7bf1045a9826406789 100755 (executable)
@@ -90,6 +90,9 @@ struct android_dev {
        struct usb_composite_dev *cdev;
        struct device *dev;
 
+       void (*setup_complete)(struct usb_ep *ep,
+                               struct usb_request *req);
+
        bool enabled;
        int disable_depth;
        struct mutex mutex;
@@ -1327,6 +1330,9 @@ static int android_bind(struct usb_composite_dev *cdev)
        struct usb_gadget       *gadget = cdev->gadget;
        int                     id, ret;
 
+       /* Save the default handler */
+       dev->setup_complete = cdev->req->complete;
+
        /*
         * Start disconnected. Userspace will connect the gadget once
         * it is done configuring the functions.
@@ -1394,6 +1400,7 @@ android_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *c)
 
        req->zero = 0;
        req->length = 0;
+       req->complete = dev->setup_complete;
        gadget->ep0->driver_data = cdev;
 
        list_for_each_entry(f, &dev->enabled_functions, enabled_list) {
index a401acdceb4dc0f98dc7cd6c016aa95a7b41ec14..0237f1e059b47cb3dad8adfce5b0c0e40d5ac1e4 100644 (file)
@@ -951,6 +951,10 @@ kill_all_hid_devices(struct acc_dev *dev)
        struct list_head *entry, *temp;
        unsigned long flags;
 
+       /* do nothing if usb accessory device doesn't exist */
+       if (!dev)
+               return;
+
        spin_lock_irqsave(&dev->lock, flags);
        list_for_each_safe(entry, temp, &dev->hid_list) {
                hid = list_entry(entry, struct acc_hid_dev, list);
index 9cfe3af3101ac271093abbb5134afa96371f761c..66c90588449625e07ad77e14a06b7529088e17af 100644 (file)
@@ -470,7 +470,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
 {
        void __iomem *base;
        u32 control;
-       u32 fminterval;
+       u32 fminterval = 0;
+       bool no_fminterval = false;
        int cnt;
 
        if (!mmio_resource_enabled(pdev, 0))
@@ -480,6 +481,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        if (base == NULL)
                return;
 
+       /*
+        * ULi M5237 OHCI controller locks the whole system when accessing
+        * the OHCI_FMINTERVAL offset.
+        */
+       if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+               no_fminterval = true;
+
        control = readl(base + OHCI_CONTROL);
 
 /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -518,7 +526,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
        }
 
        /* software reset of the controller, preserving HcFmInterval */
-       fminterval = readl(base + OHCI_FMINTERVAL);
+       if (!no_fminterval)
+               fminterval = readl(base + OHCI_FMINTERVAL);
+
        writel(OHCI_HCR, base + OHCI_CMDSTATUS);
 
        /* reset requires max 10 us delay */
@@ -527,7 +537,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
                        break;
                udelay(1);
        }
-       writel(fminterval, base + OHCI_FMINTERVAL);
+
+       if (!no_fminterval)
+               writel(fminterval, base + OHCI_FMINTERVAL);
 
        /* Now the controller is safely in SUSPEND and nothing can wake it up */
        iounmap(base);
index a24714f6f88f942d470147c265a7d1187f8c569e..8e15acd204ef216c8ca9263c95a22de66d42dbf0 100644 (file)
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
        { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
        { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
-       { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+       { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+       { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
        { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+       { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index f0e65c970d353c2913eb5f58c65c76948ba7f5b7..cec377b8bb8be3a33e812f918c3ec0fab22efd53 100644 (file)
@@ -418,6 +418,8 @@ static void usa26_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -428,7 +430,7 @@ static void usa26_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -543,6 +545,8 @@ static void usa28_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -553,7 +557,7 @@ static void usa28_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
                /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -630,6 +634,8 @@ static void usa49_instat_callback(struct urb *urb)
        }
        port = serial->port[msg->portNumber];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -640,7 +646,7 @@ static void usa49_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -878,6 +884,8 @@ static void usa90_instat_callback(struct urb *urb)
 
        port = serial->port[0];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -888,7 +896,7 @@ static void usa90_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
@@ -949,6 +957,8 @@ static void usa67_instat_callback(struct urb *urb)
 
        port = serial->port[msg->port];
        p_priv = usb_get_serial_port_data(port);
+       if (!p_priv)
+               goto resubmit;
 
        /* Update handshaking pin state information */
        old_dcd_state = p_priv->dcd_state;
@@ -957,7 +967,7 @@ static void usa67_instat_callback(struct urb *urb)
 
        if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
                tty_port_tty_hangup(&port->port, true);
-
+resubmit:
        /* Resubmit urb so we continue receiving */
        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err != 0)
index ac3725440d648ab4f3efefda246be390b6be643e..dc55bc254c5cf0eeeda29a9deebc244aca6165ea 100644 (file)
@@ -519,13 +519,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       u8 type;
        struct vfio_pci_device *vdev;
        struct iommu_group *group;
        int ret;
 
-       pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
-       if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+       if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
                return -EINVAL;
 
        group = iommu_group_get(&pdev->dev);
index 962c7e3c3baabf4de83ad50519a5ca8398d13bb8..fb97bc0b80e78a394b05e6ffe8b5d00fde6120b1 100644 (file)
@@ -820,6 +820,23 @@ static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd,
        return 0;
 }
 
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+       switch (attr) {
+       case VIRTIO_SCSI_S_SIMPLE:
+               return MSG_SIMPLE_TAG;
+       case VIRTIO_SCSI_S_ORDERED:
+               return MSG_ORDERED_TAG;
+       case VIRTIO_SCSI_S_HEAD:
+               return MSG_HEAD_TAG;
+       case VIRTIO_SCSI_S_ACA:
+               return MSG_ACA_TAG;
+       default:
+               break;
+       }
+       return MSG_SIMPLE_TAG;
+}
+
 static void tcm_vhost_submission_work(struct work_struct *work)
 {
        struct tcm_vhost_cmd *tv_cmd =
@@ -846,9 +863,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
        rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
                        tv_cmd->tvc_cdb, &tv_cmd->tvc_sense_buf[0],
                        tv_cmd->tvc_lun, tv_cmd->tvc_exp_data_len,
-                       tv_cmd->tvc_task_attr, tv_cmd->tvc_data_direction,
-                       0, sg_ptr, tv_cmd->tvc_sgl_count,
-                       sg_bidi_ptr, sg_no_bidi);
+                       vhost_scsi_to_tcm_attr(tv_cmd->tvc_task_attr),
+                       tv_cmd->tvc_data_direction, 0, sg_ptr,
+                       tv_cmd->tvc_sgl_count, sg_bidi_ptr, sg_no_bidi);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@ -1150,6 +1167,7 @@ static int vhost_scsi_set_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct tcm_vhost_tpg **vs_tpg;
@@ -1197,6 +1215,21 @@ static int vhost_scsi_set_endpoint(
                                ret = -EEXIST;
                                goto out;
                        }
+                       /*
+                        * In order to ensure individual vhost-scsi configfs
+                        * groups cannot be removed while in use by vhost ioctl,
+                        * go ahead and take an explicit se_tpg->tpg_group.cg_item
+                        * dependency now.
+                        */
+                       se_tpg = &tv_tpg->se_tpg;
+                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                                  &se_tpg->tpg_group.cg_item);
+                       if (ret) {
+                               pr_warn("configfs_depend_item() failed: %d\n", ret);
+                               kfree(vs_tpg);
+                               mutex_unlock(&tv_tpg->tv_tpg_mutex);
+                               goto out;
+                       }
                        tv_tpg->tv_tpg_vhost_count++;
                        tv_tpg->vhost_scsi = vs;
                        vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
@@ -1240,6 +1273,7 @@ static int vhost_scsi_clear_endpoint(
        struct vhost_scsi *vs,
        struct vhost_scsi_target *t)
 {
+       struct se_portal_group *se_tpg;
        struct tcm_vhost_tport *tv_tport;
        struct tcm_vhost_tpg *tv_tpg;
        struct vhost_virtqueue *vq;
@@ -1288,6 +1322,13 @@ static int vhost_scsi_clear_endpoint(
                vs->vs_tpg[target] = NULL;
                match = true;
                mutex_unlock(&tv_tpg->tv_tpg_mutex);
+               /*
+                * Release se_tpg->tpg_group.cg_item configfs dependency now
+                * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
+                */
+               se_tpg = &tv_tpg->se_tpg;
+               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+                                      &se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 080c35b34bbb9d68cd6e6d2eb5456dbf85a1ca88..cc5dbb5b2f711d5fe592d3fd3861fa4a2ffd5d10 100644 (file)
@@ -25,6 +25,21 @@ static bool nologo;
 module_param(nologo, bool, 0);
 MODULE_PARM_DESC(nologo, "Disables startup logo");
 
+/*
+ * Logos are located in the initdata, and will be freed in kernel_init.
+ * Use late_init to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+       logos_freed = true;
+       return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
 /* logo's are marked __initdata. Use __init_refok to tell
  * modpost that it is intended that this function uses data
  * marked __initdata.
@@ -33,7 +48,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
 {
        const struct linux_logo *logo = NULL;
 
-       if (nologo)
+       if (nologo || logos_freed)
                return NULL;
 
        if (depth >= 1) {
index 301b08496478b958ac889853814e4638fe32569a..1d94316f0ea46616ceda930896af210d7ad68284 100644 (file)
@@ -390,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
-               swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
+               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
                return;
        }
 
index 6d1ffab091bc08568d47bf528d1c6bfc1f55faee..e5a77bb30ba3e66583c495f9cbd36d1deede81da 100644 (file)
@@ -589,6 +589,7 @@ enum {
 #define EXT4_FREE_BLOCKS_NO_QUOT_UPDATE        0x0008
 #define EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER  0x0010
 #define EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER   0x0020
+#define EXT4_FREE_BLOCKS_RESERVE               0x0040
 
 /*
  * Flags used by ext4_discard_partial_page_buffers
index 84d817b842a8f0cf01525f7508b4e078472bed03..7fbd1c5b74afee34170e614031138f1d2df39c85 100644 (file)
@@ -1722,7 +1722,8 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
 
        brelse(path[1].p_bh);
        ext4_free_blocks(handle, inode, NULL, blk, 1,
-                        EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
+                        EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET |
+                        EXT4_FREE_BLOCKS_RESERVE);
 }
 
 /*
index 576155cb0e49dca3932536ce5ffc4a90ef0d0016..aed1a67ab3a2bfedac039faadac4967d5d099a1f 100644 (file)
@@ -4611,6 +4611,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
        struct buffer_head *gd_bh;
        ext4_group_t block_group;
        struct ext4_sb_info *sbi;
+       struct ext4_inode_info *ei = EXT4_I(inode);
        struct ext4_buddy e4b;
        unsigned int count_clusters;
        int err = 0;
@@ -4810,7 +4811,6 @@ do_more:
        ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh);
        ext4_group_desc_csum_set(sb, block_group, gdp);
        ext4_unlock_group(sb, block_group);
-       percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
 
        if (sbi->s_log_groups_per_flex) {
                ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
@@ -4818,10 +4818,23 @@ do_more:
                             &sbi->s_flex_groups[flex_group].free_clusters);
        }
 
-       ext4_mb_unload_buddy(&e4b);
-
-       if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+       if (flags & EXT4_FREE_BLOCKS_RESERVE && ei->i_reserved_data_blocks) {
+               percpu_counter_add(&sbi->s_dirtyclusters_counter,
+                                  count_clusters);
+               spin_lock(&ei->i_block_reservation_lock);
+               if (flags & EXT4_FREE_BLOCKS_METADATA)
+                       ei->i_reserved_meta_blocks += count_clusters;
+               else
+                       ei->i_reserved_data_blocks += count_clusters;
+               spin_unlock(&ei->i_block_reservation_lock);
+               if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
+                       dquot_reclaim_block(inode,
+                                       EXT4_C2B(sbi, count_clusters));
+       } else if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE))
                dquot_free_block(inode, EXT4_C2B(sbi, count_clusters));
+       percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
+
+       ext4_mb_unload_buddy(&e4b);
 
        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
index 9c8a5a6d33dfed93e2819dd3215f9fb2cb7916ef..7a318480ab7a262f5a557ac02b4cf633c942b605 100644 (file)
@@ -137,10 +137,6 @@ lockd(void *vrqstp)
 
        dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
 
-       if (!nlm_timeout)
-               nlm_timeout = LOCKD_DFLT_TIMEO;
-       nlmsvc_timeout = nlm_timeout * HZ;
-
        /*
         * The main request loop. We don't terminate until the last
         * NFS mount or NFS daemon has gone away.
@@ -346,6 +342,10 @@ static struct svc_serv *lockd_create_svc(void)
                printk(KERN_WARNING
                        "lockd_up: no pid, %d users??\n", nlmsvc_users);
 
+       if (!nlm_timeout)
+               nlm_timeout = LOCKD_DFLT_TIMEO;
+       nlmsvc_timeout = nlm_timeout * HZ;
+
        serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
        if (!serv) {
                printk(KERN_WARNING "lockd_up: create service failed\n");
index 725e87538c98ad71ae5f28ded813d9e61fb8a6d1..615c5079db7c902b286f40823601a8578022fc37 100644 (file)
@@ -123,6 +123,12 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
  */
 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
 {
+       struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+       /* we only support swap file calling nfs_direct_IO */
+       if (!IS_SWAPFILE(inode))
+               return 0;
+
 #ifndef CONFIG_NFS_SWAP
        dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
                        iocb->ki_filp->f_path.dentry->d_name.name,
index cc143ee7a56ed80695f93a1685d39aa1ed87f0f5..5f8d5ffdad8f8f2f39903682671363722d6ccc16 100644 (file)
@@ -394,20 +394,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
 }
 
 /*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
  */
 static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
 {
        struct nfs41_server_owner *o1 = a->cl_serverowner;
        struct nfs41_server_owner *o2 = b->cl_serverowner;
 
-       if (o1->minor_id != o2->minor_id) {
-               dprintk("NFS: --> %s server owner minor IDs do not match\n",
-                       __func__);
-               return false;
-       }
-
        if (o1->major_id_sz != o2->major_id_sz)
                goto out_major_mismatch;
        if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -468,7 +462,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
                        prev = pos;
 
                        status = nfs_wait_client_init_complete(pos);
-                       if (status == 0) {
+                       if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
                                nfs4_schedule_lease_recovery(pos);
                                status = nfs4_wait_clnt_recover(pos);
                        }
@@ -483,7 +477,12 @@ int nfs41_walk_client_list(struct nfs_client *new,
                if (!nfs4_match_clientids(pos, new))
                        continue;
 
-               if (!nfs4_match_serverowners(pos, new))
+               /*
+                * Note that session trunking is just a special subcase of
+                * client id trunking. In either case, we want to fall back
+                * to using the existing nfs_client.
+                */
+               if (!nfs4_check_clientid_trunking(pos, new))
                        continue;
 
                atomic_inc(&pos->cl_count);
index 74825be65b7bbec7eec8df37dda142479c00eb3d..fbb9dfb7b1d28a08471ffda0205de8ec0a8ecf65 100644 (file)
@@ -288,20 +288,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
                spin_unlock(&inode->i_lock);
 
                /* In case the dropping of a reference would nuke next_i. */
-               if ((&next_i->i_sb_list != list) &&
-                   atomic_read(&next_i->i_count)) {
+               while (&next_i->i_sb_list != list) {
                        spin_lock(&next_i->i_lock);
-                       if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) {
+                       if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
+                                               atomic_read(&next_i->i_count)) {
                                __iget(next_i);
                                need_iput = next_i;
+                               spin_unlock(&next_i->i_lock);
+                               break;
                        }
                        spin_unlock(&next_i->i_lock);
+                       next_i = list_entry(next_i->i_sb_list.next,
+                                               struct inode, i_sb_list);
                }
 
                /*
-                * We can safely drop inode_sb_list_lock here because we hold
-                * references on both inode and next_i.  Also no new inodes
-                * will be added since the umount has begun.
+                * We can safely drop inode_sb_list_lock here because either
+                * we actually hold references on both inode and next_i or
+                * end of list.  Also no new inodes will be added since the
+                * umount has begun.
                 */
                spin_unlock(&inode_sb_list_lock);
 
index ab30716584f55c0dd488c6d567962172d2a5188c..239493ec718eb6389cebc61f5d35a2bf850e6656 100644 (file)
@@ -27,6 +27,5 @@ proc-$(CONFIG_PROC_SYSCTL)    += proc_sysctl.o
 proc-$(CONFIG_NET)             += proc_net.o
 proc-$(CONFIG_PROC_KCORE)      += kcore.o
 proc-$(CONFIG_PROC_VMCORE)     += vmcore.o
-proc-$(CONFIG_PROC_DEVICETREE) += proc_devtree.o
 proc-$(CONFIG_PRINTK)  += kmsg.o
 proc-$(CONFIG_PROC_PAGE_MONITOR)       += page.o
index d600fb098b6ad3ad51d227709d43161d8a9d0abc..e2cfe2968bf21d03a53084cdd64368935e556b17 100644 (file)
@@ -210,13 +210,6 @@ extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry
 extern int proc_fill_super(struct super_block *);
 extern void proc_entry_rundown(struct proc_dir_entry *);
 
-/*
- * proc_devtree.c
- */
-#ifdef CONFIG_PROC_DEVICETREE
-extern void proc_device_tree_init(void);
-#endif
-
 /*
  * proc_namespaces.c
  */
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
deleted file mode 100644 (file)
index 106a835..0000000
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * proc_devtree.c - handles /proc/device-tree
- *
- * Copyright 1997 Paul Mackerras
- */
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/time.h>
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/printk.h>
-#include <linux/stat.h>
-#include <linux/string.h>
-#include <linux/of.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <asm/prom.h>
-#include <asm/uaccess.h>
-#include "internal.h"
-
-static inline void set_node_proc_entry(struct device_node *np,
-                                      struct proc_dir_entry *de)
-{
-#ifdef HAVE_ARCH_DEVTREE_FIXUPS
-       np->pde = de;
-#endif
-}
-
-static struct proc_dir_entry *proc_device_tree;
-
-/*
- * Supply data on a read from /proc/device-tree/node/property.
- */
-static int property_proc_show(struct seq_file *m, void *v)
-{
-       struct property *pp = m->private;
-
-       seq_write(m, pp->value, pp->length);
-       return 0;
-}
-
-static int property_proc_open(struct inode *inode, struct file *file)
-{
-       return single_open(file, property_proc_show, __PDE_DATA(inode));
-}
-
-static const struct file_operations property_proc_fops = {
-       .owner          = THIS_MODULE,
-       .open           = property_proc_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = single_release,
-};
-
-/*
- * For a node with a name like "gc@10", we make symlinks called "gc"
- * and "@10" to it.
- */
-
-/*
- * Add a property to a node
- */
-static struct proc_dir_entry *
-__proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp,
-               const char *name)
-{
-       struct proc_dir_entry *ent;
-
-       /*
-        * Unfortunately proc_register puts each new entry
-        * at the beginning of the list.  So we rearrange them.
-        */
-       ent = proc_create_data(name,
-                              strncmp(name, "security-", 9) ? S_IRUGO : S_IRUSR,
-                              de, &property_proc_fops, pp);
-       if (ent == NULL)
-               return NULL;
-
-       if (!strncmp(name, "security-", 9))
-               ent->size = 0; /* don't leak number of password chars */
-       else
-               ent->size = pp->length;
-
-       return ent;
-}
-
-
-void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop)
-{
-       __proc_device_tree_add_prop(pde, prop, prop->name);
-}
-
-void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
-                                 struct property *prop)
-{
-       remove_proc_entry(prop->name, pde);
-}
-
-void proc_device_tree_update_prop(struct proc_dir_entry *pde,
-                                 struct property *newprop,
-                                 struct property *oldprop)
-{
-       struct proc_dir_entry *ent;
-
-       if (!oldprop) {
-               proc_device_tree_add_prop(pde, newprop);
-               return;
-       }
-
-       for (ent = pde->subdir; ent != NULL; ent = ent->next)
-               if (ent->data == oldprop)
-                       break;
-       if (ent == NULL) {
-               pr_warn("device-tree: property \"%s\" does not exist\n",
-                       oldprop->name);
-       } else {
-               ent->data = newprop;
-               ent->size = newprop->length;
-       }
-}
-
-/*
- * Various dodgy firmware might give us nodes and/or properties with
- * conflicting names. That's generally ok, except for exporting via /proc,
- * so munge names here to ensure they're unique.
- */
-
-static int duplicate_name(struct proc_dir_entry *de, const char *name)
-{
-       struct proc_dir_entry *ent;
-       int found = 0;
-
-       spin_lock(&proc_subdir_lock);
-
-       for (ent = de->subdir; ent != NULL; ent = ent->next) {
-               if (strcmp(ent->name, name) == 0) {
-                       found = 1;
-                       break;
-               }
-       }
-
-       spin_unlock(&proc_subdir_lock);
-
-       return found;
-}
-
-static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
-               const char *name)
-{
-       char *fixed_name;
-       int fixup_len = strlen(name) + 2 + 1; /* name + #x + \0 */
-       int i = 1, size;
-
-realloc:
-       fixed_name = kmalloc(fixup_len, GFP_KERNEL);
-       if (fixed_name == NULL) {
-               pr_err("device-tree: Out of memory trying to fixup "
-                      "name \"%s\"\n", name);
-               return name;
-       }
-
-retry:
-       size = snprintf(fixed_name, fixup_len, "%s#%d", name, i);
-       size++; /* account for NULL */
-
-       if (size > fixup_len) {
-               /* We ran out of space, free and reallocate. */
-               kfree(fixed_name);
-               fixup_len = size;
-               goto realloc;
-       }
-
-       if (duplicate_name(de, fixed_name)) {
-               /* Multiple duplicates. Retry with a different offset. */
-               i++;
-               goto retry;
-       }
-
-       pr_warn("device-tree: Duplicate name in %s, renamed to \"%s\"\n",
-               np->full_name, fixed_name);
-
-       return fixed_name;
-}
-
-/*
- * Process a node, adding entries for its children and its properties.
- */
-void proc_device_tree_add_node(struct device_node *np,
-                              struct proc_dir_entry *de)
-{
-       struct property *pp;
-       struct proc_dir_entry *ent;
-       struct device_node *child;
-       const char *p;
-
-       set_node_proc_entry(np, de);
-       for (child = NULL; (child = of_get_next_child(np, child));) {
-               /* Use everything after the last slash, or the full name */
-               p = kbasename(child->full_name);
-
-               if (duplicate_name(de, p))
-                       p = fixup_name(np, de, p);
-
-               ent = proc_mkdir(p, de);
-               if (ent == NULL)
-                       break;
-               proc_device_tree_add_node(child, ent);
-       }
-       of_node_put(child);
-
-       for (pp = np->properties; pp != NULL; pp = pp->next) {
-               p = pp->name;
-
-               if (strchr(p, '/'))
-                       continue;
-
-               if (duplicate_name(de, p))
-                       p = fixup_name(np, de, p);
-
-               ent = __proc_device_tree_add_prop(de, pp, p);
-               if (ent == NULL)
-                       break;
-       }
-}
-
-/*
- * Called on initialization to set up the /proc/device-tree subtree
- */
-void __init proc_device_tree_init(void)
-{
-       struct device_node *root;
-
-       proc_device_tree = proc_mkdir("device-tree", NULL);
-       if (proc_device_tree == NULL)
-               return;
-       root = of_find_node_by_path("/");
-       if (root == NULL) {
-               pr_debug("/proc/device-tree: can't find root\n");
-               return;
-       }
-       proc_device_tree_add_node(root, proc_device_tree);
-       of_node_put(root);
-}
index 04ec276c7bab9dfcfd65a4bc5e61bb15ac413a2f..9459710c55aee0a287014eab85ab373cef3576f8 100644 (file)
@@ -180,9 +180,6 @@ void __init proc_root_init(void)
        proc_mkdir("openprom", NULL);
 #endif
        proc_tty_init();
-#ifdef CONFIG_PROC_DEVICETREE
-       proc_device_tree_init();
-#endif
        proc_mkdir("bus", NULL);
        proc_sys_init();
 }
index ca71db69da07a000814837c20cf76bac049b3ca1..da12fd4e3d471e3475990320c78e94aa7e4ad4aa 100644 (file)
@@ -19,6 +19,16 @@ config PSTORE_CONSOLE
          When the option is enabled, pstore will log all kernel
          messages, even if no oops or panic happened.
 
+config PSTORE_PMSG
+       bool "Log user space messages"
+       depends on PSTORE
+       help
+         When the option is enabled, pstore will export a character
+         interface /dev/pmsg0 to log user space messages. On reboot
+         data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID].
+
+         If unsure, say N.
+
 config PSTORE_FTRACE
        bool "Persistent function tracer"
        depends on PSTORE
index 4c9095c2781e17c2d944081f20f9e2c639a9a6b8..e647d8e81712f7478cc6d5a0698fe27b7518d589 100644 (file)
@@ -7,5 +7,7 @@ obj-y += pstore.o
 pstore-objs += inode.o platform.o
 obj-$(CONFIG_PSTORE_FTRACE)    += ftrace.o
 
+obj-$(CONFIG_PSTORE_PMSG)      += pmsg.o
+
 ramoops-objs += ram.o ram_core.o
 obj-$(CONFIG_PSTORE_RAM)       += ramoops.o
index 3ba30825f387d847c04054f85608f735458e811b..0a11045356d4bfcab3bfd0bc8922a13e45e12f14 100644 (file)
@@ -178,6 +178,8 @@ static int pstore_unlink(struct inode *dir, struct dentry *dentry)
        if (p->psi->erase)
                p->psi->erase(p->type, p->id, p->count,
                              dentry->d_inode->i_ctime, p->psi);
+       else
+               return -EPERM;
 
        return simple_unlink(dir, dentry);
 }
@@ -313,30 +315,34 @@ int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
 
        switch (type) {
        case PSTORE_TYPE_DMESG:
-               sprintf(name, "dmesg-%s-%lld", psname, id);
+               scnprintf(name, sizeof(name), "dmesg-%s-%lld",
+                         psname, id);
                break;
        case PSTORE_TYPE_CONSOLE:
-               sprintf(name, "console-%s-%lld", psname, id);
+               scnprintf(name, sizeof(name), "console-%s", psname);
                break;
        case PSTORE_TYPE_FTRACE:
-               sprintf(name, "ftrace-%s-%lld", psname, id);
+               scnprintf(name, sizeof(name), "ftrace-%s", psname);
                break;
        case PSTORE_TYPE_MCE:
-               sprintf(name, "mce-%s-%lld", psname, id);
+               scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id);
+               break;
+       case PSTORE_TYPE_PMSG:
+               scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
                break;
        case PSTORE_TYPE_UNKNOWN:
-               sprintf(name, "unknown-%s-%lld", psname, id);
+               scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
                break;
        default:
-               sprintf(name, "type%d-%s-%lld", type, psname, id);
+               scnprintf(name, sizeof(name), "type%d-%s-%lld",
+                         type, psname, id);
                break;
        }
 
        mutex_lock(&root->d_inode->i_mutex);
 
-       rc = -ENOSPC;
        dentry = d_alloc_name(root, name);
-       if (IS_ERR(dentry))
+       if (!dentry)
                goto fail_lockedalloc;
 
        memcpy(private->data, data, size);
index 937d820f273c2b17c5b47aa365f230970b19cc32..fd8d248c285ce0491d7c2a73fd2c93a73c616669 100644 (file)
@@ -45,6 +45,12 @@ extern void pstore_register_ftrace(void);
 static inline void pstore_register_ftrace(void) {}
 #endif
 
+#ifdef CONFIG_PSTORE_PMSG
+extern void pstore_register_pmsg(void);
+#else
+static inline void pstore_register_pmsg(void) {}
+#endif
+
 extern struct pstore_info *psinfo;
 
 extern void    pstore_set_kmsg_bytes(int);
index 86d1038b5a1292b464c561ba7086518cdd7eaf80..4f11f2382e2440f6edf1e5523b27beb2a10704ce 100644 (file)
@@ -267,6 +267,7 @@ int pstore_register(struct pstore_info *psi)
        kmsg_dump_register(&pstore_dumper);
        pstore_register_console();
        pstore_register_ftrace();
+       pstore_register_pmsg();
 
        if (pstore_update_ms >= 0) {
                pstore_timer.expires = jiffies +
diff --git a/fs/pstore/pmsg.c b/fs/pstore/pmsg.c
new file mode 100644 (file)
index 0000000..db47810
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014  Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(pmsg_lock);
+#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
+
+static ssize_t write_pmsg(struct file *file, const char __user *buf,
+                         size_t count, loff_t *ppos)
+{
+       size_t i, buffer_size;
+       char *buffer;
+
+       if (!count)
+               return 0;
+
+       if (!access_ok(VERIFY_READ, buf, count))
+               return -EFAULT;
+
+       buffer_size = count;
+       if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
+               buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
+       buffer = vmalloc(buffer_size);
+
+       mutex_lock(&pmsg_lock);
+       for (i = 0; i < count; ) {
+               size_t c = min(count - i, buffer_size);
+               u64 id;
+               long ret;
+
+               ret = __copy_from_user(buffer, buf + i, c);
+               if (unlikely(ret != 0)) {
+                       mutex_unlock(&pmsg_lock);
+                       vfree(buffer);
+                       return -EFAULT;
+               }
+               psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, c,
+                                 psinfo);
+
+               i += c;
+       }
+
+       mutex_unlock(&pmsg_lock);
+       vfree(buffer);
+       return count;
+}
+
+static const struct file_operations pmsg_fops = {
+       .owner          = THIS_MODULE,
+       .llseek         = noop_llseek,
+       .write          = write_pmsg,
+};
+
+static struct class *pmsg_class;
+static int pmsg_major;
+#define PMSG_NAME "pmsg"
+#undef pr_fmt
+#define pr_fmt(fmt) PMSG_NAME ": " fmt
+
+static char *pmsg_devnode(struct device *dev, umode_t *mode)
+{
+       if (mode)
+               *mode = 0220;
+       return NULL;
+}
+
+void pstore_register_pmsg(void)
+{
+       struct device *pmsg_device;
+
+       pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops);
+       if (pmsg_major < 0) {
+               pr_err("register_chrdev failed\n");
+               goto err;
+       }
+
+       pmsg_class = class_create(THIS_MODULE, PMSG_NAME);
+       if (IS_ERR(pmsg_class)) {
+               pr_err("device class file already in use\n");
+               goto err_class;
+       }
+       pmsg_class->devnode = pmsg_devnode;
+
+       pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0),
+                                       NULL, "%s%d", PMSG_NAME, 0);
+       if (IS_ERR(pmsg_device)) {
+               pr_err("failed to create device\n");
+               goto err_device;
+       }
+       return;
+
+err_device:
+       class_destroy(pmsg_class);
+err_class:
+       unregister_chrdev(pmsg_major, PMSG_NAME);
+err:
+       return;
+}
index c5684c92266ee0209d6a925d5b3830dab551638a..f721e47db8ad929fc8eb3911b8eb1b2473a5340e 100644 (file)
@@ -51,6 +51,10 @@ static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
 module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
 MODULE_PARM_DESC(ftrace_size, "size of ftrace log");
 
+static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
+module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
+MODULE_PARM_DESC(pmsg_size, "size of user space message log");
+
 static ulong mem_address;
 module_param(mem_address, ulong, 0400);
 MODULE_PARM_DESC(mem_address,
@@ -82,19 +86,23 @@ struct ramoops_context {
        struct persistent_ram_zone **przs;
        struct persistent_ram_zone *cprz;
        struct persistent_ram_zone *fprz;
+       struct persistent_ram_zone *mprz;
        phys_addr_t phys_addr;
        unsigned long size;
        unsigned int memtype;
        size_t record_size;
        size_t console_size;
        size_t ftrace_size;
+       size_t pmsg_size;
        int dump_oops;
        struct persistent_ram_ecc_info ecc_info;
        unsigned int max_dump_cnt;
        unsigned int dump_write_cnt;
+       /* _read_cnt need clear on ramoops_pstore_open */
        unsigned int dump_read_cnt;
        unsigned int console_read_cnt;
        unsigned int ftrace_read_cnt;
+       unsigned int pmsg_read_cnt;
        struct pstore_info pstore;
 };
 
@@ -107,6 +115,8 @@ static int ramoops_pstore_open(struct pstore_info *psi)
 
        cxt->dump_read_cnt = 0;
        cxt->console_read_cnt = 0;
+       cxt->ftrace_read_cnt = 0;
+       cxt->pmsg_read_cnt = 0;
        return 0;
 }
 
@@ -123,13 +133,15 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
                return NULL;
 
        prz = przs[i];
+       if (!prz)
+               return NULL;
 
-       if (update) {
-               /* Update old/shadowed buffer. */
+       /* Update old/shadowed buffer. */
+       if (update)
                persistent_ram_save_old(prz);
-               if (!persistent_ram_old_size(prz))
-                       return NULL;
-       }
+
+       if (!persistent_ram_old_size(prz))
+               return NULL;
 
        *typep = type;
        *id = i;
@@ -137,6 +149,12 @@ ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
        return prz;
 }
 
+static bool prz_ok(struct persistent_ram_zone *prz)
+{
+       return !!prz && !!(persistent_ram_old_size(prz) +
+                          persistent_ram_ecc_string(prz, NULL, 0));
+}
+
 static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
                                   int *count, struct timespec *time,
                                   char **buf, struct pstore_info *psi)
@@ -149,13 +167,16 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
        prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
                                   cxt->max_dump_cnt, id, type,
                                   PSTORE_TYPE_DMESG, 1);
-       if (!prz)
+       if (!prz_ok(prz))
                prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
                                           1, id, type, PSTORE_TYPE_CONSOLE, 0);
-       if (!prz)
+       if (!prz_ok(prz))
                prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
                                           1, id, type, PSTORE_TYPE_FTRACE, 0);
-       if (!prz)
+       if (!prz_ok(prz))
+               prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
+                                          1, id, type, PSTORE_TYPE_PMSG, 0);
+       if (!prz_ok(prz))
                return 0;
 
        /* TODO(kees): Bogus time for the moment. */
@@ -218,6 +239,11 @@ static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
                        return -ENOMEM;
                persistent_ram_write(cxt->fprz, buf, size);
                return 0;
+       } else if (type == PSTORE_TYPE_PMSG) {
+               if (!cxt->mprz)
+                       return -ENOMEM;
+               persistent_ram_write(cxt->mprz, buf, size);
+               return 0;
        }
 
        if (type != PSTORE_TYPE_DMESG)
@@ -275,6 +301,9 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
        case PSTORE_TYPE_FTRACE:
                prz = cxt->fprz;
                break;
+       case PSTORE_TYPE_PMSG:
+               prz = cxt->mprz;
+               break;
        default:
                return -EINVAL;
        }
@@ -406,7 +435,7 @@ static int ramoops_probe(struct platform_device *pdev)
                goto fail_out;
 
        if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
-                       !pdata->ftrace_size)) {
+                       !pdata->ftrace_size && !pdata->pmsg_size)) {
                pr_err("The memory size and the record/console size must be "
                        "non-zero\n");
                goto fail_out;
@@ -420,20 +449,23 @@ static int ramoops_probe(struct platform_device *pdev)
                pdata->console_size = rounddown_pow_of_two(pdata->console_size);
        if (!is_power_of_2(pdata->ftrace_size))
                pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+       if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
+               pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
 
-       cxt->dump_read_cnt = 0;
        cxt->size = pdata->mem_size;
        cxt->phys_addr = pdata->mem_address;
        cxt->memtype = pdata->mem_type;
        cxt->record_size = pdata->record_size;
        cxt->console_size = pdata->console_size;
        cxt->ftrace_size = pdata->ftrace_size;
+       cxt->pmsg_size = pdata->pmsg_size;
        cxt->dump_oops = pdata->dump_oops;
        cxt->ecc_info = pdata->ecc_info;
 
        paddr = cxt->phys_addr;
 
-       dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size;
+       dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
+                       - cxt->pmsg_size;
        err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
        if (err)
                goto fail_out;
@@ -448,13 +480,9 @@ static int ramoops_probe(struct platform_device *pdev)
        if (err)
                goto fail_init_fprz;
 
-       if (!cxt->przs && !cxt->cprz && !cxt->fprz) {
-               pr_err("memory size too small, minimum is %zu\n",
-                       cxt->console_size + cxt->record_size +
-                       cxt->ftrace_size);
-               err = -EINVAL;
-               goto fail_cnt;
-       }
+       err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+       if (err)
+               goto fail_init_mprz;
 
        cxt->pstore.data = cxt;
        /*
@@ -500,7 +528,8 @@ fail_buf:
 fail_clear:
        cxt->pstore.bufsize = 0;
        cxt->max_dump_cnt = 0;
-fail_cnt:
+       kfree(cxt->mprz);
+fail_init_mprz:
        kfree(cxt->fprz);
 fail_init_fprz:
        kfree(cxt->cprz);
@@ -559,6 +588,7 @@ static void ramoops_register_dummy(void)
        dummy_data->record_size = record_size;
        dummy_data->console_size = ramoops_console_size;
        dummy_data->ftrace_size = ramoops_ftrace_size;
+       dummy_data->pmsg_size = ramoops_pmsg_size;
        dummy_data->dump_oops = dump_oops;
        /*
         * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
index 6ff97553331b8d67192e8c1dba31cef75d3f6eec..bda61a759b684f58e6212064e0dc6307be99a13b 100644 (file)
@@ -46,7 +46,7 @@ static inline size_t buffer_start(struct persistent_ram_zone *prz)
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
        int old;
        int new;
@@ -62,7 +62,7 @@ static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
        size_t old;
        size_t new;
@@ -78,6 +78,53 @@ static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
        } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+       int old;
+       int new;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&buffer_lock, flags);
+
+       old = atomic_read(&prz->buffer->start);
+       new = old + a;
+       while (unlikely(new > prz->buffer_size))
+               new -= prz->buffer_size;
+       atomic_set(&prz->buffer->start, new);
+
+       raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+       return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+       size_t old;
+       size_t new;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&buffer_lock, flags);
+
+       old = atomic_read(&prz->buffer->size);
+       if (old == prz->buffer_size)
+               goto exit;
+
+       new = old + a;
+       if (new > prz->buffer_size)
+               new = prz->buffer_size;
+       atomic_set(&prz->buffer->size, new);
+
+exit:
+       raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
        uint8_t *data, size_t len, uint8_t *ecc)
 {
@@ -379,6 +426,9 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size,
                return NULL;
        }
 
+       buffer_start_add = buffer_start_add_locked;
+       buffer_size_add = buffer_size_add_locked;
+
        if (memtype)
                va = ioremap(start, size);
        else
index 7a10e047bc33bd039e0dbe7b82081254c47b64b4..4f7f451ca70dbae83eb40e14185f8b40ef8643f8 100644 (file)
@@ -1102,6 +1102,14 @@ static void dquot_claim_reserved_space(struct dquot *dquot, qsize_t number)
        dquot->dq_dqb.dqb_rsvspace -= number;
 }
 
+static void dquot_reclaim_reserved_space(struct dquot *dquot, qsize_t number)
+{
+       if (WARN_ON_ONCE(dquot->dq_dqb.dqb_curspace < number))
+               number = dquot->dq_dqb.dqb_curspace;
+       dquot->dq_dqb.dqb_rsvspace += number;
+       dquot->dq_dqb.dqb_curspace -= number;
+}
+
 static inline
 void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
 {
@@ -1536,6 +1544,15 @@ void inode_claim_rsv_space(struct inode *inode, qsize_t number)
 }
 EXPORT_SYMBOL(inode_claim_rsv_space);
 
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number)
+{
+       spin_lock(&inode->i_lock);
+       *inode_reserved_space(inode) += number;
+       __inode_sub_bytes(inode, number);
+       spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL(inode_reclaim_rsv_space);
+
 void inode_sub_rsv_space(struct inode *inode, qsize_t number)
 {
        spin_lock(&inode->i_lock);
@@ -1709,6 +1726,35 @@ int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
 }
 EXPORT_SYMBOL(dquot_claim_space_nodirty);
 
+/*
+ * Convert allocated space back to in-memory reserved quotas
+ */
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number)
+{
+       int cnt;
+
+       if (!dquot_active(inode)) {
+               inode_reclaim_rsv_space(inode, number);
+               return;
+       }
+
+       down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       spin_lock(&dq_data_lock);
+       /* Claim reserved quotas to allocated quotas */
+       for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+               if (inode->i_dquot[cnt])
+                       dquot_reclaim_reserved_space(inode->i_dquot[cnt],
+                                                    number);
+       }
+       /* Update inode bytes */
+       inode_reclaim_rsv_space(inode, number);
+       spin_unlock(&dq_data_lock);
+       mark_all_dquot_dirty(inode->i_dquot);
+       up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+       return;
+}
+EXPORT_SYMBOL(dquot_reclaim_space_nodirty);
+
 /*
  * This operation can block, but only after everything is updated
  */
index 04ce1ac20d20b393d13bdfcd0e24c8ca4cde743e..d0ea7ef75e264ef2cccf2c100d4e3a8c7393121c 100644 (file)
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -447,9 +447,8 @@ void inode_add_bytes(struct inode *inode, loff_t bytes)
 
 EXPORT_SYMBOL(inode_add_bytes);
 
-void inode_sub_bytes(struct inode *inode, loff_t bytes)
+void __inode_sub_bytes(struct inode *inode, loff_t bytes)
 {
-       spin_lock(&inode->i_lock);
        inode->i_blocks -= bytes >> 9;
        bytes &= 511;
        if (inode->i_bytes < bytes) {
@@ -457,6 +456,14 @@ void inode_sub_bytes(struct inode *inode, loff_t bytes)
                inode->i_bytes += 512;
        }
        inode->i_bytes -= bytes;
+}
+
+EXPORT_SYMBOL(__inode_sub_bytes);
+
+void inode_sub_bytes(struct inode *inode, loff_t bytes)
+{
+       spin_lock(&inode->i_lock);
+       __inode_sub_bytes(inode, bytes);
        spin_unlock(&inode->i_lock);
 }
 
index b92eadf92d72a06bcfebc64b345d3590977e5a40..2b00d92a6e6fcec18b0ccf532b25839813d0868e 100644 (file)
 #include <linux/string.h>
 #include <linux/uaccess.h>
 
+/*
+ * Autoloaded crypto modules should only use a prefixed name to avoid allowing
+ * arbitrary modules to be loaded. Loading from userspace may still need the
+ * unprefixed names, so retains those aliases as well.
+ * This uses __MODULE_INFO directly instead of MODULE_ALIAS because pre-4.3
+ * gcc (e.g. avr32 toolchain) uses __LINE__ for uniqueness, and this macro
+ * expands twice on the same line. Instead, use a separate base name for the
+ * alias.
+ */
+#define MODULE_ALIAS_CRYPTO(name)      \
+               __MODULE_INFO(alias, alias_userspace, name);    \
+               __MODULE_INFO(alias, alias_crypto, "crypto-" name)
+
 /*
  * Algorithm masks and types.
  */
index 65c2be22b601cc40c17b175e4a88f8980979cc62..d57bc5df7225af64e2a59f2756f988e7680e6cdb 100644 (file)
@@ -2489,6 +2489,7 @@ extern void generic_fillattr(struct inode *, struct kstat *);
 extern int vfs_getattr(struct path *, struct kstat *);
 void __inode_add_bytes(struct inode *inode, loff_t bytes);
 void inode_add_bytes(struct inode *inode, loff_t bytes);
+void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
index e988fa935b3c4ad22e5a6e47e8f4ccea0f0e1c3d..21891898ced0a9f3d90013aa44a9a30ae01d6311 100644 (file)
@@ -542,6 +542,26 @@ static inline int i2c_adapter_id(struct i2c_adapter *adap)
 
 #endif /* I2C */
 
+#if IS_ENABLED(CONFIG_OF)
+/* must call put_device() when done with returned i2c_client device */
+extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+
+#else
+
+static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+       return NULL;
+}
+
+static inline struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+       return NULL;
+}
+#endif /* CONFIG_OF */
+
 #if IS_ENABLED(CONFIG_ACPI_I2C)
 extern void acpi_i2c_register_devices(struct i2c_adapter *adap);
 #else
index 59f21325ed64c5d08d814efb74bd30cad9cdfcbe..43e3746d6ce53e111b2f8218974257343595ab42 100644 (file)
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include <linux/errno.h>
-#include <linux/kref.h>
+#include <linux/kobject.h>
 #include <linux/mod_devicetable.h>
 #include <linux/spinlock.h>
 #include <linux/topology.h>
 #include <linux/notifier.h>
+#include <linux/list.h>
 
 #include <asm/byteorder.h>
 #include <asm/errno.h>
@@ -37,6 +38,7 @@ struct property {
        struct property *next;
        unsigned long _flags;
        unsigned int unique_id;
+       struct bin_attribute attr;
 };
 
 #if defined(CONFIG_SPARC)
@@ -56,8 +58,7 @@ struct device_node {
        struct  device_node *sibling;
        struct  device_node *next;      /* next device of same type */
        struct  device_node *allnext;   /* next in list of all nodes */
-       struct  proc_dir_entry *pde;    /* this node's proc directory */
-       struct  kref kref;
+       struct  kobject kobj;
        unsigned long _flags;
        void    *data;
 #if defined(CONFIG_SPARC)
@@ -74,6 +75,31 @@ struct of_phandle_args {
        uint32_t args[MAX_PHANDLE_ARGS];
 };
 
+struct of_reconfig_data {
+       struct device_node      *dn;
+       struct property         *prop;
+       struct property         *old_prop;
+};
+
+/* initialize a node */
+extern struct kobj_type of_node_ktype;
+static inline void of_node_init(struct device_node *node)
+{
+       kobject_init(&node->kobj, &of_node_ktype);
+}
+
+/* true when node is initialized */
+static inline int of_node_is_initialized(struct device_node *node)
+{
+       return node && node->kobj.state_initialized;
+}
+
+/* true when node is attached (i.e. present on sysfs) */
+static inline int of_node_is_attached(struct device_node *node)
+{
+       return node && node->kobj.state_in_sysfs;
+}
+
 #ifdef CONFIG_OF_DYNAMIC
 extern struct device_node *of_node_get(struct device_node *node);
 extern void of_node_put(struct device_node *node);
@@ -109,11 +135,37 @@ static inline int of_node_check_flag(struct device_node *n, unsigned long flag)
        return test_bit(flag, &n->_flags);
 }
 
+static inline int of_node_test_and_set_flag(struct device_node *n,
+                                           unsigned long flag)
+{
+       return test_and_set_bit(flag, &n->_flags);
+}
+
 static inline void of_node_set_flag(struct device_node *n, unsigned long flag)
 {
        set_bit(flag, &n->_flags);
 }
 
+static inline void of_node_clear_flag(struct device_node *n, unsigned long flag)
+{
+       clear_bit(flag, &n->_flags);
+}
+
+static inline int of_property_check_flag(struct property *p, unsigned long flag)
+{
+       return test_bit(flag, &p->_flags);
+}
+
+static inline void of_property_set_flag(struct property *p, unsigned long flag)
+{
+       set_bit(flag, &p->_flags);
+}
+
+static inline void of_property_clear_flag(struct property *p, unsigned long flag)
+{
+       clear_bit(flag, &p->_flags);
+}
+
 extern struct device_node *of_find_all_nodes(struct device_node *prev);
 
 /*
@@ -154,6 +206,8 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)
 /* flag descriptions */
 #define OF_DYNAMIC     1 /* node and properties were allocated via kmalloc */
 #define OF_DETACHED    2 /* node has been detached from the device tree */
+#define OF_POPULATED   3 /* device already created for the node */
+#define OF_POPULATED_BUS       4 /* of_platform_populate recursed to children of this node */
 
 #define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
 #define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)
@@ -165,6 +219,8 @@ static inline const char *of_node_full_name(const struct device_node *np)
        return np ? np->full_name : "<no-node>";
 }
 
+#define for_each_of_allnodes(dn) \
+       for (dn = of_allnodes; dn; dn = dn->allnext)
 extern struct device_node *of_find_node_by_name(struct device_node *from,
        const char *name);
 #define for_each_node_by_name(dn, name) \
@@ -235,6 +291,8 @@ extern struct device_node *of_find_node_with_property(
 extern struct property *of_find_property(const struct device_node *np,
                                         const char *name,
                                         int *lenp);
+extern int of_property_count_elems_of_size(const struct device_node *np,
+                               const char *propname, int elem_size);
 extern int of_property_read_u32_index(const struct device_node *np,
                                       const char *propname,
                                       u32 index, u32 *out_value);
@@ -283,6 +341,9 @@ extern struct device_node *of_parse_phandle(const struct device_node *np,
 extern int of_parse_phandle_with_args(const struct device_node *np,
        const char *list_name, const char *cells_name, int index,
        struct of_phandle_args *out_args);
+extern int of_parse_phandle_with_fixed_args(const struct device_node *np,
+       const char *list_name, int cells_count, int index,
+       struct of_phandle_args *out_args);
 extern int of_count_phandle_with_args(const struct device_node *np,
        const char *list_name, const char *cells_name);
 
@@ -302,15 +363,6 @@ extern int of_update_property(struct device_node *np, struct property *newprop);
 #define OF_RECONFIG_REMOVE_PROPERTY    0x0004
 #define OF_RECONFIG_UPDATE_PROPERTY    0x0005
 
-struct of_prop_reconfig {
-       struct device_node      *dn;
-       struct property         *prop;
-};
-
-extern int of_reconfig_notifier_register(struct notifier_block *);
-extern int of_reconfig_notifier_unregister(struct notifier_block *);
-extern int of_reconfig_notify(unsigned long, void *);
-
 extern int of_attach_node(struct device_node *);
 extern int of_detach_node(struct device_node *);
 
@@ -412,6 +464,11 @@ static inline struct device_node *of_find_compatible_node(
        return NULL;
 }
 
+static inline int of_property_count_elems_of_size(const struct device_node *np,
+                       const char *propname, int elem_size)
+{
+       return -ENOSYS;
+}
 
 static inline int of_property_read_u32_index(const struct device_node *np,
                        const char *propname, u32 index, u32 *out_value)
@@ -502,6 +559,13 @@ static inline int of_parse_phandle_with_args(struct device_node *np,
        return -ENOSYS;
 }
 
+static inline int of_parse_phandle_with_fixed_args(const struct device_node *np,
+       const char *list_name, int cells_count, int index,
+       struct of_phandle_args *out_args)
+{
+       return -ENOSYS;
+}
+
 static inline int of_count_phandle_with_args(struct device_node *np,
                                             const char *list_name,
                                             const char *cells_name)
@@ -605,6 +669,74 @@ static inline int of_property_read_string_index(struct device_node *np,
        return rc < 0 ? rc : 0;
 }
 
+/**
+ * of_property_count_u8_elems - Count the number of u8 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u8 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u8 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u8_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u8));
+}
+
+/**
+ * of_property_count_u16_elems - Count the number of u16 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u16 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u16 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u16_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u16));
+}
+
+/**
+ * of_property_count_u32_elems - Count the number of u32 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u32 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u32 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u32_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u32));
+}
+
+/**
+ * of_property_count_u64_elems - Count the number of u64 elements in a property
+ *
+ * @np:                device node from which the property value is to be read.
+ * @propname:  name of the property to be searched.
+ *
+ * Search for a property in a device node and count the number of u64 elements
+ * in it. Returns number of elements on success, -EINVAL if the property does
+ * not exist or its length does not match a multiple of u64 and -ENODATA if the
+ * property does not have a value.
+ */
+static inline int of_property_count_u64_elems(const struct device_node *np,
+                               const char *propname)
+{
+       return of_property_count_elems_of_size(np, propname, sizeof(u64));
+}
+
 /**
 * of_property_read_bool - Find a property
  * @np:                device node from which the property value is to be read.
@@ -642,14 +774,144 @@ static inline int of_property_read_u32(const struct device_node *np,
        return of_property_read_u32_array(np, propname, out_value, 1);
 }
 
-#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
-extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
-extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
-extern void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
-                                        struct property *prop);
-extern void proc_device_tree_update_prop(struct proc_dir_entry *pde,
-                                        struct property *newprop,
-                                        struct property *oldprop);
+/**
+ * struct of_changeset_entry   - Holds a changeset entry
+ *
+ * @node:      list_head for the log list
+ * @action:    notifier action
+ * @np:                pointer to the device node affected
+ * @prop:      pointer to the property affected
+ * @old_prop:  hold a pointer to the original property
+ *
+ * Every modification of the device tree during a changeset
+ * is held in a list of of_changeset_entry structures.
+ * That way we can recover from a partial application, or we can
+ * revert the changeset
+ */
+struct of_changeset_entry {
+       struct list_head node;
+       unsigned long action;
+       struct device_node *np;
+       struct property *prop;
+       struct property *old_prop;
+};
+
+/**
+ * struct of_changeset - changeset tracker structure
+ *
+ * @entries:   list_head for the changeset entries
+ *
+ * changesets are a convenient way to apply bulk changes to the
+ * live tree. In case of an error, changes are rolled-back.
+ * changesets live on after initial application, and if not
+ * destroyed after use, they can be reverted in one single call.
+ */
+struct of_changeset {
+       struct list_head entries;
+};
+
+enum of_reconfig_change {
+       OF_RECONFIG_NO_CHANGE = 0,
+       OF_RECONFIG_CHANGE_ADD,
+       OF_RECONFIG_CHANGE_REMOVE,
+};
+
+#ifdef CONFIG_OF_DYNAMIC
+extern int of_reconfig_notifier_register(struct notifier_block *);
+extern int of_reconfig_notifier_unregister(struct notifier_block *);
+extern int of_reconfig_notify(unsigned long, struct of_reconfig_data *rd);
+extern int of_reconfig_get_state_change(unsigned long action,
+                                       struct of_reconfig_data *arg);
+
+extern void of_changeset_init(struct of_changeset *ocs);
+extern void of_changeset_destroy(struct of_changeset *ocs);
+extern int of_changeset_apply(struct of_changeset *ocs);
+extern int of_changeset_revert(struct of_changeset *ocs);
+extern int of_changeset_action(struct of_changeset *ocs,
+               unsigned long action, struct device_node *np,
+               struct property *prop);
+
+static inline int of_changeset_attach_node(struct of_changeset *ocs,
+               struct device_node *np)
+{
+       return of_changeset_action(ocs, OF_RECONFIG_ATTACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_detach_node(struct of_changeset *ocs,
+               struct device_node *np)
+{
+       return of_changeset_action(ocs, OF_RECONFIG_DETACH_NODE, np, NULL);
+}
+
+static inline int of_changeset_add_property(struct of_changeset *ocs,
+               struct device_node *np, struct property *prop)
+{
+       return of_changeset_action(ocs, OF_RECONFIG_ADD_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_remove_property(struct of_changeset *ocs,
+               struct device_node *np, struct property *prop)
+{
+       return of_changeset_action(ocs, OF_RECONFIG_REMOVE_PROPERTY, np, prop);
+}
+
+static inline int of_changeset_update_property(struct of_changeset *ocs,
+               struct device_node *np, struct property *prop)
+{
+       return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
+}
+#else /* CONFIG_OF_DYNAMIC */
+static inline int of_reconfig_notifier_register(struct notifier_block *nb)
+{
+       return -EINVAL;
+}
+static inline int of_reconfig_notifier_unregister(struct notifier_block *nb)
+{
+       return -EINVAL;
+}
+static inline int of_reconfig_notify(unsigned long action,
+                                    struct of_reconfig_data *arg)
+{
+       return -EINVAL;
+}
+static inline int of_reconfig_get_state_change(unsigned long action,
+                                               struct of_reconfig_data *arg)
+{
+       return -EINVAL;
+}
+#endif /* CONFIG_OF_DYNAMIC */
+
+/* CONFIG_OF_RESOLVE api */
+extern int of_resolve_phandles(struct device_node *tree);
+
+/**
+ * Overlay support
+ */
+
+#ifdef CONFIG_OF_OVERLAY
+
+/* ID based overlays; the API for external users */
+int of_overlay_create(struct device_node *tree);
+int of_overlay_destroy(int id);
+int of_overlay_destroy_all(void);
+
+#else
+
+static inline int of_overlay_create(struct device_node *tree)
+{
+       return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy(int id)
+{
+       return -ENOTSUPP;
+}
+
+static inline int of_overlay_destroy_all(void)
+{
+       return -ENOTSUPP;
+}
+
 #endif
 
 #endif /* _LINUX_OF_H */
index cfb545cd86b5a1baaf6c39b7ba91f2d90c129c43..686786c87c73c220d19e4f79d801028f1994b6a1 100644 (file)
 #ifndef __LINUX_OF_I2C_H
 #define __LINUX_OF_I2C_H
 
-#if defined(CONFIG_OF_I2C) || defined(CONFIG_OF_I2C_MODULE)
 #include <linux/i2c.h>
 
-extern void of_i2c_register_devices(struct i2c_adapter *adap);
-
-/* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
-
-/* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(
-                                               struct device_node *node);
-
-#else
-static inline void of_i2c_register_devices(struct i2c_adapter *adap)
-{
-       return;
-}
-
-static inline struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
-{
-       return NULL;
-}
-
-/* must call put_device() when done with returned i2c_adapter device */
-static inline struct i2c_adapter *of_find_i2c_adapter_by_node(
-                                               struct device_node *node)
-{
-       return NULL;
-}
-#endif /* CONFIG_OF_I2C */
+static inline void of_i2c_register_devices(struct i2c_adapter *adap) { };
 
 #endif /* __LINUX_OF_I2C_H */
index 2a93b64a3869f2af9ea623b8ddc0a435668b3c8c..30ebd762063a07e8bfc4f75d41e79b4040456fce 100644 (file)
@@ -95,6 +95,7 @@ extern int of_platform_populate(struct device_node *root,
                                const struct of_device_id *matches,
                                const struct of_dev_auxdata *lookup,
                                struct device *parent);
+extern void of_platform_depopulate(struct device *parent);
 #endif /* CONFIG_OF_ADDRESS */
 
 #endif /* CONFIG_OF_DEVICE */
@@ -109,6 +110,13 @@ static inline int of_platform_populate(struct device_node *root,
 {
        return -ENODEV;
 }
-#endif /* !CONFIG_OF_ADDRESS */
+static inline void of_platform_depopulate(struct device *parent) { }
+#endif
+
+#ifdef CONFIG_OF_DYNAMIC
+extern void of_platform_register_reconfig_notifier(void);
+#else
+static inline void of_platform_register_reconfig_notifier(void) { }
+#endif
 
 #endif /* _LINUX_OF_PLATFORM_H */
index 75d01760c91197e0a44066773b52af5bc063349b..289884121d30eba768b5167110c70aa8eaaf5bb5 100644 (file)
@@ -35,6 +35,7 @@ enum pstore_type_id {
        PSTORE_TYPE_MCE         = 1,
        PSTORE_TYPE_CONSOLE     = 2,
        PSTORE_TYPE_FTRACE      = 3,
+       PSTORE_TYPE_PMSG        = 4, /* Backport: 7 in upstream 3.19.0-rc3 */
        PSTORE_TYPE_UNKNOWN     = 255
 };
 
index 17baad3a0c57c178ba35f2fc71b67bda7aee946f..712757f320a459262cb23d0c16d4651c78f4791d 100644 (file)
@@ -83,6 +83,7 @@ struct ramoops_platform_data {
        unsigned long   record_size;
        unsigned long   console_size;
        unsigned long   ftrace_size;
+       unsigned long   pmsg_size;
        int             dump_oops;
        struct persistent_ram_ecc_info ecc_info;
 };
index 1c50093ae656d97211374aeaf30b6998a9e0a764..6965fe394c3bb9d3b66681a8bfd1b9f84a970def 100644 (file)
@@ -41,6 +41,7 @@ void __quota_error(struct super_block *sb, const char *func,
 void inode_add_rsv_space(struct inode *inode, qsize_t number);
 void inode_claim_rsv_space(struct inode *inode, qsize_t number);
 void inode_sub_rsv_space(struct inode *inode, qsize_t number);
+void inode_reclaim_rsv_space(struct inode *inode, qsize_t number);
 
 void dquot_initialize(struct inode *inode);
 void dquot_drop(struct inode *inode);
@@ -59,6 +60,7 @@ int dquot_alloc_inode(const struct inode *inode);
 
 int dquot_claim_space_nodirty(struct inode *inode, qsize_t number);
 void dquot_free_inode(const struct inode *inode);
+void dquot_reclaim_space_nodirty(struct inode *inode, qsize_t number);
 
 int dquot_disable(struct super_block *sb, int type, unsigned int flags);
 /* Suspend quotas on remount RO */
@@ -238,6 +240,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
        return 0;
 }
 
+static inline int dquot_reclaim_space_nodirty(struct inode *inode,
+                                             qsize_t number)
+{
+       inode_sub_bytes(inode, number);
+       return 0;
+}
+
 static inline int dquot_disable(struct super_block *sb, int type,
                unsigned int flags)
 {
@@ -336,6 +345,12 @@ static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
        return ret;
 }
 
+static inline void dquot_reclaim_block(struct inode *inode, qsize_t nr)
+{
+       dquot_reclaim_space_nodirty(inode, nr << inode->i_blkbits);
+       mark_inode_dirty_sync(inode);
+}
+
 static inline void dquot_free_space_nodirty(struct inode *inode, qsize_t nr)
 {
        __dquot_free_space(inode, nr, 0);
index d5d229b2e5af1815d38d7d87d217ee8340b9ec1d..7d532a32ff3abec05d9466ec795f9693c5e49c35 100644 (file)
@@ -173,6 +173,19 @@ extern void getboottime(struct timespec *ts);
 extern void monotonic_to_bootbased(struct timespec *ts);
 extern void get_monotonic_boottime(struct timespec *ts);
 
+static inline bool timeval_valid(const struct timeval *tv)
+{
+       /* Dates before 1970 are bogus */
+       if (tv->tv_sec < 0)
+               return false;
+
+       /* Can't have more microseconds than a second */
+       if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+               return false;
+
+       return true;
+}
+
 extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
index 8ad2dbd0c296783850c6680bba0c197cc42d30a0..8e8b06f1ba4ae3eb5ec51ffa97e97e74d06fadb3 100644 (file)
@@ -23,6 +23,7 @@ struct wifi_platform_data {
        int (*set_carddetect)(int val);
        void *(*mem_prealloc)(int section, unsigned long size);
        int (*get_mac_addr)(unsigned char *buf);
+       int (*get_wake_irq)(void);
        void *(*get_country_code)(char *ccode, u32 flags);
 };
 
index cfa6b2ebfc0e7e16afad1defae51a5feae4299c0..2cbf0baa622613f744f4ce5ef90e67259d130dd5 100644 (file)
@@ -203,6 +203,7 @@ extern int ipv6_dev_ac_inc(struct net_device *dev, const struct in6_addr *addr);
 extern int __ipv6_dev_ac_dec(struct inet6_dev *idev, const struct in6_addr *addr);
 extern bool ipv6_chk_acast_addr(struct net *net, struct net_device *dev,
                                const struct in6_addr *addr);
+extern void ipv6_ac_destroy_dev(struct inet6_dev *idev);
 
 
 /* Device notifier */
index 405887bec8b35a1c00c8160d83b84cdd3e8dae84..c5268778832e36df8c1dc8c77c6beb58d56289fc 100644 (file)
@@ -1,4 +1,5 @@
 # UAPI Header export list
+header-y += android/
 header-y += byteorder/
 header-y += can/
 header-y += caif/
diff --git a/include/uapi/linux/android/Kbuild b/include/uapi/linux/android/Kbuild
new file mode 100644 (file)
index 0000000..ca011ee
--- /dev/null
@@ -0,0 +1,2 @@
+# UAPI Header export list
+header-y += binder.h
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
new file mode 100644 (file)
index 0000000..c653800
--- /dev/null
@@ -0,0 +1,349 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_BINDER_H
+#define _UAPI_LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+       ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
+enum {
+       BINDER_TYPE_BINDER      = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+       BINDER_TYPE_HANDLE      = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+       BINDER_TYPE_FD          = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+       FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+       FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+#ifdef BINDER_IPC_32BIT
+typedef __u32 binder_size_t;
+typedef __u32 binder_uintptr_t;
+#else
+typedef __u64 binder_size_t;
+typedef __u64 binder_uintptr_t;
+#endif
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes.  The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur.  The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+       /* 8 bytes for large_flat_header. */
+       __u32   type;
+       __u32   flags;
+
+       /* 8 bytes of data. */
+       union {
+               binder_uintptr_t        binder; /* local object */
+               __u32                   handle; /* remote object */
+       };
+
+       /* extra data associated with local object */
+       binder_uintptr_t        cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bits the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+       binder_size_t           write_size;     /* bytes to write */
+       binder_size_t           write_consumed; /* bytes consumed by driver */
+       binder_uintptr_t        write_buffer;
+       binder_size_t           read_size;      /* bytes to read */
+       binder_size_t           read_consumed;  /* bytes consumed by driver */
+       binder_uintptr_t        read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+       /* driver protocol version -- increment with incompatible change */
+       __s32   protocol_version;
+};
+
+/* This is the current protocol version. */
+#ifdef BINDER_IPC_32BIT
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+#else
+#define BINDER_CURRENT_PROTOCOL_VERSION 8
+#endif
+
+#define BINDER_WRITE_READ              _IOWR('b', 1, struct binder_write_read)
+#define        BINDER_SET_IDLE_TIMEOUT         _IOW('b', 3, __s64)
+#define        BINDER_SET_MAX_THREADS          _IOW('b', 5, __u32)
+#define        BINDER_SET_IDLE_PRIORITY        _IOW('b', 6, __s32)
+#define        BINDER_SET_CONTEXT_MGR          _IOW('b', 7, __s32)
+#define        BINDER_THREAD_EXIT              _IOW('b', 8, __s32)
+#define BINDER_VERSION                 _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted.  This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process.  That is, the process is being destroyed.
+ * You should handle this by exiting from your process.  Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
+
+enum transaction_flags {
+       TF_ONE_WAY      = 0x01, /* this is a one-way call: async, no return */
+       TF_ROOT_OBJECT  = 0x04, /* contents are the component's root object */
+       TF_STATUS_CODE  = 0x08, /* contents are a 32-bit status code */
+       TF_ACCEPT_FDS   = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+       /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+        * identifying the target and contents of the transaction.
+        */
+       union {
+               /* target descriptor of command transaction */
+               __u32   handle;
+               /* target descriptor of return transaction */
+               binder_uintptr_t ptr;
+       } target;
+       binder_uintptr_t        cookie; /* target object cookie */
+       __u32           code;           /* transaction command */
+
+       /* General information about the transaction. */
+       __u32           flags;
+       pid_t           sender_pid;
+       uid_t           sender_euid;
+       binder_size_t   data_size;      /* number of bytes of data */
+       binder_size_t   offsets_size;   /* number of bytes of offsets */
+
+       /* If this transaction is inline, the data immediately
+        * follows here; otherwise, it ends with a pointer to
+        * the data buffer.
+        */
+       union {
+               struct {
+                       /* transaction data */
+                       binder_uintptr_t        buffer;
+                       /* offsets from buffer to flat_binder_object structs */
+                       binder_uintptr_t        offsets;
+               } ptr;
+               __u8    buf[8];
+       } data;
+};
+
+struct binder_ptr_cookie {
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
+};
+
+struct binder_handle_cookie {
+       __u32 handle;
+       binder_uintptr_t cookie;
+} __packed;
+
+struct binder_pri_desc {
+       __s32 priority;
+       __u32 desc;
+};
+
+struct binder_pri_ptr_cookie {
+       __s32 priority;
+       binder_uintptr_t ptr;
+       binder_uintptr_t cookie;
+};
+
+enum binder_driver_return_protocol {
+       BR_ERROR = _IOR('r', 0, __s32),
+       /*
+        * int: error code
+        */
+
+       BR_OK = _IO('r', 1),
+       /* No parameters! */
+
+       BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+       BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the received command.
+        */
+
+       BR_ACQUIRE_RESULT = _IOR('r', 4, __s32),
+       /*
+        * not currently supported
+        * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+        * Else the remote object has acquired a primary reference.
+        */
+
+       BR_DEAD_REPLY = _IO('r', 5),
+       /*
+        * The target of the last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) is no longer with us.  No parameters.
+        */
+
+       BR_TRANSACTION_COMPLETE = _IO('r', 6),
+       /*
+        * No parameters... always refers to the last transaction requested
+        * (including replies).  Note that this will be sent even for
+        * asynchronous transactions.
+        */
+
+       BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+       BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+       BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+       BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+       /*
+        * void *:      ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+       /*
+        * not currently supported
+        * int: priority
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BR_NOOP = _IO('r', 12),
+       /*
+        * No parameters.  Do nothing and examine the next command.  It exists
+        * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+        */
+
+       BR_SPAWN_LOOPER = _IO('r', 13),
+       /*
+        * No parameters.  The driver has determined that a process has no
+        * threads waiting to service incoming transactions.  When a process
+        * receives this command, it must spawn a new service thread and
+        * register it via bcENTER_LOOPER.
+        */
+
+       BR_FINISHED = _IO('r', 14),
+       /*
+        * not currently supported
+        * stop threadpool thread
+        */
+
+       BR_DEAD_BINDER = _IOR('r', 15, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+       BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+
+       BR_FAILED_REPLY = _IO('r', 17),
+       /*
+        * The last transaction (either a bcTRANSACTION or
+        * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory).  No parameters.
+        */
+};
+
+enum binder_driver_command_protocol {
+       BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+       BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+       /*
+        * binder_transaction_data: the sent command.
+        */
+
+       BC_ACQUIRE_RESULT = _IOW('c', 2, __s32),
+       /*
+        * not currently supported
+        * int:  0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+        * Else you have acquired a primary reference on the object.
+        */
+
+       BC_FREE_BUFFER = _IOW('c', 3, binder_uintptr_t),
+       /*
+        * void *: ptr to transaction data received on a read
+        */
+
+       BC_INCREFS = _IOW('c', 4, __u32),
+       BC_ACQUIRE = _IOW('c', 5, __u32),
+       BC_RELEASE = _IOW('c', 6, __u32),
+       BC_DECREFS = _IOW('c', 7, __u32),
+       /*
+        * int: descriptor
+        */
+
+       BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+       BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+       /*
+        * void *: ptr to binder
+        * void *: cookie for binder
+        */
+
+       BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+       /*
+        * not currently supported
+        * int: priority
+        * int: descriptor
+        */
+
+       BC_REGISTER_LOOPER = _IO('c', 11),
+       /*
+        * No parameters.
+        * Register a spawned looper thread with the device.
+        */
+
+       BC_ENTER_LOOPER = _IO('c', 12),
+       BC_EXIT_LOOPER = _IO('c', 13),
+       /*
+        * No parameters.
+        * These two commands are sent as an application-level thread
+        * enters and exits the binder loop, respectively.  They are
+        * used so the binder can have an accurate count of the number
+        * of looping threads it has available.
+        */
+
+       BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_handle_cookie),
+       /*
+        * int: handle
+        * void *: cookie
+        */
+
+       BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_handle_cookie),
+       /*
+        * int: handle
+        * void *: cookie
+        */
+
+       BC_DEAD_BINDER_DONE = _IOW('c', 16, binder_uintptr_t),
+       /*
+        * void *: cookie
+        */
+};
+
+#endif /* _UAPI_LINUX_BINDER_H */
+
index bbb5f65665b94e7908d080d2f8ab54ea4ed72951..da5f2ffa7106189a06c92d864b18a50521f8a215 100644 (file)
@@ -100,7 +100,6 @@ EXPORT_SYMBOL_GPL(resume_device_irqs);
 int check_wakeup_irqs(void)
 {
        struct irq_desc *desc;
-       char suspend_abort[MAX_SUSPEND_ABORT_LEN];
        int irq;
 
        for_each_irq_desc(irq, desc) {
index 768ec2301d4f65fd1750744141c3c3d55fbe7c86..b1a203d9b1e7b84508f5cfdf2ead87b6e84b59f0 100644 (file)
@@ -2383,26 +2383,6 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        else
                                return -EINVAL;
                        break;
-               case PR_SET_TIMERSLACK_PID:
-                       if (current->pid != (pid_t)arg3 &&
-                                       !capable(CAP_SYS_NICE))
-                               return -EPERM;
-                       rcu_read_lock();
-                       tsk = find_task_by_pid_ns((pid_t)arg3, &init_pid_ns);
-                       if (tsk == NULL) {
-                               rcu_read_unlock();
-                               return -EINVAL;
-                       }
-                       get_task_struct(tsk);
-                       rcu_read_unlock();
-                       if (arg2 <= 0)
-                               tsk->timer_slack_ns =
-                                       tsk->default_timer_slack_ns;
-                       else
-                               tsk->timer_slack_ns = arg2;
-                       put_task_struct(tsk);
-                       error = 0;
-                       break;
                default:
                        return -EINVAL;
                }
@@ -2422,6 +2402,26 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
        case PR_GET_TID_ADDRESS:
                error = prctl_get_tid_address(me, (int __user **)arg2);
                break;
+       case PR_SET_TIMERSLACK_PID:
+               if (task_pid_vnr(current) != (pid_t)arg3 &&
+                               !capable(CAP_SYS_NICE))
+                       return -EPERM;
+               rcu_read_lock();
+               tsk = find_task_by_vpid((pid_t)arg3);
+               if (tsk == NULL) {
+                       rcu_read_unlock();
+                       return -EINVAL;
+               }
+               get_task_struct(tsk);
+               rcu_read_unlock();
+               if (arg2 <= 0)
+                       tsk->timer_slack_ns =
+                               tsk->default_timer_slack_ns;
+               else
+                       tsk->timer_slack_ns = arg2;
+               put_task_struct(tsk);
+               error = 0;
+               break;
        case PR_SET_CHILD_SUBREAPER:
                me->signal->is_child_subreaper = !!arg2;
                break;
index d21398e6da87f19d62e3b20fd1d7ded9ffa9cabf..31ec845d0e80f615ba01a1dfead04d4a6c0dc8fc 100644 (file)
@@ -195,6 +195,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
        if (tv) {
                if (copy_from_user(&user_tv, tv, sizeof(*tv)))
                        return -EFAULT;
+
+               if (!timeval_valid(&user_tv))
+                       return -EINVAL;
+
                new_ts.tv_sec = user_tv.tv_sec;
                new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
        }
index af8d1d4f3d55156eaae7936b1a34d8cd7141248f..28db9bedc857103bc9b35c69602a82598e8828b0 100644 (file)
@@ -631,6 +631,13 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
+       if (txc->modes & ADJ_FREQUENCY) {
+               if (LONG_MIN / PPM_SCALE > txc->freq)
+                       return -EINVAL;
+               if (LONG_MAX / PPM_SCALE < txc->freq)
+                       return -EINVAL;
+       }
+
        return 0;
 }
 
index a2c7e437796039b7e24fc992222379486b9a1d9c..a73834513b2185c080cfef00fe74fd1babab8ee1 100644 (file)
@@ -1947,17 +1947,13 @@ static void pool_mayday_timeout(unsigned long __pool)
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Does GFP_KERNEL allocations.  Called only from
  * manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
 __releases(&pool->lock)
 __acquires(&pool->lock)
 {
        if (!need_to_create_worker(pool))
-               return false;
+               return;
 restart:
        spin_unlock_irq(&pool->lock);
 
@@ -1974,7 +1970,7 @@ restart:
                        start_worker(worker);
                        if (WARN_ON_ONCE(need_to_create_worker(pool)))
                                goto restart;
-                       return true;
+                       return;
                }
 
                if (!need_to_create_worker(pool))
@@ -1991,7 +1987,7 @@ restart:
        spin_lock_irq(&pool->lock);
        if (need_to_create_worker(pool))
                goto restart;
-       return true;
+       return;
 }
 
 /**
@@ -2004,15 +2000,9 @@ restart:
  * LOCKING:
  * spin_lock_irq(pool->lock) which may be released and regrabbed
  * multiple times.  Called only from manager.
- *
- * RETURNS:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
  */
-static bool maybe_destroy_workers(struct worker_pool *pool)
+static void maybe_destroy_workers(struct worker_pool *pool)
 {
-       bool ret = false;
-
        while (too_many_workers(pool)) {
                struct worker *worker;
                unsigned long expires;
@@ -2026,10 +2016,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
                }
 
                destroy_worker(worker);
-               ret = true;
        }
-
-       return ret;
 }
 
 /**
@@ -2049,13 +2036,14 @@ static bool maybe_destroy_workers(struct worker_pool *pool)
  * multiple times.  Does GFP_KERNEL allocations.
  *
  * RETURNS:
- * spin_lock_irq(pool->lock) which may be released and regrabbed
- * multiple times.  Does GFP_KERNEL allocations.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
  */
 static bool manage_workers(struct worker *worker)
 {
        struct worker_pool *pool = worker->pool;
-       bool ret = false;
 
        /*
         * Managership is governed by two mutexes - manager_arb and
@@ -2079,7 +2067,7 @@ static bool manage_workers(struct worker *worker)
         * manager_mutex.
         */
        if (!mutex_trylock(&pool->manager_arb))
-               return ret;
+               return false;
 
        /*
         * With manager arbitration won, manager_mutex would be free in
@@ -2089,7 +2077,6 @@ static bool manage_workers(struct worker *worker)
                spin_unlock_irq(&pool->lock);
                mutex_lock(&pool->manager_mutex);
                spin_lock_irq(&pool->lock);
-               ret = true;
        }
 
        pool->flags &= ~POOL_MANAGE_WORKERS;
@@ -2098,12 +2085,12 @@ static bool manage_workers(struct worker *worker)
         * Destroy and then create so that may_start_working() is true
         * on return.
         */
-       ret |= maybe_destroy_workers(pool);
-       ret |= maybe_create_worker(pool);
+       maybe_destroy_workers(pool);
+       maybe_create_worker(pool);
 
        mutex_unlock(&pool->manager_mutex);
        mutex_unlock(&pool->manager_arb);
-       return ret;
+       return true;
 }
 
 /**
index 31c5f7675fbfa270a70a005247c8a3773067f5c0..f504027d66a8b1e8469daaba016c53559383333e 100644 (file)
@@ -184,7 +184,7 @@ static int INIT get_next_block(struct bunzip_data *bd)
        if (get_bits(bd, 1))
                return RETVAL_OBSOLETE_INPUT;
        origPtr = get_bits(bd, 24);
-       if (origPtr > dbufSize)
+       if (origPtr >= dbufSize)
                return RETVAL_DATA_ERROR;
        /* mapping table: if some byte values are never used (encoding things
           like ascii text), the compression code removes the gaps to have fewer
index 1fd846463d333d4422d9d7c8a1274abe6e779b48..fd776c85852005f360cfa854d9e1e539fb9c8922 100644 (file)
@@ -1861,7 +1861,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
                        break;
 
-               if (tso_segs == 1) {
+               if (tso_segs == 1 || !sk->sk_gso_max_segs) {
                        if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
                                                     (tcp_skb_is_last(sk, skb) ?
                                                      nonagle : TCP_NAGLE_PUSH))))
@@ -1898,7 +1898,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
                }
 
                limit = mss_now;
-               if (tso_segs > 1 && !tcp_urg_mode(tp))
+               if (tso_segs > 1 && sk->sk_gso_max_segs && !tcp_urg_mode(tp))
                        limit = tcp_mss_split_point(sk, skb, mss_now,
                                                    min_t(unsigned int,
                                                          cwnd_quota,
index 05e05968c5dd1c566e6139098dcb98fe8a2c2dfd..69e8b50a45bd5f28b49216878a9f9af611e26bcc 100644 (file)
@@ -3201,11 +3201,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 
        write_unlock_bh(&idev->lock);
 
-       /* Step 5: Discard multicast list */
-       if (how)
+       /* Step 5: Discard anycast and multicast list */
+       if (how) {
+               ipv6_ac_destroy_dev(idev);
                ipv6_mc_destroy_dev(idev);
-       else
+       } else {
                ipv6_mc_down(idev);
+       }
 
        idev->tstamp = jiffies;
 
index 5a80f15a9de21e5b53be2087815c5c01abdbe582..1e97d08865970efa6c652386992b99e67c088b77 100644 (file)
@@ -341,6 +341,27 @@ static int ipv6_dev_ac_dec(struct net_device *dev, const struct in6_addr *addr)
        return __ipv6_dev_ac_dec(idev, addr);
 }
 
+void ipv6_ac_destroy_dev(struct inet6_dev *idev)
+{
+       struct ifacaddr6 *aca;
+
+       write_lock_bh(&idev->lock);
+       while ((aca = idev->ac_list) != NULL) {
+               idev->ac_list = aca->aca_next;
+               write_unlock_bh(&idev->lock);
+
+               addrconf_leave_solict(idev, &aca->aca_addr);
+
+               dst_hold(&aca->aca_rt->dst);
+               ip6_del_rt(aca->aca_rt);
+
+               aca_put(aca);
+
+               write_lock_bh(&idev->lock);
+       }
+       write_unlock_bh(&idev->lock);
+}
+
 /*
  *     check if the interface has this anycast address
  *     called with rcu_read_lock()
index f7713900798308557c134b00e9db3975e3697ecb..3b04dfd34554ea63a677f87564a5a048b2796ab1 100644 (file)
@@ -1754,6 +1754,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
        if (*op < IP_SET_OP_VERSION) {
                /* Check the version at the beginning of operations */
                struct ip_set_req_version *req_version = data;
+
+               if (*len < sizeof(struct ip_set_req_version)) {
+                       ret = -EINVAL;
+                       goto done;
+               }
+
                if (req_version->version != IPSET_PROTOCOL) {
                        ret = -EPROTO;
                        goto done;
index 77c173282f388ce81bbd54ea6e41761656d95045..4a662f15eaee8cb04d57cc0551a1d99b9fae8792 100644 (file)
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct nf_conn *ct;
        struct net *net;
 
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
@@ -321,6 +321,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
        struct ip_vs_conn *n_cp;
        struct net *net;
 
+       /* no diff required for incoming packets */
+       *diff = 0;
+
 #ifdef CONFIG_IP_VS_IPV6
        /* This application helper doesn't work with IPv6 yet,
         * so turn this into a no-op for IPv6 packets
@@ -329,9 +332,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
                return 1;
 #endif
 
-       /* no diff required for incoming packets */
-       *diff = 0;
-
        /* Only useful for established sessions */
        if (cp->state != IP_VS_TCP_S_ESTABLISHED)
                return 1;
index f2c01df7b6b573366dd84a3bd1296c2eec739e7f..1f576dab947257631ad8e034a67af7e69615b4f2 100644 (file)
@@ -1658,6 +1658,7 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
        struct sock *sk;
        uid_t sock_uid;
        bool res;
+       bool set_sk_callback_lock = false;
 
        if (unlikely(module_passive))
                return (info->match ^ info->invert) == 0;
@@ -1715,6 +1716,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
        MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
                 par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
        if (sk != NULL) {
+               set_sk_callback_lock = true;
+               read_lock_bh(&sk->sk_callback_lock);
                MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
                        par->hooknum, sk, sk->sk_socket,
                        sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
@@ -1794,6 +1797,8 @@ static bool qtaguid_mt(const struct sk_buff *skb, struct xt_action_param *par)
 put_sock_ret_res:
        if (got_sock)
                xt_socket_put_sk(sk);
+       if (set_sk_callback_lock)
+               read_unlock_bh(&sk->sk_callback_lock);
 ret_res:
        MT_DEBUG("qtaguid[%d]: left %d\n", par->hooknum, res);
        return res;
index afe41178c9fb7f8ff00533c5b148a6b0ff068346..f7ad5c630b654fa193d3e95e7fc21cd72c0927e7 100644 (file)
@@ -374,14 +374,14 @@ out:
        return err;
 }
 
-static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr)
+static void netlink_frame_flush_dcache(const struct nl_mmap_hdr *hdr, unsigned int nm_len)
 {
 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
        struct page *p_start, *p_end;
 
        /* First page is flushed through netlink_{get,set}_status */
        p_start = pgvec_to_page(hdr + PAGE_SIZE);
-       p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + hdr->nm_len - 1);
+       p_end   = pgvec_to_page((void *)hdr + NL_MMAP_HDRLEN + nm_len - 1);
        while (p_start <= p_end) {
                flush_dcache_page(p_start);
                p_start++;
@@ -399,9 +399,9 @@ static enum nl_mmap_status netlink_get_status(const struct nl_mmap_hdr *hdr)
 static void netlink_set_status(struct nl_mmap_hdr *hdr,
                               enum nl_mmap_status status)
 {
+       smp_mb();
        hdr->nm_status = status;
        flush_dcache_page(pgvec_to_page(hdr));
-       smp_wmb();
 }
 
 static struct nl_mmap_hdr *
@@ -563,24 +563,16 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
        struct nl_mmap_hdr *hdr;
        struct sk_buff *skb;
        unsigned int maxlen;
-       bool excl = true;
        int err = 0, len = 0;
 
-       /* Netlink messages are validated by the receiver before processing.
-        * In order to avoid userspace changing the contents of the message
-        * after validation, the socket and the ring may only be used by a
-        * single process, otherwise we fall back to copying.
-        */
-       if (atomic_long_read(&sk->sk_socket->file->f_count) > 1 ||
-           atomic_read(&nlk->mapped) > 1)
-               excl = false;
-
        mutex_lock(&nlk->pg_vec_lock);
 
        ring   = &nlk->tx_ring;
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;
 
        do {
+               unsigned int nm_len;
+
                hdr = netlink_current_frame(ring, NL_MMAP_STATUS_VALID);
                if (hdr == NULL) {
                        if (!(msg->msg_flags & MSG_DONTWAIT) &&
@@ -588,35 +580,23 @@ static int netlink_mmap_sendmsg(struct sock *sk, struct msghdr *msg,
                                schedule();
                        continue;
                }
-               if (hdr->nm_len > maxlen) {
+
+               nm_len = ACCESS_ONCE(hdr->nm_len);
+               if (nm_len > maxlen) {
                        err = -EINVAL;
                        goto out;
                }
 
-               netlink_frame_flush_dcache(hdr);
+               netlink_frame_flush_dcache(hdr, nm_len);
 
-               if (likely(dst_portid == 0 && dst_group == 0 && excl)) {
-                       skb = alloc_skb_head(GFP_KERNEL);
-                       if (skb == NULL) {
-                               err = -ENOBUFS;
-                               goto out;
-                       }
-                       sock_hold(sk);
-                       netlink_ring_setup_skb(skb, sk, ring, hdr);
-                       NETLINK_CB(skb).flags |= NETLINK_SKB_TX;
-                       __skb_put(skb, hdr->nm_len);
-                       netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
-                       atomic_inc(&ring->pending);
-               } else {
-                       skb = alloc_skb(hdr->nm_len, GFP_KERNEL);
-                       if (skb == NULL) {
-                               err = -ENOBUFS;
-                               goto out;
-                       }
-                       __skb_put(skb, hdr->nm_len);
-                       memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, hdr->nm_len);
-                       netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
+               skb = alloc_skb(nm_len, GFP_KERNEL);
+               if (skb == NULL) {
+                       err = -ENOBUFS;
+                       goto out;
                }
+               __skb_put(skb, nm_len);
+               memcpy(skb->data, (void *)hdr + NL_MMAP_HDRLEN, nm_len);
+               netlink_set_status(hdr, NL_MMAP_STATUS_UNUSED);
 
                netlink_increment_head(ring);
 
@@ -662,7 +642,7 @@ static void netlink_queue_mmaped_skb(struct sock *sk, struct sk_buff *skb)
        hdr->nm_pid     = NETLINK_CB(skb).creds.pid;
        hdr->nm_uid     = from_kuid(sk_user_ns(sk), NETLINK_CB(skb).creds.uid);
        hdr->nm_gid     = from_kgid(sk_user_ns(sk), NETLINK_CB(skb).creds.gid);
-       netlink_frame_flush_dcache(hdr);
+       netlink_frame_flush_dcache(hdr, hdr->nm_len);
        netlink_set_status(hdr, NL_MMAP_STATUS_VALID);
 
        NETLINK_CB(skb).flags |= NETLINK_SKB_DELIVERED;
index fd556ac05fdbc5c48c365005a0e7deddef79a0ac..e69a17da1e8483e10a9c7ef8b488454e3ae4ec32 100644 (file)
@@ -338,7 +338,7 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
 {
        struct ieee80211_sta_ht_cap *ht_cap;
        struct ieee80211_sta_vht_cap *vht_cap;
-       u32 width, control_freq;
+       u32 width, control_freq, cap;
 
        if (WARN_ON(!cfg80211_chandef_valid(chandef)))
                return false;
@@ -370,7 +370,8 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
                        return false;
                break;
        case NL80211_CHAN_WIDTH_80P80:
-               if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ))
+               cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+               if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
                        return false;
        case NL80211_CHAN_WIDTH_80:
                if (!vht_cap->vht_supported)
@@ -381,7 +382,9 @@ bool cfg80211_chandef_usable(struct wiphy *wiphy,
        case NL80211_CHAN_WIDTH_160:
                if (!vht_cap->vht_supported)
                        return false;
-               if (!(vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ))
+               cap = vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
+               if (cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+                   cap != IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ)
                        return false;
                prohibited_flags |= IEEE80211_CHAN_NO_160MHZ;
                width = 160;
index 43ab4b03d306119b5546fe3c8cdae4432cee28b1..a3aa0406f5b4559ac8fffdf3c96ec9916e80d95f 100644 (file)
@@ -2668,6 +2668,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
        if (!rdev->ops->get_key)
                return -EOPNOTSUPP;
 
+       if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+               return -ENOENT;
+
        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
@@ -2687,10 +2690,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
            nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
                goto nla_put_failure;
 
-       if (pairwise && mac_addr &&
-           !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
-               return -ENOENT;
-
        err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
                           get_key_callback);
 
@@ -2861,7 +2860,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
        wdev_lock(dev->ieee80211_ptr);
        err = nl80211_key_allowed(dev->ieee80211_ptr);
 
-       if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+       if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
            !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
                err = -ENOENT;
 
index ecbb44797e28226e85027c277435d053415aec8c..8900c5c5390de179c839d300e2a5ee67ca601b6d 100644 (file)
@@ -24,7 +24,7 @@ quiet_cmd_modules_install = INSTALL $@
 INSTALL_MOD_DIR ?= extra
 ext-mod-dir = $(INSTALL_MOD_DIR)$(subst $(patsubst %/,%,$(KBUILD_EXTMOD)),,$(@D))
 
-modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
+modinst_dir ?= $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
 
 $(modules):
        $(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
index 151739b4e4814aece1f0d8747994f3be1bd888bf..aead5de0574e33534e107f2c172fc1a3ebc5cb8d 100755 (executable)
@@ -262,7 +262,6 @@ if ($arch eq "x86_64") {
     # force flags for this arch
     $ld .= " -m shlelf_linux";
     $objcopy .= " -O elf32-sh-linux";
-    $cc .= " -m32";
 
 } elsif ($arch eq "powerpc") {
     $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
index d67c97bb10256d5dc5a9b74b3b8aaa37022f96b1..797818695c87a93855ad54a0b86dcbb194ab5a31 100644 (file)
@@ -201,12 +201,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
                if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                        atomic_dec(&key->user->nikeys);
 
-               key_user_put(key->user);
-
                /* now throw away the key memory */
                if (key->type->destroy)
                        key->type->destroy(key);
 
+               key_user_put(key->user);
+
                kfree(key->description);
 
 #ifdef KEY_DEBUGGING
index 2eca0e3f7b868a7fb3103b0f5fadd0c0c137d78b..fdc96f6e50f599a566fde1948dc08958d8cb4b2d 100644 (file)
@@ -147,6 +147,17 @@ static int selinux_secmark_enabled(void)
        return (atomic_read(&selinux_secmark_refcount) > 0);
 }
 
+static int selinux_netcache_avc_callback(u32 event)
+{
+       if (event == AVC_CALLBACK_RESET) {
+               sel_netif_flush();
+               sel_netnode_flush();
+               sel_netport_flush();
+               synchronize_net();
+       }
+       return 0;
+}
+
 /*
  * initialise the security for the init task
  */
@@ -419,15 +430,11 @@ static int sb_finish_set_opts(struct super_block *sb)
            sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
                sbsec->flags &= ~SE_SBLABELSUPP;
 
-       /* Special handling for sysfs. Is genfs but also has setxattr handler*/
-       if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
-               sbsec->flags |= SE_SBLABELSUPP;
-
-       /*
-        * Special handling for rootfs. Is genfs but supports
-        * setting SELinux context on in-core inodes.
-        */
-       if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+       /* Special handling. Is genfs but also has in-core setxattr handler*/
+       if (!strcmp(sb->s_type->name, "sysfs") ||
+           !strcmp(sb->s_type->name, "pstore") ||
+           !strcmp(sb->s_type->name, "debugfs") ||
+           !strcmp(sb->s_type->name, "rootfs"))
                sbsec->flags |= SE_SBLABELSUPP;
 
        /* Initialize the root inode. */
@@ -1901,12 +1908,10 @@ static int selinux_binder_transfer_file(struct task_struct *from, struct task_st
        struct inode *inode = file->f_path.dentry->d_inode;
        struct inode_security_struct *isec = inode->i_security;
        struct common_audit_data ad;
-       struct selinux_audit_data sad = {0,};
        int rc;
 
        ad.type = LSM_AUDIT_DATA_PATH;
        ad.u.path = file->f_path;
-       ad.selinux_audit_data = &sad;
 
        if (sid != fsec->sid) {
                rc = avc_has_perm(sid, fsec->sid,
@@ -3190,24 +3195,20 @@ error:
 
 static int selinux_mmap_addr(unsigned long addr)
 {
-       int rc = 0;
-       u32 sid = current_sid();
+       int rc;
+
+       /* do DAC check on address space usage */
+       rc = cap_mmap_addr(addr);
+       if (rc)
+               return rc;
 
-       /*
-        * notice that we are intentionally putting the SELinux check before
-        * the secondary cap_file_mmap check.  This is such a likely attempt
-        * at bad behaviour/exploit that we always want to get the AVC, even
-        * if DAC would have also denied the operation.
-        */
        if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+               u32 sid = current_sid();
                rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
                                  MEMPROTECT__MMAP_ZERO, NULL);
-               if (rc)
-                       return rc;
        }
 
-       /* do DAC check on address space usage */
-       return cap_mmap_addr(addr);
+       return rc;
 }
 
 static int selinux_mmap_file(struct file *file, unsigned long reqprot,
@@ -4222,15 +4223,15 @@ static int selinux_socket_unix_may_send(struct socket *sock,
                            &ad);
 }
 
-static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
-                                   u32 peer_sid,
+static int selinux_inet_sys_rcv_skb(struct net *ns, int ifindex,
+                                   char *addrp, u16 family, u32 peer_sid,
                                    struct common_audit_data *ad)
 {
        int err;
        u32 if_sid;
        u32 node_sid;
 
-       err = sel_netif_sid(ifindex, &if_sid);
+       err = sel_netif_sid(ns, ifindex, &if_sid);
        if (err)
                return err;
        err = avc_has_perm(peer_sid, if_sid,
@@ -4323,8 +4324,8 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
                err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
                if (err)
                        return err;
-               err = selinux_inet_sys_rcv_skb(skb->skb_iif, addrp, family,
-                                              peer_sid, &ad);
+               err = selinux_inet_sys_rcv_skb(sock_net(sk), skb->skb_iif,
+                                              addrp, family, peer_sid, &ad);
                if (err) {
                        selinux_netlbl_err(skb, err, 0);
                        return err;
@@ -4667,7 +4668,8 @@ out:
 
 #ifdef CONFIG_NETFILTER
 
-static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_forward(struct sk_buff *skb,
+                                      const struct net_device *indev,
                                       u16 family)
 {
        int err;
@@ -4693,14 +4695,14 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
 
        ad.type = LSM_AUDIT_DATA_NET;
        ad.u.net = &net;
-       ad.u.net->netif = ifindex;
+       ad.u.net->netif = indev->ifindex;
        ad.u.net->family = family;
        if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
                return NF_DROP;
 
        if (peerlbl_active) {
-               err = selinux_inet_sys_rcv_skb(ifindex, addrp, family,
-                                              peer_sid, &ad);
+               err = selinux_inet_sys_rcv_skb(dev_net(indev), indev->ifindex,
+                                              addrp, family, peer_sid, &ad);
                if (err) {
                        selinux_netlbl_err(skb, err, 1);
                        return NF_DROP;
@@ -4729,7 +4731,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
                                         const struct net_device *out,
                                         int (*okfn)(struct sk_buff *))
 {
-       return selinux_ip_forward(skb, in->ifindex, PF_INET);
+       return selinux_ip_forward(skb, in, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -4739,7 +4741,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
                                         const struct net_device *out,
                                         int (*okfn)(struct sk_buff *))
 {
-       return selinux_ip_forward(skb, in->ifindex, PF_INET6);
+       return selinux_ip_forward(skb, in, PF_INET6);
 }
 #endif /* IPV6 */
 
@@ -4827,11 +4829,13 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
        return NF_ACCEPT;
 }
 
-static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
+static unsigned int selinux_ip_postroute(struct sk_buff *skb,
+                                        const struct net_device *outdev,
                                         u16 family)
 {
        u32 secmark_perm;
        u32 peer_sid;
+       int ifindex = outdev->ifindex;
        struct sock *sk;
        struct common_audit_data ad;
        struct lsm_network_audit net = {0,};
@@ -4944,7 +4948,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
                u32 if_sid;
                u32 node_sid;
 
-               if (sel_netif_sid(ifindex, &if_sid))
+               if (sel_netif_sid(dev_net(outdev), ifindex, &if_sid))
                        return NF_DROP;
                if (avc_has_perm(peer_sid, if_sid,
                                 SECCLASS_NETIF, NETIF__EGRESS, &ad))
@@ -4966,7 +4970,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
                                           const struct net_device *out,
                                           int (*okfn)(struct sk_buff *))
 {
-       return selinux_ip_postroute(skb, out->ifindex, PF_INET);
+       return selinux_ip_postroute(skb, out, PF_INET);
 }
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -4976,7 +4980,7 @@ static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
                                           const struct net_device *out,
                                           int (*okfn)(struct sk_buff *))
 {
-       return selinux_ip_postroute(skb, out->ifindex, PF_INET6);
+       return selinux_ip_postroute(skb, out, PF_INET6);
 }
 #endif /* IPV6 */
 
@@ -5956,6 +5960,9 @@ static __init int selinux_init(void)
        if (register_security(&selinux_ops))
                panic("SELinux: Unable to register with kernel.\n");
 
+       if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET))
+               panic("SELinux: Unable to register AVC netcache callback\n");
+
        if (selinux_enforcing)
                printk(KERN_DEBUG "SELinux:  Starting in enforcing mode\n");
        else
index 43d507242b42f1ecdbe0f2971074d5d7b80165c5..c7214544409015a3fbc6cda9a7386e394f50be2b 100644 (file)
 #ifndef _SELINUX_NETIF_H_
 #define _SELINUX_NETIF_H_
 
-int sel_netif_sid(int ifindex, u32 *sid);
+#include <net/net_namespace.h>
+
+void sel_netif_flush(void);
+
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid);
 
 #endif /* _SELINUX_NETIF_H_ */
 
index df7a5ed6c6943e01008b010acb9782c2b727d824..937668dd30247731dbb416e162c105d29f6e3253 100644 (file)
@@ -27,6 +27,8 @@
 #ifndef _SELINUX_NETNODE_H
 #define _SELINUX_NETNODE_H
 
+void sel_netnode_flush(void);
+
 int sel_netnode_sid(void *addr, u16 family, u32 *sid);
 
 #endif
index 4d965b83d735b976029231274c2ace509b498e1f..d1ce896b2cb0f84bdef2e72ce282cc040f206e64 100644 (file)
@@ -26,6 +26,8 @@
 #ifndef _SELINUX_NETPORT_H
 #define _SELINUX_NETPORT_H
 
+void sel_netport_flush(void);
+
 int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid);
 
 #endif
index 6fd9dd256a6288af158b68591cb16496c13ef7bd..2a3be7adc071daacf7981a4829692298633fbe4c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/binfmts.h>
 #include <linux/in.h>
 #include <linux/spinlock.h>
+#include <net/net_namespace.h>
 #include "flask.h"
 #include "avc.h"
 
@@ -78,6 +79,7 @@ struct ipc_security_struct {
 };
 
 struct netif_security_struct {
+       struct net *ns;                 /* network namespace */
        int ifindex;                    /* device index */
        u32 sid;                        /* SID for this interface */
 };
index 6d3885165d143a27fb218e785aa4a13ac7937045..b214ef5f86e1077251e21df1f1acc42521988427 100644 (file)
 #define POLICYDB_VERSION_ROLETRANS     26
 #define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS   27
 #define POLICYDB_VERSION_DEFAULT_TYPE  28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES      29
 
 /* Range of policy versions we understand*/
 #define POLICYDB_VERSION_MIN   POLICYDB_VERSION_BASE
 #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
 #define POLICYDB_VERSION_MAX   CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
 #else
-#define POLICYDB_VERSION_MAX   POLICYDB_VERSION_DEFAULT_TYPE
+#define POLICYDB_VERSION_MAX   POLICYDB_VERSION_CONSTRAINT_NAMES
 #endif
 
 /* Mask for just the mount related flags */
index 47a49d1a6f6adf7b60873fcfb1249b1ee023f5b9..548651a7387623d886267dd7979841aa1f4b84ee 100644 (file)
@@ -45,6 +45,7 @@ static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
 
 /**
  * sel_netif_hashfn - Hashing function for the interface table
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -52,13 +53,14 @@ static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
  * bucket number for the given interface.
  *
  */
-static inline u32 sel_netif_hashfn(int ifindex)
+static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
 {
-       return (ifindex & (SEL_NETIF_HASH_SIZE - 1));
+       return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
 }
 
 /**
  * sel_netif_find - Search for an interface record
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -66,15 +68,15 @@ static inline u32 sel_netif_hashfn(int ifindex)
  * If an entry can not be found in the table return NULL.
  *
  */
-static inline struct sel_netif *sel_netif_find(int ifindex)
+static inline struct sel_netif *sel_netif_find(const struct net *ns,
+                                              int ifindex)
 {
-       int idx = sel_netif_hashfn(ifindex);
+       int idx = sel_netif_hashfn(ns, ifindex);
        struct sel_netif *netif;
 
        list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
-               /* all of the devices should normally fit in the hash, so we
-                * optimize for that case */
-               if (likely(netif->nsec.ifindex == ifindex))
+               if (net_eq(netif->nsec.ns, ns) &&
+                   netif->nsec.ifindex == ifindex)
                        return netif;
 
        return NULL;
@@ -96,7 +98,7 @@ static int sel_netif_insert(struct sel_netif *netif)
        if (sel_netif_total >= SEL_NETIF_HASH_MAX)
                return -ENOSPC;
 
-       idx = sel_netif_hashfn(netif->nsec.ifindex);
+       idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
        list_add_rcu(&netif->list, &sel_netif_hash[idx]);
        sel_netif_total++;
 
@@ -120,6 +122,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
 
 /**
  * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
+ * @ns: the network namespace
  * @ifindex: the network interface
  * @sid: interface SID
  *
@@ -130,7 +133,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
  * failure.
  *
  */
-static int sel_netif_sid_slow(int ifindex, u32 *sid)
+static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
 {
        int ret;
        struct sel_netif *netif;
@@ -140,7 +143,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
        /* NOTE: we always use init's network namespace since we don't
         * currently support containers */
 
-       dev = dev_get_by_index(&init_net, ifindex);
+       dev = dev_get_by_index(ns, ifindex);
        if (unlikely(dev == NULL)) {
                printk(KERN_WARNING
                       "SELinux: failure in sel_netif_sid_slow(),"
@@ -149,7 +152,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
        }
 
        spin_lock_bh(&sel_netif_lock);
-       netif = sel_netif_find(ifindex);
+       netif = sel_netif_find(ns, ifindex);
        if (netif != NULL) {
                *sid = netif->nsec.sid;
                ret = 0;
@@ -163,6 +166,7 @@ static int sel_netif_sid_slow(int ifindex, u32 *sid)
        ret = security_netif_sid(dev->name, &new->nsec.sid);
        if (ret != 0)
                goto out;
+       new->nsec.ns = ns;
        new->nsec.ifindex = ifindex;
        ret = sel_netif_insert(new);
        if (ret != 0)
@@ -184,6 +188,7 @@ out:
 
 /**
  * sel_netif_sid - Lookup the SID of a network interface
+ * @ns: the network namespace
  * @ifindex: the network interface
  * @sid: interface SID
  *
@@ -195,12 +200,12 @@ out:
  * on failure.
  *
  */
-int sel_netif_sid(int ifindex, u32 *sid)
+int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
 {
        struct sel_netif *netif;
 
        rcu_read_lock();
-       netif = sel_netif_find(ifindex);
+       netif = sel_netif_find(ns, ifindex);
        if (likely(netif != NULL)) {
                *sid = netif->nsec.sid;
                rcu_read_unlock();
@@ -208,11 +213,12 @@ int sel_netif_sid(int ifindex, u32 *sid)
        }
        rcu_read_unlock();
 
-       return sel_netif_sid_slow(ifindex, sid);
+       return sel_netif_sid_slow(ns, ifindex, sid);
 }
 
 /**
  * sel_netif_kill - Remove an entry from the network interface table
+ * @ns: the network namespace
  * @ifindex: the network interface
  *
  * Description:
@@ -220,13 +226,13 @@ int sel_netif_sid(int ifindex, u32 *sid)
  * table if it exists.
  *
  */
-static void sel_netif_kill(int ifindex)
+static void sel_netif_kill(const struct net *ns, int ifindex)
 {
        struct sel_netif *netif;
 
        rcu_read_lock();
        spin_lock_bh(&sel_netif_lock);
-       netif = sel_netif_find(ifindex);
+       netif = sel_netif_find(ns, ifindex);
        if (netif)
                sel_netif_destroy(netif);
        spin_unlock_bh(&sel_netif_lock);
@@ -240,7 +246,7 @@ static void sel_netif_kill(int ifindex)
  * Remove all entries from the network interface table.
  *
  */
-static void sel_netif_flush(void)
+void sel_netif_flush(void)
 {
        int idx;
        struct sel_netif *netif;
@@ -252,25 +258,13 @@ static void sel_netif_flush(void)
        spin_unlock_bh(&sel_netif_lock);
 }
 
-static int sel_netif_avc_callback(u32 event)
-{
-       if (event == AVC_CALLBACK_RESET) {
-               sel_netif_flush();
-               synchronize_net();
-       }
-       return 0;
-}
-
 static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
                                             unsigned long event, void *ptr)
 {
        struct net_device *dev = ptr;
 
-       if (dev_net(dev) != &init_net)
-               return NOTIFY_DONE;
-
        if (event == NETDEV_DOWN)
-               sel_netif_kill(dev->ifindex);
+               sel_netif_kill(dev_net(dev), dev->ifindex);
 
        return NOTIFY_DONE;
 }
@@ -291,10 +285,6 @@ static __init int sel_netif_init(void)
 
        register_netdevice_notifier(&sel_netif_netdev_notifier);
 
-       err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET);
-       if (err)
-               panic("avc_add_callback() failed, error %d\n", err);
-
        return err;
 }
 
index c5454c0477c346e4d814f5ff209feba86e5b86ad..bb8de9d9b429468337ca36cb766a237c8427fed5 100644 (file)
@@ -281,7 +281,7 @@ int sel_netnode_sid(void *addr, u16 family, u32 *sid)
  * Remove all entries from the network address table.
  *
  */
-static void sel_netnode_flush(void)
+void sel_netnode_flush(void)
 {
        unsigned int idx;
        struct sel_netnode *node, *node_tmp;
@@ -298,15 +298,6 @@ static void sel_netnode_flush(void)
        spin_unlock_bh(&sel_netnode_lock);
 }
 
-static int sel_netnode_avc_callback(u32 event)
-{
-       if (event == AVC_CALLBACK_RESET) {
-               sel_netnode_flush();
-               synchronize_net();
-       }
-       return 0;
-}
-
 static __init int sel_netnode_init(void)
 {
        int iter;
@@ -320,10 +311,6 @@ static __init int sel_netnode_init(void)
                sel_netnode_hash[iter].size = 0;
        }
 
-       ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET);
-       if (ret != 0)
-               panic("avc_add_callback() failed, error %d\n", ret);
-
        return ret;
 }
 
index d35379781c2c44ab188cb7aed071579da189bb53..73ac6784d091574fc756f01a569ad087a0611869 100644 (file)
@@ -217,7 +217,7 @@ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid)
  * Remove all entries from the network address table.
  *
  */
-static void sel_netport_flush(void)
+void sel_netport_flush(void)
 {
        unsigned int idx;
        struct sel_netport *port, *port_tmp;
@@ -234,15 +234,6 @@ static void sel_netport_flush(void)
        spin_unlock_bh(&sel_netport_lock);
 }
 
-static int sel_netport_avc_callback(u32 event)
-{
-       if (event == AVC_CALLBACK_RESET) {
-               sel_netport_flush();
-               synchronize_net();
-       }
-       return 0;
-}
-
 static __init int sel_netport_init(void)
 {
        int iter;
@@ -256,10 +247,6 @@ static __init int sel_netport_init(void)
                sel_netport_hash[iter].size = 0;
        }
 
-       ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET);
-       if (ret != 0)
-               panic("avc_add_callback() failed, error %d\n", ret);
-
        return ret;
 }
 
index 149dda731fd3ff9becf6368567866b105b26e66c..96fd947c494b64827f89e745393483669cc07d8e 100644 (file)
@@ -48,6 +48,7 @@ struct constraint_expr {
        u32 op;                 /* operator */
 
        struct ebitmap names;   /* names */
+       struct type_set *type_names;
 
        struct constraint_expr *next;   /* next expression */
 };
index bcdca73033f3da87bfd3035fd9e9e40d4ad2a25f..fc6950b8ec99d0bbb8dc38cba839805f5659cec4 100644 (file)
@@ -143,6 +143,11 @@ static struct policydb_compat_info policydb_compat[] = {
                .sym_num        = SYM_NUM,
                .ocon_num       = OCON_NUM,
        },
+       {
+               .version        = POLICYDB_VERSION_CONSTRAINT_NAMES,
+               .sym_num        = SYM_NUM,
+               .ocon_num       = OCON_NUM,
+       },
 };
 
 static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -613,6 +618,19 @@ static int common_destroy(void *key, void *datum, void *p)
        return 0;
 }
 
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+       if (expr) {
+               ebitmap_destroy(&expr->names);
+               if (expr->type_names) {
+                       ebitmap_destroy(&expr->type_names->types);
+                       ebitmap_destroy(&expr->type_names->negset);
+                       kfree(expr->type_names);
+               }
+               kfree(expr);
+       }
+}
+
 static int cls_destroy(void *key, void *datum, void *p)
 {
        struct class_datum *cladatum;
@@ -628,10 +646,9 @@ static int cls_destroy(void *key, void *datum, void *p)
                while (constraint) {
                        e = constraint->expr;
                        while (e) {
-                               ebitmap_destroy(&e->names);
                                etmp = e;
                                e = e->next;
-                               kfree(etmp);
+                               constraint_expr_destroy(etmp);
                        }
                        ctemp = constraint;
                        constraint = constraint->next;
@@ -642,16 +659,14 @@ static int cls_destroy(void *key, void *datum, void *p)
                while (constraint) {
                        e = constraint->expr;
                        while (e) {
-                               ebitmap_destroy(&e->names);
                                etmp = e;
                                e = e->next;
-                               kfree(etmp);
+                               constraint_expr_destroy(etmp);
                        }
                        ctemp = constraint;
                        constraint = constraint->next;
                        kfree(ctemp);
                }
-
                kfree(cladatum->comkey);
        }
        kfree(datum);
@@ -1156,8 +1171,34 @@ bad:
        return rc;
 }
 
-static int read_cons_helper(struct constraint_node **nodep, int ncons,
-                           int allowxtarget, void *fp)
+static void type_set_init(struct type_set *t)
+{
+       ebitmap_init(&t->types);
+       ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+       __le32 buf[1];
+       int rc;
+
+       if (ebitmap_read(&t->types, fp))
+               return -EINVAL;
+       if (ebitmap_read(&t->negset, fp))
+               return -EINVAL;
+
+       rc = next_entry(buf, fp, sizeof(u32));
+       if (rc < 0)
+               return -EINVAL;
+       t->flags = le32_to_cpu(buf[0]);
+
+       return 0;
+}
+
+
+static int read_cons_helper(struct policydb *p,
+                               struct constraint_node **nodep,
+                               int ncons, int allowxtarget, void *fp)
 {
        struct constraint_node *c, *lc;
        struct constraint_expr *e, *le;
@@ -1225,6 +1266,18 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
                                rc = ebitmap_read(&e->names, fp);
                                if (rc)
                                        return rc;
+                               if (p->policyvers >=
+                                       POLICYDB_VERSION_CONSTRAINT_NAMES) {
+                                               e->type_names = kzalloc(sizeof
+                                               (*e->type_names),
+                                               GFP_KERNEL);
+                                       if (!e->type_names)
+                                               return -ENOMEM;
+                                       type_set_init(e->type_names);
+                                       rc = type_set_read(e->type_names, fp);
+                                       if (rc)
+                                               return rc;
+                               }
                                break;
                        default:
                                return -EINVAL;
@@ -1301,7 +1354,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
                        goto bad;
        }
 
-       rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp);
+       rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
        if (rc)
                goto bad;
 
@@ -1311,7 +1364,8 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
                if (rc)
                        goto bad;
                ncons = le32_to_cpu(buf[0]);
-               rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
+               rc = read_cons_helper(p, &cladatum->validatetrans,
+                               ncons, 1, fp);
                if (rc)
                        goto bad;
        }
@@ -2762,6 +2816,24 @@ static int common_write(void *vkey, void *datum, void *ptr)
        return 0;
 }
 
+static int type_set_write(struct type_set *t, void *fp)
+{
+       int rc;
+       __le32 buf[1];
+
+       if (ebitmap_write(&t->types, fp))
+               return -EINVAL;
+       if (ebitmap_write(&t->negset, fp))
+               return -EINVAL;
+
+       buf[0] = cpu_to_le32(t->flags);
+       rc = put_entry(buf, sizeof(u32), 1, fp);
+       if (rc)
+               return -EINVAL;
+
+       return 0;
+}
+
 static int write_cons_helper(struct policydb *p, struct constraint_node *node,
                             void *fp)
 {
@@ -2793,6 +2865,12 @@ static int write_cons_helper(struct policydb *p, struct constraint_node *node,
                                rc = ebitmap_write(&e->names, fp);
                                if (rc)
                                        return rc;
+                               if (p->policyvers >=
+                                       POLICYDB_VERSION_CONSTRAINT_NAMES) {
+                                       rc = type_set_write(e->type_names, fp);
+                                       if (rc)
+                                               return rc;
+                               }
                                break;
                        default:
                                break;
index da637471d4ce4d08fdb0706136184a3a1dcf7e57..725d5945a97e961f6fb6fde88eab0bba5f638345 100644 (file)
@@ -153,6 +153,17 @@ struct cond_bool_datum {
 
 struct cond_node;
 
+/*
+ * type set preserves data needed to determine constraint info from
+ * policy source. This is not used by the kernel policy but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+       struct ebitmap types;
+       struct ebitmap negset;
+       u32 flags;
+};
+
 /*
  * The configuration data includes security contexts for
  * initial SIDs, unlabeled file systems, TCP and UDP port numbers,
index dbc55071679081568e6e3e89e256d78eccc74b55..f60d81497f282c2900682ef064bf833992cccbec 100644 (file)
@@ -81,36 +81,6 @@ struct snd_seq_dummy_port {
 
 static int my_client = -1;
 
-/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
-       struct snd_seq_dummy_port *p;
-       int i;
-       struct snd_seq_event ev;
-
-       p = private_data;
-       memset(&ev, 0, sizeof(ev));
-       if (p->duplex)
-               ev.source.port = p->connect;
-       else
-               ev.source.port = p->port;
-       ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
-       ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
-       for (i = 0; i < 16; i++) {
-               ev.data.control.channel = i;
-               ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-               ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
-               snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
-       }
-       return 0;
-}
-
 /*
  * event input callback - just redirect events to subscribers
  */
@@ -175,7 +145,6 @@ create_port(int idx, int type)
                | SNDRV_SEQ_PORT_TYPE_PORT;
        memset(&pcb, 0, sizeof(pcb));
        pcb.owner = THIS_MODULE;
-       pcb.unuse = dummy_unuse;
        pcb.event_input = dummy_input;
        pcb.private_free = dummy_free;
        pcb.private_data = rec;
index 5e5af898f7f837164c4b1df775fafda5bb1cfafa..412d90f7b256250ebee6b1b33f4d95150a3c01c3 100644 (file)
@@ -555,7 +555,7 @@ static struct {
        { 22050, 2 },
        { 24000, 2 },
        { 16000, 3 },
-       { 11250, 4 },
+       { 11025, 4 },
        { 12000, 4 },
        {  8000, 5 },
 };
index be4db47cb2d96a2936c2321f090644591554a080..061be0e5fa5ade0a2ee05ec5372b416bd73e42c5 100644 (file)
@@ -886,6 +886,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
        case USB_ID(0x046d, 0x0808):
        case USB_ID(0x046d, 0x0809):
+       case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
        case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */