Merge tag 'lsk-v3.10-android-15.01'
authorHuang, Tao <huangtao@rock-chips.com>
Tue, 10 Feb 2015 08:24:25 +0000 (16:24 +0800)
committerHuang, Tao <huangtao@rock-chips.com>
Tue, 10 Feb 2015 08:24:25 +0000 (16:24 +0800)
LSK Android 15.01 v3.10

Conflicts:
arch/arm/Kconfig.debug
arch/arm64/mm/init.c
drivers/Makefile
drivers/cpufreq/cpufreq_interactive.c

222 files changed:
Documentation/devicetree/bindings/arm/coresight.txt [new file with mode: 0644]
Documentation/ramoops.txt
Documentation/trace/coresight.txt [new file with mode: 0644]
MAINTAINERS
Makefile
arch/arm/Kconfig.debug
arch/arm/boot/dts/hip04.dtsi [new file with mode: 0644]
arch/arm/include/asm/hardware/coresight.h [deleted file]
arch/arm/include/asm/hardware/cp14.h [new file with mode: 0644]
arch/arm/kernel/Makefile
arch/arm/kernel/etm.c [deleted file]
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/sched_clock.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/emu.c [deleted file]
arch/arm/mm/dma-mapping.c
arch/arm64/Kconfig
arch/arm64/Kconfig.debug
arch/arm64/Makefile
arch/arm64/include/asm/cacheflush.h
arch/arm64/include/asm/cpu_ops.h
arch/arm64/include/asm/cpuidle.h [new file with mode: 0644]
arch/arm64/include/asm/cputype.h
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/percpu.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/proc-fns.h
arch/arm64/include/asm/psci.h
arch/arm64/include/asm/sparsemem.h
arch/arm64/include/asm/stackprotector.h [new file with mode: 0644]
arch/arm64/include/asm/string.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/tlbflush.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/cpu_ops.c
arch/arm64/kernel/cpuidle.c [new file with mode: 0644]
arch/arm64/kernel/debug-monitors.c
arch/arm64/kernel/head.S
arch/arm64/kernel/irq.c
arch/arm64/kernel/process.c
arch/arm64/kernel/psci.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/time.c
arch/arm64/kernel/traps.c
arch/arm64/kernel/vdso/Makefile
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/lib/Makefile
arch/arm64/lib/memcmp.S [new file with mode: 0644]
arch/arm64/lib/memcpy.S
arch/arm64/lib/memmove.S
arch/arm64/lib/memset.S
arch/arm64/lib/strcmp.S [new file with mode: 0644]
arch/arm64/lib/strlen.S [new file with mode: 0644]
arch/arm64/lib/strncmp.S [new file with mode: 0644]
arch/arm64/lib/strnlen.S [new file with mode: 0644]
arch/arm64/mm/Makefile
arch/arm64/mm/cache.S
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/init.c
arch/arm64/mm/ioremap.c
arch/arm64/mm/pageattr.c [new file with mode: 0644]
arch/arm64/mm/proc.S
arch/powerpc/kernel/vdso32/getcpu.S
arch/s390/kernel/compat_linux.c
arch/x86/include/asm/vsyscall.h
arch/x86/include/uapi/asm/ldt.h
arch/x86/include/uapi/asm/vmx.h
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/kvm.c
arch/x86/kernel/kvmclock.c
arch/x86/kernel/process_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/tls.c
arch/x86/kvm/vmx.c
arch/x86/vdso/vma.c
block/genhd.c
crypto/af_alg.c
drivers/Makefile
drivers/amba/bus.c
drivers/ata/ahci.c
drivers/ata/sata_fsl.c
drivers/base/bus.c
drivers/base/dma-mapping.c
drivers/coresight/Makefile [new file with mode: 0644]
drivers/coresight/coresight-etb10.c [new file with mode: 0644]
drivers/coresight/coresight-etm-cp14.c [new file with mode: 0644]
drivers/coresight/coresight-etm.h [new file with mode: 0644]
drivers/coresight/coresight-etm3x.c [new file with mode: 0644]
drivers/coresight/coresight-funnel.c [new file with mode: 0644]
drivers/coresight/coresight-priv.h [new file with mode: 0644]
drivers/coresight/coresight-replicator.c [new file with mode: 0644]
drivers/coresight/coresight-tmc.c [new file with mode: 0644]
drivers/coresight/coresight-tpiu.c [new file with mode: 0644]
drivers/coresight/coresight.c [new file with mode: 0644]
drivers/coresight/of_coresight.c [new file with mode: 0644]
drivers/cpufreq/cpufreq_interactive.c
drivers/cpuidle/Kconfig
drivers/cpuidle/Kconfig.arm64
drivers/cpuidle/Makefile
drivers/cpuidle/cpuidle-arm64.c
drivers/cpuidle/dt_idle_states.c [new file with mode: 0644]
drivers/cpuidle/dt_idle_states.h [new file with mode: 0644]
drivers/cpuidle/of_idle_states.c [deleted file]
drivers/cpuidle/of_idle_states.h [deleted file]
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/hid-kye.c
drivers/hid/hid-roccat-pyra.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/i2c/busses/i2c-davinci.c
drivers/i2c/busses/i2c-omap.c
drivers/iommu/intel-iommu.c
drivers/md/bitmap.c
drivers/md/dm-bufio.c
drivers/md/persistent-data/dm-space-map-metadata.c
drivers/media/i2c/smiapp/smiapp-core.c
drivers/mfd/tc6393xb.c
drivers/mmc/card/block.c
drivers/mtd/ubi/upd.c
drivers/mtd/ubi/wl.c
drivers/net/can/usb/peak_usb/pcan_usb_core.c
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/wireless/ath/ath5k/qcu.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/xen-netfront.c
drivers/of/base.c
drivers/pci/probe.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/tty/serial/samsung.c
drivers/usb/class/cdc-acm.c
drivers/video/adf/adf_fbdev.c
drivers/xen/swiotlb-xen.c
fs/btrfs/delayed-inode.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_map.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/main.c
fs/ext2/super.c
fs/fs-writeback.c
fs/isofs/rock.c
fs/namespace.c
fs/ncpfs/ioctl.c
fs/nfs/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/nilfs2/inode.c
fs/nilfs2/namei.c
fs/ocfs2/aops.c
fs/proc/base.c
fs/pstore/ram.c
fs/pstore/ram_core.c
fs/udf/symlink.c
include/asm-generic/dma-mapping-common.h
include/linux/amba/bus.h
include/linux/bitops.h
include/linux/clockchips.h
include/linux/coresight.h [new file with mode: 0644]
include/linux/cred.h
include/linux/device.h
include/linux/genalloc.h
include/linux/mm.h
include/linux/of_device.h
include/linux/of_graph.h [new file with mode: 0644]
include/linux/pstore_ram.h
include/linux/reboot.h
include/linux/sysfs.h
include/linux/user_namespace.h
kernel/events/core.c
kernel/groups.c
kernel/pid.c
kernel/time/Makefile
kernel/time/clockevents.c
kernel/time/tick-broadcast-hrtimer.c [new file with mode: 0644]
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/uid16.c
kernel/user.c
kernel/user_namespace.c
lib/genalloc.c
linaro/configs/booting-test.conf [new file with mode: 0644]
mm/backing-dev.c
mm/frontswap.c
mm/memory.c
mm/mmap.c
mm/vmscan.c
net/core/rtnetlink.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/mac80211/key.c
net/mac80211/rx.c
net/sctp/output.c
scripts/kernel-doc
security/keys/encrypted-keys/encrypted.c
sound/pci/hda/hda_codec.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/max98090.c
sound/soc/codecs/sigmadsp.c
sound/soc/dwc/designware_i2s.c
sound/usb/midi.c
sound/usb/mixer_maps.c
tools/perf/util/hist.h
tools/perf/util/session.c
tools/testing/selftests/mount/unprivileged-remount-test.c

diff --git a/Documentation/devicetree/bindings/arm/coresight.txt b/Documentation/devicetree/bindings/arm/coresight.txt
new file mode 100644 (file)
index 0000000..d790f49
--- /dev/null
@@ -0,0 +1,204 @@
+* CoreSight Components:
+
+CoreSight components are compliant with the ARM CoreSight architecture
+specification and can be connected in various topologies to suit a particular
+SoC's tracing needs. These trace components can generally be classified as
+sinks, links and sources. Trace data produced by one or more sources flows
+through the intermediate links connecting the source to the currently selected
+sink. Each CoreSight component device should use these properties to describe
+its hardware characteristics.
+
+* Required properties for all components *except* non-configurable replicators:
+
+       * compatible: These have to be supplemented with "arm,primecell" as
+         drivers are using the AMBA bus interface.  Possible values include:
+               - "arm,coresight-etb10", "arm,primecell";
+               - "arm,coresight-tpiu", "arm,primecell";
+               - "arm,coresight-tmc", "arm,primecell";
+               - "arm,coresight-funnel", "arm,primecell";
+               - "arm,coresight-etm3x", "arm,primecell";
+
+       * reg: physical base address and length of the register
+         set(s) of the component.
+
+       * clocks: the clock associated to this component.
+
+       * clock-names: the name of the clock as referenced by the code.
+         Since we are using the AMBA framework, the name should be
+         "apb_pclk".
+
+       * port or ports: The representation of the component's port
+         layout using the generic DT graph presentation found in
+         "bindings/graph.txt".
+
+* Required properties for devices that don't show up on the AMBA bus, such as
+  non-configurable replicators:
+
+       * compatible: Currently supported value is (note the absence of the
+         AMBA marker):
+               - "arm,coresight-replicator"
+
+       * id: a unique number that will identify this replicator.
+
+       * port or ports: same as above.
+
+* Optional properties for ETM/PTMs:
+
+       * arm,cp14: must be present if the system accesses ETM/PTM management
+         registers via co-processor 14.
+
+       * cpu: the cpu phandle this ETM/PTM is affined to. When omitted the
+         source is considered to belong to CPU0.
+
+* Optional property for TMC:
+
+       * arm,buffer-size: size of contiguous buffer space for TMC ETR
+        (embedded trace router)
+
+
+Example:
+
+1. Sinks
+       etb@20010000 {
+               compatible = "arm,coresight-etb10", "arm,primecell";
+               reg = <0 0x20010000 0 0x1000>;
+
+               coresight-default-sink;
+               clocks = <&oscclk6a>;
+               clock-names = "apb_pclk";
+               port {
+                       etb_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator_out_port0>;
+                       };
+               };
+       };
+
+       tpiu@20030000 {
+               compatible = "arm,coresight-tpiu", "arm,primecell";
+               reg = <0 0x20030000 0 0x1000>;
+
+               clocks = <&oscclk6a>;
+               clock-names = "apb_pclk";
+               port {
+                       tpiu_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator_out_port1>;
+                       };
+               };
+       };
+
+2. Links
+       replicator {
+               /* non-configurable replicators don't show up on the
+                * AMBA bus.  As such no need to add "arm,primecell".
+                */
+               compatible = "arm,coresight-replicator";
+               /* this will show up in debugfs as "0.replicator" */
+               id = <0>;
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* replicator output ports */
+                       port@0 {
+                               reg = <0>;
+                               replicator_out_port0: endpoint {
+                                       remote-endpoint = <&etb_in_port>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               replicator_out_port1: endpoint {
+                                       remote-endpoint = <&tpiu_in_port>;
+                               };
+                       };
+
+                       /* replicator input port */
+                       port@2 {
+                               reg = <0>;
+                               replicator_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&funnel_out_port0>;
+                               };
+                       };
+               };
+       };
+
+       funnel@20040000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0x20040000 0 0x1000>;
+
+               clocks = <&oscclk6a>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel_out_port0: endpoint {
+                                       remote-endpoint =
+                                                       <&replicator_in_port0>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm0_out_port>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm1_out_port>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&etm0_out_port>;
+                               };
+                       };
+
+               };
+       };
+
+3. Sources
+       ptm@2201c000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0x2201c000 0 0x1000>;
+
+               cpu = <&cpu0>;
+               clocks = <&oscclk6a>;
+               clock-names = "apb_pclk";
+               port {
+                       ptm0_out_port: endpoint {
+                               remote-endpoint = <&funnel_in_port0>;
+                       };
+               };
+       };
+
+       ptm@2201d000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0x2201d000 0 0x1000>;
+
+               cpu = <&cpu1>;
+               clocks = <&oscclk6a>;
+               clock-names = "apb_pclk";
+               port {
+                       ptm1_out_port: endpoint {
+                               remote-endpoint = <&funnel_in_port1>;
+                       };
+               };
+       };
index 69b3cac4749d76811be9c39a6e3605023a7639fd..5d8675615e59c40c6564710a0a9b73ae060e2a00 100644 (file)
@@ -14,11 +14,19 @@ survive after a restart.
 
 1. Ramoops concepts
 
-Ramoops uses a predefined memory area to store the dump. The start and size of
-the memory area are set using two variables:
+Ramoops uses a predefined memory area to store the dump. The start and size
+and type of the memory area are set using three variables:
   * "mem_address" for the start
   * "mem_size" for the size. The memory size will be rounded down to a
   power of two.
+  * "mem_type" to specify the memory type (default is pgprot_writecombine).
+
+Typically the default value of mem_type=0 should be used as that sets the pstore
+mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
+pgprot_noncached, which only works on some platforms. This is because pstore
+depends on atomic operations. At least on ARM, pgprot_noncached causes the
+memory to be mapped strongly ordered, and atomic operations on strongly ordered
+memory are implementation defined, and won't work on many ARMs such as omaps.
 
 The memory area is divided into "record_size" chunks (also rounded down to
 power of two) and each oops/panic writes a "record_size" chunk of
@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
 static struct ramoops_platform_data ramoops_data = {
         .mem_size               = <...>,
         .mem_address            = <...>,
+        .mem_type               = <...>,
         .record_size            = <...>,
         .dump_oops              = <...>,
         .ecc                    = <...>,
diff --git a/Documentation/trace/coresight.txt b/Documentation/trace/coresight.txt
new file mode 100644 (file)
index 0000000..bba7dbf
--- /dev/null
@@ -0,0 +1,299 @@
+               Coresight - HW Assisted Tracing on ARM
+               ======================================
+
+   Author:   Mathieu Poirier <mathieu.poirier@linaro.org>
+   Date:     September 11th, 2014
+
+Introduction
+------------
+
+Coresight is an umbrella of technologies allowing for the debugging of ARM
+based SoC.  It includes solutions for JTAG and HW assisted tracing.  This
+document is concerned with the latter.
+
+HW assisted tracing is becoming increasingly useful when dealing with systems
+that have many SoCs and other components like GPU and DMA engines.  ARM has
+developed a HW assisted tracing solution by means of different components, each
+being added to a design at synthesis time to cater to specific tracing needs.
+Components are generally categorised as sources, links and sinks and are
+(usually) discovered using the AMBA bus.
+
+"Sources" generate a compressed stream representing the processor instruction
+path based on tracing scenarios as configured by users.  From there the stream
+flows through the coresight system (via ATB bus) using links that are connecting
+the emanating source to a sink(s).  Sinks serve as endpoints to the coresight
+implementation, either storing the compressed stream in a memory buffer or
+creating an interface to the outside world where data can be transferred to a
+host without fear of filling up the onboard coresight memory buffer.
+
+A typical coresight system would look like this:
+
+  *****************************************************************
+ **************************** AMBA AXI  ****************************===||
+  *****************************************************************    ||
+        ^                    ^                            |            ||
+        |                    |                            *            **
+     0000000    :::::     0000000    :::::    :::::    @@@@@@@    ||||||||||||
+     0 CPU 0<-->: C :     0 CPU 0<-->: C :    : C :    @ STM @    || System ||
+  |->0000000    : T :  |->0000000    : T :    : T :<--->@@@@@     || Memory ||
+  |  #######<-->: I :  |  #######<-->: I :    : I :      @@@<-|   ||||||||||||
+  |  # ETM #    :::::  |  # PTM #    :::::    :::::       @   |
+  |   #####      ^ ^   |   #####      ^ !      ^ !        .   |   |||||||||
+  | |->###       | !   | |->###       | !      | !        .   |   || DAP ||
+  | |   #        | !   | |   #        | !      | !        .   |   |||||||||
+  | |   .        | !   | |   .        | !      | !        .   |      |  |
+  | |   .        | !   | |   .        | !      | !        .   |      |  *
+  | |   .        | !   | |   .        | !      | !        .   |      | SWD/
+  | |   .        | !   | |   .        | !      | !        .   |      | JTAG
+  *****************************************************************<-|
+ *************************** AMBA Debug ABP ************************
+  *****************************************************************
+   |    .          !         .          !        !        .    |
+   |    .          *         .          *        *        .    |
+  *****************************************************************
+ ******************** Cross Trigger Matrix (CTM) *******************
+  *****************************************************************
+   |    .     ^              .                            .    |
+   |    *     !              *                            *    |
+  *****************************************************************
+ ****************** AMBA Advanced Trace Bus (ATB) ******************
+  *****************************************************************
+   |          !                        ===============         |
+   |          *                         ===== F =====<---------|
+   |   :::::::::                         ==== U ====
+   |-->:: CTI ::<!!                       === N ===
+   |   :::::::::  !                        == N ==
+   |    ^         *                        == E ==
+   |    !  &&&&&&&&&       IIIIIII         == L ==
+   |------>&& ETB &&<......II     I        =======
+   |    !  &&&&&&&&&       II     I           .
+   |    !                    I     I          .
+   |    !                    I REP I<..........
+   |    !                    I     I
+   |    !!>&&&&&&&&&       II     I           *Source: ARM ltd.
+   |------>& TPIU  &<......II    I            DAP = Debug Access Port
+           &&&&&&&&&       IIIIIII            ETM = Embedded Trace Macrocell
+               ;                              PTM = Program Trace Macrocell
+               ;                              CTI = Cross Trigger Interface
+               *                              ETB = Embedded Trace Buffer
+          To trace port                       TPIU= Trace Port Interface Unit
+                                              SWD = Serial Wire Debug
+
+While on-target configuration of the components is done via the APB bus,
+all trace data are carried out-of-band on the ATB bus.  The CTM provides
+a way to aggregate and distribute signals between CoreSight components.
+
+The coresight framework provides a central point to represent, configure and
+manage coresight devices on a platform.  This first implementation centers on
+the basic tracing functionality, enabling components such as ETM/PTM, funnel,
+replicator, TMC, TPIU and ETB.  Future work will enable more
+intricate IP blocks such as STM and CTI.
+
+
+Acronyms and Classification
+---------------------------
+
+Acronyms:
+
+PTM:     Program Trace Macrocell
+ETM:     Embedded Trace Macrocell
+STM:     System trace Macrocell
+ETB:     Embedded Trace Buffer
+ITM:     Instrumentation Trace Macrocell
+TPIU:    Trace Port Interface Unit
+TMC-ETR: Trace Memory Controller, configured as Embedded Trace Router
+TMC-ETF: Trace Memory Controller, configured as Embedded Trace FIFO
+CTI:     Cross Trigger Interface
+
+Classification:
+
+Source:
+   ETMv3.x ETMv4, PTMv1.0, PTMv1.1, STM, STM500, ITM
+Link:
+   Funnel, replicator (intelligent or not), TMC-ETR
+Sinks:
+   ETBv1.0, ETB1.1, TPIU, TMC-ETF
+Misc:
+   CTI
+
+
+Device Tree Bindings
+----------------------
+
+See Documentation/devicetree/bindings/arm/coresight.txt for details.
+
+As of this writing drivers for ITM, STMs and CTIs are not provided but are
+expected to be added as the solution matures.
+
+
+Framework and implementation
+----------------------------
+
+The coresight framework provides a central point to represent, configure and
+manage coresight devices on a platform.  Any coresight compliant device can
+register with the framework for as long as they use the right APIs:
+
+struct coresight_device *coresight_register(struct coresight_desc *desc);
+void coresight_unregister(struct coresight_device *csdev);
+
+The registering function takes a "struct coresight_device *csdev" and
+registers the device with the core framework.  The unregister function takes
+a reference to a "struct coresight_device", obtained at registration time.
+
+If everything goes well during the registration process the new devices will
+show up under /sys/bus/coresight/devices, as shown here for a TC2 platform:
+
+root:~# ls /sys/bus/coresight/devices/
+replicator  20030000.tpiu    2201c000.ptm  2203c000.etm  2203e000.etm
+20010000.etb         20040000.funnel  2201d000.ptm  2203d000.etm
+root:~#
+
+The functions take a "struct coresight_device", which looks like this:
+
+struct coresight_desc {
+        enum coresight_dev_type type;
+        struct coresight_dev_subtype subtype;
+        const struct coresight_ops *ops;
+        struct coresight_platform_data *pdata;
+        struct device *dev;
+        const struct attribute_group **groups;
+};
+
+
+The "coresight_dev_type" identifies what the device is, i.e., source, link or
+sink while the "coresight_dev_subtype" will characterise that type further.
+
+The "struct coresight_ops" is mandatory and will tell the framework how to
+perform base operations related to the components, each component having
+a different set of requirement.  For that "struct coresight_ops_sink",
+"struct coresight_ops_link" and "struct coresight_ops_source" have been
+provided.
+
+The next field, "struct coresight_platform_data *pdata" is acquired by calling
+"of_get_coresight_platform_data()", as part of the driver's _probe routine and
+"struct device *dev" gets the device reference embedded in the "amba_device":
+
+static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ ...
+ ...
+ drvdata->dev = &adev->dev;
+ ...
+}
+
+Specific class of device (source, link, or sink) have generic operations
+that can be performed on them (see "struct coresight_ops").  The
+"**groups" is a list of sysfs entries pertaining to operations
+specific to that component only.  "Implementation defined" customisations are
+expected to be accessed and controlled using those entries.
+
+Last but not least, "struct module *owner" is expected to be set to reflect
+the information carried in "THIS_MODULE".
+
+How to use
+----------
+
+Before trace collection can start, a coresight sink needs to be identified.
+There is no limit on the amount of sinks (nor sources) that can be enabled at
+any given moment.  As a generic operation, all device pertaining to the sink
+class will have an "active" entry in sysfs:
+
+root:/sys/bus/coresight/devices# ls
+replicator  20030000.tpiu    2201c000.ptm  2203c000.etm  2203e000.etm
+20010000.etb         20040000.funnel  2201d000.ptm  2203d000.etm
+root:/sys/bus/coresight/devices# ls 20010000.etb
+enable_sink  status  trigger_cntr
+root:/sys/bus/coresight/devices# echo 1 > 20010000.etb/enable_sink
+root:/sys/bus/coresight/devices# cat 20010000.etb/enable_sink
+1
+root:/sys/bus/coresight/devices#
+
+At boot time the current etm3x driver will configure the first address
+comparator with "_stext" and "_etext", essentially tracing any instruction
+that falls within that range.  As such "enabling" a source will immediately
+trigger a trace capture:
+
+root:/sys/bus/coresight/devices# echo 1 > 2201c000.ptm/enable_source
+root:/sys/bus/coresight/devices# cat 2201c000.ptm/enable_source
+1
+root:/sys/bus/coresight/devices# cat 20010000.etb/status
+Depth:          0x2000
+Status:         0x1
+RAM read ptr:   0x0
+RAM wrt ptr:    0x19d3   <----- The write pointer is moving
+Trigger cnt:    0x0
+Control:        0x1
+Flush status:   0x0
+Flush ctrl:     0x2001
+root:/sys/bus/coresight/devices#
+
+Trace collection is stopped the same way:
+
+root:/sys/bus/coresight/devices# echo 0 > 2201c000.ptm/enable_source
+root:/sys/bus/coresight/devices#
+
+The content of the ETB buffer can be harvested directly from /dev:
+
+root:/sys/bus/coresight/devices# dd if=/dev/20010000.etb \
+of=~/cstrace.bin
+
+64+0 records in
+64+0 records out
+32768 bytes (33 kB) copied, 0.00125258 s, 26.2 MB/s
+root:/sys/bus/coresight/devices#
+
+The file cstrace.bin can be decompressed using "ptm2human", DS-5 or Trace32.
+
+Following is a DS-5 output of an experimental loop that increments a variable up
+to a certain value.  The example is simple and yet provides a glimpse of the
+wealth of possibilities that coresight provides.
+
+Info                                    Tracing enabled
+Instruction     106378866       0x8026B53C      E52DE004        false   PUSH     {lr}
+Instruction     0       0x8026B540      E24DD00C        false   SUB      sp,sp,#0xc
+Instruction     0       0x8026B544      E3A03000        false   MOV      r3,#0
+Instruction     0       0x8026B548      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Timestamp                                       Timestamp: 17106715833
+Instruction     319     0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Instruction     9       0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Instruction     7       0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Instruction     7       0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Instruction     10      0x8026B54C      E59D3004        false   LDR      r3,[sp,#4]
+Instruction     0       0x8026B550      E3530004        false   CMP      r3,#4
+Instruction     0       0x8026B554      E2833001        false   ADD      r3,r3,#1
+Instruction     0       0x8026B558      E58D3004        false   STR      r3,[sp,#4]
+Instruction     0       0x8026B55C      DAFFFFFA        true    BLE      {pc}-0x10 ; 0x8026b54c
+Instruction     6       0x8026B560      EE1D3F30        false   MRC      p15,#0x0,r3,c13,c0,#1
+Instruction     0       0x8026B564      E1A0100D        false   MOV      r1,sp
+Instruction     0       0x8026B568      E3C12D7F        false   BIC      r2,r1,#0x1fc0
+Instruction     0       0x8026B56C      E3C2203F        false   BIC      r2,r2,#0x3f
+Instruction     0       0x8026B570      E59D1004        false   LDR      r1,[sp,#4]
+Instruction     0       0x8026B574      E59F0010        false   LDR      r0,[pc,#16] ; [0x8026B58C] = 0x80550368
+Instruction     0       0x8026B578      E592200C        false   LDR      r2,[r2,#0xc]
+Instruction     0       0x8026B57C      E59221D0        false   LDR      r2,[r2,#0x1d0]
+Instruction     0       0x8026B580      EB07A4CF        true    BL       {pc}+0x1e9344 ; 0x804548c4
+Info                                    Tracing enabled
+Instruction     13570831        0x8026B584      E28DD00C        false   ADD      sp,sp,#0xc
+Instruction     0       0x8026B588      E8BD8000        true    LDM      sp!,{pc}
+Timestamp                                       Timestamp: 17107041535
index 7433b84439f3bde2cdb623b7d8a505cb49c8c0b1..8a8a48b39cd823ce44a2c8143e4892c50b3a5fe4 100644 (file)
@@ -783,6 +783,14 @@ M: Hubert Feurstein <hubert.feurstein@contec.at>
 S:     Maintained
 F:     arch/arm/mach-ep93xx/micro9.c
 
+ARM/CORESIGHT FRAMEWORK AND DRIVERS
+M:     Mathieu Poirier <mathieu.poirier@linaro.org>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     drivers/coresight/*
+F:     Documentation/trace/coresight.txt
+F:     Documentation/devicetree/bindings/arm/coresight.txt
+
 ARM/CORGI MACHINE SUPPORT
 M:     Richard Purdie <rpurdie@rpsys.net>
 S:     Maintained
index 89c72066a698a65e13bc4057697dd44c4517ac53..78c59160e678d49277a0da016b0efa6f63f54153 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 62
+SUBLEVEL = 65
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
index 31508289061dee0fba9468a95346a7ee8f6d8dcc..11acee61fa5f09f16b7d0073924478add41b6c0a 100644 (file)
@@ -706,14 +706,6 @@ config EARLY_PRINTK_DIRECT
          kernel low-level debugging functions and EARLY_PRINTK is
          not early enough.
 
-config OC_ETM
-       bool "On-chip ETM and ETB"
-       depends on ARM_AMBA
-       help
-         Enables the on-chip embedded trace macrocell and embedded trace
-         buffer driver that will allow you to collect traces of the
-         kernel code.
-
 config ARM_KPROBES_TEST
        tristate "Kprobes test module"
        depends on KPROBES && MODULES
@@ -729,4 +721,70 @@ config PID_IN_CONTEXTIDR
          additional instructions during context switch. Say Y here only if you
          are planning to use hardware trace tools with this kernel.
 
+config DEBUG_SET_MODULE_RONX
+       bool "Set loadable kernel module data as NX and text as RO"
+       depends on MODULES
+       ---help---
+         This option helps catch unintended modifications to loadable
+         kernel module's text and read-only data. It also prevents execution
+         of module data. Such protection may interfere with run-time code
+         patching and dynamic kernel tracing - and it might also protect
+         against certain classes of kernel exploits.
+         If in doubt, say "N".
+
+menuconfig CORESIGHT
+       bool "CoreSight Tracing Support"
+       select ARM_AMBA
+       help
+         This framework provides a kernel interface for the CoreSight debug
+         and trace drivers to register themselves with. It's intended to build
+         a topological view of the CoreSight components based on a DT
+         specification and configure the right series of components when a
+         trace source gets enabled.
+
+if CORESIGHT
+config CORESIGHT_LINKS_AND_SINKS
+       bool "CoreSight Link and Sink drivers"
+       help
+         This enables support for CoreSight link and sink drivers that are
+         responsible for transporting and collecting the trace data
+         respectively.  Links and sinks are dynamically aggregated with a trace
+         entity at run time to form a complete trace path.
+
+config CORESIGHT_LINK_AND_SINK_TMC
+       bool "Coresight generic TMC driver"
+       depends on CORESIGHT_LINKS_AND_SINKS
+       help
+         This enables support for the Trace Memory Controller driver.  Depending
+         on its configuration the device can act as a link (embedded trace router
+         - ETR) or sink (embedded trace FIFO).  The driver complies with the
+         generic implementation of the component without special enhancement or
+         added features.
+
+config CORESIGHT_SINK_TPIU
+       bool "Coresight generic TPIU driver"
+       depends on CORESIGHT_LINKS_AND_SINKS
+       help
+         This enables support for the Trace Port Interface Unit driver, responsible
+         for bridging the gap between the on-chip coresight components and a trace
+         port collection engine, typically connected to an external host for use
+         case capturing more traces than the on-board coresight memory can handle.
+
+config CORESIGHT_SINK_ETBV10
+       bool "Coresight ETBv1.0 driver"
+       depends on CORESIGHT_LINKS_AND_SINKS
+       help
+         This enables support for the Embedded Trace Buffer version 1.0 driver
+         that complies with the generic implementation of the component without
+         special enhancement or added features.
+
+config CORESIGHT_SOURCE_ETM3X
+       bool "CoreSight Embedded Trace Macrocell 3.x driver"
+       select CORESIGHT_LINKS_AND_SINKS
+       help
+         This driver provides support for processor ETM3.x and PTM1.x modules,
+         which allow tracing the instructions that a processor is executing.
+         This is primarily useful for instruction level tracing.  Depending on
+         the ETM version, data tracing may also be available.
+endif
 endmenu
diff --git a/arch/arm/boot/dts/hip04.dtsi b/arch/arm/boot/dts/hip04.dtsi
new file mode 100644 (file)
index 0000000..2388145
--- /dev/null
@@ -0,0 +1,984 @@
+/*
+ * Hisilicon Ltd. HiP04 SoC
+ *
+ * Copyright (C) 2013-2014 Hisilicon Ltd.
+ * Copyright (C) 2013-2014 Linaro Ltd.
+ *
+ * Author: Haojian Zhuang <haojian.zhuang@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/ {
+       /* memory bus is 64-bit */
+       #address-cells = <2>;
+       #size-cells = <2>;
+
+       aliases {
+               serial0 = &uart0;
+       };
+
+       bootwrapper {
+               compatible = "hisilicon,hip04-bootwrapper";
+               boot-method = <0x10c00000 0x10000>, <0xe0000100 0x1000>;
+       };
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu-map {
+                       cluster0 {
+                               core0 {
+                                       cpu = <&CPU0>;
+                               };
+                               core1 {
+                                       cpu = <&CPU1>;
+                               };
+                               core2 {
+                                       cpu = <&CPU2>;
+                               };
+                               core3 {
+                                       cpu = <&CPU3>;
+                               };
+                       };
+                       cluster1 {
+                               core0 {
+                                       cpu = <&CPU4>;
+                               };
+                               core1 {
+                                       cpu = <&CPU5>;
+                               };
+                               core2 {
+                                       cpu = <&CPU6>;
+                               };
+                               core3 {
+                                       cpu = <&CPU7>;
+                               };
+                       };
+                       cluster2 {
+                               core0 {
+                                       cpu = <&CPU8>;
+                               };
+                               core1 {
+                                       cpu = <&CPU9>;
+                               };
+                               core2 {
+                                       cpu = <&CPU10>;
+                               };
+                               core3 {
+                                       cpu = <&CPU11>;
+                               };
+                       };
+                       cluster3 {
+                               core0 {
+                                       cpu = <&CPU12>;
+                               };
+                               core1 {
+                                       cpu = <&CPU13>;
+                               };
+                               core2 {
+                                       cpu = <&CPU14>;
+                               };
+                               core3 {
+                                       cpu = <&CPU15>;
+                               };
+                       };
+               };
+               CPU0: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0>;
+               };
+               CPU1: cpu@1 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <1>;
+               };
+               CPU2: cpu@2 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <2>;
+               };
+               CPU3: cpu@3 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <3>;
+               };
+               CPU4: cpu@100 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x100>;
+               };
+               CPU5: cpu@101 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x101>;
+               };
+               CPU6: cpu@102 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x102>;
+               };
+               CPU7: cpu@103 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x103>;
+               };
+               CPU8: cpu@200 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x200>;
+               };
+               CPU9: cpu@201 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x201>;
+               };
+               CPU10: cpu@202 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x202>;
+               };
+               CPU11: cpu@203 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x203>;
+               };
+               CPU12: cpu@300 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x300>;
+               };
+               CPU13: cpu@301 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x301>;
+               };
+               CPU14: cpu@302 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x302>;
+               };
+               CPU15: cpu@303 {
+                       device_type = "cpu";
+                       compatible = "arm,cortex-a15";
+                       reg = <0x303>;
+               };
+       };
+
+       timer {
+               compatible = "arm,armv7-timer";
+               interrupt-parent = <&gic>;
+               interrupts = <1 13 0xf08>,
+                            <1 14 0xf08>,
+                            <1 11 0xf08>,
+                            <1 10 0xf08>;
+       };
+
+       clk_50m: clk_50m {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <50000000>;
+       };
+
+       clk_168m: clk_168m {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <168000000>;
+       };
+
+       clk_375m: clk_375m {
+               #clock-cells = <0>;
+               compatible = "fixed-clock";
+               clock-frequency = <375000000>;
+       };
+
+       soc {
+               /* It's a 32-bit SoC. */
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               interrupt-parent = <&gic>;
+               ranges = <0 0 0xe0000000 0x10000000>;
+
+               gic: interrupt-controller@c01000 {
+                       compatible = "hisilicon,hip04-intc";
+                       #interrupt-cells = <3>;
+                       #address-cells = <0>;
+                       interrupt-controller;
+                       interrupts = <1 9 0xf04>;
+
+                       reg = <0xc01000 0x1000>, <0xc02000 0x1000>,
+                             <0xc04000 0x2000>, <0xc06000 0x2000>;
+               };
+
+               sysctrl: sysctrl {
+                       compatible = "hisilicon,sysctrl";
+                       reg = <0x3e00000 0x00100000>;
+               };
+
+               fabric: fabric {
+                       compatible = "hisilicon,hip04-fabric";
+                       reg = <0x302a000 0x1000>;
+               };
+
+               dual_timer0: dual_timer@3000000 {
+                       compatible = "arm,sp804", "arm,primecell";
+                       reg = <0x3000000 0x1000>;
+                       interrupts = <0 224 4>;
+                       clocks = <&clk_50m>, <&clk_50m>;
+                       clock-names = "apb_pclk";
+               };
+
+               arm-pmu {
+                       compatible = "arm,cortex-a15-pmu";
+                       interrupts = <0 64 4>,
+                                    <0 65 4>,
+                                    <0 66 4>,
+                                    <0 67 4>,
+                                    <0 68 4>,
+                                    <0 69 4>,
+                                    <0 70 4>,
+                                    <0 71 4>,
+                                    <0 72 4>,
+                                    <0 73 4>,
+                                    <0 74 4>,
+                                    <0 75 4>,
+                                    <0 76 4>,
+                                    <0 77 4>,
+                                    <0 78 4>,
+                                    <0 79 4>;
+               };
+
+               uart0: uart@4007000 {
+                       compatible = "snps,dw-apb-uart";
+                       reg = <0x4007000 0x1000>;
+                       interrupts = <0 381 4>;
+                       clocks = <&clk_168m>;
+                       clock-names = "uartclk";
+                       reg-shift = <2>;
+                       status = "disabled";
+               };
+
+               sata0: sata@a000000 {
+                       compatible = "hisilicon,hisi-ahci";
+                       reg = <0xa000000 0x1000000>;
+                       interrupts = <0 372 4>;
+               };
+
+       };
+
+       etb@0,e3c42000 {
+               compatible = "arm,coresight-etb10", "arm,primecell";
+               reg = <0 0xe3c42000 0 0x1000>;
+
+               coresight-default-sink;
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               port {
+                       etb0_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator0_out_port0>;
+                       };
+               };
+       };
+
+       etb@0,e3c82000 {
+               compatible = "arm,coresight-etb10", "arm,primecell";
+               reg = <0 0xe3c82000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               port {
+                       etb1_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator1_out_port0>;
+                       };
+               };
+       };
+
+       etb@0,e3cc2000 {
+               compatible = "arm,coresight-etb10", "arm,primecell";
+               reg = <0 0xe3cc2000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               port {
+                       etb2_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator2_out_port0>;
+                       };
+               };
+       };
+
+       etb@0,e3d02000 {
+               compatible = "arm,coresight-etb10", "arm,primecell";
+               reg = <0 0xe3d02000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               port {
+                       etb3_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&replicator3_out_port0>;
+                       };
+               };
+       };
+
+       tpiu@0,e3c05000 {
+               compatible = "arm,coresight-tpiu", "arm,primecell";
+               reg = <0 0xe3c05000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               port {
+                       tpiu_in_port: endpoint@0 {
+                               slave-mode;
+                               remote-endpoint = <&funnel4_out_port0>;
+                       };
+               };
+       };
+
+       replicator0 {
+               /* non-configurable replicators don't show up on the
+                * AMBA bus.  As such no need to add "arm,primecell".
+                */
+               compatible = "arm,coresight-replicator";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* replicator output ports */
+                       port@0 {
+                               reg = <0>;
+                               replicator0_out_port0: endpoint {
+                                       remote-endpoint = <&etb0_in_port>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               replicator0_out_port1: endpoint {
+                                       remote-endpoint = <&funnel4_in_port0>;
+                               };
+                       };
+
+                       /* replicator input port */
+                       port@2 {
+                               reg = <0>;
+                               replicator0_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&funnel0_out_port0>;
+                               };
+                       };
+               };
+       };
+
+       replicator1 {
+               /* non-configurable replicators don't show up on the
+                * AMBA bus.  As such no need to add "arm,primecell".
+                */
+               compatible = "arm,coresight-replicator";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* replicator output ports */
+                       port@0 {
+                               reg = <0>;
+                               replicator1_out_port0: endpoint {
+                                       remote-endpoint = <&etb1_in_port>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               replicator1_out_port1: endpoint {
+                                       remote-endpoint = <&funnel4_in_port1>;
+                               };
+                       };
+
+                       /* replicator input port */
+                       port@2 {
+                               reg = <0>;
+                               replicator1_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&funnel1_out_port0>;
+                               };
+                       };
+               };
+       };
+
+       replicator2 {
+               /* non-configurable replicators don't show up on the
+                * AMBA bus.  As such no need to add "arm,primecell".
+                */
+               compatible = "arm,coresight-replicator";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* replicator output ports */
+                       port@0 {
+                               reg = <0>;
+                               replicator2_out_port0: endpoint {
+                                       remote-endpoint = <&etb2_in_port>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               replicator2_out_port1: endpoint {
+                                       remote-endpoint = <&funnel4_in_port2>;
+                               };
+                       };
+
+                       /* replicator input port */
+                       port@2 {
+                               reg = <0>;
+                               replicator2_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&funnel2_out_port0>;
+                               };
+                       };
+               };
+       };
+
+       replicator3 {
+               /* non-configurable replicators don't show up on the
+                * AMBA bus.  As such no need to add "arm,primecell".
+                */
+               compatible = "arm,coresight-replicator";
+
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* replicator output ports */
+                       port@0 {
+                               reg = <0>;
+                               replicator3_out_port0: endpoint {
+                                       remote-endpoint = <&etb3_in_port>;
+                               };
+                       };
+
+                       port@1 {
+                               reg = <1>;
+                               replicator3_out_port1: endpoint {
+                                       remote-endpoint = <&funnel4_in_port3>;
+                               };
+                       };
+
+                       /* replicator input port */
+                       port@2 {
+                               reg = <0>;
+                               replicator3_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&funnel3_out_port0>;
+                               };
+                       };
+               };
+       };
+
+       funnel@0,e3c41000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0xe3c41000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel0_out_port0: endpoint {
+                                       remote-endpoint =
+                                               <&replicator0_in_port0>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel0_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm0_out_port>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel0_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm1_out_port>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel0_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm2_out_port>;
+                               };
+                       };
+
+                       port@4 {
+                               reg = <3>;
+                               funnel0_in_port3: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm3_out_port>;
+                               };
+                       };
+               };
+       };
+
+       funnel@0,e3c81000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0xe3c81000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel1_out_port0: endpoint {
+                                       remote-endpoint =
+                                               <&replicator1_in_port0>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel1_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm4_out_port>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel1_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm5_out_port>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel1_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm6_out_port>;
+                               };
+                       };
+
+                       port@4 {
+                               reg = <3>;
+                               funnel1_in_port3: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm7_out_port>;
+                               };
+                       };
+               };
+       };
+
+       funnel@0,e3cc1000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0xe3cc1000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel2_out_port0: endpoint {
+                                       remote-endpoint =
+                                               <&replicator2_in_port0>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel2_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm8_out_port>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel2_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm9_out_port>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel2_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm10_out_port>;
+                               };
+                       };
+
+                       port@4 {
+                               reg = <3>;
+                               funnel2_in_port3: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm11_out_port>;
+                               };
+                       };
+               };
+       };
+
+       funnel@0,e3d01000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0xe3d01000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel3_out_port0: endpoint {
+                                       remote-endpoint =
+                                               <&replicator3_in_port0>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel3_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm12_out_port>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel3_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm13_out_port>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel3_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm14_out_port>;
+                               };
+                       };
+
+                       port@4 {
+                               reg = <3>;
+                               funnel3_in_port3: endpoint {
+                                       slave-mode;
+                                       remote-endpoint = <&ptm15_out_port>;
+                               };
+                       };
+               };
+       };
+
+       funnel@0,e3c04000 {
+               compatible = "arm,coresight-funnel", "arm,primecell";
+               reg = <0 0xe3c04000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               ports {
+                       #address-cells = <1>;
+                       #size-cells = <0>;
+
+                       /* funnel output port */
+                       port@0 {
+                               reg = <0>;
+                               funnel4_out_port0: endpoint {
+                                       remote-endpoint = <&tpiu_in_port>;
+                               };
+                       };
+
+                       /* funnel input ports */
+                       port@1 {
+                               reg = <0>;
+                               funnel4_in_port0: endpoint {
+                                       slave-mode;
+                                       remote-endpoint =
+                                               <&replicator0_out_port1>;
+                               };
+                       };
+
+                       port@2 {
+                               reg = <1>;
+                               funnel4_in_port1: endpoint {
+                                       slave-mode;
+                                       remote-endpoint =
+                                               <&replicator1_out_port1>;
+                               };
+                       };
+
+                       port@3 {
+                               reg = <2>;
+                               funnel4_in_port2: endpoint {
+                                       slave-mode;
+                                       remote-endpoint =
+                                               <&replicator2_out_port1>;
+                               };
+                       };
+
+                       port@4 {
+                               reg = <3>;
+                               funnel4_in_port3: endpoint {
+                                       slave-mode;
+                                       remote-endpoint =
+                                               <&replicator3_out_port1>;
+                               };
+                       };
+               };
+       };
+
+       ptm@0,e3c7c000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3c7c000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU0>;
+               port {
+                       ptm0_out_port: endpoint {
+                               remote-endpoint = <&funnel0_in_port0>;
+                       };
+               };
+       };
+
+       ptm@0,e3c7d000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3c7d000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU1>;
+               port {
+                       ptm1_out_port: endpoint {
+                               remote-endpoint = <&funnel0_in_port1>;
+                       };
+               };
+       };
+
+       ptm@0,e3c7e000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3c7e000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU2>;
+               port {
+                       ptm2_out_port: endpoint {
+                               remote-endpoint = <&funnel0_in_port2>;
+                       };
+               };
+       };
+
+       ptm@0,e3c7f000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3c7f000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU3>;
+               port {
+                       ptm3_out_port: endpoint {
+                               remote-endpoint = <&funnel0_in_port3>;
+                       };
+               };
+       };
+
+       ptm@0,e3cbc000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cbc000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU4>;
+               port {
+                       ptm4_out_port: endpoint {
+                               remote-endpoint = <&funnel1_in_port0>;
+                       };
+               };
+       };
+
+       ptm@0,e3cbd000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cbd000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU5>;
+               port {
+                       ptm5_out_port: endpoint {
+                               remote-endpoint = <&funnel1_in_port1>;
+                       };
+               };
+       };
+
+       ptm@0,e3cbe000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cbe000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU6>;
+               port {
+                       ptm6_out_port: endpoint {
+                               remote-endpoint = <&funnel1_in_port2>;
+                       };
+               };
+       };
+
+       ptm@0,e3cbf000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cbf000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU7>;
+               port {
+                       ptm7_out_port: endpoint {
+                               remote-endpoint = <&funnel1_in_port3>;
+                       };
+               };
+       };
+
+       ptm@0,e3cfc000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cfc000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU8>;
+               port {
+                       ptm8_out_port: endpoint {
+                               remote-endpoint = <&funnel2_in_port0>;
+                       };
+               };
+       };
+
+       ptm@0,e3cfd000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cfd000 0 0x1000>;
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU9>;
+               port {
+                       ptm9_out_port: endpoint {
+                               remote-endpoint = <&funnel2_in_port1>;
+                       };
+               };
+       };
+
+       ptm@0,e3cfe000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cfe000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU10>;
+               port {
+                       ptm10_out_port: endpoint {
+                               remote-endpoint = <&funnel2_in_port2>;
+                       };
+               };
+       };
+
+       ptm@0,e3cff000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3cff000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU11>;
+               port {
+                       ptm11_out_port: endpoint {
+                               remote-endpoint = <&funnel2_in_port3>;
+                       };
+               };
+       };
+
+       ptm@0,e3d3c000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3d3c000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU12>;
+               port {
+                       ptm12_out_port: endpoint {
+                               remote-endpoint = <&funnel3_in_port0>;
+                       };
+               };
+       };
+
+       ptm@0,e3d3d000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3d3d000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU13>;
+               port {
+                       ptm13_out_port: endpoint {
+                               remote-endpoint = <&funnel3_in_port1>;
+                       };
+               };
+       };
+
+       ptm@0,e3d3e000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3d3e000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU14>;
+               port {
+                       ptm14_out_port: endpoint {
+                               remote-endpoint = <&funnel3_in_port2>;
+                       };
+               };
+       };
+
+       ptm@0,e3d3f000 {
+               compatible = "arm,coresight-etm3x", "arm,primecell";
+               reg = <0 0xe3d3f000 0 0x1000>;
+
+               clocks = <&clk_375m>;
+               clock-names = "apb_pclk";
+               cpu = <&CPU15>;
+               port {
+                       ptm15_out_port: endpoint {
+                               remote-endpoint = <&funnel3_in_port3>;
+                       };
+               };
+       };
+};
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
deleted file mode 100644 (file)
index fc53019..0000000
+++ /dev/null
@@ -1,185 +0,0 @@
-/*
- * linux/arch/arm/include/asm/hardware/coresight.h
- *
- * CoreSight components' registers
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ASM_HARDWARE_CORESIGHT_H
-#define __ASM_HARDWARE_CORESIGHT_H
-
-#define TRACER_ACCESSED_BIT    0
-#define TRACER_RUNNING_BIT     1
-#define TRACER_CYCLE_ACC_BIT   2
-#define TRACER_TRACE_DATA_BIT  3
-#define TRACER_TIMESTAMP_BIT   4
-#define TRACER_BRANCHOUTPUT_BIT        5
-#define TRACER_RETURN_STACK_BIT        6
-#define TRACER_ACCESSED                BIT(TRACER_ACCESSED_BIT)
-#define TRACER_RUNNING         BIT(TRACER_RUNNING_BIT)
-#define TRACER_CYCLE_ACC       BIT(TRACER_CYCLE_ACC_BIT)
-#define TRACER_TRACE_DATA      BIT(TRACER_TRACE_DATA_BIT)
-#define TRACER_TIMESTAMP       BIT(TRACER_TIMESTAMP_BIT)
-#define TRACER_BRANCHOUTPUT    BIT(TRACER_BRANCHOUTPUT_BIT)
-#define TRACER_RETURN_STACK    BIT(TRACER_RETURN_STACK_BIT)
-
-#define TRACER_TIMEOUT 10000
-
-#define etm_writel(t, v, x) \
-       (writel_relaxed((v), (t)->etm_regs + (x)))
-#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
-
-/* CoreSight Management Registers */
-#define CSMR_LOCKACCESS 0xfb0
-#define CSMR_LOCKSTATUS 0xfb4
-#define CSMR_AUTHSTATUS 0xfb8
-#define CSMR_DEVID     0xfc8
-#define CSMR_DEVTYPE   0xfcc
-/* CoreSight Component Registers */
-#define CSCR_CLASS     0xff4
-
-#define CS_LAR_KEY     0xc5acce55
-
-/* ETM control register, "ETM Architecture", 3.3.1 */
-#define ETMR_CTRL              0
-#define ETMCTRL_POWERDOWN      1
-#define ETMCTRL_PROGRAM                (1 << 10)
-#define ETMCTRL_PORTSEL                (1 << 11)
-#define ETMCTRL_CONTEXTIDSIZE(x) (((x) & 3) << 14)
-#define ETMCTRL_PORTMASK1      (7 << 4)
-#define ETMCTRL_PORTMASK2      (1 << 21)
-#define ETMCTRL_PORTMASK       (ETMCTRL_PORTMASK1 | ETMCTRL_PORTMASK2)
-#define ETMCTRL_PORTSIZE(x) ((((x) & 7) << 4) | (!!((x) & 8)) << 21)
-#define ETMCTRL_DO_CPRT                (1 << 1)
-#define ETMCTRL_DATAMASK       (3 << 2)
-#define ETMCTRL_DATA_DO_DATA   (1 << 2)
-#define ETMCTRL_DATA_DO_ADDR   (1 << 3)
-#define ETMCTRL_DATA_DO_BOTH   (ETMCTRL_DATA_DO_DATA | ETMCTRL_DATA_DO_ADDR)
-#define ETMCTRL_BRANCH_OUTPUT  (1 << 8)
-#define ETMCTRL_CYCLEACCURATE  (1 << 12)
-#define ETMCTRL_TIMESTAMP_EN   (1 << 28)
-#define ETMCTRL_RETURN_STACK_EN        (1 << 29)
-
-/* ETM configuration code register */
-#define ETMR_CONFCODE          (0x04)
-#define ETMCCR_ETMIDR_PRESENT  BIT(31)
-
-/* ETM trace start/stop resource control register */
-#define ETMR_TRACESSCTRL       (0x18)
-
-/* ETM trigger event register */
-#define ETMR_TRIGEVT           (0x08)
-
-/* address access type register bits, "ETM architecture",
- * table 3-27 */
-/* - access type */
-#define ETMAAT_IFETCH          0
-#define ETMAAT_IEXEC           1
-#define ETMAAT_IEXECPASS       2
-#define ETMAAT_IEXECFAIL       3
-#define ETMAAT_DLOADSTORE      4
-#define ETMAAT_DLOAD           5
-#define ETMAAT_DSTORE          6
-/* - comparison access size */
-#define ETMAAT_JAVA            (0 << 3)
-#define ETMAAT_THUMB           (1 << 3)
-#define ETMAAT_ARM             (3 << 3)
-/* - data value comparison control */
-#define ETMAAT_NOVALCMP                (0 << 5)
-#define ETMAAT_VALMATCH                (1 << 5)
-#define ETMAAT_VALNOMATCH      (3 << 5)
-/* - exact match */
-#define ETMAAT_EXACTMATCH      (1 << 7)
-/* - context id comparator control */
-#define ETMAAT_IGNCONTEXTID    (0 << 8)
-#define ETMAAT_VALUE1          (1 << 8)
-#define ETMAAT_VALUE2          (2 << 8)
-#define ETMAAT_VALUE3          (3 << 8)
-/* - security level control */
-#define ETMAAT_IGNSECURITY     (0 << 10)
-#define ETMAAT_NSONLY          (1 << 10)
-#define ETMAAT_SONLY           (2 << 10)
-
-#define ETMR_COMP_VAL(x)       (0x40 + (x) * 4)
-#define ETMR_COMP_ACC_TYPE(x)  (0x80 + (x) * 4)
-
-/* ETM status register, "ETM Architecture", 3.3.2 */
-#define ETMR_STATUS            (0x10)
-#define ETMST_OVERFLOW         BIT(0)
-#define ETMST_PROGBIT          BIT(1)
-#define ETMST_STARTSTOP                BIT(2)
-#define ETMST_TRIGGER          BIT(3)
-
-#define etm_progbit(t)         (etm_readl((t), ETMR_STATUS) & ETMST_PROGBIT)
-#define etm_started(t)         (etm_readl((t), ETMR_STATUS) & ETMST_STARTSTOP)
-#define etm_triggered(t)       (etm_readl((t), ETMR_STATUS) & ETMST_TRIGGER)
-
-#define ETMR_TRACEENCTRL2      0x1c
-#define ETMR_TRACEENCTRL       0x24
-#define ETMTE_INCLEXCL         BIT(24)
-#define ETMR_TRACEENEVT                0x20
-
-#define ETMR_VIEWDATAEVT       0x30
-#define ETMR_VIEWDATACTRL1     0x34
-#define ETMR_VIEWDATACTRL2     0x38
-#define ETMR_VIEWDATACTRL3     0x3c
-#define ETMVDC3_EXCLONLY       BIT(16)
-
-#define ETMCTRL_OPTS           (ETMCTRL_DO_CPRT)
-
-#define ETMR_ID                        0x1e4
-#define ETMIDR_VERSION(x)      (((x) >> 4) & 0xff)
-#define ETMIDR_VERSION_3_1     0x21
-#define ETMIDR_VERSION_PFT_1_0 0x30
-
-#define ETMR_CCE               0x1e8
-#define ETMCCER_RETURN_STACK_IMPLEMENTED       BIT(23)
-#define ETMCCER_TIMESTAMPING_IMPLEMENTED       BIT(22)
-
-#define ETMR_TRACEIDR          0x200
-
-/* ETM management registers, "ETM Architecture", 3.5.24 */
-#define ETMMR_OSLAR    0x300
-#define ETMMR_OSLSR    0x304
-#define ETMMR_OSSRR    0x308
-#define ETMMR_PDSR     0x314
-
-/* ETB registers, "CoreSight Components TRM", 9.3 */
-#define ETBR_DEPTH             0x04
-#define ETBR_STATUS            0x0c
-#define ETBR_READMEM           0x10
-#define ETBR_READADDR          0x14
-#define ETBR_WRITEADDR         0x18
-#define ETBR_TRIGGERCOUNT      0x1c
-#define ETBR_CTRL              0x20
-#define ETBR_FORMATTERCTRL     0x304
-#define ETBFF_ENFTC            1
-#define ETBFF_ENFCONT          BIT(1)
-#define ETBFF_FONFLIN          BIT(4)
-#define ETBFF_MANUAL_FLUSH     BIT(6)
-#define ETBFF_TRIGIN           BIT(8)
-#define ETBFF_TRIGEVT          BIT(9)
-#define ETBFF_TRIGFL           BIT(10)
-#define ETBFF_STOPFL           BIT(12)
-
-#define etb_writel(t, v, x) \
-       (writel_relaxed((v), (t)->etb_regs + (x)))
-#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
-
-#define etm_lock(t, id) \
-       do { etm_writel((t), (id), 0, CSMR_LOCKACCESS); } while (0)
-#define etm_unlock(t, id) \
-       do { etm_writel((t), (id), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)
-#define etb_unlock(t) \
-       do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)
-
-#endif /* __ASM_HARDWARE_CORESIGHT_H */
-
diff --git a/arch/arm/include/asm/hardware/cp14.h b/arch/arm/include/asm/hardware/cp14.h
new file mode 100644 (file)
index 0000000..61576dc
--- /dev/null
@@ -0,0 +1,542 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __ASM_HARDWARE_CP14_H
+#define __ASM_HARDWARE_CP14_H
+
+#include <linux/types.h>
+
+/* Accessors for CP14 registers */
+#define dbg_read(reg)                  RCP14_##reg()
+#define dbg_write(val, reg)            WCP14_##reg(val)
+#define etm_read(reg)                  RCP14_##reg()
+#define etm_write(val, reg)            WCP14_##reg(val)
+
+/* MRC14 and MCR14 */
+#define MRC14(op1, crn, crm, op2)                                      \
+({                                                                     \
+u32 val;                                                               \
+asm volatile("mrc p14, "#op1", %0, "#crn", "#crm", "#op2 : "=r" (val));        \
+val;                                                                   \
+})
+
+#define MCR14(val, op1, crn, crm, op2)                                 \
+({                                                                     \
+asm volatile("mcr p14, "#op1", %0, "#crn", "#crm", "#op2 : : "r" (val));\
+})
+
+/*
+ * Debug Registers
+ *
+ * Available only in DBGv7
+ * DBGECR, DBGDSCCR, DBGDSMCR, DBGDRCR
+ *
+ * Available only in DBGv7.1
+ * DBGBXVRm, DBGOSDLR, DBGDEVID2, DBGDEVID1
+ *
+ * Read only
+ * DBGDIDR, DBGDSCRint, DBGDTRRXint, DBGDRAR, DBGOSLSR, DBGOSSRR, DBGPRSR,
+ * DBGPRSR, DBGDSAR, DBGAUTHSTATUS, DBGDEVID2, DBGDEVID1, DBGDEVID
+ *
+ * Write only
+ * DBGDTRTXint, DBGOSLAR
+ */
+#define RCP14_DBGDIDR()                        MRC14(0, c0, c0, 0)
+#define RCP14_DBGDSCRint()             MRC14(0, c0, c1, 0)
+#define RCP14_DBGDTRRXint()            MRC14(0, c0, c5, 0)
+#define RCP14_DBGWFAR()                        MRC14(0, c0, c6, 0)
+#define RCP14_DBGVCR()                 MRC14(0, c0, c7, 0)
+#define RCP14_DBGECR()                 MRC14(0, c0, c9, 0)
+#define RCP14_DBGDSCCR()               MRC14(0, c0, c10, 0)
+#define RCP14_DBGDSMCR()               MRC14(0, c0, c11, 0)
+#define RCP14_DBGDTRRXext()            MRC14(0, c0, c0, 2)
+#define RCP14_DBGDSCRext()             MRC14(0, c0, c2, 2)
+#define RCP14_DBGDTRTXext()            MRC14(0, c0, c3, 2)
+#define RCP14_DBGDRCR()                        MRC14(0, c0, c4, 2)
+#define RCP14_DBGBVR0()                        MRC14(0, c0, c0, 4)
+#define RCP14_DBGBVR1()                        MRC14(0, c0, c1, 4)
+#define RCP14_DBGBVR2()                        MRC14(0, c0, c2, 4)
+#define RCP14_DBGBVR3()                        MRC14(0, c0, c3, 4)
+#define RCP14_DBGBVR4()                        MRC14(0, c0, c4, 4)
+#define RCP14_DBGBVR5()                        MRC14(0, c0, c5, 4)
+#define RCP14_DBGBVR6()                        MRC14(0, c0, c6, 4)
+#define RCP14_DBGBVR7()                        MRC14(0, c0, c7, 4)
+#define RCP14_DBGBVR8()                        MRC14(0, c0, c8, 4)
+#define RCP14_DBGBVR9()                        MRC14(0, c0, c9, 4)
+#define RCP14_DBGBVR10()               MRC14(0, c0, c10, 4)
+#define RCP14_DBGBVR11()               MRC14(0, c0, c11, 4)
+#define RCP14_DBGBVR12()               MRC14(0, c0, c12, 4)
+#define RCP14_DBGBVR13()               MRC14(0, c0, c13, 4)
+#define RCP14_DBGBVR14()               MRC14(0, c0, c14, 4)
+#define RCP14_DBGBVR15()               MRC14(0, c0, c15, 4)
+#define RCP14_DBGBCR0()                        MRC14(0, c0, c0, 5)
+#define RCP14_DBGBCR1()                        MRC14(0, c0, c1, 5)
+#define RCP14_DBGBCR2()                        MRC14(0, c0, c2, 5)
+#define RCP14_DBGBCR3()                        MRC14(0, c0, c3, 5)
+#define RCP14_DBGBCR4()                        MRC14(0, c0, c4, 5)
+#define RCP14_DBGBCR5()                        MRC14(0, c0, c5, 5)
+#define RCP14_DBGBCR6()                        MRC14(0, c0, c6, 5)
+#define RCP14_DBGBCR7()                        MRC14(0, c0, c7, 5)
+#define RCP14_DBGBCR8()                        MRC14(0, c0, c8, 5)
+#define RCP14_DBGBCR9()                        MRC14(0, c0, c9, 5)
+#define RCP14_DBGBCR10()               MRC14(0, c0, c10, 5)
+#define RCP14_DBGBCR11()               MRC14(0, c0, c11, 5)
+#define RCP14_DBGBCR12()               MRC14(0, c0, c12, 5)
+#define RCP14_DBGBCR13()               MRC14(0, c0, c13, 5)
+#define RCP14_DBGBCR14()               MRC14(0, c0, c14, 5)
+#define RCP14_DBGBCR15()               MRC14(0, c0, c15, 5)
+#define RCP14_DBGWVR0()                        MRC14(0, c0, c0, 6)
+#define RCP14_DBGWVR1()                        MRC14(0, c0, c1, 6)
+#define RCP14_DBGWVR2()                        MRC14(0, c0, c2, 6)
+#define RCP14_DBGWVR3()                        MRC14(0, c0, c3, 6)
+#define RCP14_DBGWVR4()                        MRC14(0, c0, c4, 6)
+#define RCP14_DBGWVR5()                        MRC14(0, c0, c5, 6)
+#define RCP14_DBGWVR6()                        MRC14(0, c0, c6, 6)
+#define RCP14_DBGWVR7()                        MRC14(0, c0, c7, 6)
+#define RCP14_DBGWVR8()                        MRC14(0, c0, c8, 6)
+#define RCP14_DBGWVR9()                        MRC14(0, c0, c9, 6)
+#define RCP14_DBGWVR10()               MRC14(0, c0, c10, 6)
+#define RCP14_DBGWVR11()               MRC14(0, c0, c11, 6)
+#define RCP14_DBGWVR12()               MRC14(0, c0, c12, 6)
+#define RCP14_DBGWVR13()               MRC14(0, c0, c13, 6)
+#define RCP14_DBGWVR14()               MRC14(0, c0, c14, 6)
+#define RCP14_DBGWVR15()               MRC14(0, c0, c15, 6)
+#define RCP14_DBGWCR0()                        MRC14(0, c0, c0, 7)
+#define RCP14_DBGWCR1()                        MRC14(0, c0, c1, 7)
+#define RCP14_DBGWCR2()                        MRC14(0, c0, c2, 7)
+#define RCP14_DBGWCR3()                        MRC14(0, c0, c3, 7)
+#define RCP14_DBGWCR4()                        MRC14(0, c0, c4, 7)
+#define RCP14_DBGWCR5()                        MRC14(0, c0, c5, 7)
+#define RCP14_DBGWCR6()                        MRC14(0, c0, c6, 7)
+#define RCP14_DBGWCR7()                        MRC14(0, c0, c7, 7)
+#define RCP14_DBGWCR8()                        MRC14(0, c0, c8, 7)
+#define RCP14_DBGWCR9()                        MRC14(0, c0, c9, 7)
+#define RCP14_DBGWCR10()               MRC14(0, c0, c10, 7)
+#define RCP14_DBGWCR11()               MRC14(0, c0, c11, 7)
+#define RCP14_DBGWCR12()               MRC14(0, c0, c12, 7)
+#define RCP14_DBGWCR13()               MRC14(0, c0, c13, 7)
+#define RCP14_DBGWCR14()               MRC14(0, c0, c14, 7)
+#define RCP14_DBGWCR15()               MRC14(0, c0, c15, 7)
+#define RCP14_DBGDRAR()                        MRC14(0, c1, c0, 0)
+#define RCP14_DBGBXVR0()               MRC14(0, c1, c0, 1)
+#define RCP14_DBGBXVR1()               MRC14(0, c1, c1, 1)
+#define RCP14_DBGBXVR2()               MRC14(0, c1, c2, 1)
+#define RCP14_DBGBXVR3()               MRC14(0, c1, c3, 1)
+#define RCP14_DBGBXVR4()               MRC14(0, c1, c4, 1)
+#define RCP14_DBGBXVR5()               MRC14(0, c1, c5, 1)
+#define RCP14_DBGBXVR6()               MRC14(0, c1, c6, 1)
+#define RCP14_DBGBXVR7()               MRC14(0, c1, c7, 1)
+#define RCP14_DBGBXVR8()               MRC14(0, c1, c8, 1)
+#define RCP14_DBGBXVR9()               MRC14(0, c1, c9, 1)
+#define RCP14_DBGBXVR10()              MRC14(0, c1, c10, 1)
+#define RCP14_DBGBXVR11()              MRC14(0, c1, c11, 1)
+#define RCP14_DBGBXVR12()              MRC14(0, c1, c12, 1)
+#define RCP14_DBGBXVR13()              MRC14(0, c1, c13, 1)
+#define RCP14_DBGBXVR14()              MRC14(0, c1, c14, 1)
+#define RCP14_DBGBXVR15()              MRC14(0, c1, c15, 1)
+#define RCP14_DBGOSLSR()               MRC14(0, c1, c1, 4)
+#define RCP14_DBGOSSRR()               MRC14(0, c1, c2, 4)
+#define RCP14_DBGOSDLR()               MRC14(0, c1, c3, 4)
+#define RCP14_DBGPRCR()                        MRC14(0, c1, c4, 4)
+#define RCP14_DBGPRSR()                        MRC14(0, c1, c5, 4)
+#define RCP14_DBGDSAR()                        MRC14(0, c2, c0, 0)
+#define RCP14_DBGITCTRL()              MRC14(0, c7, c0, 4)
+#define RCP14_DBGCLAIMSET()            MRC14(0, c7, c8, 6)
+#define RCP14_DBGCLAIMCLR()            MRC14(0, c7, c9, 6)
+#define RCP14_DBGAUTHSTATUS()          MRC14(0, c7, c14, 6)
+#define RCP14_DBGDEVID2()              MRC14(0, c7, c0, 7)
+#define RCP14_DBGDEVID1()              MRC14(0, c7, c1, 7)
+#define RCP14_DBGDEVID()               MRC14(0, c7, c2, 7)
+
+#define WCP14_DBGDTRTXint(val)         MCR14(val, 0, c0, c5, 0)
+#define WCP14_DBGWFAR(val)             MCR14(val, 0, c0, c6, 0)
+#define WCP14_DBGVCR(val)              MCR14(val, 0, c0, c7, 0)
+#define WCP14_DBGECR(val)              MCR14(val, 0, c0, c9, 0)
+#define WCP14_DBGDSCCR(val)            MCR14(val, 0, c0, c10, 0)
+#define WCP14_DBGDSMCR(val)            MCR14(val, 0, c0, c11, 0)
+#define WCP14_DBGDTRRXext(val)         MCR14(val, 0, c0, c0, 2)
+#define WCP14_DBGDSCRext(val)          MCR14(val, 0, c0, c2, 2)
+#define WCP14_DBGDTRTXext(val)         MCR14(val, 0, c0, c3, 2)
+#define WCP14_DBGDRCR(val)             MCR14(val, 0, c0, c4, 2)
+#define WCP14_DBGBVR0(val)             MCR14(val, 0, c0, c0, 4)
+#define WCP14_DBGBVR1(val)             MCR14(val, 0, c0, c1, 4)
+#define WCP14_DBGBVR2(val)             MCR14(val, 0, c0, c2, 4)
+#define WCP14_DBGBVR3(val)             MCR14(val, 0, c0, c3, 4)
+#define WCP14_DBGBVR4(val)             MCR14(val, 0, c0, c4, 4)
+#define WCP14_DBGBVR5(val)             MCR14(val, 0, c0, c5, 4)
+#define WCP14_DBGBVR6(val)             MCR14(val, 0, c0, c6, 4)
+#define WCP14_DBGBVR7(val)             MCR14(val, 0, c0, c7, 4)
+#define WCP14_DBGBVR8(val)             MCR14(val, 0, c0, c8, 4)
+#define WCP14_DBGBVR9(val)             MCR14(val, 0, c0, c9, 4)
+#define WCP14_DBGBVR10(val)            MCR14(val, 0, c0, c10, 4)
+#define WCP14_DBGBVR11(val)            MCR14(val, 0, c0, c11, 4)
+#define WCP14_DBGBVR12(val)            MCR14(val, 0, c0, c12, 4)
+#define WCP14_DBGBVR13(val)            MCR14(val, 0, c0, c13, 4)
+#define WCP14_DBGBVR14(val)            MCR14(val, 0, c0, c14, 4)
+#define WCP14_DBGBVR15(val)            MCR14(val, 0, c0, c15, 4)
+#define WCP14_DBGBCR0(val)             MCR14(val, 0, c0, c0, 5)
+#define WCP14_DBGBCR1(val)             MCR14(val, 0, c0, c1, 5)
+#define WCP14_DBGBCR2(val)             MCR14(val, 0, c0, c2, 5)
+#define WCP14_DBGBCR3(val)             MCR14(val, 0, c0, c3, 5)
+#define WCP14_DBGBCR4(val)             MCR14(val, 0, c0, c4, 5)
+#define WCP14_DBGBCR5(val)             MCR14(val, 0, c0, c5, 5)
+#define WCP14_DBGBCR6(val)             MCR14(val, 0, c0, c6, 5)
+#define WCP14_DBGBCR7(val)             MCR14(val, 0, c0, c7, 5)
+#define WCP14_DBGBCR8(val)             MCR14(val, 0, c0, c8, 5)
+#define WCP14_DBGBCR9(val)             MCR14(val, 0, c0, c9, 5)
+#define WCP14_DBGBCR10(val)            MCR14(val, 0, c0, c10, 5)
+#define WCP14_DBGBCR11(val)            MCR14(val, 0, c0, c11, 5)
+#define WCP14_DBGBCR12(val)            MCR14(val, 0, c0, c12, 5)
+#define WCP14_DBGBCR13(val)            MCR14(val, 0, c0, c13, 5)
+#define WCP14_DBGBCR14(val)            MCR14(val, 0, c0, c14, 5)
+#define WCP14_DBGBCR15(val)            MCR14(val, 0, c0, c15, 5)
+#define WCP14_DBGWVR0(val)             MCR14(val, 0, c0, c0, 6)
+#define WCP14_DBGWVR1(val)             MCR14(val, 0, c0, c1, 6)
+#define WCP14_DBGWVR2(val)             MCR14(val, 0, c0, c2, 6)
+#define WCP14_DBGWVR3(val)             MCR14(val, 0, c0, c3, 6)
+#define WCP14_DBGWVR4(val)             MCR14(val, 0, c0, c4, 6)
+#define WCP14_DBGWVR5(val)             MCR14(val, 0, c0, c5, 6)
+#define WCP14_DBGWVR6(val)             MCR14(val, 0, c0, c6, 6)
+#define WCP14_DBGWVR7(val)             MCR14(val, 0, c0, c7, 6)
+#define WCP14_DBGWVR8(val)             MCR14(val, 0, c0, c8, 6)
+#define WCP14_DBGWVR9(val)             MCR14(val, 0, c0, c9, 6)
+#define WCP14_DBGWVR10(val)            MCR14(val, 0, c0, c10, 6)
+#define WCP14_DBGWVR11(val)            MCR14(val, 0, c0, c11, 6)
+#define WCP14_DBGWVR12(val)            MCR14(val, 0, c0, c12, 6)
+#define WCP14_DBGWVR13(val)            MCR14(val, 0, c0, c13, 6)
+#define WCP14_DBGWVR14(val)            MCR14(val, 0, c0, c14, 6)
+#define WCP14_DBGWVR15(val)            MCR14(val, 0, c0, c15, 6)
+#define WCP14_DBGWCR0(val)             MCR14(val, 0, c0, c0, 7)
+#define WCP14_DBGWCR1(val)             MCR14(val, 0, c0, c1, 7)
+#define WCP14_DBGWCR2(val)             MCR14(val, 0, c0, c2, 7)
+#define WCP14_DBGWCR3(val)             MCR14(val, 0, c0, c3, 7)
+#define WCP14_DBGWCR4(val)             MCR14(val, 0, c0, c4, 7)
+#define WCP14_DBGWCR5(val)             MCR14(val, 0, c0, c5, 7)
+#define WCP14_DBGWCR6(val)             MCR14(val, 0, c0, c6, 7)
+#define WCP14_DBGWCR7(val)             MCR14(val, 0, c0, c7, 7)
+#define WCP14_DBGWCR8(val)             MCR14(val, 0, c0, c8, 7)
+#define WCP14_DBGWCR9(val)             MCR14(val, 0, c0, c9, 7)
+#define WCP14_DBGWCR10(val)            MCR14(val, 0, c0, c10, 7)
+#define WCP14_DBGWCR11(val)            MCR14(val, 0, c0, c11, 7)
+#define WCP14_DBGWCR12(val)            MCR14(val, 0, c0, c12, 7)
+#define WCP14_DBGWCR13(val)            MCR14(val, 0, c0, c13, 7)
+#define WCP14_DBGWCR14(val)            MCR14(val, 0, c0, c14, 7)
+#define WCP14_DBGWCR15(val)            MCR14(val, 0, c0, c15, 7)
+#define WCP14_DBGBXVR0(val)            MCR14(val, 0, c1, c0, 1)
+#define WCP14_DBGBXVR1(val)            MCR14(val, 0, c1, c1, 1)
+#define WCP14_DBGBXVR2(val)            MCR14(val, 0, c1, c2, 1)
+#define WCP14_DBGBXVR3(val)            MCR14(val, 0, c1, c3, 1)
+#define WCP14_DBGBXVR4(val)            MCR14(val, 0, c1, c4, 1)
+#define WCP14_DBGBXVR5(val)            MCR14(val, 0, c1, c5, 1)
+#define WCP14_DBGBXVR6(val)            MCR14(val, 0, c1, c6, 1)
+#define WCP14_DBGBXVR7(val)            MCR14(val, 0, c1, c7, 1)
+#define WCP14_DBGBXVR8(val)            MCR14(val, 0, c1, c8, 1)
+#define WCP14_DBGBXVR9(val)            MCR14(val, 0, c1, c9, 1)
+#define WCP14_DBGBXVR10(val)           MCR14(val, 0, c1, c10, 1)
+#define WCP14_DBGBXVR11(val)           MCR14(val, 0, c1, c11, 1)
+#define WCP14_DBGBXVR12(val)           MCR14(val, 0, c1, c12, 1)
+#define WCP14_DBGBXVR13(val)           MCR14(val, 0, c1, c13, 1)
+#define WCP14_DBGBXVR14(val)           MCR14(val, 0, c1, c14, 1)
+#define WCP14_DBGBXVR15(val)           MCR14(val, 0, c1, c15, 1)
+#define WCP14_DBGOSLAR(val)            MCR14(val, 0, c1, c0, 4)
+#define WCP14_DBGOSSRR(val)            MCR14(val, 0, c1, c2, 4)
+#define WCP14_DBGOSDLR(val)            MCR14(val, 0, c1, c3, 4)
+#define WCP14_DBGPRCR(val)             MCR14(val, 0, c1, c4, 4)
+#define WCP14_DBGITCTRL(val)           MCR14(val, 0, c7, c0, 4)
+#define WCP14_DBGCLAIMSET(val)         MCR14(val, 0, c7, c8, 6)
+#define WCP14_DBGCLAIMCLR(val)         MCR14(val, 0, c7, c9, 6)
+
+/*
+ * ETM Registers
+ *
+ * Available only in ETMv3.3, 3.4, 3.5
+ * ETMASICCR, ETMTECR2, ETMFFRR, ETMVDEVR, ETMVDCR1, ETMVDCR2, ETMVDCR3,
+ * ETMDCVRn, ETMDCMRn
+ *
+ * Available only in ETMv3.5 as read only
+ * ETMIDR2
+ *
+ * Available only in ETMv3.5, PFTv1.0, 1.1
+ * ETMTSEVR, ETMVMIDCVR, ETMPDCR
+ *
+ * Read only
+ * ETMCCR, ETMSCR, ETMIDR, ETMCCER, ETMOSLSR
+ * ETMLSR, ETMAUTHSTATUS, ETMDEVID, ETMDEVTYPE, ETMPIDR4, ETMPIDR5, ETMPIDR6,
+ * ETMPIDR7, ETMPIDR0, ETMPIDR1, ETMPIDR2, ETMPIDR2, ETMPIDR3, ETMCIDR0,
+ * ETMCIDR1, ETMCIDR2, ETMCIDR3
+ *
+ * Write only
+ * ETMOSLAR, ETMLAR
+ * Note: ETMCCER[11] controls WO nature of certain regs. Refer ETM arch spec.
+ */
+#define RCP14_ETMCR()                  MRC14(1, c0, c0, 0)
+#define RCP14_ETMCCR()                 MRC14(1, c0, c1, 0)
+#define RCP14_ETMTRIGGER()             MRC14(1, c0, c2, 0)
+#define RCP14_ETMASICCR()              MRC14(1, c0, c3, 0)
+#define RCP14_ETMSR()                  MRC14(1, c0, c4, 0)
+#define RCP14_ETMSCR()                 MRC14(1, c0, c5, 0)
+#define RCP14_ETMTSSCR()               MRC14(1, c0, c6, 0)
+#define RCP14_ETMTECR2()               MRC14(1, c0, c7, 0)
+#define RCP14_ETMTEEVR()               MRC14(1, c0, c8, 0)
+#define RCP14_ETMTECR1()               MRC14(1, c0, c9, 0)
+#define RCP14_ETMFFRR()                        MRC14(1, c0, c10, 0)
+#define RCP14_ETMFFLR()                        MRC14(1, c0, c11, 0)
+#define RCP14_ETMVDEVR()               MRC14(1, c0, c12, 0)
+#define RCP14_ETMVDCR1()               MRC14(1, c0, c13, 0)
+#define RCP14_ETMVDCR2()               MRC14(1, c0, c14, 0)
+#define RCP14_ETMVDCR3()               MRC14(1, c0, c15, 0)
+#define RCP14_ETMACVR0()               MRC14(1, c0, c0, 1)
+#define RCP14_ETMACVR1()               MRC14(1, c0, c1, 1)
+#define RCP14_ETMACVR2()               MRC14(1, c0, c2, 1)
+#define RCP14_ETMACVR3()               MRC14(1, c0, c3, 1)
+#define RCP14_ETMACVR4()               MRC14(1, c0, c4, 1)
+#define RCP14_ETMACVR5()               MRC14(1, c0, c5, 1)
+#define RCP14_ETMACVR6()               MRC14(1, c0, c6, 1)
+#define RCP14_ETMACVR7()               MRC14(1, c0, c7, 1)
+#define RCP14_ETMACVR8()               MRC14(1, c0, c8, 1)
+#define RCP14_ETMACVR9()               MRC14(1, c0, c9, 1)
+#define RCP14_ETMACVR10()              MRC14(1, c0, c10, 1)
+#define RCP14_ETMACVR11()              MRC14(1, c0, c11, 1)
+#define RCP14_ETMACVR12()              MRC14(1, c0, c12, 1)
+#define RCP14_ETMACVR13()              MRC14(1, c0, c13, 1)
+#define RCP14_ETMACVR14()              MRC14(1, c0, c14, 1)
+#define RCP14_ETMACVR15()              MRC14(1, c0, c15, 1)
+#define RCP14_ETMACTR0()               MRC14(1, c0, c0, 2)
+#define RCP14_ETMACTR1()               MRC14(1, c0, c1, 2)
+#define RCP14_ETMACTR2()               MRC14(1, c0, c2, 2)
+#define RCP14_ETMACTR3()               MRC14(1, c0, c3, 2)
+#define RCP14_ETMACTR4()               MRC14(1, c0, c4, 2)
+#define RCP14_ETMACTR5()               MRC14(1, c0, c5, 2)
+#define RCP14_ETMACTR6()               MRC14(1, c0, c6, 2)
+#define RCP14_ETMACTR7()               MRC14(1, c0, c7, 2)
+#define RCP14_ETMACTR8()               MRC14(1, c0, c8, 2)
+#define RCP14_ETMACTR9()               MRC14(1, c0, c9, 2)
+#define RCP14_ETMACTR10()              MRC14(1, c0, c10, 2)
+#define RCP14_ETMACTR11()              MRC14(1, c0, c11, 2)
+#define RCP14_ETMACTR12()              MRC14(1, c0, c12, 2)
+#define RCP14_ETMACTR13()              MRC14(1, c0, c13, 2)
+#define RCP14_ETMACTR14()              MRC14(1, c0, c14, 2)
+#define RCP14_ETMACTR15()              MRC14(1, c0, c15, 2)
+#define RCP14_ETMDCVR0()               MRC14(1, c0, c0, 3)
+#define RCP14_ETMDCVR2()               MRC14(1, c0, c2, 3)
+#define RCP14_ETMDCVR4()               MRC14(1, c0, c4, 3)
+#define RCP14_ETMDCVR6()               MRC14(1, c0, c6, 3)
+#define RCP14_ETMDCVR8()               MRC14(1, c0, c8, 3)
+#define RCP14_ETMDCVR10()              MRC14(1, c0, c10, 3)
+#define RCP14_ETMDCVR12()              MRC14(1, c0, c12, 3)
+#define RCP14_ETMDCVR14()              MRC14(1, c0, c14, 3)
+#define RCP14_ETMDCMR0()               MRC14(1, c0, c0, 4)
+#define RCP14_ETMDCMR2()               MRC14(1, c0, c2, 4)
+#define RCP14_ETMDCMR4()               MRC14(1, c0, c4, 4)
+#define RCP14_ETMDCMR6()               MRC14(1, c0, c6, 4)
+#define RCP14_ETMDCMR8()               MRC14(1, c0, c8, 4)
+#define RCP14_ETMDCMR10()              MRC14(1, c0, c10, 4)
+#define RCP14_ETMDCMR12()              MRC14(1, c0, c12, 4)
+#define RCP14_ETMDCMR14()              MRC14(1, c0, c14, 4)
+#define RCP14_ETMCNTRLDVR0()           MRC14(1, c0, c0, 5)
+#define RCP14_ETMCNTRLDVR1()           MRC14(1, c0, c1, 5)
+#define RCP14_ETMCNTRLDVR2()           MRC14(1, c0, c2, 5)
+#define RCP14_ETMCNTRLDVR3()           MRC14(1, c0, c3, 5)
+#define RCP14_ETMCNTENR0()             MRC14(1, c0, c4, 5)
+#define RCP14_ETMCNTENR1()             MRC14(1, c0, c5, 5)
+#define RCP14_ETMCNTENR2()             MRC14(1, c0, c6, 5)
+#define RCP14_ETMCNTENR3()             MRC14(1, c0, c7, 5)
+#define RCP14_ETMCNTRLDEVR0()          MRC14(1, c0, c8, 5)
+#define RCP14_ETMCNTRLDEVR1()          MRC14(1, c0, c9, 5)
+#define RCP14_ETMCNTRLDEVR2()          MRC14(1, c0, c10, 5)
+#define RCP14_ETMCNTRLDEVR3()          MRC14(1, c0, c11, 5)
+#define RCP14_ETMCNTVR0()              MRC14(1, c0, c12, 5)
+#define RCP14_ETMCNTVR1()              MRC14(1, c0, c13, 5)
+#define RCP14_ETMCNTVR2()              MRC14(1, c0, c14, 5)
+#define RCP14_ETMCNTVR3()              MRC14(1, c0, c15, 5)
+#define RCP14_ETMSQ12EVR()             MRC14(1, c0, c0, 6)
+#define RCP14_ETMSQ21EVR()             MRC14(1, c0, c1, 6)
+#define RCP14_ETMSQ23EVR()             MRC14(1, c0, c2, 6)
+#define RCP14_ETMSQ31EVR()             MRC14(1, c0, c3, 6)
+#define RCP14_ETMSQ32EVR()             MRC14(1, c0, c4, 6)
+#define RCP14_ETMSQ13EVR()             MRC14(1, c0, c5, 6)
+#define RCP14_ETMSQR()                 MRC14(1, c0, c7, 6)
+#define RCP14_ETMEXTOUTEVR0()          MRC14(1, c0, c8, 6)
+#define RCP14_ETMEXTOUTEVR1()          MRC14(1, c0, c9, 6)
+#define RCP14_ETMEXTOUTEVR2()          MRC14(1, c0, c10, 6)
+#define RCP14_ETMEXTOUTEVR3()          MRC14(1, c0, c11, 6)
+#define RCP14_ETMCIDCVR0()             MRC14(1, c0, c12, 6)
+#define RCP14_ETMCIDCVR1()             MRC14(1, c0, c13, 6)
+#define RCP14_ETMCIDCVR2()             MRC14(1, c0, c14, 6)
+#define RCP14_ETMCIDCMR()              MRC14(1, c0, c15, 6)
+#define RCP14_ETMIMPSPEC0()            MRC14(1, c0, c0, 7)
+#define RCP14_ETMIMPSPEC1()            MRC14(1, c0, c1, 7)
+#define RCP14_ETMIMPSPEC2()            MRC14(1, c0, c2, 7)
+#define RCP14_ETMIMPSPEC3()            MRC14(1, c0, c3, 7)
+#define RCP14_ETMIMPSPEC4()            MRC14(1, c0, c4, 7)
+#define RCP14_ETMIMPSPEC5()            MRC14(1, c0, c5, 7)
+#define RCP14_ETMIMPSPEC6()            MRC14(1, c0, c6, 7)
+#define RCP14_ETMIMPSPEC7()            MRC14(1, c0, c7, 7)
+#define RCP14_ETMSYNCFR()              MRC14(1, c0, c8, 7)
+#define RCP14_ETMIDR()                 MRC14(1, c0, c9, 7)
+#define RCP14_ETMCCER()                        MRC14(1, c0, c10, 7)
+#define RCP14_ETMEXTINSELR()           MRC14(1, c0, c11, 7)
+#define RCP14_ETMTESSEICR()            MRC14(1, c0, c12, 7)
+#define RCP14_ETMEIBCR()               MRC14(1, c0, c13, 7)
+#define RCP14_ETMTSEVR()               MRC14(1, c0, c14, 7)
+#define RCP14_ETMAUXCR()               MRC14(1, c0, c15, 7)
+#define RCP14_ETMTRACEIDR()            MRC14(1, c1, c0, 0)
+#define RCP14_ETMIDR2()                        MRC14(1, c1, c2, 0)
+#define RCP14_ETMVMIDCVR()             MRC14(1, c1, c0, 1)
+#define RCP14_ETMOSLSR()               MRC14(1, c1, c1, 4)
+/* Not available in PFTv1.1 */
+#define RCP14_ETMOSSRR()               MRC14(1, c1, c2, 4)
+#define RCP14_ETMPDCR()                        MRC14(1, c1, c4, 4)
+#define RCP14_ETMPDSR()                        MRC14(1, c1, c5, 4)
+#define RCP14_ETMITCTRL()              MRC14(1, c7, c0, 4)
+#define RCP14_ETMCLAIMSET()            MRC14(1, c7, c8, 6)
+#define RCP14_ETMCLAIMCLR()            MRC14(1, c7, c9, 6)
+#define RCP14_ETMLSR()                 MRC14(1, c7, c13, 6)
+#define RCP14_ETMAUTHSTATUS()          MRC14(1, c7, c14, 6)
+#define RCP14_ETMDEVID()               MRC14(1, c7, c2, 7)
+#define RCP14_ETMDEVTYPE()             MRC14(1, c7, c3, 7)
+#define RCP14_ETMPIDR4()               MRC14(1, c7, c4, 7)
+#define RCP14_ETMPIDR5()               MRC14(1, c7, c5, 7)
+#define RCP14_ETMPIDR6()               MRC14(1, c7, c6, 7)
+#define RCP14_ETMPIDR7()               MRC14(1, c7, c7, 7)
+#define RCP14_ETMPIDR0()               MRC14(1, c7, c8, 7)
+#define RCP14_ETMPIDR1()               MRC14(1, c7, c9, 7)
+#define RCP14_ETMPIDR2()               MRC14(1, c7, c10, 7)
+#define RCP14_ETMPIDR3()               MRC14(1, c7, c11, 7)
+#define RCP14_ETMCIDR0()               MRC14(1, c7, c12, 7)
+#define RCP14_ETMCIDR1()               MRC14(1, c7, c13, 7)
+#define RCP14_ETMCIDR2()               MRC14(1, c7, c14, 7)
+#define RCP14_ETMCIDR3()               MRC14(1, c7, c15, 7)
+
+#define WCP14_ETMCR(val)               MCR14(val, 1, c0, c0, 0)
+#define WCP14_ETMTRIGGER(val)          MCR14(val, 1, c0, c2, 0)
+#define WCP14_ETMASICCR(val)           MCR14(val, 1, c0, c3, 0)
+#define WCP14_ETMSR(val)               MCR14(val, 1, c0, c4, 0)
+#define WCP14_ETMTSSCR(val)            MCR14(val, 1, c0, c6, 0)
+#define WCP14_ETMTECR2(val)            MCR14(val, 1, c0, c7, 0)
+#define WCP14_ETMTEEVR(val)            MCR14(val, 1, c0, c8, 0)
+#define WCP14_ETMTECR1(val)            MCR14(val, 1, c0, c9, 0)
+#define WCP14_ETMFFRR(val)             MCR14(val, 1, c0, c10, 0)
+#define WCP14_ETMFFLR(val)             MCR14(val, 1, c0, c11, 0)
+#define WCP14_ETMVDEVR(val)            MCR14(val, 1, c0, c12, 0)
+#define WCP14_ETMVDCR1(val)            MCR14(val, 1, c0, c13, 0)
+#define WCP14_ETMVDCR2(val)            MCR14(val, 1, c0, c14, 0)
+#define WCP14_ETMVDCR3(val)            MCR14(val, 1, c0, c15, 0)
+#define WCP14_ETMACVR0(val)            MCR14(val, 1, c0, c0, 1)
+#define WCP14_ETMACVR1(val)            MCR14(val, 1, c0, c1, 1)
+#define WCP14_ETMACVR2(val)            MCR14(val, 1, c0, c2, 1)
+#define WCP14_ETMACVR3(val)            MCR14(val, 1, c0, c3, 1)
+#define WCP14_ETMACVR4(val)            MCR14(val, 1, c0, c4, 1)
+#define WCP14_ETMACVR5(val)            MCR14(val, 1, c0, c5, 1)
+#define WCP14_ETMACVR6(val)            MCR14(val, 1, c0, c6, 1)
+#define WCP14_ETMACVR7(val)            MCR14(val, 1, c0, c7, 1)
+#define WCP14_ETMACVR8(val)            MCR14(val, 1, c0, c8, 1)
+#define WCP14_ETMACVR9(val)            MCR14(val, 1, c0, c9, 1)
+#define WCP14_ETMACVR10(val)           MCR14(val, 1, c0, c10, 1)
+#define WCP14_ETMACVR11(val)           MCR14(val, 1, c0, c11, 1)
+#define WCP14_ETMACVR12(val)           MCR14(val, 1, c0, c12, 1)
+#define WCP14_ETMACVR13(val)           MCR14(val, 1, c0, c13, 1)
+#define WCP14_ETMACVR14(val)           MCR14(val, 1, c0, c14, 1)
+#define WCP14_ETMACVR15(val)           MCR14(val, 1, c0, c15, 1)
+#define WCP14_ETMACTR0(val)            MCR14(val, 1, c0, c0, 2)
+#define WCP14_ETMACTR1(val)            MCR14(val, 1, c0, c1, 2)
+#define WCP14_ETMACTR2(val)            MCR14(val, 1, c0, c2, 2)
+#define WCP14_ETMACTR3(val)            MCR14(val, 1, c0, c3, 2)
+#define WCP14_ETMACTR4(val)            MCR14(val, 1, c0, c4, 2)
+#define WCP14_ETMACTR5(val)            MCR14(val, 1, c0, c5, 2)
+#define WCP14_ETMACTR6(val)            MCR14(val, 1, c0, c6, 2)
+#define WCP14_ETMACTR7(val)            MCR14(val, 1, c0, c7, 2)
+#define WCP14_ETMACTR8(val)            MCR14(val, 1, c0, c8, 2)
+#define WCP14_ETMACTR9(val)            MCR14(val, 1, c0, c9, 2)
+#define WCP14_ETMACTR10(val)           MCR14(val, 1, c0, c10, 2)
+#define WCP14_ETMACTR11(val)           MCR14(val, 1, c0, c11, 2)
+#define WCP14_ETMACTR12(val)           MCR14(val, 1, c0, c12, 2)
+#define WCP14_ETMACTR13(val)           MCR14(val, 1, c0, c13, 2)
+#define WCP14_ETMACTR14(val)           MCR14(val, 1, c0, c14, 2)
+#define WCP14_ETMACTR15(val)           MCR14(val, 1, c0, c15, 2)
+#define WCP14_ETMDCVR0(val)            MCR14(val, 1, c0, c0, 3)
+#define WCP14_ETMDCVR2(val)            MCR14(val, 1, c0, c2, 3)
+#define WCP14_ETMDCVR4(val)            MCR14(val, 1, c0, c4, 3)
+#define WCP14_ETMDCVR6(val)            MCR14(val, 1, c0, c6, 3)
+#define WCP14_ETMDCVR8(val)            MCR14(val, 1, c0, c8, 3)
+#define WCP14_ETMDCVR10(val)           MCR14(val, 1, c0, c10, 3)
+#define WCP14_ETMDCVR12(val)           MCR14(val, 1, c0, c12, 3)
+#define WCP14_ETMDCVR14(val)           MCR14(val, 1, c0, c14, 3)
+#define WCP14_ETMDCMR0(val)            MCR14(val, 1, c0, c0, 4)
+#define WCP14_ETMDCMR2(val)            MCR14(val, 1, c0, c2, 4)
+#define WCP14_ETMDCMR4(val)            MCR14(val, 1, c0, c4, 4)
+#define WCP14_ETMDCMR6(val)            MCR14(val, 1, c0, c6, 4)
+#define WCP14_ETMDCMR8(val)            MCR14(val, 1, c0, c8, 4)
+#define WCP14_ETMDCMR10(val)           MCR14(val, 1, c0, c10, 4)
+#define WCP14_ETMDCMR12(val)           MCR14(val, 1, c0, c12, 4)
+#define WCP14_ETMDCMR14(val)           MCR14(val, 1, c0, c14, 4)
+#define WCP14_ETMCNTRLDVR0(val)                MCR14(val, 1, c0, c0, 5)
+#define WCP14_ETMCNTRLDVR1(val)                MCR14(val, 1, c0, c1, 5)
+#define WCP14_ETMCNTRLDVR2(val)                MCR14(val, 1, c0, c2, 5)
+#define WCP14_ETMCNTRLDVR3(val)                MCR14(val, 1, c0, c3, 5)
+#define WCP14_ETMCNTENR0(val)          MCR14(val, 1, c0, c4, 5)
+#define WCP14_ETMCNTENR1(val)          MCR14(val, 1, c0, c5, 5)
+#define WCP14_ETMCNTENR2(val)          MCR14(val, 1, c0, c6, 5)
+#define WCP14_ETMCNTENR3(val)          MCR14(val, 1, c0, c7, 5)
+#define WCP14_ETMCNTRLDEVR0(val)       MCR14(val, 1, c0, c8, 5)
+#define WCP14_ETMCNTRLDEVR1(val)       MCR14(val, 1, c0, c9, 5)
+#define WCP14_ETMCNTRLDEVR2(val)       MCR14(val, 1, c0, c10, 5)
+#define WCP14_ETMCNTRLDEVR3(val)       MCR14(val, 1, c0, c11, 5)
+#define WCP14_ETMCNTVR0(val)           MCR14(val, 1, c0, c12, 5)
+#define WCP14_ETMCNTVR1(val)           MCR14(val, 1, c0, c13, 5)
+#define WCP14_ETMCNTVR2(val)           MCR14(val, 1, c0, c14, 5)
+#define WCP14_ETMCNTVR3(val)           MCR14(val, 1, c0, c15, 5)
+#define WCP14_ETMSQ12EVR(val)          MCR14(val, 1, c0, c0, 6)
+#define WCP14_ETMSQ21EVR(val)          MCR14(val, 1, c0, c1, 6)
+#define WCP14_ETMSQ23EVR(val)          MCR14(val, 1, c0, c2, 6)
+#define WCP14_ETMSQ31EVR(val)          MCR14(val, 1, c0, c3, 6)
+#define WCP14_ETMSQ32EVR(val)          MCR14(val, 1, c0, c4, 6)
+#define WCP14_ETMSQ13EVR(val)          MCR14(val, 1, c0, c5, 6)
+#define WCP14_ETMSQR(val)              MCR14(val, 1, c0, c7, 6)
+#define WCP14_ETMEXTOUTEVR0(val)       MCR14(val, 1, c0, c8, 6)
+#define WCP14_ETMEXTOUTEVR1(val)       MCR14(val, 1, c0, c9, 6)
+#define WCP14_ETMEXTOUTEVR2(val)       MCR14(val, 1, c0, c10, 6)
+#define WCP14_ETMEXTOUTEVR3(val)       MCR14(val, 1, c0, c11, 6)
+#define WCP14_ETMCIDCVR0(val)          MCR14(val, 1, c0, c12, 6)
+#define WCP14_ETMCIDCVR1(val)          MCR14(val, 1, c0, c13, 6)
+#define WCP14_ETMCIDCVR2(val)          MCR14(val, 1, c0, c14, 6)
+#define WCP14_ETMCIDCMR(val)           MCR14(val, 1, c0, c15, 6)
+#define WCP14_ETMIMPSPEC0(val)         MCR14(val, 1, c0, c0, 7)
+#define WCP14_ETMIMPSPEC1(val)         MCR14(val, 1, c0, c1, 7)
+#define WCP14_ETMIMPSPEC2(val)         MCR14(val, 1, c0, c2, 7)
+#define WCP14_ETMIMPSPEC3(val)         MCR14(val, 1, c0, c3, 7)
+#define WCP14_ETMIMPSPEC4(val)         MCR14(val, 1, c0, c4, 7)
+#define WCP14_ETMIMPSPEC5(val)         MCR14(val, 1, c0, c5, 7)
+#define WCP14_ETMIMPSPEC6(val)         MCR14(val, 1, c0, c6, 7)
+#define WCP14_ETMIMPSPEC7(val)         MCR14(val, 1, c0, c7, 7)
+/* Can be read only in ETMv3.4, ETMv3.5 */
+#define WCP14_ETMSYNCFR(val)           MCR14(val, 1, c0, c8, 7)
+#define WCP14_ETMEXTINSELR(val)                MCR14(val, 1, c0, c11, 7)
+#define WCP14_ETMTESSEICR(val)         MCR14(val, 1, c0, c12, 7)
+#define WCP14_ETMEIBCR(val)            MCR14(val, 1, c0, c13, 7)
+#define WCP14_ETMTSEVR(val)            MCR14(val, 1, c0, c14, 7)
+#define WCP14_ETMAUXCR(val)            MCR14(val, 1, c0, c15, 7)
+#define WCP14_ETMTRACEIDR(val)         MCR14(val, 1, c1, c0, 0)
+#define WCP14_ETMIDR2(val)             MCR14(val, 1, c1, c2, 0)
+#define WCP14_ETMVMIDCVR(val)          MCR14(val, 1, c1, c0, 1)
+#define WCP14_ETMOSLAR(val)            MCR14(val, 1, c1, c0, 4)
+/* Not available in PFTv1.1 */
+#define WCP14_ETMOSSRR(val)            MCR14(val, 1, c1, c2, 4)
+#define WCP14_ETMPDCR(val)             MCR14(val, 1, c1, c4, 4)
+#define WCP14_ETMPDSR(val)             MCR14(val, 1, c1, c5, 4)
+#define WCP14_ETMITCTRL(val)           MCR14(val, 1, c7, c0, 4)
+#define WCP14_ETMCLAIMSET(val)         MCR14(val, 1, c7, c8, 6)
+#define WCP14_ETMCLAIMCLR(val)         MCR14(val, 1, c7, c9, 6)
+/* Writes to this from CP14 interface are ignored */
+#define WCP14_ETMLAR(val)              MCR14(val, 1, c7, c12, 6)
+
+#endif
index 33373564018b6a05e9437418a5966d524b1a36c5..f2aea1155f84fe35f3cbe8de42a38ebc3e140d77 100644 (file)
@@ -24,7 +24,6 @@ obj-$(CONFIG_ATAGS)           += atags_parse.o
 obj-$(CONFIG_ATAGS_PROC)       += atags_proc.o
 obj-$(CONFIG_DEPRECATED_PARAM_STRUCT) += atags_compat.o
 
-obj-$(CONFIG_OC_ETM)           += etm.o
 obj-$(CONFIG_CPU_IDLE)         += cpuidle.o
 obj-$(CONFIG_ISA_DMA_API)      += dma.o
 obj-$(CONFIG_FIQ)              += fiq.o fiqasm.o
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
deleted file mode 100644 (file)
index 7db3247..0000000
+++ /dev/null
@@ -1,1076 +0,0 @@
-/*
- * linux/arch/arm/kernel/etm.c
- *
- * Driver for ARM's Embedded Trace Macrocell and Embedded Trace Buffer.
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/sysrq.h>
-#include <linux/device.h>
-#include <linux/clk.h>
-#include <linux/amba/bus.h>
-#include <linux/fs.h>
-#include <linux/uaccess.h>
-#include <linux/miscdevice.h>
-#include <linux/vmalloc.h>
-#include <linux/mutex.h>
-#include <linux/module.h>
-#include <asm/hardware/coresight.h>
-#include <asm/sections.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alexander Shishkin");
-
-/*
- * ETM tracer state
- */
-struct tracectx {
-       unsigned int    etb_bufsz;
-       void __iomem    *etb_regs;
-       void __iomem    **etm_regs;
-       int             etm_regs_count;
-       unsigned long   flags;
-       int             ncmppairs;
-       int             etm_portsz;
-       int             etm_contextid_size;
-       u32             etb_fc;
-       unsigned long   range_start;
-       unsigned long   range_end;
-       unsigned long   data_range_start;
-       unsigned long   data_range_end;
-       bool            dump_initial_etb;
-       struct device   *dev;
-       struct clk      *emu_clk;
-       struct mutex    mutex;
-};
-
-static struct tracectx tracer = {
-       .range_start = (unsigned long)_stext,
-       .range_end = (unsigned long)_etext,
-};
-
-static inline bool trace_isrunning(struct tracectx *t)
-{
-       return !!(t->flags & TRACER_RUNNING);
-}
-
-static int etm_setup_address_range(struct tracectx *t, int id, int n,
-               unsigned long start, unsigned long end, int exclude, int data)
-{
-       u32 flags = ETMAAT_ARM | ETMAAT_IGNCONTEXTID | ETMAAT_IGNSECURITY |
-                   ETMAAT_NOVALCMP;
-
-       if (n < 1 || n > t->ncmppairs)
-               return -EINVAL;
-
-       /* comparators and ranges are numbered starting with 1 as opposed
-        * to bits in a word */
-       n--;
-
-       if (data)
-               flags |= ETMAAT_DLOADSTORE;
-       else
-               flags |= ETMAAT_IEXEC;
-
-       /* first comparator for the range */
-       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2));
-       etm_writel(t, id, start, ETMR_COMP_VAL(n * 2));
-
-       /* second comparator is right next to it */
-       etm_writel(t, id, flags, ETMR_COMP_ACC_TYPE(n * 2 + 1));
-       etm_writel(t, id, end, ETMR_COMP_VAL(n * 2 + 1));
-
-       if (data) {
-               flags = exclude ? ETMVDC3_EXCLONLY : 0;
-               if (exclude)
-                       n += 8;
-               etm_writel(t, id, flags | BIT(n), ETMR_VIEWDATACTRL3);
-       } else {
-               flags = exclude ? ETMTE_INCLEXCL : 0;
-               etm_writel(t, id, flags | (1 << n), ETMR_TRACEENCTRL);
-       }
-
-       return 0;
-}
-
-static int trace_start_etm(struct tracectx *t, int id)
-{
-       u32 v;
-       unsigned long timeout = TRACER_TIMEOUT;
-
-       v = ETMCTRL_OPTS | ETMCTRL_PROGRAM | ETMCTRL_PORTSIZE(t->etm_portsz);
-       v |= ETMCTRL_CONTEXTIDSIZE(t->etm_contextid_size);
-
-       if (t->flags & TRACER_CYCLE_ACC)
-               v |= ETMCTRL_CYCLEACCURATE;
-
-       if (t->flags & TRACER_BRANCHOUTPUT)
-               v |= ETMCTRL_BRANCH_OUTPUT;
-
-       if (t->flags & TRACER_TRACE_DATA)
-               v |= ETMCTRL_DATA_DO_ADDR;
-
-       if (t->flags & TRACER_TIMESTAMP)
-               v |= ETMCTRL_TIMESTAMP_EN;
-
-       if (t->flags & TRACER_RETURN_STACK)
-               v |= ETMCTRL_RETURN_STACK_EN;
-
-       etm_unlock(t, id);
-
-       etm_writel(t, id, v, ETMR_CTRL);
-
-       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
-               ;
-       if (!timeout) {
-               dev_dbg(t->dev, "Waiting for progbit to assert timed out\n");
-               etm_lock(t, id);
-               return -EFAULT;
-       }
-
-       if (t->range_start || t->range_end)
-               etm_setup_address_range(t, id, 1,
-                                       t->range_start, t->range_end, 0, 0);
-       else
-               etm_writel(t, id, ETMTE_INCLEXCL, ETMR_TRACEENCTRL);
-
-       etm_writel(t, id, 0, ETMR_TRACEENCTRL2);
-       etm_writel(t, id, 0, ETMR_TRACESSCTRL);
-       etm_writel(t, id, 0x6f, ETMR_TRACEENEVT);
-
-       etm_writel(t, id, 0, ETMR_VIEWDATACTRL1);
-       etm_writel(t, id, 0, ETMR_VIEWDATACTRL2);
-
-       if (t->data_range_start || t->data_range_end)
-               etm_setup_address_range(t, id, 2, t->data_range_start,
-                                       t->data_range_end, 0, 1);
-       else
-               etm_writel(t, id, ETMVDC3_EXCLONLY, ETMR_VIEWDATACTRL3);
-
-       etm_writel(t, id, 0x6f, ETMR_VIEWDATAEVT);
-
-       v &= ~ETMCTRL_PROGRAM;
-       v |= ETMCTRL_PORTSEL;
-
-       etm_writel(t, id, v, ETMR_CTRL);
-
-       timeout = TRACER_TIMEOUT;
-       while (etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM && --timeout)
-               ;
-       if (!timeout) {
-               dev_dbg(t->dev, "Waiting for progbit to deassert timed out\n");
-               etm_lock(t, id);
-               return -EFAULT;
-       }
-
-       etm_lock(t, id);
-       return 0;
-}
-
-static int trace_start(struct tracectx *t)
-{
-       int ret;
-       int id;
-       u32 etb_fc = t->etb_fc;
-
-       etb_unlock(t);
-
-       t->dump_initial_etb = false;
-       etb_writel(t, 0, ETBR_WRITEADDR);
-       etb_writel(t, etb_fc, ETBR_FORMATTERCTRL);
-       etb_writel(t, 1, ETBR_CTRL);
-
-       etb_lock(t);
-
-       /* configure etm(s) */
-       for (id = 0; id < t->etm_regs_count; id++) {
-               ret = trace_start_etm(t, id);
-               if (ret)
-                       return ret;
-       }
-
-       t->flags |= TRACER_RUNNING;
-
-       return 0;
-}
-
-static int trace_stop_etm(struct tracectx *t, int id)
-{
-       unsigned long timeout = TRACER_TIMEOUT;
-
-       etm_unlock(t, id);
-
-       etm_writel(t, id, 0x440, ETMR_CTRL);
-       while (!(etm_readl(t, id, ETMR_CTRL) & ETMCTRL_PROGRAM) && --timeout)
-               ;
-       if (!timeout) {
-               dev_err(t->dev,
-                       "etm%d: Waiting for progbit to assert timed out\n",
-                       id);
-               etm_lock(t, id);
-               return -EFAULT;
-       }
-
-       etm_lock(t, id);
-       return 0;
-}
-
-static int trace_power_down_etm(struct tracectx *t, int id)
-{
-       unsigned long timeout = TRACER_TIMEOUT;
-       etm_unlock(t, id);
-       while (!(etm_readl(t, id, ETMR_STATUS) & ETMST_PROGBIT) && --timeout)
-               ;
-       if (!timeout) {
-               dev_err(t->dev, "etm%d: Waiting for status progbit to assert timed out\n",
-                       id);
-               etm_lock(t, id);
-               return -EFAULT;
-       }
-
-       etm_writel(t, id, 0x441, ETMR_CTRL);
-
-       etm_lock(t, id);
-       return 0;
-}
-
-static int trace_stop(struct tracectx *t)
-{
-       int id;
-       unsigned long timeout = TRACER_TIMEOUT;
-       u32 etb_fc = t->etb_fc;
-
-       for (id = 0; id < t->etm_regs_count; id++)
-               trace_stop_etm(t, id);
-
-       for (id = 0; id < t->etm_regs_count; id++)
-               trace_power_down_etm(t, id);
-
-       etb_unlock(t);
-       if (etb_fc) {
-               etb_fc |= ETBFF_STOPFL;
-               etb_writel(t, t->etb_fc, ETBR_FORMATTERCTRL);
-       }
-       etb_writel(t, etb_fc | ETBFF_MANUAL_FLUSH, ETBR_FORMATTERCTRL);
-
-       timeout = TRACER_TIMEOUT;
-       while (etb_readl(t, ETBR_FORMATTERCTRL) &
-                       ETBFF_MANUAL_FLUSH && --timeout)
-               ;
-       if (!timeout) {
-               dev_dbg(t->dev, "Waiting for formatter flush to commence "
-                               "timed out\n");
-               etb_lock(t);
-               return -EFAULT;
-       }
-
-       etb_writel(t, 0, ETBR_CTRL);
-
-       etb_lock(t);
-
-       t->flags &= ~TRACER_RUNNING;
-
-       return 0;
-}
-
-static int etb_getdatalen(struct tracectx *t)
-{
-       u32 v;
-       int wp;
-
-       v = etb_readl(t, ETBR_STATUS);
-
-       if (v & 1)
-               return t->etb_bufsz;
-
-       wp = etb_readl(t, ETBR_WRITEADDR);
-       return wp;
-}
-
-/* sysrq+v will always stop the running trace and leave it at that */
-static void etm_dump(void)
-{
-       struct tracectx *t = &tracer;
-       u32 first = 0;
-       int length;
-
-       if (!t->etb_regs) {
-               printk(KERN_INFO "No tracing hardware found\n");
-               return;
-       }
-
-       if (trace_isrunning(t))
-               trace_stop(t);
-
-       etb_unlock(t);
-
-       length = etb_getdatalen(t);
-
-       if (length == t->etb_bufsz)
-               first = etb_readl(t, ETBR_WRITEADDR);
-
-       etb_writel(t, first, ETBR_READADDR);
-
-       printk(KERN_INFO "Trace buffer contents length: %d\n", length);
-       printk(KERN_INFO "--- ETB buffer begin ---\n");
-       for (; length; length--)
-               printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
-       printk(KERN_INFO "\n--- ETB buffer end ---\n");
-
-       etb_lock(t);
-}
-
-static void sysrq_etm_dump(int key)
-{
-       if (!mutex_trylock(&tracer.mutex)) {
-               printk(KERN_INFO "Tracing hardware busy\n");
-               return;
-       }
-       dev_dbg(tracer.dev, "Dumping ETB buffer\n");
-       etm_dump();
-       mutex_unlock(&tracer.mutex);
-}
-
-static struct sysrq_key_op sysrq_etm_op = {
-       .handler = sysrq_etm_dump,
-       .help_msg = "etm-buffer-dump(v)",
-       .action_msg = "etm",
-};
-
-static int etb_open(struct inode *inode, struct file *file)
-{
-       if (!tracer.etb_regs)
-               return -ENODEV;
-
-       file->private_data = &tracer;
-
-       return nonseekable_open(inode, file);
-}
-
-static ssize_t etb_read(struct file *file, char __user *data,
-               size_t len, loff_t *ppos)
-{
-       int total, i;
-       long length;
-       struct tracectx *t = file->private_data;
-       u32 first = 0;
-       u32 *buf;
-       int wpos;
-       int skip;
-       long wlength;
-       loff_t pos = *ppos;
-
-       mutex_lock(&t->mutex);
-
-       if (trace_isrunning(t)) {
-               length = 0;
-               goto out;
-       }
-
-       etb_unlock(t);
-
-       total = etb_getdatalen(t);
-       if (total == 0 && t->dump_initial_etb)
-               total = t->etb_bufsz;
-       if (total == t->etb_bufsz)
-               first = etb_readl(t, ETBR_WRITEADDR);
-
-       if (pos > total * 4) {
-               skip = 0;
-               wpos = total;
-       } else {
-               skip = (int)pos % 4;
-               wpos = (int)pos / 4;
-       }
-       total -= wpos;
-       first = (first + wpos) % t->etb_bufsz;
-
-       etb_writel(t, first, ETBR_READADDR);
-
-       wlength = min(total, DIV_ROUND_UP(skip + (int)len, 4));
-       length = min(total * 4 - skip, (int)len);
-       buf = vmalloc(wlength * 4);
-
-       dev_dbg(t->dev, "ETB read %ld bytes to %lld from %ld words at %d\n",
-               length, pos, wlength, first);
-       dev_dbg(t->dev, "ETB buffer length: %d\n", total + wpos);
-       dev_dbg(t->dev, "ETB status reg: %x\n", etb_readl(t, ETBR_STATUS));
-       for (i = 0; i < wlength; i++)
-               buf[i] = etb_readl(t, ETBR_READMEM);
-
-       etb_lock(t);
-
-       length -= copy_to_user(data, (u8 *)buf + skip, length);
-       vfree(buf);
-       *ppos = pos + length;
-
-out:
-       mutex_unlock(&t->mutex);
-
-       return length;
-}
-
-static int etb_release(struct inode *inode, struct file *file)
-{
-       /* there's nothing to do here, actually */
-       return 0;
-}
-
-static const struct file_operations etb_fops = {
-       .owner = THIS_MODULE,
-       .read = etb_read,
-       .open = etb_open,
-       .release = etb_release,
-       .llseek = no_llseek,
-};
-
-static struct miscdevice etb_miscdev = {
-       .name = "tracebuf",
-       .minor = 0,
-       .fops = &etb_fops,
-};
-
-static int etb_probe(struct amba_device *dev, const struct amba_id *id)
-{
-       struct tracectx *t = &tracer;
-       int ret = 0;
-
-       ret = amba_request_regions(dev, NULL);
-       if (ret)
-               goto out;
-
-       mutex_lock(&t->mutex);
-       t->etb_regs = ioremap_nocache(dev->res.start, resource_size(&dev->res));
-       if (!t->etb_regs) {
-               ret = -ENOMEM;
-               goto out_release;
-       }
-
-       t->dev = &dev->dev;
-       t->dump_initial_etb = true;
-       amba_set_drvdata(dev, t);
-
-       etb_unlock(t);
-       t->etb_bufsz = etb_readl(t, ETBR_DEPTH);
-       dev_dbg(&dev->dev, "Size: %x\n", t->etb_bufsz);
-
-       /* make sure trace capture is disabled */
-       etb_writel(t, 0, ETBR_CTRL);
-       etb_writel(t, 0x1000, ETBR_FORMATTERCTRL);
-       etb_lock(t);
-       mutex_unlock(&t->mutex);
-
-       etb_miscdev.parent = &dev->dev;
-
-       ret = misc_register(&etb_miscdev);
-       if (ret)
-               goto out_unmap;
-
-       /* Get optional clock. Currently used to select clock source on omap3 */
-       t->emu_clk = clk_get(&dev->dev, "emu_src_ck");
-       if (IS_ERR(t->emu_clk))
-               dev_dbg(&dev->dev, "Failed to obtain emu_src_ck.\n");
-       else
-               clk_enable(t->emu_clk);
-
-       dev_dbg(&dev->dev, "ETB AMBA driver initialized.\n");
-
-out:
-       return ret;
-
-out_unmap:
-       mutex_lock(&t->mutex);
-       amba_set_drvdata(dev, NULL);
-       iounmap(t->etb_regs);
-       t->etb_regs = NULL;
-
-out_release:
-       mutex_unlock(&t->mutex);
-       amba_release_regions(dev);
-
-       return ret;
-}
-
-static int etb_remove(struct amba_device *dev)
-{
-       struct tracectx *t = amba_get_drvdata(dev);
-
-       amba_set_drvdata(dev, NULL);
-
-       iounmap(t->etb_regs);
-       t->etb_regs = NULL;
-
-       if (!IS_ERR(t->emu_clk)) {
-               clk_disable(t->emu_clk);
-               clk_put(t->emu_clk);
-       }
-
-       amba_release_regions(dev);
-
-       return 0;
-}
-
-static struct amba_id etb_ids[] = {
-       {
-               .id     = 0x0003b907,
-               .mask   = 0x0007ffff,
-       },
-       { 0, 0 },
-};
-
-static struct amba_driver etb_driver = {
-       .drv            = {
-               .name   = "etb",
-               .owner  = THIS_MODULE,
-       },
-       .probe          = etb_probe,
-       .remove         = etb_remove,
-       .id_table       = etb_ids,
-};
-
-/* use a sysfs file "trace_running" to start/stop tracing */
-static ssize_t trace_running_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       return sprintf(buf, "%x\n", trace_isrunning(&tracer));
-}
-
-static ssize_t trace_running_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned int value;
-       int ret;
-
-       if (sscanf(buf, "%u", &value) != 1)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       if (!tracer.etb_regs)
-               ret = -ENODEV;
-       else
-               ret = value ? trace_start(&tracer) : trace_stop(&tracer);
-       mutex_unlock(&tracer.mutex);
-
-       return ret ? : n;
-}
-
-static struct kobj_attribute trace_running_attr =
-       __ATTR(trace_running, 0644, trace_running_show, trace_running_store);
-
-static ssize_t trace_info_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       u32 etb_wa, etb_ra, etb_st, etb_fc, etm_ctrl, etm_st;
-       int datalen;
-       int id;
-       int ret;
-
-       mutex_lock(&tracer.mutex);
-       if (tracer.etb_regs) {
-               etb_unlock(&tracer);
-               datalen = etb_getdatalen(&tracer);
-               etb_wa = etb_readl(&tracer, ETBR_WRITEADDR);
-               etb_ra = etb_readl(&tracer, ETBR_READADDR);
-               etb_st = etb_readl(&tracer, ETBR_STATUS);
-               etb_fc = etb_readl(&tracer, ETBR_FORMATTERCTRL);
-               etb_lock(&tracer);
-       } else {
-               etb_wa = etb_ra = etb_st = etb_fc = ~0;
-               datalen = -1;
-       }
-
-       ret = sprintf(buf, "Trace buffer len: %d\nComparator pairs: %d\n"
-                       "ETBR_WRITEADDR:\t%08x\n"
-                       "ETBR_READADDR:\t%08x\n"
-                       "ETBR_STATUS:\t%08x\n"
-                       "ETBR_FORMATTERCTRL:\t%08x\n",
-                       datalen,
-                       tracer.ncmppairs,
-                       etb_wa,
-                       etb_ra,
-                       etb_st,
-                       etb_fc
-                       );
-
-       for (id = 0; id < tracer.etm_regs_count; id++) {
-               etm_unlock(&tracer, id);
-               etm_ctrl = etm_readl(&tracer, id, ETMR_CTRL);
-               etm_st = etm_readl(&tracer, id, ETMR_STATUS);
-               etm_lock(&tracer, id);
-               ret += sprintf(buf + ret, "ETMR_CTRL:\t%08x\n"
-                       "ETMR_STATUS:\t%08x\n",
-                       etm_ctrl,
-                       etm_st
-                       );
-       }
-       mutex_unlock(&tracer.mutex);
-
-       return ret;
-}
-
-static struct kobj_attribute trace_info_attr =
-       __ATTR(trace_info, 0444, trace_info_show, NULL);
-
-static ssize_t trace_mode_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       return sprintf(buf, "%d %d\n",
-                       !!(tracer.flags & TRACER_CYCLE_ACC),
-                       tracer.etm_portsz);
-}
-
-static ssize_t trace_mode_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned int cycacc, portsz;
-
-       if (sscanf(buf, "%u %u", &cycacc, &portsz) != 2)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       if (cycacc)
-               tracer.flags |= TRACER_CYCLE_ACC;
-       else
-               tracer.flags &= ~TRACER_CYCLE_ACC;
-
-       tracer.etm_portsz = portsz & 0x0f;
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-static struct kobj_attribute trace_mode_attr =
-       __ATTR(trace_mode, 0644, trace_mode_show, trace_mode_store);
-
-static ssize_t trace_contextid_size_show(struct kobject *kobj,
-                                        struct kobj_attribute *attr,
-                                        char *buf)
-{
-       /* 0: No context id tracing, 1: One byte, 2: Two bytes, 3: Four bytes */
-       return sprintf(buf, "%d\n", (1 << tracer.etm_contextid_size) >> 1);
-}
-
-static ssize_t trace_contextid_size_store(struct kobject *kobj,
-                                         struct kobj_attribute *attr,
-                                         const char *buf, size_t n)
-{
-       unsigned int contextid_size;
-
-       if (sscanf(buf, "%u", &contextid_size) != 1)
-               return -EINVAL;
-
-       if (contextid_size == 3 || contextid_size > 4)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       tracer.etm_contextid_size = fls(contextid_size);
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-static struct kobj_attribute trace_contextid_size_attr =
-       __ATTR(trace_contextid_size, 0644,
-               trace_contextid_size_show, trace_contextid_size_store);
-
-static ssize_t trace_branch_output_show(struct kobject *kobj,
-                                       struct kobj_attribute *attr,
-                                       char *buf)
-{
-       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_BRANCHOUTPUT));
-}
-
-static ssize_t trace_branch_output_store(struct kobject *kobj,
-                                        struct kobj_attribute *attr,
-                                        const char *buf, size_t n)
-{
-       unsigned int branch_output;
-
-       if (sscanf(buf, "%u", &branch_output) != 1)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       if (branch_output) {
-               tracer.flags |= TRACER_BRANCHOUTPUT;
-               /* Branch broadcasting is incompatible with the return stack */
-               tracer.flags &= ~TRACER_RETURN_STACK;
-       } else {
-               tracer.flags &= ~TRACER_BRANCHOUTPUT;
-       }
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-static struct kobj_attribute trace_branch_output_attr =
-       __ATTR(trace_branch_output, 0644,
-               trace_branch_output_show, trace_branch_output_store);
-
-static ssize_t trace_return_stack_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_RETURN_STACK));
-}
-
-static ssize_t trace_return_stack_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned int return_stack;
-
-       if (sscanf(buf, "%u", &return_stack) != 1)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       if (return_stack) {
-               tracer.flags |= TRACER_RETURN_STACK;
-               /* Return stack is incompatible with branch broadcasting */
-               tracer.flags &= ~TRACER_BRANCHOUTPUT;
-       } else {
-               tracer.flags &= ~TRACER_RETURN_STACK;
-       }
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-static struct kobj_attribute trace_return_stack_attr =
-       __ATTR(trace_return_stack, 0644,
-               trace_return_stack_show, trace_return_stack_store);
-
-static ssize_t trace_timestamp_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       return sprintf(buf, "%d\n", !!(tracer.flags & TRACER_TIMESTAMP));
-}
-
-static ssize_t trace_timestamp_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned int timestamp;
-
-       if (sscanf(buf, "%u", &timestamp) != 1)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       if (timestamp)
-               tracer.flags |= TRACER_TIMESTAMP;
-       else
-               tracer.flags &= ~TRACER_TIMESTAMP;
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-static struct kobj_attribute trace_timestamp_attr =
-       __ATTR(trace_timestamp, 0644,
-               trace_timestamp_show, trace_timestamp_store);
-
-static ssize_t trace_range_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       return sprintf(buf, "%08lx %08lx\n",
-                       tracer.range_start, tracer.range_end);
-}
-
-static ssize_t trace_range_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned long range_start, range_end;
-
-       if (sscanf(buf, "%lx %lx", &range_start, &range_end) != 2)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       tracer.range_start = range_start;
-       tracer.range_end = range_end;
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-
-static struct kobj_attribute trace_range_attr =
-       __ATTR(trace_range, 0644, trace_range_show, trace_range_store);
-
-static ssize_t trace_data_range_show(struct kobject *kobj,
-                                 struct kobj_attribute *attr,
-                                 char *buf)
-{
-       unsigned long range_start;
-       u64 range_end;
-       mutex_lock(&tracer.mutex);
-       range_start = tracer.data_range_start;
-       range_end = tracer.data_range_end;
-       if (!range_end && (tracer.flags & TRACER_TRACE_DATA))
-               range_end = 0x100000000ULL;
-       mutex_unlock(&tracer.mutex);
-       return sprintf(buf, "%08lx %08llx\n", range_start, range_end);
-}
-
-static ssize_t trace_data_range_store(struct kobject *kobj,
-                                  struct kobj_attribute *attr,
-                                  const char *buf, size_t n)
-{
-       unsigned long range_start;
-       u64 range_end;
-
-       if (sscanf(buf, "%lx %llx", &range_start, &range_end) != 2)
-               return -EINVAL;
-
-       mutex_lock(&tracer.mutex);
-       tracer.data_range_start = range_start;
-       tracer.data_range_end = (unsigned long)range_end;
-       if (range_end)
-               tracer.flags |= TRACER_TRACE_DATA;
-       else
-               tracer.flags &= ~TRACER_TRACE_DATA;
-       mutex_unlock(&tracer.mutex);
-
-       return n;
-}
-
-
-static struct kobj_attribute trace_data_range_attr =
-       __ATTR(trace_data_range, 0644,
-               trace_data_range_show, trace_data_range_store);
-
-static int etm_probe(struct amba_device *dev, const struct amba_id *id)
-{
-       struct tracectx *t = &tracer;
-       int ret = 0;
-       void __iomem **new_regs;
-       int new_count;
-       u32 etmccr;
-       u32 etmidr;
-       u32 etmccer = 0;
-       u8 etm_version = 0;
-
-       mutex_lock(&t->mutex);
-       new_count = t->etm_regs_count + 1;
-       new_regs = krealloc(t->etm_regs,
-                               sizeof(t->etm_regs[0]) * new_count, GFP_KERNEL);
-
-       if (!new_regs) {
-               dev_dbg(&dev->dev, "Failed to allocate ETM register array\n");
-               ret = -ENOMEM;
-               goto out;
-       }
-       t->etm_regs = new_regs;
-
-       ret = amba_request_regions(dev, NULL);
-       if (ret)
-               goto out;
-
-       t->etm_regs[t->etm_regs_count] =
-               ioremap_nocache(dev->res.start, resource_size(&dev->res));
-       if (!t->etm_regs[t->etm_regs_count]) {
-               ret = -ENOMEM;
-               goto out_release;
-       }
-
-       amba_set_drvdata(dev, t->etm_regs[t->etm_regs_count]);
-
-       t->flags = TRACER_CYCLE_ACC | TRACER_TRACE_DATA | TRACER_BRANCHOUTPUT;
-       t->etm_portsz = 1;
-       t->etm_contextid_size = 3;
-
-       etm_unlock(t, t->etm_regs_count);
-       (void)etm_readl(t, t->etm_regs_count, ETMMR_PDSR);
-       /* dummy first read */
-       (void)etm_readl(&tracer, t->etm_regs_count, ETMMR_OSSRR);
-
-       etmccr = etm_readl(t, t->etm_regs_count, ETMR_CONFCODE);
-       t->ncmppairs = etmccr & 0xf;
-       if (etmccr & ETMCCR_ETMIDR_PRESENT) {
-               etmidr = etm_readl(t, t->etm_regs_count, ETMR_ID);
-               etm_version = ETMIDR_VERSION(etmidr);
-               if (etm_version >= ETMIDR_VERSION_3_1)
-                       etmccer = etm_readl(t, t->etm_regs_count, ETMR_CCE);
-       }
-       etm_writel(t, t->etm_regs_count, 0x441, ETMR_CTRL);
-       etm_writel(t, t->etm_regs_count, new_count, ETMR_TRACEIDR);
-       etm_lock(t, t->etm_regs_count);
-
-       ret = sysfs_create_file(&dev->dev.kobj,
-                       &trace_running_attr.attr);
-       if (ret)
-               goto out_unmap;
-
-       /* failing to create any of these two is not fatal */
-       ret = sysfs_create_file(&dev->dev.kobj, &trace_info_attr.attr);
-       if (ret)
-               dev_dbg(&dev->dev, "Failed to create trace_info in sysfs\n");
-
-       ret = sysfs_create_file(&dev->dev.kobj, &trace_mode_attr.attr);
-       if (ret)
-               dev_dbg(&dev->dev, "Failed to create trace_mode in sysfs\n");
-
-       ret = sysfs_create_file(&dev->dev.kobj,
-                               &trace_contextid_size_attr.attr);
-       if (ret)
-               dev_dbg(&dev->dev,
-                       "Failed to create trace_contextid_size in sysfs\n");
-
-       ret = sysfs_create_file(&dev->dev.kobj,
-                               &trace_branch_output_attr.attr);
-       if (ret)
-               dev_dbg(&dev->dev,
-                       "Failed to create trace_branch_output in sysfs\n");
-
-       if (etmccer & ETMCCER_RETURN_STACK_IMPLEMENTED) {
-               ret = sysfs_create_file(&dev->dev.kobj,
-                                       &trace_return_stack_attr.attr);
-               if (ret)
-                       dev_dbg(&dev->dev,
-                             "Failed to create trace_return_stack in sysfs\n");
-       }
-
-       if (etmccer & ETMCCER_TIMESTAMPING_IMPLEMENTED) {
-               ret = sysfs_create_file(&dev->dev.kobj,
-                                       &trace_timestamp_attr.attr);
-               if (ret)
-                       dev_dbg(&dev->dev,
-                               "Failed to create trace_timestamp in sysfs\n");
-       }
-
-       ret = sysfs_create_file(&dev->dev.kobj, &trace_range_attr.attr);
-       if (ret)
-               dev_dbg(&dev->dev, "Failed to create trace_range in sysfs\n");
-
-       if (etm_version < ETMIDR_VERSION_PFT_1_0) {
-               ret = sysfs_create_file(&dev->dev.kobj,
-                                       &trace_data_range_attr.attr);
-               if (ret)
-                       dev_dbg(&dev->dev,
-                               "Failed to create trace_data_range in sysfs\n");
-       } else {
-               tracer.flags &= ~TRACER_TRACE_DATA;
-       }
-
-       dev_dbg(&dev->dev, "ETM AMBA driver initialized.\n");
-
-       /* Enable formatter if there are multiple trace sources */
-       if (new_count > 1)
-               t->etb_fc = ETBFF_ENFCONT | ETBFF_ENFTC;
-
-       t->etm_regs_count = new_count;
-
-out:
-       mutex_unlock(&t->mutex);
-       return ret;
-
-out_unmap:
-       amba_set_drvdata(dev, NULL);
-       iounmap(t->etm_regs[t->etm_regs_count]);
-
-out_release:
-       amba_release_regions(dev);
-
-       mutex_unlock(&t->mutex);
-       return ret;
-}
-
-static int etm_remove(struct amba_device *dev)
-{
-       int i;
-       struct tracectx *t = &tracer;
-       void __iomem    *etm_regs = amba_get_drvdata(dev);
-
-       sysfs_remove_file(&dev->dev.kobj, &trace_running_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_info_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_mode_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_range_attr.attr);
-       sysfs_remove_file(&dev->dev.kobj, &trace_data_range_attr.attr);
-
-       amba_set_drvdata(dev, NULL);
-
-       mutex_lock(&t->mutex);
-       for (i = 0; i < t->etm_regs_count; i++)
-               if (t->etm_regs[i] == etm_regs)
-                       break;
-       for (; i < t->etm_regs_count - 1; i++)
-               t->etm_regs[i] = t->etm_regs[i + 1];
-       t->etm_regs_count--;
-       if (!t->etm_regs_count) {
-               kfree(t->etm_regs);
-               t->etm_regs = NULL;
-       }
-       mutex_unlock(&t->mutex);
-
-       iounmap(etm_regs);
-       amba_release_regions(dev);
-
-       return 0;
-}
-
-static struct amba_id etm_ids[] = {
-       {
-               .id     = 0x0003b921,
-               .mask   = 0x0007ffff,
-       },
-       {
-               .id     = 0x0003b950,
-               .mask   = 0x0007ffff,
-       },
-       { 0, 0 },
-};
-
-static struct amba_driver etm_driver = {
-       .drv            = {
-               .name   = "etm",
-               .owner  = THIS_MODULE,
-       },
-       .probe          = etm_probe,
-       .remove         = etm_remove,
-       .id_table       = etm_ids,
-};
-
-static int __init etm_init(void)
-{
-       int retval;
-
-       mutex_init(&tracer.mutex);
-
-       retval = amba_driver_register(&etb_driver);
-       if (retval) {
-               printk(KERN_ERR "Failed to register etb\n");
-               return retval;
-       }
-
-       retval = amba_driver_register(&etm_driver);
-       if (retval) {
-               amba_driver_unregister(&etb_driver);
-               printk(KERN_ERR "Failed to probe etm\n");
-               return retval;
-       }
-
-       /* not being able to install this handler is not fatal */
-       (void)register_sysrq_key('v', &sysrq_etm_op);
-
-       return 0;
-}
-
-device_initcall(etm_init);
-
index 1b803117ed91c212a6f4077d6c87b58c0e219f44..7eee611b6ee59d4cecbb49a6e5f491d918c23863 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/smp.h>
 #include <linux/cpu_pm.h>
+#include <linux/coresight.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
@@ -36,7 +37,6 @@
 #include <asm/hw_breakpoint.h>
 #include <asm/kdebug.h>
 #include <asm/traps.h>
-#include <asm/hardware/coresight.h>
 
 /* Breakpoint currently in use for each BRP. */
 static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
@@ -975,7 +975,7 @@ static void reset_ctrl_regs(void *unused)
         * Unconditionally clear the OS lock by writing a value
         * other than CS_LAR_KEY to the access register.
         */
-       ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);
+       ARM_DBG_WRITE(c1, c0, 4, ~CORESIGHT_UNLOCK);
        isb();
 
        /*
index e8edcaa0e4323c304d2b7a0cda4105eb3a1088f8..a57cc5d33540f156fb0b074734ef11af898b04d8 100644 (file)
@@ -51,10 +51,11 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
        return (cyc * mult) >> shift;
 }
 
-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace sched_clock_32(void)
 {
        u64 epoch_ns;
        u32 epoch_cyc;
+       u32 cyc;
 
        if (cd.suspended)
                return cd.epoch_ns;
@@ -73,7 +74,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
                smp_rmb();
        } while (epoch_cyc != cd.epoch_cyc_copy);
 
-       return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+       cyc = read_sched_clock();
+       cyc = (cyc - epoch_cyc) & sched_clock_mask;
+       return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
 }
 
 /*
@@ -165,12 +168,6 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
        pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-static unsigned long long notrace sched_clock_32(void)
-{
-       u32 cyc = read_sched_clock();
-       return cyc_to_sched_clock(cyc, sched_clock_mask);
-}
-
 unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
 
 unsigned long long notrace sched_clock(void)
index 8278960066c33a86c4218be10d54a68b0b5c59c3..3ee701f1d38e1c528855eda600133735ebb5a3b9 100644 (file)
@@ -141,6 +141,29 @@ int __init coherency_init(void)
 {
        struct device_node *np;
 
+       /*
+        * The coherency fabric is needed:
+        * - For coherency between processors on Armada XP, so only
+        *   when SMP is enabled.
+        * - For coherency between the processor and I/O devices, but
+        *   this coherency requires many pre-requisites (write
+        *   allocate cache policy, shareable pages, SMP bit set) that
+        *   are only meant in SMP situations.
+        *
+        * Note that this means that on Armada 370, there is currently
+        * no way to use hardware I/O coherency, because even when
+        * CONFIG_SMP is enabled, is_smp() returns false due to the
+        * Armada 370 being a single-core processor. To lift this
+        * limitation, we would have to find a way to make the cache
+        * policy set to write-allocate (on all Armada SoCs), and to
+        * set the shareable attribute in page tables (on all Armada
+        * SoCs except the Armada 370). Unfortunately, such decisions
+        * are taken very early in the kernel boot process, at a point
+        * where we don't know yet on which SoC we are running.
+        */
+       if (!is_smp())
+               return 0;
+
        np = of_find_matching_node(NULL, of_coherency_table);
        if (np) {
                pr_info("Initializing Coherency fabric\n");
index f49cd51e162afcc6055d9143ced540d8fa888ab5..42afc6682d10ffdeaad526b3ea9192777126be86 100644 (file)
@@ -394,14 +394,6 @@ config MACH_OMAP4_PANDA
        select OMAP_PACKAGE_CBS
        select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config OMAP3_EMU
-       bool "OMAP3 debugging peripherals"
-       depends on ARCH_OMAP3
-       select ARM_AMBA
-       select OC_ETM
-       help
-         Say Y here to enable debugging hardware of omap3
-
 config OMAP3_SDRC_AC_TIMING
        bool "Enable SDRC AC timing register changes"
        depends on ARCH_OMAP3
index 55a9d677768328d552cec82e6229402a5696a8b2..e2f7210a8eabf60213bbedcb922a40526e978135 100644 (file)
@@ -200,7 +200,6 @@ obj-$(CONFIG_SOC_AM33XX)            += omap_hwmod_33xx_data.o
 obj-$(CONFIG_ARCH_OMAP4)               += omap_hwmod_44xx_data.o
 
 # EMU peripherals
-obj-$(CONFIG_OMAP3_EMU)                        += emu.o
 obj-$(CONFIG_HW_PERF_EVENTS)           += pmu.o
 
 obj-$(CONFIG_OMAP_MBOX_FWK)            += mailbox_mach.o
diff --git a/arch/arm/mach-omap2/emu.c b/arch/arm/mach-omap2/emu.c
deleted file mode 100644 (file)
index cbeaca2..0000000
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * emu.c
- *
- * ETM and ETB CoreSight components' resources as found in OMAP3xxx.
- *
- * Copyright (C) 2009 Nokia Corporation.
- * Alexander Shishkin
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/amba/bus.h>
-#include <linux/io.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-
-#include "soc.h"
-#include "iomap.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Alexander Shishkin");
-
-/* Cortex CoreSight components within omap3xxx EMU */
-#define ETM_BASE       (L4_EMU_34XX_PHYS + 0x10000)
-#define DBG_BASE       (L4_EMU_34XX_PHYS + 0x11000)
-#define ETB_BASE       (L4_EMU_34XX_PHYS + 0x1b000)
-#define DAPCTL         (L4_EMU_34XX_PHYS + 0x1d000)
-
-static AMBA_APB_DEVICE(omap3_etb, "etb", 0x000bb907, ETB_BASE, { }, NULL);
-static AMBA_APB_DEVICE(omap3_etm, "etm", 0x102bb921, ETM_BASE, { }, NULL);
-
-static int __init emu_init(void)
-{
-       if (!cpu_is_omap34xx())
-               return -ENODEV;
-
-       amba_device_register(&omap3_etb_device, &iomem_resource);
-       amba_device_register(&omap3_etm_device, &iomem_resource);
-
-       return 0;
-}
-
-omap_subsys_initcall(emu_init);
index 9de8940c29e6247d6c76e8d7daee06c43c1b1641..bb7529dea84193e87d38b2b78ba3ff3a932bef17 100644 (file)
@@ -268,37 +268,19 @@ static void *
 __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
        const void *caller)
 {
-       struct vm_struct *area;
-       unsigned long addr;
-
        /*
         * DMA allocation can be mapped to user space, so lets
         * set VM_USERMAP flags too.
         */
-       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                 caller);
-       if (!area)
-               return NULL;
-       addr = (unsigned long)area->addr;
-       area->phys_addr = __pfn_to_phys(page_to_pfn(page));
-
-       if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
-               vunmap((void *)addr);
-               return NULL;
-       }
-       return (void *)addr;
+       return dma_common_contiguous_remap(page, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP,
+                       prot, caller);
 }
 
 static void __dma_free_remap(void *cpu_addr, size_t size)
 {
-       unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
-       struct vm_struct *area = find_vm_area(cpu_addr);
-       if (!area || (area->flags & flags) != flags) {
-               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
-               return;
-       }
-       unmap_kernel_range((unsigned long)cpu_addr, size);
-       vunmap(cpu_addr);
+       dma_common_free_remap(cpu_addr, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP);
 }
 
 #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
@@ -1223,29 +1205,8 @@ static void *
 __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
                    const void *caller)
 {
-       unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-       struct vm_struct *area;
-       unsigned long p;
-
-       area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
-                                 caller);
-       if (!area)
-               return NULL;
-
-       area->pages = pages;
-       area->nr_pages = nr_pages;
-       p = (unsigned long)area->addr;
-
-       for (i = 0; i < nr_pages; i++) {
-               phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
-               if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
-                       goto err;
-               p += PAGE_SIZE;
-       }
-       return area->addr;
-err:
-       unmap_kernel_range((unsigned long)area->addr, size);
-       vunmap(area->addr);
+       return dma_common_pages_remap(pages, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
        return NULL;
 }
 
@@ -1442,8 +1403,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
        }
 
        if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
-               unmap_kernel_range((unsigned long)cpu_addr, size);
-               vunmap(cpu_addr);
+               dma_common_free_remap(cpu_addr, size,
+                       VM_ARM_DMA_CONSISTENT | VM_USERMAP);
        }
 
        __iommu_remove_mapping(dev, handle, size);
index 2f95b7434bd3e09bd485772ccf856d8029f5a90c..aceeb91c71de4bd7a73ddf0b17a75447d42a2d30 100644 (file)
@@ -1,6 +1,7 @@
 config ARM64
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+       select ARCH_HAS_OPP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_HAS_OPP
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -18,6 +19,7 @@ config ARM64
        select COMMON_CLK
        select CPU_PM if (SUSPEND || CPU_IDLE)
        select DCACHE_WORD_ACCESS
+       select GENERIC_ALLOCATOR
        select GENERIC_CLOCKEVENTS
        select GENERIC_CLOCKEVENTS_BROADCAST if SMP
        select GENERIC_CPU_AUTOPROBE
@@ -36,6 +38,7 @@ config ARM64
        select HAVE_ARCH_SECCOMP_FILTER
        select HAVE_ARCH_TRACEHOOK
        select HAVE_C_RECORDMCOUNT
+       select HAVE_CC_STACKPROTECTOR
        select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DEBUG_KMEMLEAK
        select HAVE_DMA_API_DEBUG
index e1b0c4601b3e88fd0035419ec970241acd22cc77..bb55717c8dad70f205123ddfcf4d214253731b5c 100644 (file)
@@ -44,4 +44,15 @@ config PID_IN_CONTEXTIDR
          instructions during context switch. Say Y here only if you are
          planning to use hardware trace tools with this kernel.
 
+config DEBUG_SET_MODULE_RONX
+        bool "Set loadable kernel module data as NX and text as RO"
+        depends on MODULES
+        help
+          This option helps catch unintended modifications to loadable
+          kernel module's text and read-only data. It also prevents execution
+          of module data. Such protection may interfere with run-time code
+          patching and dynamic kernel tracing - and they might also protect
+          against certain classes of kernel exploits.
+          If in doubt, say "N".
+
 endmenu
index eb448eae7c4c541793c6f251f3a926b442d52b76..6e883be233865edf26b7166875edcea00669109b 100644 (file)
@@ -39,7 +39,11 @@ CHECKFLAGS   += -D__aarch64__
 head-y         := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
+ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+else
 TEXT_OFFSET := 0x00080000
+endif
 
 export TEXT_OFFSET GZFLAGS
 
index f2defe1c380c07482449e995fdb1d89588b550f9..689b6379188c112ac2441833e165dcb63b0b4515 100644 (file)
@@ -148,4 +148,8 @@ static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
 
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
 #endif
index 152413076503ba4731ce13d693ae2fa70d197795..47dfa31ad71a531e3bfed91c9ea0c519a44fee58 100644 (file)
@@ -28,6 +28,8 @@ struct device_node;
  *             enable-method property.
  * @cpu_init:  Reads any data necessary for a specific enable-method from the
  *             devicetree, for a given cpu node and proposed logical id.
+ * @cpu_init_idle: Reads any data necessary to initialize CPU idle states from
+ *             devicetree, for a given cpu node and proposed logical id.
  * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
  *             mechanism for doing so, tests whether it is possible to boot
  *             the given CPU.
@@ -39,6 +41,7 @@ struct device_node;
  *             from the cpu to be killed.
  * @cpu_die:   Makes a cpu leave the kernel. Must not fail. Called from the
  *             cpu being killed.
+ * @cpu_kill:  Ensures a cpu has left the kernel. Called from another cpu.
  * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
  *               to wrong parameters or error conditions. Called from the
  *               CPU being suspended. Must be called with IRQs disabled.
@@ -46,12 +49,14 @@ struct device_node;
 struct cpu_operations {
        const char      *name;
        int             (*cpu_init)(struct device_node *, unsigned int);
+       int             (*cpu_init_idle)(struct device_node *, unsigned int);
        int             (*cpu_prepare)(unsigned int);
        int             (*cpu_boot)(unsigned int);
        void            (*cpu_postboot)(void);
 #ifdef CONFIG_HOTPLUG_CPU
        int             (*cpu_disable)(unsigned int cpu);
        void            (*cpu_die)(unsigned int cpu);
+       int             (*cpu_kill)(unsigned int cpu);
 #endif
 #ifdef CONFIG_ARM64_CPU_SUSPEND
        int             (*cpu_suspend)(unsigned long);
diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h
new file mode 100644 (file)
index 0000000..b52a993
--- /dev/null
@@ -0,0 +1,13 @@
+#ifndef __ASM_CPUIDLE_H
+#define __ASM_CPUIDLE_H
+
+#ifdef CONFIG_CPU_IDLE
+extern int cpu_init_idle(unsigned int cpu);
+#else
+static inline int cpu_init_idle(unsigned int cpu)
+{
+       return -EOPNOTSUPP;
+}
+#endif
+
+#endif
index 27f54a7cc81b3b0d524b33afa20ef69f25b6ae29..ec5e41c234291b069465cd49699d57e51097f53f 100644 (file)
        __val;                                                          \
 })
 
+#define MIDR_REVISION_MASK     0xf
+#define MIDR_REVISION(midr)    ((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT     4
+#define MIDR_PARTNUM_MASK      (0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr)     \
+       (((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT        16
+#define MIDR_ARCHITECTURE_MASK (0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr)        \
+       (((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT     20
+#define MIDR_VARIANT_MASK      (0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr)     \
+       (((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT 24
+#define MIDR_IMPLEMENTOR_MASK  (0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr) \
+       (((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
 #define ARM_CPU_IMP_ARM                0x41
 #define ARM_CPU_IMP_APM                0x50
 
-#define ARM_CPU_PART_AEM_V8    0xD0F0
-#define ARM_CPU_PART_FOUNDATION        0xD000
-#define ARM_CPU_PART_CORTEX_A53        0xD030
-#define ARM_CPU_PART_CORTEX_A57        0xD070
+#define ARM_CPU_PART_AEM_V8    0xD0F
+#define ARM_CPU_PART_FOUNDATION        0xD00
+#define ARM_CPU_PART_CORTEX_A57        0xD07
+#define ARM_CPU_PART_CORTEX_A53        0xD03
 
-#define APM_CPU_PART_POTENZA   0x0000
+#define APM_CPU_PART_POTENZA   0x000
 
 #ifndef __ASSEMBLY__
 
@@ -65,12 +84,12 @@ static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
-       return (read_cpuid_id() & 0xFF000000) >> 24;
+       return MIDR_IMPLEMENTOR(read_cpuid_id());
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
 {
-       return (read_cpuid_id() & 0xFFF0);
+       return MIDR_PARTNUM(read_cpuid_id());
 }
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
index 00a41aab4a37f5b74ca13769b6a57ab8b1145db0..fa6a0c5a8de3e2cc70cb7a5372fc1203b89af383 100644 (file)
@@ -23,8 +23,6 @@
 
 #include <asm-generic/dma-coherent.h>
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops coherent_swiotlb_dma_ops;
index a6331e6a92b5cdec29ea3d31c5b529c6a8294f19..e84ca637af6bd214edaea8801db7cf3d55532ca8 100644 (file)
 
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
- * image. The idmap only requires a pgd and a next level table to (section) map
- * the kernel, while the swapper also maps the FDT and requires an additional
- * table to map an early UART. See __create_page_tables for more information.
+ * image. Both require a pgd and a next level table to (section) map the
+ * kernel. The the swapper also maps the FDT (see __create_page_tables for
+ * more information).
  */
-#define SWAPPER_DIR_SIZE       (3 * PAGE_SIZE)
+#define SWAPPER_DIR_SIZE       (2 * PAGE_SIZE)
 #define IDMAP_DIR_SIZE         (2 * PAGE_SIZE)
 
 #ifndef __ASSEMBLY__
index 453a179469a34fd9ffa72ec723a6bada988d4e73..5279e573338690afbee3c7e593858baa9f7b9e42 100644 (file)
@@ -26,13 +26,13 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
        unsigned long off;
-       register unsigned long *sp asm ("sp");
 
        /*
         * We want to allow caching the value, so avoid using volatile and
         * instead use a fake stack read to hazard against barrier().
         */
-       asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+       asm("mrs %0, tpidr_el1" : "=r" (off) :
+               "Q" (*(const unsigned long *)current_stack_pointer));
 
        return off;
 }
index fb4b26509276d8f519a50701bb7956408d398ebb..d86b4d4df1f0a3e5ae1572ca99ee3453bec24f32 100644 (file)
@@ -141,46 +141,51 @@ extern struct page *empty_zero_page;
 #define pte_valid_not_user(pte) \
        ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
 
-static inline pte_t pte_wrprotect(pte_t pte)
+static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
 {
-       pte_val(pte) &= ~PTE_WRITE;
+       pte_val(pte) &= ~pgprot_val(prot);
        return pte;
 }
 
-static inline pte_t pte_mkwrite(pte_t pte)
+static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
 {
-       pte_val(pte) |= PTE_WRITE;
+       pte_val(pte) |= pgprot_val(prot);
        return pte;
 }
 
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+       return clear_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+       return set_pte_bit(pte, __pgprot(PTE_WRITE));
+}
+
 static inline pte_t pte_mkclean(pte_t pte)
 {
-       pte_val(pte) &= ~PTE_DIRTY;
-       return pte;
+       return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkdirty(pte_t pte)
 {
-       pte_val(pte) |= PTE_DIRTY;
-       return pte;
+       return set_pte_bit(pte, __pgprot(PTE_DIRTY));
 }
 
 static inline pte_t pte_mkold(pte_t pte)
 {
-       pte_val(pte) &= ~PTE_AF;
-       return pte;
+       return clear_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkyoung(pte_t pte)
 {
-       pte_val(pte) |= PTE_AF;
-       return pte;
+       return set_pte_bit(pte, __pgprot(PTE_AF));
 }
 
 static inline pte_t pte_mkspecial(pte_t pte)
 {
-       pte_val(pte) |= PTE_SPECIAL;
-       return pte;
+       return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
@@ -257,7 +262,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
index 0c657bb54597e40337fa6ebb767a4de16d6424bb..0d3b0b17e92757b77cf1370c9d5014484fdd3118 100644 (file)
@@ -34,6 +34,8 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
 extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
+void cpu_soft_restart(phys_addr_t cpu_reset,
+               unsigned long addr) __attribute__((noreturn));
 
 #include <asm/memory.h>
 
index 9a4b663670ff5010096e46cbae36bad4e929bab6..e5312ea0ec1a59bdd92934926da81155a6ce3e12 100644 (file)
 #ifndef __ASM_PSCI_H
 #define __ASM_PSCI_H
 
-struct cpuidle_driver;
-void psci_init(void);
-
-int __init psci_dt_register_idle_states(struct cpuidle_driver *,
-                                       struct device_node *[]);
+int psci_init(void);
 
 #endif /* __ASM_PSCI_H */
index 1be62bcb9d474e42e6b457eaecc4272bfc37d2b1..74a9d301819fbfa1128bcce879b95ae9c24a2e76 100644 (file)
@@ -17,7 +17,7 @@
 #define __ASM_SPARSEMEM_H
 
 #ifdef CONFIG_SPARSEMEM
-#define MAX_PHYSMEM_BITS       40
+#define MAX_PHYSMEM_BITS       48
 #define SECTION_SIZE_BITS      30
 #endif
 
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h
new file mode 100644 (file)
index 0000000..fe5e287
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function.  The pattern is called stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM.  This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef __ASM_STACKPROTECTOR_H
+#define __ASM_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+       unsigned long canary;
+
+       /* Try to get a semi random initial value. */
+       get_random_bytes(&canary, sizeof(canary));
+       canary ^= LINUX_VERSION_CODE;
+
+       current->stack_canary = canary;
+       __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
index 3ee8b303d9a975121b9bf4178b918c1be677ab5c..64d2d4884a9db1e663b1d025eca2a8db3bbdcce4 100644 (file)
@@ -22,6 +22,18 @@ extern char *strrchr(const char *, int c);
 #define __HAVE_ARCH_STRCHR
 extern char *strchr(const char *, int c);
 
+#define __HAVE_ARCH_STRCMP
+extern int strcmp(const char *, const char *);
+
+#define __HAVE_ARCH_STRNCMP
+extern int strncmp(const char *, const char *, __kernel_size_t);
+
+#define __HAVE_ARCH_STRLEN
+extern __kernel_size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRNLEN
+extern __kernel_size_t strnlen(const char *, __kernel_size_t);
+
 #define __HAVE_ARCH_MEMCPY
 extern void *memcpy(void *, const void *, __kernel_size_t);
 
@@ -34,4 +46,7 @@ extern void *memchr(const void *, int, __kernel_size_t);
 #define __HAVE_ARCH_MEMSET
 extern void *memset(void *, int, __kernel_size_t);
 
+#define __HAVE_ARCH_MEMCMP
+extern int memcmp(const void *, const void *, size_t);
+
 #endif
index 205d81b170235f3583856fc248405e66cbc90b5b..e4a98b4c2020c9ba936f8529da70370021c114aa 100644 (file)
@@ -68,6 +68,11 @@ struct thread_info {
 #define init_thread_info       (init_thread_union.thread_info)
 #define init_stack             (init_thread_union.stack)
 
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
 /*
  * how to get the thread information struct from C
  */
@@ -75,8 +80,8 @@ static inline struct thread_info *current_thread_info(void) __attribute_const__;
 
 static inline struct thread_info *current_thread_info(void)
 {
-       register unsigned long sp asm ("sp");
-       return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
+       return (struct thread_info *)
+               (current_stack_pointer & ~(THREAD_SIZE - 1));
 }
 
 #define thread_saved_pc(tsk)   \
index 3796ea6bb734012b4ab0c31f47dc1d066948d889..73f0ce570fb31caa23fe7da9b99edd68d4ef6679 100644 (file)
@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma,
        dsb(ish);
 }
 
-static inline void flush_tlb_range(struct vm_area_struct *vma,
-                                       unsigned long start, unsigned long end)
+static inline void __flush_tlb_range(struct vm_area_struct *vma,
+                                    unsigned long start, unsigned long end)
 {
        unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48;
        unsigned long addr;
@@ -112,7 +112,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
        dsb(ish);
 }
 
-static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
        unsigned long addr;
        start >>= 12;
@@ -125,6 +125,29 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end
        isb();
 }
 
+/*
+ * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
+ * necessarily a performance improvement.
+ */
+#define MAX_TLB_RANGE  (1024UL << PAGE_SHIFT)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       if ((end - start) <= MAX_TLB_RANGE)
+               __flush_tlb_range(vma, start, end);
+       else
+               flush_tlb_mm(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if ((end - start) <= MAX_TLB_RANGE)
+               __flush_tlb_kernel_range(start, end);
+       else
+               flush_tlb_all();
+}
+
 /*
  * On AArch64, the cache coherency is handled via the set_pte_at() function.
  */
index 23e19f94d44911b05ed175bf65084720932fe450..58895f0ba9385f50d7c3d0b44c0be08b90dfd552 100644 (file)
@@ -31,6 +31,7 @@ arm64-obj-$(CONFIG_EARLY_PRINTK)      += early_printk.o
 arm64-obj-$(CONFIG_ARM_CPU_TOPOLOGY)  += topology.o
 arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND)  += sleep.o suspend.o
 arm64-obj-$(CONFIG_JUMP_LABEL)         += jump_label.o
+arm64-obj-$(CONFIG_CPU_IDLE)           += cpuidle.o
 arm64-obj-$(CONFIG_KGDB)               += kgdb.o
 arm64-obj-$(CONFIG_EFI)                        += efi.o efi-stub.o efi-entry.o
 
index 7f0512feaa13873a56de34cad0482f515c0e8655..a85843ddbde8892e456f29636fed7d7a66b03825 100644 (file)
@@ -44,10 +44,15 @@ EXPORT_SYMBOL(memstart_addr);
        /* string / mem functions */
 EXPORT_SYMBOL(strchr);
 EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strnlen);
 EXPORT_SYMBOL(memset);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memmove);
 EXPORT_SYMBOL(memchr);
+EXPORT_SYMBOL(memcmp);
 
        /* atomic bitops */
 EXPORT_SYMBOL(set_bit);
index 04efea8fe4bcde40f1fda6df56f24053e017d7a0..24fb449bc6aa31dca4aa9b4f5dd15685106113e1 100644 (file)
@@ -30,8 +30,8 @@ const struct cpu_operations *cpu_ops[NR_CPUS];
 static const struct cpu_operations *supported_cpu_ops[] __initconst = {
 #ifdef CONFIG_SMP
        &smp_spin_table_ops,
-       &cpu_psci_ops,
 #endif
+       &cpu_psci_ops,
        NULL,
 };
 
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
new file mode 100644 (file)
index 0000000..19d17f5
--- /dev/null
@@ -0,0 +1,31 @@
+/*
+ * ARM64 CPU idle arch support
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <asm/cpuidle.h>
+#include <asm/cpu_ops.h>
+
+int cpu_init_idle(unsigned int cpu)
+{
+       int ret = -EOPNOTSUPP;
+       struct device_node *cpu_node = of_cpu_device_node_get(cpu);
+
+       if (!cpu_node)
+               return -ENODEV;
+
+       if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_init_idle)
+               ret = cpu_ops[cpu]->cpu_init_idle(cpu_node, cpu);
+
+       of_node_put(cpu_node);
+       return ret;
+}
index b33051d501e6d5b4e3768cac94a453097e85185b..e3b37ee9076af9c5d05cb0e0c626347358ffc85a 100644 (file)
@@ -303,20 +303,20 @@ static int brk_handler(unsigned long addr, unsigned int esr,
 {
        siginfo_t info;
 
-       if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
-               return 0;
+       if (user_mode(regs)) {
+               info = (siginfo_t) {
+                       .si_signo = SIGTRAP,
+                       .si_errno = 0,
+                       .si_code  = TRAP_BRKPT,
+                       .si_addr  = (void __user *)instruction_pointer(regs),
+               };
 
-       if (!user_mode(regs))
+               force_sig_info(SIGTRAP, &info, current);
+       } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+               pr_warning("Unexpected kernel BRK exception at EL1\n");
                return -EFAULT;
+       }
 
-       info = (siginfo_t) {
-               .si_signo = SIGTRAP,
-               .si_errno = 0,
-               .si_code  = TRAP_BRKPT,
-               .si_addr  = (void __user *)instruction_pointer(regs),
-       };
-
-       force_sig_info(SIGTRAP, &info, current);
        return 0;
 }
 
index bbc9fe1658fa7a9f064a6b63e3e9698242cb6dd2..6254f4a28eedd0f9211a7ad0e18a2459ee066f2b 100644 (file)
 
 #define KERNEL_RAM_VADDR       (PAGE_OFFSET + TEXT_OFFSET)
 
-#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
-#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#if (TEXT_OFFSET & 0xfff) != 0
+#error TEXT_OFFSET must be at least 4KB aligned
+#elif (PAGE_OFFSET & 0x1fffff) != 0
+#error PAGE_OFFSET must be at least 2MB aligned
+#elif TEXT_OFFSET > 0x1fffff
+#error TEXT_OFFSET must be less than 2MB
 #endif
 
        .macro  pgtbl, ttb0, ttb1, virt_to_phys
@@ -349,6 +353,8 @@ ENTRY(set_cpu_boot_mode_flag)
        b.ne    1f
        add     x1, x1, #4
 1:     str     w20, [x1]                       // This CPU has booted in EL1
+       dmb     sy
+       dc      ivac, x1                        // Invalidate potentially stale cache line
        ret
 ENDPROC(set_cpu_boot_mode_flag)
 
@@ -366,10 +372,6 @@ ENTRY(__boot_cpu_mode)
        .long   0
        .popsection
 
-       .align  3
-2:     .quad   .
-       .quad   PAGE_OFFSET
-
 #ifdef CONFIG_SMP
        .align  3
 1:     .quad   .
@@ -594,13 +596,6 @@ __create_page_tables:
        sub     x6, x6, #1                      // inclusive range
        create_block_map x0, x7, x3, x5, x6
 1:
-       /*
-        * Create the pgd entry for the fixed mappings.
-        */
-       ldr     x5, =FIXADDR_TOP                // Fixed mapping virtual address
-       add     x0, x26, #2 * PAGE_SIZE         // section table address
-       create_pgd_entry x26, x0, x5, x6, x7
-
        /*
         * Since the page tables have been populated with non-cacheable
         * accesses (MMU disabled), invalidate the idmap and swapper page
index 473e5dbf8f39a39e8eaa7a0740e54ee4d6bacb59..dfa6e3e74fddec289649c1baba921378f521f611 100644 (file)
@@ -105,7 +105,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-       else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+       else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);
 
        return ret;
index c419e4814ba7eb2290dd98041b5c76ef88a3c6bb..809f4d2c234629805fb330b5c48b8ea70f1b7282 100644 (file)
 #include <asm/processor.h>
 #include <asm/stacktrace.h>
 
-static void setup_restart(void)
-{
-       /*
-        * Tell the mm system that we are going to reboot -
-        * we may need it to insert some 1:1 mappings so that
-        * soft boot works.
-        */
-       setup_mm_for_reboot();
-
-       /* Clean and invalidate caches */
-       flush_cache_all();
-
-       /* Turn D-cache off */
-       cpu_cache_off();
-
-       /* Push out any further dirty data, and ensure cache is empty */
-       flush_cache_all();
-}
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
 
 void soft_restart(unsigned long addr)
 {
-       typedef void (*phys_reset_t)(unsigned long);
-       phys_reset_t phys_reset;
-
-       setup_restart();
-
-       /* Switch to the identity mapping */
-       phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
-       phys_reset(addr);
-
+       setup_mm_for_reboot();
+       cpu_soft_restart(virt_to_phys(cpu_reset), addr);
        /* Should never get here */
        BUG();
 }
index 0e32ab453e5b022695829ab44308fa301f621082..4d827dd6219933f6160f3aec70ad832e169a48cc 100644 (file)
 #include <linux/of.h>
 #include <linux/smp.h>
 #include <linux/slab.h>
+#include <linux/reboot.h>
+#include <linux/pm.h>
+#include <linux/delay.h>
+#include <uapi/linux/psci.h>
 
 #include <asm/compiler.h>
 #include <asm/cpu_ops.h>
@@ -27,6 +31,7 @@
 #include <asm/psci.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
+#include <asm/system_misc.h>
 
 #define PSCI_POWER_STATE_TYPE_STANDBY          0
 #define PSCI_POWER_STATE_TYPE_POWER_DOWN       1
@@ -43,17 +48,23 @@ struct psci_operations {
        int (*cpu_off)(struct psci_power_state state);
        int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
        int (*migrate)(unsigned long cpuid);
+       int (*affinity_info)(unsigned long target_affinity,
+                       unsigned long lowest_affinity_level);
+       int (*migrate_info_type)(void);
 };
 
 static struct psci_operations psci_ops;
 
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
+typedef int (*psci_initcall_t)(const struct device_node *);
 
 enum psci_function {
        PSCI_FN_CPU_SUSPEND,
        PSCI_FN_CPU_ON,
        PSCI_FN_CPU_OFF,
        PSCI_FN_MIGRATE,
+       PSCI_FN_AFFINITY_INFO,
+       PSCI_FN_MIGRATE_INFO_TYPE,
        PSCI_FN_MAX,
 };
 
@@ -61,53 +72,41 @@ static DEFINE_PER_CPU_READ_MOSTLY(struct psci_power_state *, psci_power_state);
 
 static u32 psci_function_id[PSCI_FN_MAX];
 
-#define PSCI_RET_SUCCESS               0
-#define PSCI_RET_EOPNOTSUPP            -1
-#define PSCI_RET_EINVAL                        -2
-#define PSCI_RET_EPERM                 -3
-
 static int psci_to_linux_errno(int errno)
 {
        switch (errno) {
        case PSCI_RET_SUCCESS:
                return 0;
-       case PSCI_RET_EOPNOTSUPP:
+       case PSCI_RET_NOT_SUPPORTED:
                return -EOPNOTSUPP;
-       case PSCI_RET_EINVAL:
+       case PSCI_RET_INVALID_PARAMS:
                return -EINVAL;
-       case PSCI_RET_EPERM:
+       case PSCI_RET_DENIED:
                return -EPERM;
        };
 
        return -EINVAL;
 }
 
-#define PSCI_POWER_STATE_ID_MASK       0xffff
-#define PSCI_POWER_STATE_ID_SHIFT      0
-#define PSCI_POWER_STATE_TYPE_MASK     0x1
-#define PSCI_POWER_STATE_TYPE_SHIFT    16
-#define PSCI_POWER_STATE_AFFL_MASK     0x3
-#define PSCI_POWER_STATE_AFFL_SHIFT    24
-
 static u32 psci_power_state_pack(struct psci_power_state state)
 {
-       return  ((state.id & PSCI_POWER_STATE_ID_MASK)
-                       << PSCI_POWER_STATE_ID_SHIFT)   |
-               ((state.type & PSCI_POWER_STATE_TYPE_MASK)
-                       << PSCI_POWER_STATE_TYPE_SHIFT) |
-               ((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK)
-                       << PSCI_POWER_STATE_AFFL_SHIFT);
+       return ((state.id << PSCI_0_2_POWER_STATE_ID_SHIFT)
+                       & PSCI_0_2_POWER_STATE_ID_MASK) |
+               ((state.type << PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+                & PSCI_0_2_POWER_STATE_TYPE_MASK) |
+               ((state.affinity_level << PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+                & PSCI_0_2_POWER_STATE_AFFL_MASK);
 }
 
 static void psci_power_state_unpack(u32 power_state,
                                    struct psci_power_state *state)
 {
-       state->id = (power_state >> PSCI_POWER_STATE_ID_SHIFT)
-                       & PSCI_POWER_STATE_ID_MASK;
-       state->type = (power_state >> PSCI_POWER_STATE_TYPE_SHIFT)
-                       & PSCI_POWER_STATE_TYPE_MASK;
-       state->affinity_level = (power_state >> PSCI_POWER_STATE_AFFL_SHIFT)
-                       & PSCI_POWER_STATE_AFFL_MASK;
+       state->id = (power_state >> PSCI_0_2_POWER_STATE_ID_SHIFT)
+                       & PSCI_0_2_POWER_STATE_ID_MASK;
+       state->type = (power_state >> PSCI_0_2_POWER_STATE_TYPE_SHIFT)
+                       & PSCI_0_2_POWER_STATE_TYPE_MASK;
+       state->affinity_level = (power_state >> PSCI_0_2_POWER_STATE_AFFL_SHIFT)
+                       & PSCI_0_2_POWER_STATE_AFFL_MASK;
 }
 
 /*
@@ -144,6 +143,14 @@ static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1,
        return function_id;
 }
 
+static int psci_get_version(void)
+{
+       int err;
+
+       err = invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
+       return err;
+}
+
 static int psci_cpu_suspend(struct psci_power_state state,
                            unsigned long entry_point)
 {
@@ -187,107 +194,135 @@ static int psci_migrate(unsigned long cpuid)
        return psci_to_linux_errno(err);
 }
 
-static const struct of_device_id psci_of_match[] __initconst = {
-       { .compatible = "arm,psci",     },
-       {},
-};
+static int psci_affinity_info(unsigned long target_affinity,
+               unsigned long lowest_affinity_level)
+{
+       int err;
+       u32 fn;
 
-int __init psci_dt_register_idle_states(struct cpuidle_driver *drv,
-                                       struct device_node *state_nodes[])
+       fn = psci_function_id[PSCI_FN_AFFINITY_INFO];
+       err = invoke_psci_fn(fn, target_affinity, lowest_affinity_level, 0);
+       return err;
+}
+
+static int psci_migrate_info_type(void)
+{
+       int err;
+       u32 fn;
+
+       fn = psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE];
+       err = invoke_psci_fn(fn, 0, 0, 0);
+       return err;
+}
+
+static int get_set_conduit_method(struct device_node *np)
 {
-       int cpu, i;
-       struct psci_power_state *psci_states;
-       const struct cpu_operations *cpu_ops_ptr;
+       const char *method;
+
+       pr_info("probing for conduit method from DT.\n");
+
+       if (of_property_read_string(np, "method", &method)) {
+               pr_warn("missing \"method\" property\n");
+               return -ENXIO;
+       }
 
-       if (!state_nodes)
+       if (!strcmp("hvc", method)) {
+               invoke_psci_fn = __invoke_psci_fn_hvc;
+       } else if (!strcmp("smc", method)) {
+               invoke_psci_fn = __invoke_psci_fn_smc;
+       } else {
+               pr_warn("invalid \"method\" property: %s\n", method);
                return -EINVAL;
-       /*
-        * This is belt-and-braces: make sure that if the idle
-        * specified protocol is psci, the cpu_ops have been
-        * initialized to psci operations. Anything else is
-        * a recipe for mayhem.
-        */
-       for_each_cpu(cpu, drv->cpumask) {
-               cpu_ops_ptr = cpu_ops[cpu];
-               if (WARN_ON(!cpu_ops_ptr || strcmp(cpu_ops_ptr->name, "psci")))
-                       return -EOPNOTSUPP;
        }
+       return 0;
+}
 
-       psci_states = kcalloc(drv->state_count, sizeof(*psci_states),
-                             GFP_KERNEL);
+static void psci_sys_reset(char str, const char *cmd)
+{
+       invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
+}
 
-       if (!psci_states) {
-               pr_warn("psci idle state allocation failed\n");
-               return -ENOMEM;
-       }
+static void psci_sys_poweroff(void)
+{
+       invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
+}
+
+/*
+ * PSCI Function IDs for v0.2+ are well defined so use
+ * standard values.
+ */
+static int __init psci_0_2_init(struct device_node *np)
+{
+       int err, ver;
+
+       err = get_set_conduit_method(np);
+
+       if (err)
+               goto out_put_node;
 
-       for_each_cpu(cpu, drv->cpumask) {
-               if (per_cpu(psci_power_state, cpu)) {
-                       pr_warn("idle states already initialized on cpu %u\n",
-                               cpu);
-                       continue;
+       ver = psci_get_version();
+
+       if (ver == PSCI_RET_NOT_SUPPORTED) {
+               /* PSCI v0.2 mandates implementation of PSCI_ID_VERSION. */
+               pr_err("PSCI firmware does not comply with the v0.2 spec.\n");
+               err = -EOPNOTSUPP;
+               goto out_put_node;
+       } else {
+               pr_info("PSCIv%d.%d detected in firmware.\n",
+                               PSCI_VERSION_MAJOR(ver),
+                               PSCI_VERSION_MINOR(ver));
+
+               if (PSCI_VERSION_MAJOR(ver) == 0 &&
+                               PSCI_VERSION_MINOR(ver) < 2) {
+                       err = -EINVAL;
+                       pr_err("Conflicting PSCI version detected.\n");
+                       goto out_put_node;
                }
-               per_cpu(psci_power_state, cpu) = psci_states;
        }
 
+       pr_info("Using standard PSCI v0.2 function IDs\n");
+       psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
+       psci_ops.cpu_suspend = psci_cpu_suspend;
 
-       for (i = 0; i < drv->state_count; i++) {
-               u32 psci_power_state;
+       psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
+       psci_ops.cpu_off = psci_cpu_off;
 
-               if (!state_nodes[i]) {
-                       /*
-                        * An index with a missing node pointer falls back to
-                        * simple STANDBYWFI
-                        */
-                       psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
-                       continue;
-               }
+       psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
+       psci_ops.cpu_on = psci_cpu_on;
 
-               if (of_property_read_u32(state_nodes[i], "entry-method-param",
-                                        &psci_power_state)) {
-                       pr_warn(" * %s missing entry-method-param property\n",
-                               state_nodes[i]->full_name);
-                       /*
-                        * If entry-method-param property is missing, fall
-                        * back to STANDBYWFI state
-                        */
-                       psci_states[i].type = PSCI_POWER_STATE_TYPE_STANDBY;
-                       continue;
-               }
+       psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
+       psci_ops.migrate = psci_migrate;
 
-               pr_debug("psci-power-state %#x index %u\n",
-                        psci_power_state, i);
-               psci_power_state_unpack(psci_power_state, &psci_states[i]);
-       }
+       psci_function_id[PSCI_FN_AFFINITY_INFO] = PSCI_0_2_FN64_AFFINITY_INFO;
+       psci_ops.affinity_info = psci_affinity_info;
 
-       return 0;
+       psci_function_id[PSCI_FN_MIGRATE_INFO_TYPE] =
+               PSCI_0_2_FN_MIGRATE_INFO_TYPE;
+       psci_ops.migrate_info_type = psci_migrate_info_type;
+
+       arm_pm_restart = psci_sys_reset;
+
+       pm_power_off = psci_sys_poweroff;
+
+out_put_node:
+       of_node_put(np);
+       return err;
 }
 
-void __init psci_init(void)
+/*
+ * PSCI < v0.2 get PSCI Function IDs via DT.
+ */
+static int __init psci_0_1_init(struct device_node *np)
 {
-       struct device_node *np;
-       const char *method;
        u32 id;
+       int err;
 
-       np = of_find_matching_node(NULL, psci_of_match);
-       if (!np)
-               return;
-
-       pr_info("probing function IDs from device-tree\n");
+       err = get_set_conduit_method(np);
 
-       if (of_property_read_string(np, "method", &method)) {
-               pr_warning("missing \"method\" property\n");
+       if (err)
                goto out_put_node;
-       }
 
-       if (!strcmp("hvc", method)) {
-               invoke_psci_fn = __invoke_psci_fn_hvc;
-       } else if (!strcmp("smc", method)) {
-               invoke_psci_fn = __invoke_psci_fn_smc;
-       } else {
-               pr_warning("invalid \"method\" property: %s\n", method);
-               goto out_put_node;
-       }
+       pr_info("Using PSCI v0.1 Function IDs from DT\n");
 
        if (!of_property_read_u32(np, "cpu_suspend", &id)) {
                psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
@@ -311,7 +346,28 @@ void __init psci_init(void)
 
 out_put_node:
        of_node_put(np);
-       return;
+       return err;
+}
+
+static const struct of_device_id psci_of_match[] __initconst = {
+       { .compatible = "arm,psci",     .data = psci_0_1_init},
+       { .compatible = "arm,psci-0.2", .data = psci_0_2_init},
+       {},
+};
+
+int __init psci_init(void)
+{
+       struct device_node *np;
+       const struct of_device_id *matched_np;
+       psci_initcall_t init_fn;
+
+       np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
+
+       if (!np)
+               return -ENODEV;
+
+       init_fn = (psci_initcall_t)matched_np->data;
+       return init_fn(np);
 }
 
 #ifdef CONFIG_SMP
@@ -364,6 +420,36 @@ static void cpu_psci_cpu_die(unsigned int cpu)
 
        pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
 }
+
+static int cpu_psci_cpu_kill(unsigned int cpu)
+{
+       int err, i;
+
+       if (!psci_ops.affinity_info)
+               return 1;
+       /*
+        * cpu_kill could race with cpu_die and we can
+        * potentially end up declaring this cpu undead
+        * while it is dying. So, try again a few times.
+        */
+
+       for (i = 0; i < 10; i++) {
+               err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
+               if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
+                       pr_info("CPU%d killed.\n", cpu);
+                       return 1;
+               }
+
+               msleep(10);
+               pr_info("Retrying again to check for CPU kill\n");
+       }
+
+       pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
+                       cpu, err);
+       /* Make op_cpu_kill() fail. */
+       return 0;
+}
+#endif
 #endif
 
 #ifdef CONFIG_ARM64_CPU_SUSPEND
@@ -380,16 +466,18 @@ static int cpu_psci_cpu_suspend(unsigned long index)
 
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
+#ifdef CONFIG_SMP
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
        .cpu_boot       = cpu_psci_cpu_boot,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = cpu_psci_cpu_disable,
        .cpu_die        = cpu_psci_cpu_die,
+       .cpu_kill       = cpu_psci_cpu_kill,
+#endif
 #endif
 #ifdef CONFIG_ARM64_CPU_SUSPEND
        .cpu_suspend    = cpu_psci_cpu_suspend,
 #endif
 };
 
-#endif
index e1f70225a919e392076f0371679e1ce864443931..1267de080febbe06c7d20ff85e308621cb980623 100644 (file)
@@ -659,11 +659,18 @@ static int compat_gpr_get(struct task_struct *target,
                        reg = task_pt_regs(target)->regs[idx];
                }
 
-               ret = copy_to_user(ubuf, &reg, sizeof(reg));
-               if (ret)
-                       break;
-
-               ubuf += sizeof(reg);
+               if (kbuf) {
+                       memcpy(kbuf, &reg, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_to_user(ubuf, &reg, sizeof(reg));
+                       if (ret) {
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       ubuf += sizeof(reg);
+               }
        }
 
        return ret;
@@ -693,11 +700,18 @@ static int compat_gpr_set(struct task_struct *target,
                unsigned int idx = start + i;
                compat_ulong_t reg;
 
-               ret = copy_from_user(&reg, ubuf, sizeof(reg));
-               if (ret)
-                       return ret;
-
-               ubuf += sizeof(reg);
+               if (kbuf) {
+                       memcpy(&reg, kbuf, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_from_user(&reg, ubuf, sizeof(reg));
+                       if (ret) {
+                               ret = -EFAULT;
+                               break;
+                       }
+
+                       ubuf += sizeof(reg);
+               }
 
                switch (idx) {
                case 15:
index 7c868a2ac38b23ed03a919c70ef1f2b7e1e8c6b6..0ac31a581f0285b3aa46875e4b7fceb482bdb1e2 100644 (file)
@@ -231,6 +231,19 @@ int __cpu_disable(void)
        return 0;
 }
 
+static int op_cpu_kill(unsigned int cpu)
+{
+       /*
+        * If we have no means of synchronising with the dying CPU, then assume
+        * that it is really dead. We can only wait for an arbitrary length of
+        * time and hope that it's dead, so let's skip the wait and just hope.
+        */
+       if (!cpu_ops[cpu]->cpu_kill)
+               return 1;
+
+       return cpu_ops[cpu]->cpu_kill(cpu);
+}
+
 static DECLARE_COMPLETION(cpu_died);
 
 /*
@@ -244,6 +257,15 @@ void __cpu_die(unsigned int cpu)
                return;
        }
        pr_notice("CPU%u: shutdown\n", cpu);
+
+       /*
+        * Now that the dying CPU is beyond the point of no return w.r.t.
+        * in-kernel synchronisation, try to get the firmware to help us to
+        * verify that it has really left the kernel before we consider
+        * clobbering anything it might still be using.
+        */
+       if (!op_cpu_kill(cpu))
+               pr_warn("CPU%d may not have shut down cleanly\n", cpu);
 }
 
 /*
index 55437ba1f5a4901984e368dca7c43d5c76c218b3..407991bf79f5116eed6d5aa4aa2f1314ecabe0fc 100644 (file)
@@ -111,10 +111,9 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                frame.sp = thread_saved_sp(tsk);
                frame.pc = thread_saved_pc(tsk);
        } else {
-               register unsigned long current_sp asm("sp");
                data.no_sched_functions = 0;
                frame.fp = (unsigned long)__builtin_frame_address(0);
-               frame.sp = current_sp;
+               frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)save_stack_trace_tsk;
        }
 
index 03dc3718eb136d24db7295133709a4a9e92c21b5..1ddb2bd5932aa79384104ed248cf1a6c41610fb5 100644 (file)
@@ -18,6 +18,7 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/clockchips.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
@@ -74,6 +75,8 @@ void __init time_init(void)
 
        clocksource_of_init();
 
+       tick_setup_hrtimer_broadcast();
+
        arch_timer_rate = arch_timer_get_rate();
        if (!arch_timer_rate)
                panic("Unable to initialise architected timer.\n");
index 0da47699510b685f5d56d9d4cde2a0dfb9ca9242..2589ef53973af45601bf7fb552da64f736075197 100644 (file)
@@ -133,7 +133,6 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
 static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 {
        struct stackframe frame;
-       const register unsigned long current_sp asm ("sp");
 
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
 
@@ -146,7 +145,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
                frame.pc = regs->pc;
        } else if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
-               frame.sp = current_sp;
+               frame.sp = current_stack_pointer;
                frame.pc = (unsigned long)dump_backtrace;
        } else {
                /*
@@ -157,7 +156,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
                frame.pc = thread_saved_pc(tsk);
        }
 
-       printk("Call trace:\n");
+       pr_emerg("Call trace:\n");
        while (1) {
                unsigned long where = frame.pc;
                int ret;
@@ -372,17 +371,17 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 
 void __pte_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pte %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pte %016lx.\n", file, line, val);
 }
 
 void __pmd_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val);
 }
 
 void __pgd_error(const char *file, int line, unsigned long val)
 {
-       printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+       pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val);
 }
 
 void __init trap_init(void)
index 84b942612051cb65a32bbc66fdbf0769adfa1be0..ff3bddea482dd84ced8711376af1084f6ae594ee 100644 (file)
@@ -43,7 +43,7 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
        $(call if_changed,vdsosym)
 
 # Assembly rules for the .S files
-$(obj-vdso): %.o: %.S
+$(obj-vdso): %.o: %.S FORCE
        $(call if_changed_dep,vdsoas)
 
 # Actual build commands
index 55d0e035205f0156e6b272ac6fe9fd83c6ea4fa8..de93a4bbdb7c7741dfc230b1bfa3c0c7a1c633ad 100644 (file)
@@ -13,7 +13,7 @@
 #define ARM_EXIT_DISCARD(x)    x
 
 OUTPUT_ARCH(aarch64)
-ENTRY(stext)
+ENTRY(_text)
 
 jiffies = jiffies_64;
 
index 328ce1a99daaa48e774e6a9df4d146b7357f53fd..d98d3e39879eb91789ad174305492fc865a38f93 100644 (file)
@@ -1,4 +1,5 @@
 lib-y          := bitops.o clear_user.o delay.o copy_from_user.o       \
                   copy_to_user.o copy_in_user.o copy_page.o            \
                   clear_page.o memchr.o memcpy.o memmove.o memset.o    \
+                  memcmp.o strcmp.o strncmp.o strlen.o strnlen.o       \
                   strchr.o strrchr.o
diff --git a/arch/arm64/lib/memcmp.S b/arch/arm64/lib/memcmp.S
new file mode 100644 (file)
index 0000000..6ea0776
--- /dev/null
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+* compare memory areas(when two memory areas' offset are different,
+* alignment handled by the hardware)
+*
+* Parameters:
+*  x0 - const memory area 1 pointer
+*  x1 - const memory area 2 pointer
+*  x2 - the maximal compare byte length
+* Returns:
+*  x0 - a compare result, maybe less than, equal to, or greater than ZERO
+*/
+
+/* Parameters and result.  */
+src1           .req    x0
+src2           .req    x1
+limit          .req    x2
+result         .req    x0
+
+/* Internal variables.  */
+data1          .req    x3
+data1w         .req    w3
+data2          .req    x4
+data2w         .req    w4
+has_nul                .req    x5
+diff           .req    x6
+endloop                .req    x7
+tmp1           .req    x8
+tmp2           .req    x9
+tmp3           .req    x10
+pos            .req    x11
+limit_wd       .req    x12
+mask           .req    x13
+
+ENTRY(memcmp)
+       cbz     limit, .Lret0
+       eor     tmp1, src1, src2
+       tst     tmp1, #7
+       b.ne    .Lmisaligned8
+       ands    tmp1, src1, #7
+       b.ne    .Lmutual_align
+       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
+       lsr     limit_wd, limit_wd, #3 /* Convert to Dwords.  */
+       /*
+       * The input source addresses are at alignment boundary.
+       * Directly compare eight bytes each time.
+       */
+.Lloop_aligned:
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+.Lstart_realigned:
+       subs    limit_wd, limit_wd, #1
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, cs  /* Last Dword or differences.  */
+       cbz     endloop, .Lloop_aligned
+
+       /* Not reached the limit, must have found a diff.  */
+       tbz     limit_wd, #63, .Lnot_limit
+
+       /* Limit % 8 == 0 => the diff is in the last 8 bytes. */
+       ands    limit, limit, #7
+       b.eq    .Lnot_limit
+       /*
+       * The remained bytes less than 8. It is needed to extract valid data
+       * from last eight bytes of the intended memory range.
+       */
+       lsl     limit, limit, #3        /* bytes-> bits.  */
+       mov     mask, #~0
+CPU_BE( lsr    mask, mask, limit )
+CPU_LE( lsl    mask, mask, limit )
+       bic     data1, data1, mask
+       bic     data2, data2, mask
+
+       orr     diff, diff, mask
+       b       .Lnot_limit
+
+.Lmutual_align:
+       /*
+       * Sources are mutually aligned, but are not currently at an
+       * alignment boundary. Round down the addresses and then mask off
+       * the bytes that precede the start point.
+       */
+       bic     src1, src1, #7
+       bic     src2, src2, #7
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+       /*
+       * We can not add limit with alignment offset(tmp1) here. Since the
+       * addition probably make the limit overflown.
+       */
+       sub     limit_wd, limit, #1/*limit != 0, so no underflow.*/
+       and     tmp3, limit_wd, #7
+       lsr     limit_wd, limit_wd, #3
+       add     tmp3, tmp3, tmp1
+       add     limit_wd, limit_wd, tmp3, lsr #3
+       add     limit, limit, tmp1/* Adjust the limit for the extra.  */
+
+       lsl     tmp1, tmp1, #3/* Bytes beyond alignment -> bits.*/
+       neg     tmp1, tmp1/* Bits to alignment -64.  */
+       mov     tmp2, #~0
+       /*mask off the non-intended bytes before the start address.*/
+CPU_BE( lsl    tmp2, tmp2, tmp1 )/*Big-endian.Early bytes are at MSB*/
+       /* Little-endian.  Early bytes are at LSB.  */
+CPU_LE( lsr    tmp2, tmp2, tmp1 )
+
+       orr     data1, data1, tmp2
+       orr     data2, data2, tmp2
+       b       .Lstart_realigned
+
+       /*src1 and src2 have different alignment offset.*/
+.Lmisaligned8:
+       cmp     limit, #8
+       b.lo    .Ltiny8proc /*limit < 8: compare byte by byte*/
+
+       and     tmp1, src1, #7
+       neg     tmp1, tmp1
+       add     tmp1, tmp1, #8/*valid length in the first 8 bytes of src1*/
+       and     tmp2, src2, #7
+       neg     tmp2, tmp2
+       add     tmp2, tmp2, #8/*valid length in the first 8 bytes of src2*/
+       subs    tmp3, tmp1, tmp2
+       csel    pos, tmp1, tmp2, hi /*Choose the maximum.*/
+
+       sub     limit, limit, pos
+       /*compare the proceeding bytes in the first 8 byte segment.*/
+.Ltinycmp:
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       subs    pos, pos, #1
+       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000.  */
+       b.eq    .Ltinycmp
+       cbnz    pos, 1f /*diff occurred before the last byte.*/
+       cmp     data1w, data2w
+       b.eq    .Lstart_align
+1:
+       sub     result, data1, data2
+       ret
+
+.Lstart_align:
+       lsr     limit_wd, limit, #3
+       cbz     limit_wd, .Lremain8
+
+       ands    xzr, src1, #7
+       b.eq    .Lrecal_offset
+       /*process more leading bytes to make src1 aligned...*/
+       add     src1, src1, tmp3 /*backwards src1 to alignment boundary*/
+       add     src2, src2, tmp3
+       sub     limit, limit, tmp3
+       lsr     limit_wd, limit, #3
+       cbz     limit_wd, .Lremain8
+       /*load 8 bytes from aligned SRC1..*/
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+
+       subs    limit_wd, limit_wd, #1
+       eor     diff, data1, data2  /*Non-zero if differences found.*/
+       csinv   endloop, diff, xzr, ne
+       cbnz    endloop, .Lunequal_proc
+       /*How far is the current SRC2 from the alignment boundary...*/
+       and     tmp3, tmp3, #7
+
+.Lrecal_offset:/*src1 is aligned now..*/
+       neg     pos, tmp3
+.Lloopcmp_proc:
+       /*
+       * Divide the eight bytes into two parts. First,backwards the src2
+       * to an alignment boundary,load eight bytes and compare from
+       * the SRC2 alignment boundary. If all 8 bytes are equal,then start
+       * the second part's comparison. Otherwise finish the comparison.
+       * This special handling guarantees that all the accesses stay within
+       * the thread/task address space, avoiding out-of-range access.
+       */
+       ldr     data1, [src1,pos]
+       ldr     data2, [src2,pos]
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       cbnz    diff, .Lnot_limit
+
+       /*The second part process*/
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       subs    limit_wd, limit_wd, #1
+       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
+       cbz     endloop, .Lloopcmp_proc
+.Lunequal_proc:
+       cbz     diff, .Lremain8
+
+/* A difference occurred in the latest comparison. */
+.Lnot_limit:
+/*
+* For little endian,reverse the low significant equal bits into MSB,then
+* following CLZ can find how many equal bits exist.
+*/
+CPU_LE( rev    diff, diff )
+CPU_LE( rev    data1, data1 )
+CPU_LE( rev    data2, data2 )
+
+       /*
+       * The MS-non-zero bit of DIFF marks either the first bit
+       * that is different, or the end of the significant data.
+       * Shifting left now will bring the critical information into the
+       * top bits.
+       */
+       clz     pos, diff
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /*
+       * We need to zero-extend (char is unsigned) the value and then
+       * perform a signed subtraction.
+       */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+
+.Lremain8:
+       /* Limit % 8 == 0 =>. all data are equal.*/
+       ands    limit, limit, #7
+       b.eq    .Lret0
+
+.Ltiny8proc:
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       subs    limit, limit, #1
+
+       ccmp    data1w, data2w, #0, ne  /* NZCV = 0b0000. */
+       b.eq    .Ltiny8proc
+       sub     result, data1, data2
+       ret
+.Lret0:
+       mov     result, #0
+       ret
+ENDPROC(memcmp)
index 27b5003609b662d8015b398913ee6aa79095c460..8a9a96d3ddae04331828c9744b4d94368ef70620 100644 (file)
@@ -1,5 +1,13 @@
 /*
  * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 
 /*
  * Copy a buffer from src to dest (alignment handled by the hardware)
  * Returns:
  *     x0 - dest
  */
+dstin  .req    x0
+src    .req    x1
+count  .req    x2
+tmp1   .req    x3
+tmp1w  .req    w3
+tmp2   .req    x4
+tmp2w  .req    w4
+tmp3   .req    x5
+tmp3w  .req    w5
+dst    .req    x6
+
+A_l    .req    x7
+A_h    .req    x8
+B_l    .req    x9
+B_h    .req    x10
+C_l    .req    x11
+C_h    .req    x12
+D_l    .req    x13
+D_h    .req    x14
+
 ENTRY(memcpy)
-       mov     x4, x0
-       subs    x2, x2, #8
-       b.mi    2f
-1:     ldr     x3, [x1], #8
-       subs    x2, x2, #8
-       str     x3, [x4], #8
-       b.pl    1b
-2:     adds    x2, x2, #4
-       b.mi    3f
-       ldr     w3, [x1], #4
-       sub     x2, x2, #4
-       str     w3, [x4], #4
-3:     adds    x2, x2, #2
-       b.mi    4f
-       ldrh    w3, [x1], #2
-       sub     x2, x2, #2
-       strh    w3, [x4], #2
-4:     adds    x2, x2, #1
-       b.mi    5f
-       ldrb    w3, [x1]
-       strb    w3, [x4]
-5:     ret
+       mov     dst, dstin
+       cmp     count, #16
+       /* When memory length is less than 16, the accesses are not aligned. */
+       b.lo    .Ltiny15
+
+       neg     tmp2, src
+       ands    tmp2, tmp2, #15/* Bytes to reach alignment. */
+       b.eq    .LSrcAligned
+       sub     count, count, tmp2
+       /*
+       * Copy the leading memory data from src to dst in increasing
+       * address order. This way, the risk of overwriting the source
+       * memory data is eliminated when the distance between src and
+       * dst is less than 16. The memory accesses here are aligned.
+       */
+       tbz     tmp2, #0, 1f
+       ldrb    tmp1w, [src], #1
+       strb    tmp1w, [dst], #1
+1:
+       tbz     tmp2, #1, 2f
+       ldrh    tmp1w, [src], #2
+       strh    tmp1w, [dst], #2
+2:
+       tbz     tmp2, #2, 3f
+       ldr     tmp1w, [src], #4
+       str     tmp1w, [dst], #4
+3:
+       tbz     tmp2, #3, .LSrcAligned
+       ldr     tmp1, [src],#8
+       str     tmp1, [dst],#8
+
+.LSrcAligned:
+       cmp     count, #64
+       b.ge    .Lcpy_over64
+       /*
+       * Deal with small copies quickly by dropping straight into the
+       * exit block.
+       */
+.Ltail63:
+       /*
+       * Copy up to 48 bytes of data. At this point we only need the
+       * bottom 6 bits of count to be accurate.
+       */
+       ands    tmp1, count, #0x30
+       b.eq    .Ltiny15
+       cmp     tmp1w, #0x20
+       b.eq    1f
+       b.lt    2f
+       ldp     A_l, A_h, [src], #16
+       stp     A_l, A_h, [dst], #16
+1:
+       ldp     A_l, A_h, [src], #16
+       stp     A_l, A_h, [dst], #16
+2:
+       ldp     A_l, A_h, [src], #16
+       stp     A_l, A_h, [dst], #16
+.Ltiny15:
+       /*
+       * Prefer to break one ldp/stp into several load/store to access
+       * memory in an increasing address order,rather than to load/store 16
+       * bytes from (src-16) to (dst-16) and to backward the src to aligned
+       * address,which way is used in original cortex memcpy. If keeping
+       * the original memcpy process here, memmove need to satisfy the
+       * precondition that src address is at least 16 bytes bigger than dst
+       * address, otherwise some source data will be overwritten when memmove
+       * calls memcpy directly. To make memmove simpler and decouple the
+       * memcpy's dependency on memmove, withdrew the original process.
+       */
+       tbz     count, #3, 1f
+       ldr     tmp1, [src], #8
+       str     tmp1, [dst], #8
+1:
+       tbz     count, #2, 2f
+       ldr     tmp1w, [src], #4
+       str     tmp1w, [dst], #4
+2:
+       tbz     count, #1, 3f
+       ldrh    tmp1w, [src], #2
+       strh    tmp1w, [dst], #2
+3:
+       tbz     count, #0, .Lexitfunc
+       ldrb    tmp1w, [src]
+       strb    tmp1w, [dst]
+
+.Lexitfunc:
+       ret
+
+.Lcpy_over64:
+       subs    count, count, #128
+       b.ge    .Lcpy_body_large
+       /*
+       * Less than 128 bytes to copy, so handle 64 here and then jump
+       * to the tail.
+       */
+       ldp     A_l, A_h, [src],#16
+       stp     A_l, A_h, [dst],#16
+       ldp     B_l, B_h, [src],#16
+       ldp     C_l, C_h, [src],#16
+       stp     B_l, B_h, [dst],#16
+       stp     C_l, C_h, [dst],#16
+       ldp     D_l, D_h, [src],#16
+       stp     D_l, D_h, [dst],#16
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+       ret
+
+       /*
+       * Critical loop.  Start at a new cache line boundary.  Assuming
+       * 64 bytes per line this ensures the entire loop is in one line.
+       */
+       .p2align        L1_CACHE_SHIFT
+.Lcpy_body_large:
+       /* pre-get 64 bytes data. */
+       ldp     A_l, A_h, [src],#16
+       ldp     B_l, B_h, [src],#16
+       ldp     C_l, C_h, [src],#16
+       ldp     D_l, D_h, [src],#16
+1:
+       /*
+       * interlace the load of next 64 bytes data block with store of the last
+       * loaded 64 bytes data.
+       */
+       stp     A_l, A_h, [dst],#16
+       ldp     A_l, A_h, [src],#16
+       stp     B_l, B_h, [dst],#16
+       ldp     B_l, B_h, [src],#16
+       stp     C_l, C_h, [dst],#16
+       ldp     C_l, C_h, [src],#16
+       stp     D_l, D_h, [dst],#16
+       ldp     D_l, D_h, [src],#16
+       subs    count, count, #64
+       b.ge    1b
+       stp     A_l, A_h, [dst],#16
+       stp     B_l, B_h, [dst],#16
+       stp     C_l, C_h, [dst],#16
+       stp     D_l, D_h, [dst],#16
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+       ret
 ENDPROC(memcpy)
index b79fdfa42d39794d805e1c8937510a01e7aaf357..57b19ea2dad467d885f09991b902d0a52bd6f747 100644 (file)
@@ -1,5 +1,13 @@
 /*
  * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 
 /*
  * Move a buffer from src to test (alignment handled by the hardware).
  * Returns:
  *     x0 - dest
  */
+dstin  .req    x0
+src    .req    x1
+count  .req    x2
+tmp1   .req    x3
+tmp1w  .req    w3
+tmp2   .req    x4
+tmp2w  .req    w4
+tmp3   .req    x5
+tmp3w  .req    w5
+dst    .req    x6
+
+A_l    .req    x7
+A_h    .req    x8
+B_l    .req    x9
+B_h    .req    x10
+C_l    .req    x11
+C_h    .req    x12
+D_l    .req    x13
+D_h    .req    x14
+
 ENTRY(memmove)
-       cmp     x0, x1
-       b.ls    memcpy
-       add     x4, x0, x2
-       add     x1, x1, x2
-       subs    x2, x2, #8
-       b.mi    2f
-1:     ldr     x3, [x1, #-8]!
-       subs    x2, x2, #8
-       str     x3, [x4, #-8]!
-       b.pl    1b
-2:     adds    x2, x2, #4
-       b.mi    3f
-       ldr     w3, [x1, #-4]!
-       sub     x2, x2, #4
-       str     w3, [x4, #-4]!
-3:     adds    x2, x2, #2
-       b.mi    4f
-       ldrh    w3, [x1, #-2]!
-       sub     x2, x2, #2
-       strh    w3, [x4, #-2]!
-4:     adds    x2, x2, #1
-       b.mi    5f
-       ldrb    w3, [x1, #-1]
-       strb    w3, [x4, #-1]
-5:     ret
+       cmp     dstin, src
+       b.lo    memcpy
+       add     tmp1, src, count
+       cmp     dstin, tmp1
+       b.hs    memcpy          /* No overlap.  */
+
+       add     dst, dstin, count
+       add     src, src, count
+       cmp     count, #16
+       b.lo    .Ltail15  /*probably non-alignment accesses.*/
+
+       ands    tmp2, src, #15     /* Bytes to reach alignment.  */
+       b.eq    .LSrcAligned
+       sub     count, count, tmp2
+       /*
+       * process the aligned offset length to make the src aligned firstly.
+       * those extra instructions' cost is acceptable. It also make the
+       * coming accesses are based on aligned address.
+       */
+       tbz     tmp2, #0, 1f
+       ldrb    tmp1w, [src, #-1]!
+       strb    tmp1w, [dst, #-1]!
+1:
+       tbz     tmp2, #1, 2f
+       ldrh    tmp1w, [src, #-2]!
+       strh    tmp1w, [dst, #-2]!
+2:
+       tbz     tmp2, #2, 3f
+       ldr     tmp1w, [src, #-4]!
+       str     tmp1w, [dst, #-4]!
+3:
+       tbz     tmp2, #3, .LSrcAligned
+       ldr     tmp1, [src, #-8]!
+       str     tmp1, [dst, #-8]!
+
+.LSrcAligned:
+       cmp     count, #64
+       b.ge    .Lcpy_over64
+
+       /*
+       * Deal with small copies quickly by dropping straight into the
+       * exit block.
+       */
+.Ltail63:
+       /*
+       * Copy up to 48 bytes of data. At this point we only need the
+       * bottom 6 bits of count to be accurate.
+       */
+       ands    tmp1, count, #0x30
+       b.eq    .Ltail15
+       cmp     tmp1w, #0x20
+       b.eq    1f
+       b.lt    2f
+       ldp     A_l, A_h, [src, #-16]!
+       stp     A_l, A_h, [dst, #-16]!
+1:
+       ldp     A_l, A_h, [src, #-16]!
+       stp     A_l, A_h, [dst, #-16]!
+2:
+       ldp     A_l, A_h, [src, #-16]!
+       stp     A_l, A_h, [dst, #-16]!
+
+.Ltail15:
+       tbz     count, #3, 1f
+       ldr     tmp1, [src, #-8]!
+       str     tmp1, [dst, #-8]!
+1:
+       tbz     count, #2, 2f
+       ldr     tmp1w, [src, #-4]!
+       str     tmp1w, [dst, #-4]!
+2:
+       tbz     count, #1, 3f
+       ldrh    tmp1w, [src, #-2]!
+       strh    tmp1w, [dst, #-2]!
+3:
+       tbz     count, #0, .Lexitfunc
+       ldrb    tmp1w, [src, #-1]
+       strb    tmp1w, [dst, #-1]
+
+.Lexitfunc:
+       ret
+
+.Lcpy_over64:
+       subs    count, count, #128
+       b.ge    .Lcpy_body_large
+       /*
+       * Less than 128 bytes to copy, so handle 64 bytes here and then jump
+       * to the tail.
+       */
+       ldp     A_l, A_h, [src, #-16]
+       stp     A_l, A_h, [dst, #-16]
+       ldp     B_l, B_h, [src, #-32]
+       ldp     C_l, C_h, [src, #-48]
+       stp     B_l, B_h, [dst, #-32]
+       stp     C_l, C_h, [dst, #-48]
+       ldp     D_l, D_h, [src, #-64]!
+       stp     D_l, D_h, [dst, #-64]!
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+       ret
+
+       /*
+       * Critical loop. Start at a new cache line boundary. Assuming
+       * 64 bytes per line this ensures the entire loop is in one line.
+       */
+       .p2align        L1_CACHE_SHIFT
+.Lcpy_body_large:
+       /* pre-load 64 bytes data. */
+       ldp     A_l, A_h, [src, #-16]
+       ldp     B_l, B_h, [src, #-32]
+       ldp     C_l, C_h, [src, #-48]
+       ldp     D_l, D_h, [src, #-64]!
+1:
+       /*
+       * interlace the load of next 64 bytes data block with store of the last
+       * loaded 64 bytes data.
+       */
+       stp     A_l, A_h, [dst, #-16]
+       ldp     A_l, A_h, [src, #-16]
+       stp     B_l, B_h, [dst, #-32]
+       ldp     B_l, B_h, [src, #-32]
+       stp     C_l, C_h, [dst, #-48]
+       ldp     C_l, C_h, [src, #-48]
+       stp     D_l, D_h, [dst, #-64]!
+       ldp     D_l, D_h, [src, #-64]!
+       subs    count, count, #64
+       b.ge    1b
+       stp     A_l, A_h, [dst, #-16]
+       stp     B_l, B_h, [dst, #-32]
+       stp     C_l, C_h, [dst, #-48]
+       stp     D_l, D_h, [dst, #-64]!
+
+       tst     count, #0x3f
+       b.ne    .Ltail63
+       ret
 ENDPROC(memmove)
index 87e4a68fbbbcd8176b9a82e189c7704e5ec9af40..7c72dfd36b6396a921b7d2b7d66e5880f8314d72 100644 (file)
@@ -1,5 +1,13 @@
 /*
  * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +24,7 @@
 
 #include <linux/linkage.h>
 #include <asm/assembler.h>
+#include <asm/cache.h>
 
 /*
  * Fill in the buffer with character c (alignment handled by the hardware)
  * Returns:
  *     x0 - buf
  */
+
+dstin          .req    x0
+val            .req    w1
+count          .req    x2
+tmp1           .req    x3
+tmp1w          .req    w3
+tmp2           .req    x4
+tmp2w          .req    w4
+zva_len_x      .req    x5
+zva_len                .req    w5
+zva_bits_x     .req    x6
+
+A_l            .req    x7
+A_lw           .req    w7
+dst            .req    x8
+tmp3w          .req    w9
+tmp3           .req    x9
+
 ENTRY(memset)
-       mov     x4, x0
-       and     w1, w1, #0xff
-       orr     w1, w1, w1, lsl #8
-       orr     w1, w1, w1, lsl #16
-       orr     x1, x1, x1, lsl #32
-       subs    x2, x2, #8
-       b.mi    2f
-1:     str     x1, [x4], #8
-       subs    x2, x2, #8
-       b.pl    1b
-2:     adds    x2, x2, #4
-       b.mi    3f
-       sub     x2, x2, #4
-       str     w1, [x4], #4
-3:     adds    x2, x2, #2
-       b.mi    4f
-       sub     x2, x2, #2
-       strh    w1, [x4], #2
-4:     adds    x2, x2, #1
-       b.mi    5f
-       strb    w1, [x4]
-5:     ret
+       mov     dst, dstin      /* Preserve return value.  */
+       and     A_lw, val, #255
+       orr     A_lw, A_lw, A_lw, lsl #8
+       orr     A_lw, A_lw, A_lw, lsl #16
+       orr     A_l, A_l, A_l, lsl #32
+
+       cmp     count, #15
+       b.hi    .Lover16_proc
+       /* All stores may be non-aligned. */
+       tbz     count, #3, 1f
+       str     A_l, [dst], #8
+1:
+       tbz     count, #2, 2f
+       str     A_lw, [dst], #4
+2:
+       tbz     count, #1, 3f
+       strh    A_lw, [dst], #2
+3:
+       tbz     count, #0, 4f
+       strb    A_lw, [dst]
+4:
+       ret
+
+.Lover16_proc:
+       /*Whether  the start address is aligned with 16.*/
+       neg     tmp2, dst
+       ands    tmp2, tmp2, #15
+       b.eq    .Laligned
+/*
+* The count is not less than 16, we can use stp to store the start 16 bytes,
+* then adjust the dst aligned with 16.This process will make the current
+* memory address at alignment boundary.
+*/
+       stp     A_l, A_l, [dst] /*non-aligned store..*/
+       /*make the dst aligned..*/
+       sub     count, count, tmp2
+       add     dst, dst, tmp2
+
+.Laligned:
+       cbz     A_l, .Lzero_mem
+
+.Ltail_maybe_long:
+       cmp     count, #64
+       b.ge    .Lnot_short
+.Ltail63:
+       ands    tmp1, count, #0x30
+       b.eq    3f
+       cmp     tmp1w, #0x20
+       b.eq    1f
+       b.lt    2f
+       stp     A_l, A_l, [dst], #16
+1:
+       stp     A_l, A_l, [dst], #16
+2:
+       stp     A_l, A_l, [dst], #16
+/*
+* The last store length is less than 16,use stp to write last 16 bytes.
+* It will lead some bytes written twice and the access is non-aligned.
+*/
+3:
+       ands    count, count, #15
+       cbz     count, 4f
+       add     dst, dst, count
+       stp     A_l, A_l, [dst, #-16]   /* Repeat some/all of last store. */
+4:
+       ret
+
+       /*
+       * Critical loop. Start at a new cache line boundary. Assuming
+       * 64 bytes per line, this ensures the entire loop is in one line.
+       */
+       .p2align        L1_CACHE_SHIFT
+.Lnot_short:
+       sub     dst, dst, #16/* Pre-bias.  */
+       sub     count, count, #64
+1:
+       stp     A_l, A_l, [dst, #16]
+       stp     A_l, A_l, [dst, #32]
+       stp     A_l, A_l, [dst, #48]
+       stp     A_l, A_l, [dst, #64]!
+       subs    count, count, #64
+       b.ge    1b
+       tst     count, #0x3f
+       add     dst, dst, #16
+       b.ne    .Ltail63
+.Lexitfunc:
+       ret
+
+       /*
+       * For zeroing memory, check to see if we can use the ZVA feature to
+       * zero entire 'cache' lines.
+       */
+.Lzero_mem:
+       cmp     count, #63
+       b.le    .Ltail63
+       /*
+       * For zeroing small amounts of memory, it's not worth setting up
+       * the line-clear code.
+       */
+       cmp     count, #128
+       b.lt    .Lnot_short /*count is at least  128 bytes*/
+
+       mrs     tmp1, dczid_el0
+       tbnz    tmp1, #4, .Lnot_short
+       mov     tmp3w, #4
+       and     zva_len, tmp1w, #15     /* Safety: other bits reserved.  */
+       lsl     zva_len, tmp3w, zva_len
+
+       ands    tmp3w, zva_len, #63
+       /*
+       * ensure the zva_len is not less than 64.
+       * It is not meaningful to use ZVA if the block size is less than 64.
+       */
+       b.ne    .Lnot_short
+.Lzero_by_line:
+       /*
+       * Compute how far we need to go to become suitably aligned. We're
+       * already at quad-word alignment.
+       */
+       cmp     count, zva_len_x
+       b.lt    .Lnot_short             /* Not enough to reach alignment.  */
+       sub     zva_bits_x, zva_len_x, #1
+       neg     tmp2, dst
+       ands    tmp2, tmp2, zva_bits_x
+       b.eq    2f                      /* Already aligned.  */
+       /* Not aligned, check that there's enough to copy after alignment.*/
+       sub     tmp1, count, tmp2
+       /*
+       * guarantee the remaining length to be cleared by ZVA is bigger than
+       * 64, to avoid the process at 2f running past the memory range.*/
+       cmp     tmp1, #64
+       ccmp    tmp1, zva_len_x, #8, ge /* NZCV=0b1000 */
+       b.lt    .Lnot_short
+       /*
+       * We know that there's at least 64 bytes to zero and that it's safe
+       * to overrun by 64 bytes.
+       */
+       mov     count, tmp1
+1:
+       stp     A_l, A_l, [dst]
+       stp     A_l, A_l, [dst, #16]
+       stp     A_l, A_l, [dst, #32]
+       subs    tmp2, tmp2, #64
+       stp     A_l, A_l, [dst, #48]
+       add     dst, dst, #64
+       b.ge    1b
+       /* We've overrun a bit, so adjust dst downwards.*/
+       add     dst, dst, tmp2
+2:
+       sub     count, count, zva_len_x
+3:
+       dc      zva, dst
+       add     dst, dst, zva_len_x
+       subs    count, count, zva_len_x
+       b.ge    3b
+       ands    count, count, zva_bits_x
+       b.ne    .Ltail_maybe_long
+       ret
 ENDPROC(memset)
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
new file mode 100644 (file)
index 0000000..42f828b
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * compare two strings
+ *
+ * Parameters:
+ *     x0 - const string 1 pointer
+ *     x1 - const string 2 pointer
+ * Returns:
+ * x0 - an integer less than, equal to, or greater than zero
+ * if  s1  is  found, respectively, to be less than, to match,
+ * or be greater than s2.
+ */
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+/* Parameters and result.  */
+src1           .req    x0
+src2           .req    x1
+result         .req    x0
+
+/* Internal variables.  */
+data1          .req    x2
+data1w         .req    w2
+data2          .req    x3
+data2w         .req    w3
+has_nul                .req    x4
+diff           .req    x5
+syndrome       .req    x6
+tmp1           .req    x7
+tmp2           .req    x8
+tmp3           .req    x9
+zeroones       .req    x10
+pos            .req    x11
+
+ENTRY(strcmp)
+       eor     tmp1, src1, src2
+       mov     zeroones, #REP8_01
+       tst     tmp1, #7
+       b.ne    .Lmisaligned8
+       ands    tmp1, src1, #7
+       b.ne    .Lmutual_align
+
+       /*
+       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+       * can be done in parallel across the entire word.
+       */
+.Lloop_aligned:
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+.Lstart_realigned:
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2      /* Non-zero if differences found.  */
+       bic     has_nul, tmp1, tmp2     /* Non-zero if NUL terminator.  */
+       orr     syndrome, diff, has_nul
+       cbz     syndrome, .Lloop_aligned
+       b       .Lcal_cmpresult
+
+.Lmutual_align:
+       /*
+       * Sources are mutually aligned, but are not currently at an
+       * alignment boundary.  Round down the addresses and then mask off
+       * the bytes that preceed the start point.
+       */
+       bic     src1, src1, #7
+       bic     src2, src2, #7
+       lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
+       ldr     data1, [src1], #8
+       neg     tmp1, tmp1              /* Bits to alignment -64.  */
+       ldr     data2, [src2], #8
+       mov     tmp2, #~0
+       /* Big-endian.  Early bytes are at MSB.  */
+CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       /* Little-endian.  Early bytes are at LSB.  */
+CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+
+       orr     data1, data1, tmp2
+       orr     data2, data2, tmp2
+       b       .Lstart_realigned
+
+.Lmisaligned8:
+       /*
+       * Get the align offset length to compare per byte first.
+       * After this process, one string's address will be aligned.
+       */
+       and     tmp1, src1, #7
+       neg     tmp1, tmp1
+       add     tmp1, tmp1, #8
+       and     tmp2, src2, #7
+       neg     tmp2, tmp2
+       add     tmp2, tmp2, #8
+       subs    tmp3, tmp1, tmp2
+       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
+.Ltinycmp:
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       subs    pos, pos, #1
+       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.eq    .Ltinycmp
+       cbnz    pos, 1f /*find the null or unequal...*/
+       cmp     data1w, #1
+       ccmp    data1w, data2w, #0, cs
+       b.eq    .Lstart_align /*the last bytes are equal....*/
+1:
+       sub     result, data1, data2
+       ret
+
+.Lstart_align:
+       ands    xzr, src1, #7
+       b.eq    .Lrecal_offset
+       /*process more leading bytes to make str1 aligned...*/
+       add     src1, src1, tmp3
+       add     src2, src2, tmp3
+       /*load 8 bytes from aligned str1 and non-aligned str2..*/
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       eor     diff, data1, data2 /* Non-zero if differences found.  */
+       orr     syndrome, diff, has_nul
+       cbnz    syndrome, .Lcal_cmpresult
+       /*How far is the current str2 from the alignment boundary...*/
+       and     tmp3, tmp3, #7
+.Lrecal_offset:
+       neg     pos, tmp3
+.Lloopcmp_proc:
+       /*
+       * Divide the eight bytes into two parts. First, step src2 back
+       * to an alignment boundary, load eight bytes from the SRC2
+       * alignment boundary, then compare with the relative bytes from
+       * SRC1. If all 8 bytes are equal, start the second part's
+       * comparison. Otherwise finish the comparison.
+       * This special handling guarantees all the accesses are within
+       * the thread/task address space, avoiding out-of-range access.
+       */
+       ldr     data1, [src1,pos]
+       ldr     data2, [src2,pos]
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       orr     syndrome, diff, has_nul
+       cbnz    syndrome, .Lcal_cmpresult
+
+       /*The second part process*/
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       bic     has_nul, tmp1, tmp2
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       orr     syndrome, diff, has_nul
+       cbz     syndrome, .Lloopcmp_proc
+
+.Lcal_cmpresult:
+       /*
+       * Reverse the byte order (as if big-endian), so that CLZ can find
+       * the most significant zero bits.
+       */
+CPU_LE( rev    syndrome, syndrome )
+CPU_LE( rev    data1, data1 )
+CPU_LE( rev    data2, data2 )
+
+       /*
+       * For big-endian we cannot use the trick with the syndrome value
+       * as carry-propagation can corrupt the upper bits if the trailing
+       * bytes in the string contain 0x01.
+       * However, if there is no NUL byte in the dword, we can generate
+       * the result directly.  We cannot just subtract the bytes as the
+       * MSB might be significant.
+       */
+CPU_BE( cbnz   has_nul, 1f )
+CPU_BE( cmp    data1, data2 )
+CPU_BE( cset   result, ne )
+CPU_BE( cneg   result, result, lo )
+CPU_BE( ret )
+CPU_BE( 1: )
+       /*Re-compute the NUL-byte detection, using a byte-reversed value. */
+CPU_BE(        rev     tmp3, data1 )
+CPU_BE(        sub     tmp1, tmp3, zeroones )
+CPU_BE(        orr     tmp2, tmp3, #REP8_7f )
+CPU_BE(        bic     has_nul, tmp1, tmp2 )
+CPU_BE(        rev     has_nul, has_nul )
+CPU_BE(        orr     syndrome, diff, has_nul )
+
+       clz     pos, syndrome
+       /*
+       * The MS-non-zero bit of the syndrome marks either the first bit
+       * that is different, or the top bit of the first zero byte.
+       * Shifting left now will bring the critical information into the
+       * top bits.
+       */
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /*
+       * But we need to zero-extend (char is unsigned) the value and then
+       * perform a signed 32-bit subtraction.
+       */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+ENDPROC(strcmp)
diff --git a/arch/arm64/lib/strlen.S b/arch/arm64/lib/strlen.S
new file mode 100644 (file)
index 0000000..987b68b
--- /dev/null
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * calculate the length of a string
+ *
+ * Parameters:
+ *     x0 - const string pointer
+ * Returns:
+ *     x0 - the return length of specific string
+ */
+
+/* Arguments and results.  */
+srcin          .req    x0
+len            .req    x0
+
+/* Locals and temporaries.  */
+src            .req    x1
+data1          .req    x2
+data2          .req    x3
+data2a         .req    x4
+has_nul1       .req    x5
+has_nul2       .req    x6
+tmp1           .req    x7
+tmp2           .req    x8
+tmp3           .req    x9
+tmp4           .req    x10
+zeroones       .req    x11
+pos            .req    x12
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+ENTRY(strlen)
+       mov     zeroones, #REP8_01
+       bic     src, srcin, #15
+       ands    tmp1, srcin, #15
+       b.ne    .Lmisaligned
+       /*
+       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+       * can be done in parallel across the entire word.
+       */
+       /*
+       * The inner loop deals with two Dwords at a time. This has a
+       * slightly higher start-up cost, but we should win quite quickly,
+       * especially on cores with a high number of issue slots per
+       * cycle, as we get much better parallelism out of the operations.
+       */
+.Lloop:
+       ldp     data1, data2, [src], #16
+.Lrealigned:
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, #REP8_7f
+       bic     has_nul1, tmp1, tmp2
+       bics    has_nul2, tmp3, tmp4
+       ccmp    has_nul1, #0, #0, eq    /* NZCV = 0000  */
+       b.eq    .Lloop
+
+       sub     len, src, srcin
+       cbz     has_nul1, .Lnul_in_data2
+CPU_BE(        mov     data2, data1 )  /*prepare data to re-calculate the syndrome*/
+       sub     len, len, #8
+       mov     has_nul2, has_nul1
+.Lnul_in_data2:
+       /*
+       * For big-endian, carry propagation (if the final byte in the
+       * string is 0x01) means we cannot use has_nul directly.  The
+       * easiest way to get the correct byte is to byte-swap the data
+       * and calculate the syndrome a second time.
+       */
+CPU_BE( rev    data2, data2 )
+CPU_BE( sub    tmp1, data2, zeroones )
+CPU_BE( orr    tmp2, data2, #REP8_7f )
+CPU_BE( bic    has_nul2, tmp1, tmp2 )
+
+       sub     len, len, #8
+       rev     has_nul2, has_nul2
+       clz     pos, has_nul2
+       add     len, len, pos, lsr #3           /* Bits to bytes.  */
+       ret
+
+.Lmisaligned:
+       cmp     tmp1, #8
+       neg     tmp1, tmp1
+       ldp     data1, data2, [src], #16
+       lsl     tmp1, tmp1, #3          /* Bytes beyond alignment -> bits.  */
+       mov     tmp2, #~0
+       /* Big-endian.  Early bytes are at MSB.  */
+CPU_BE( lsl    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+       /* Little-endian.  Early bytes are at LSB.  */
+CPU_LE( lsr    tmp2, tmp2, tmp1 )      /* Shift (tmp1 & 63).  */
+
+       orr     data1, data1, tmp2
+       orr     data2a, data2, tmp2
+       csinv   data1, data1, xzr, le
+       csel    data2, data2, data2a, le
+       b       .Lrealigned
+ENDPROC(strlen)
diff --git a/arch/arm64/lib/strncmp.S b/arch/arm64/lib/strncmp.S
new file mode 100644 (file)
index 0000000..0224cf5
--- /dev/null
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * compare two strings
+ *
+ * Parameters:
+ *  x0 - const string 1 pointer
+ *  x1 - const string 2 pointer
+ *  x2 - the maximal length to be compared
+ * Returns:
+ *  x0 - an integer less than, equal to, or greater than zero if s1 is found,
+ *     respectively, to be less than, to match, or be greater than s2.
+ */
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+/* Parameters and result.  */
+src1           .req    x0
+src2           .req    x1
+limit          .req    x2
+result         .req    x0
+
+/* Internal variables.  */
+data1          .req    x3
+data1w         .req    w3
+data2          .req    x4
+data2w         .req    w4
+has_nul                .req    x5
+diff           .req    x6
+syndrome       .req    x7
+tmp1           .req    x8
+tmp2           .req    x9
+tmp3           .req    x10
+zeroones       .req    x11
+pos            .req    x12
+limit_wd       .req    x13
+mask           .req    x14
+endloop                .req    x15
+
+ENTRY(strncmp)
+       cbz     limit, .Lret0
+       eor     tmp1, src1, src2
+       mov     zeroones, #REP8_01
+       tst     tmp1, #7
+       b.ne    .Lmisaligned8
+       ands    tmp1, src1, #7
+       b.ne    .Lmutual_align
+       /* Calculate the number of full and partial words -1.  */
+       /*
+       * When limit is a multiple of 8, subtracting 1 is needed,
+       * otherwise the check for the last dword would be wrong.
+       */
+       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
+       lsr     limit_wd, limit_wd, #3  /* Convert to Dwords.  */
+
+       /*
+       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+       * can be done in parallel across the entire word.
+       */
+.Lloop_aligned:
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+.Lstart_realigned:
+       subs    limit_wd, limit_wd, #1
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, pl  /* Last Dword or differences.*/
+       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
+       ccmp    endloop, #0, #0, eq
+       b.eq    .Lloop_aligned
+
+       /*Not reached the limit, must have found the end or a diff.  */
+       tbz     limit_wd, #63, .Lnot_limit
+
+       /* Limit % 8 == 0 => all bytes significant.  */
+       ands    limit, limit, #7
+       b.eq    .Lnot_limit
+
+       lsl     limit, limit, #3    /* Bits -> bytes.  */
+       mov     mask, #~0
+CPU_BE( lsr    mask, mask, limit )
+CPU_LE( lsl    mask, mask, limit )
+       bic     data1, data1, mask
+       bic     data2, data2, mask
+
+       /* Make sure that the NUL byte is marked in the syndrome.  */
+       orr     has_nul, has_nul, mask
+
+.Lnot_limit:
+       orr     syndrome, diff, has_nul
+       b       .Lcal_cmpresult
+
+.Lmutual_align:
+       /*
+       * Sources are mutually aligned, but are not currently at an
+       * alignment boundary.  Round down the addresses and then mask off
+       * the bytes that precede the start point.
+       * We also need to adjust the limit calculations, but without
+       * overflowing if the limit is near ULONG_MAX.
+       */
+       bic     src1, src1, #7
+       bic     src2, src2, #7
+       ldr     data1, [src1], #8
+       neg     tmp3, tmp1, lsl #3  /* 64 - bits(bytes beyond align). */
+       ldr     data2, [src2], #8
+       mov     tmp2, #~0
+       sub     limit_wd, limit, #1 /* limit != 0, so no underflow.  */
+       /* Big-endian.  Early bytes are at MSB.  */
+CPU_BE( lsl    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
+       /* Little-endian.  Early bytes are at LSB.  */
+CPU_LE( lsr    tmp2, tmp2, tmp3 )      /* Shift (tmp1 & 63).  */
+
+       and     tmp3, limit_wd, #7
+       lsr     limit_wd, limit_wd, #3
+       /* Adjust the limit. Only low 3 bits used, so overflow irrelevant.*/
+       add     limit, limit, tmp1
+       add     tmp3, tmp3, tmp1
+       orr     data1, data1, tmp2
+       orr     data2, data2, tmp2
+       add     limit_wd, limit_wd, tmp3, lsr #3
+       b       .Lstart_realigned
+
+/*when src1 offset is not equal to src2 offset...*/
+.Lmisaligned8:
+       cmp     limit, #8
+       b.lo    .Ltiny8proc /*limit < 8... */
+       /*
+       * Get the align offset length to compare per byte first.
+       * After this process, one string's address will be aligned.*/
+       and     tmp1, src1, #7
+       neg     tmp1, tmp1
+       add     tmp1, tmp1, #8
+       and     tmp2, src2, #7
+       neg     tmp2, tmp2
+       add     tmp2, tmp2, #8
+       subs    tmp3, tmp1, tmp2
+       csel    pos, tmp1, tmp2, hi /*Choose the maximum. */
+       /*
+       * Here, limit is not less than 8, so directly run .Ltinycmp
+       * without checking the limit.*/
+       sub     limit, limit, pos
+.Ltinycmp:
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       subs    pos, pos, #1
+       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.eq    .Ltinycmp
+       cbnz    pos, 1f /*find the null or unequal...*/
+       cmp     data1w, #1
+       ccmp    data1w, data2w, #0, cs
+       b.eq    .Lstart_align /*the last bytes are equal....*/
+1:
+       sub     result, data1, data2
+       ret
+
+.Lstart_align:
+       lsr     limit_wd, limit, #3
+       cbz     limit_wd, .Lremain8
+       /*process more leading bytes to make str1 aligned...*/
+       ands    xzr, src1, #7
+       b.eq    .Lrecal_offset
+       add     src1, src1, tmp3        /*tmp3 is positive in this branch.*/
+       add     src2, src2, tmp3
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+
+       sub     limit, limit, tmp3
+       lsr     limit_wd, limit, #3
+       subs    limit_wd, limit_wd, #1
+
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
+       bics    has_nul, tmp1, tmp2
+       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
+       b.ne    .Lunequal_proc
+       /*How far is the current str2 from the alignment boundary...*/
+       and     tmp3, tmp3, #7
+.Lrecal_offset:
+       neg     pos, tmp3
+.Lloopcmp_proc:
+       /*
+       * Divide the eight bytes into two parts. First, step src2 back
+       * to an alignment boundary, load eight bytes from the SRC2
+       * alignment boundary, then compare with the relative bytes from
+       * SRC1. If all 8 bytes are equal, start the second part's
+       * comparison. Otherwise finish the comparison.
+       * This special handling guarantees all the accesses are within
+       * the thread/task address space, avoiding out-of-range access.
+       */
+       ldr     data1, [src1,pos]
+       ldr     data2, [src2,pos]
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       bics    has_nul, tmp1, tmp2 /* Non-zero if NUL terminator.  */
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, eq
+       cbnz    endloop, .Lunequal_proc
+
+       /*The second part process*/
+       ldr     data1, [src1], #8
+       ldr     data2, [src2], #8
+       subs    limit_wd, limit_wd, #1
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       eor     diff, data1, data2  /* Non-zero if differences found.  */
+       csinv   endloop, diff, xzr, ne/*if limit_wd is 0,will finish the cmp*/
+       bics    has_nul, tmp1, tmp2
+       ccmp    endloop, #0, #0, eq /*has_null is ZERO: no null byte*/
+       b.eq    .Lloopcmp_proc
+
+.Lunequal_proc:
+       orr     syndrome, diff, has_nul
+       cbz     syndrome, .Lremain8
+.Lcal_cmpresult:
+       /*
+       * Reverse the byte order (as if big-endian), so that CLZ can find
+       * the most significant zero bits.
+       */
+CPU_LE( rev    syndrome, syndrome )
+CPU_LE( rev    data1, data1 )
+CPU_LE( rev    data2, data2 )
+       /*
+       * For big-endian we cannot use the trick with the syndrome value
+       * as carry-propagation can corrupt the upper bits if the trailing
+       * bytes in the string contain 0x01.
+       * However, if there is no NUL byte in the dword, we can generate
+       * the result directly.  We can't just subtract the bytes as the
+       * MSB might be significant.
+       */
+CPU_BE( cbnz   has_nul, 1f )
+CPU_BE( cmp    data1, data2 )
+CPU_BE( cset   result, ne )
+CPU_BE( cneg   result, result, lo )
+CPU_BE( ret )
+CPU_BE( 1: )
+       /* Re-compute the NUL-byte detection, using a byte-reversed value.*/
+CPU_BE( rev    tmp3, data1 )
+CPU_BE( sub    tmp1, tmp3, zeroones )
+CPU_BE( orr    tmp2, tmp3, #REP8_7f )
+CPU_BE( bic    has_nul, tmp1, tmp2 )
+CPU_BE( rev    has_nul, has_nul )
+CPU_BE( orr    syndrome, diff, has_nul )
+       /*
+       * The MS-non-zero bit of the syndrome marks either the first bit
+       * that is different, or the top bit of the first zero byte.
+       * Shifting left now will bring the critical information into the
+       * top bits.
+       */
+       clz     pos, syndrome
+       lsl     data1, data1, pos
+       lsl     data2, data2, pos
+       /*
+       * But we need to zero-extend (char is unsigned) the value and then
+       * perform a signed 32-bit subtraction.
+       */
+       lsr     data1, data1, #56
+       sub     result, data1, data2, lsr #56
+       ret
+
+.Lremain8:
+       /* Limit % 8 == 0 => all bytes significant.  */
+       ands    limit, limit, #7
+       b.eq    .Lret0
+.Ltiny8proc:
+       ldrb    data1w, [src1], #1
+       ldrb    data2w, [src2], #1
+       subs    limit, limit, #1
+
+       ccmp    data1w, #1, #0, ne  /* NZCV = 0b0000.  */
+       ccmp    data1w, data2w, #0, cs  /* NZCV = 0b0000.  */
+       b.eq    .Ltiny8proc
+       sub     result, data1, data2
+       ret
+
+.Lret0:
+       mov     result, #0
+       ret
+ENDPROC(strncmp)
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
new file mode 100644 (file)
index 0000000..2ca6657
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ * Copyright (C) 2013 Linaro.
+ *
+ * This code is based on glibc cortex strings work originally authored by Linaro
+ * and re-licensed under GPLv2 for the Linux kernel. The original code can
+ * be found @
+ *
+ * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
+ * files/head:/src/aarch64/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * determine the length of a fixed-size string
+ *
+ * Parameters:
+ *     x0 - const string pointer
+ *     x1 - maximal string length
+ * Returns:
+ *     x0 - the return length of specific string
+ */
+
+/* Arguments and results.  */
+srcin          .req    x0
+len            .req    x0
+limit          .req    x1
+
+/* Locals and temporaries.  */
+src            .req    x2
+data1          .req    x3
+data2          .req    x4
+data2a         .req    x5
+has_nul1       .req    x6
+has_nul2       .req    x7
+tmp1           .req    x8
+tmp2           .req    x9
+tmp3           .req    x10
+tmp4           .req    x11
+zeroones       .req    x12
+pos            .req    x13
+limit_wd       .req    x14
+
+#define REP8_01 0x0101010101010101
+#define REP8_7f 0x7f7f7f7f7f7f7f7f
+#define REP8_80 0x8080808080808080
+
+ENTRY(strnlen)
+       cbz     limit, .Lhit_limit
+       mov     zeroones, #REP8_01
+       bic     src, srcin, #15
+       ands    tmp1, srcin, #15
+       b.ne    .Lmisaligned
+       /* Calculate the number of full and partial words -1.  */
+       sub     limit_wd, limit, #1 /* Limit != 0, so no underflow.  */
+       lsr     limit_wd, limit_wd, #4  /* Convert to Qwords.  */
+
+       /*
+       * NUL detection works on the principle that (X - 1) & (~X) & 0x80
+       * (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
+       * can be done in parallel across the entire word.
+       */
+       /*
+       * The inner loop deals with two Dwords at a time.  This has a
+       * slightly higher start-up cost, but we should win quite quickly,
+       * especially on cores with a high number of issue slots per
+       * cycle, as we get much better parallelism out of the operations.
+       */
+.Lloop:
+       ldp     data1, data2, [src], #16
+.Lrealigned:
+       sub     tmp1, data1, zeroones
+       orr     tmp2, data1, #REP8_7f
+       sub     tmp3, data2, zeroones
+       orr     tmp4, data2, #REP8_7f
+       bic     has_nul1, tmp1, tmp2
+       bic     has_nul2, tmp3, tmp4
+       subs    limit_wd, limit_wd, #1
+       orr     tmp1, has_nul1, has_nul2
+       ccmp    tmp1, #0, #0, pl    /* NZCV = 0000  */
+       b.eq    .Lloop
+
+       cbz     tmp1, .Lhit_limit   /* No null in final Qword.  */
+
+       /*
+       * We know there's a null in the final Qword. The easiest thing
+       * to do now is work out the length of the string and return
+       * MIN (len, limit).
+       */
+       sub     len, src, srcin
+       cbz     has_nul1, .Lnul_in_data2
+CPU_BE( mov    data2, data1 )  /*prepare data to re-calculate the syndrome*/
+
+       sub     len, len, #8
+       mov     has_nul2, has_nul1
+.Lnul_in_data2:
+       /*
+       * For big-endian, carry propagation (if the final byte in the
+       * string is 0x01) means we cannot use has_nul directly.  The
+       * easiest way to get the correct byte is to byte-swap the data
+       * and calculate the syndrome a second time.
+       */
+CPU_BE( rev    data2, data2 )
+CPU_BE( sub    tmp1, data2, zeroones )
+CPU_BE( orr    tmp2, data2, #REP8_7f )
+CPU_BE( bic    has_nul2, tmp1, tmp2 )
+
+       sub     len, len, #8
+       rev     has_nul2, has_nul2
+       clz     pos, has_nul2
+       add     len, len, pos, lsr #3       /* Bits to bytes.  */
+       cmp     len, limit
+       csel    len, len, limit, ls     /* Return the lower value.  */
+       ret
+
+.Lmisaligned:
+       /*
+       * Deal with a partial first word.
+       * We're doing two things in parallel here;
+       * 1) Calculate the number of words (but avoiding overflow if
+       * limit is near ULONG_MAX) - to do this we need to work out
+       * limit + tmp1 - 1 as a 65-bit value before shifting it;
+       * 2) Load and mask the initial data words - we force the bytes
+       * before the ones we are interested in to 0xff - this ensures
+       * early bytes will not hit any zero detection.
+       */
+       ldp     data1, data2, [src], #16
+
+       sub     limit_wd, limit, #1
+       and     tmp3, limit_wd, #15
+       lsr     limit_wd, limit_wd, #4
+
+       add     tmp3, tmp3, tmp1
+       add     limit_wd, limit_wd, tmp3, lsr #4
+
+       neg     tmp4, tmp1
+       lsl     tmp4, tmp4, #3  /* Bytes beyond alignment -> bits.  */
+
+       mov     tmp2, #~0
+       /* Big-endian.  Early bytes are at MSB.  */
+CPU_BE( lsl    tmp2, tmp2, tmp4 )      /* Shift (tmp1 & 63).  */
+       /* Little-endian.  Early bytes are at LSB.  */
+CPU_LE( lsr    tmp2, tmp2, tmp4 )      /* Shift (tmp1 & 63).  */
+
+       cmp     tmp1, #8
+
+       orr     data1, data1, tmp2
+       orr     data2a, data2, tmp2
+
+       csinv   data1, data1, xzr, le
+       csel    data2, data2, data2a, le
+       b       .Lrealigned
+
+.Lhit_limit:
+       mov     len, limit
+       ret
+ENDPROC(strnlen)
index 3ecb56c624d369810f2c19789b73e663635902b9..c56179ed2c09af1d96b4f8df1b42d2ce6a67ab27 100644 (file)
@@ -1,5 +1,5 @@
 obj-y                          := dma-mapping.o extable.o fault.o init.o \
                                   cache.o copypage.o flush.o \
                                   ioremap.o mmap.o pgd.o mmu.o \
-                                  context.o proc.o
+                                  context.o proc.o pageattr.o
 obj-$(CONFIG_HUGETLB_PAGE)     += hugetlbpage.o
index fda756875fa63e0fca640e99ca9a20ed789a20f4..23663837acff5e61c15cb9a3342c414c3fb710b1 100644 (file)
@@ -31,7 +31,7 @@
  *     Corrupted registers: x0-x7, x9-x11
  */
 __flush_dcache_all:
-       dsb     sy                              // ensure ordering with previous memory accesses
+       dmb     sy                              // ensure ordering with previous memory accesses
        mrs     x0, clidr_el1                   // read clidr
        and     x3, x0, #0x7000000              // extract loc from clidr
        lsr     x3, x3, #23                     // left align loc bit field
@@ -128,7 +128,7 @@ USER(9f, dc cvau, x4        )               // clean D line to PoU
        add     x4, x4, x2
        cmp     x4, x1
        b.lo    1b
-       dsb     sy
+       dsb     ish
 
        icache_line_size x2, x3
        sub     x3, x2, #1
@@ -139,7 +139,7 @@ USER(9f, ic ivau, x4        )               // invalidate I line PoU
        cmp     x4, x1
        b.lo    1b
 9:                                             // ignore any faulting cache operation
-       dsb     sy
+       dsb     ish
        isb
        ret
 ENDPROC(flush_icache_range)
index f39a55d5891843adc3b7b716c575918e7b530f90..eeb1cf3ff2999316d25f9cc6ef83e4d534042e26 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/export.h>
 #include <linux/slab.h>
+#include <linux/genalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
 #include <linux/of.h>
@@ -43,6 +44,54 @@ static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
        return prot;
 }
 
+static struct gen_pool *atomic_pool;
+
+#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
+static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
+
+static int __init early_coherent_pool(char *p)
+{
+       atomic_pool_size = memparse(p, &p);
+       return 0;
+}
+early_param("coherent_pool", early_coherent_pool);
+
+static void *__alloc_from_pool(size_t size, struct page **ret_page)
+{
+       unsigned long val;
+       void *ptr = NULL;
+
+       if (!atomic_pool) {
+               WARN(1, "coherent pool not initialised!\n");
+               return NULL;
+       }
+
+       val = gen_pool_alloc(atomic_pool, size);
+       if (val) {
+               phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
+
+               *ret_page = phys_to_page(phys);
+               ptr = (void *)val;
+       }
+
+       return ptr;
+}
+
+static bool __in_atomic_pool(void *start, size_t size)
+{
+       return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
+}
+
+static int __free_from_pool(void *start, size_t size)
+{
+       if (!__in_atomic_pool(start, size))
+               return 0;
+
+       gen_pool_free(atomic_pool, (unsigned long)start, size);
+
+       return 1;
+}
+
 static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
@@ -50,7 +99,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
-       if (IS_ENABLED(CONFIG_DMA_CMA)) {
+       if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
                struct page *page;
 
                size = PAGE_ALIGN(size);
@@ -70,50 +119,54 @@ static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
 {
+       bool freed;
+       phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }
 
-       if (IS_ENABLED(CONFIG_DMA_CMA)) {
-               phys_addr_t paddr = dma_to_phys(dev, dma_handle);
-
-               dma_release_from_contiguous(dev,
+       freed = dma_release_from_contiguous(dev,
                                        phys_to_page(paddr),
                                        size >> PAGE_SHIFT);
-       } else {
+       if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
-       }
 }
 
 static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
                                     dma_addr_t *dma_handle, gfp_t flags,
                                     struct dma_attrs *attrs)
 {
-       struct page *page, **map;
+       struct page *page;
        void *ptr, *coherent_ptr;
-       int order, i;
 
        size = PAGE_ALIGN(size);
-       order = get_order(size);
+
+       if (!(flags & __GFP_WAIT)) {
+               struct page *page = NULL;
+               void *addr = __alloc_from_pool(size, &page);
+
+               if (addr)
+                       *dma_handle = phys_to_dma(dev, page_to_phys(page));
+
+               return addr;
+
+       }
 
        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;
-       map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
-       if (!map)
-               goto no_map;
 
        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);
 
        /* create a coherent mapping */
        page = virt_to_page(ptr);
-       for (i = 0; i < (size >> PAGE_SHIFT); i++)
-               map[i] = page + i;
-       coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
-                           __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
-       kfree(map);
+       coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+                               __get_dma_pgprot(attrs,
+                                       __pgprot(PROT_NORMAL_NC), false),
+                                       NULL);
        if (!coherent_ptr)
                goto no_map;
 
@@ -132,6 +185,8 @@ static void __dma_free_noncoherent(struct device *dev, size_t size,
 {
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
 
+       if (__free_from_pool(vaddr, size))
+               return;
        vunmap(vaddr);
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
 }
@@ -329,6 +384,67 @@ static struct notifier_block amba_bus_nb = {
 
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
+static int __init atomic_pool_init(void)
+{
+       pgprot_t prot = __pgprot(PROT_NORMAL_NC);
+       unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
+       struct page *page;
+       void *addr;
+       unsigned int pool_size_order = get_order(atomic_pool_size);
+
+       if (dev_get_cma_area(NULL))
+               page = dma_alloc_from_contiguous(NULL, nr_pages,
+                                                       pool_size_order);
+       else
+               page = alloc_pages(GFP_DMA, pool_size_order);
+
+       if (page) {
+               int ret;
+               void *page_addr = page_address(page);
+
+               memset(page_addr, 0, atomic_pool_size);
+               __dma_flush_range(page_addr, page_addr + atomic_pool_size);
+
+               atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
+               if (!atomic_pool)
+                       goto free_page;
+
+               addr = dma_common_contiguous_remap(page, atomic_pool_size,
+                                       VM_USERMAP, prot, atomic_pool_init);
+
+               if (!addr)
+                       goto destroy_genpool;
+
+               ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
+                                       page_to_phys(page),
+                                       atomic_pool_size, -1);
+               if (ret)
+                       goto remove_mapping;
+
+               gen_pool_set_algo(atomic_pool,
+                                 gen_pool_first_fit_order_align,
+                                 (void *)PAGE_SHIFT);
+
+               pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
+                       atomic_pool_size / 1024);
+               return 0;
+       }
+       goto out;
+
+remove_mapping:
+       dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
+destroy_genpool:
+       gen_pool_destroy(atomic_pool);
+       atomic_pool = NULL;
+free_page:
+       if (!dma_release_from_contiguous(NULL, page, nr_pages))
+               __free_pages(page, pool_size_order);
+out:
+       pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
+               atomic_pool_size / 1024);
+       return -ENOMEM;
+}
+
 static int __init swiotlb_late_init(void)
 {
        size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
@@ -343,7 +459,17 @@ static int __init swiotlb_late_init(void)
 
        return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-arch_initcall(swiotlb_late_init);
+
+static int __init arm64_dma_init(void)
+{
+       int ret = 0;
+
+       ret |= swiotlb_late_init();
+       ret |= atomic_pool_init();
+
+       return ret;
+}
+arch_initcall(arm64_dma_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
index 2fc8258bab2df614ab51a0bf1ffb4a36a236ba41..023747bf4dd7dd2025a17c3fc44244aa1b0f7b00 100644 (file)
@@ -51,7 +51,11 @@ int pmd_huge(pmd_t pmd)
 
 int pud_huge(pud_t pud)
 {
+#ifndef __PAGETABLE_PMD_FOLDED
        return !(pud_val(pud) & PUD_TABLE_BIT);
+#else
+       return 0;
+#endif
 }
 
 static __init int setup_hugepagesz(char *opt)
index 1adb46528fb27246a5417e3ceb970341c8c575aa..ea0c0812ef8e6dd48da001737fb2d375c031f0ed 100644 (file)
@@ -68,6 +68,17 @@ static int __init early_initrd(char *p)
 }
 early_param("initrd", early_initrd);
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+       return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
@@ -78,9 +89,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               unsigned long max_dma_phys =
-                       (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
-               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               max_dma = PFN_DOWN(max_zone_dma_phys());
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -153,6 +162,7 @@ void __init rockchip_ion_reserve_bit64(void)
 void __init arm64_memblock_init(void)
 {
        u64 *reserve_map, base, size;
+       phys_addr_t dma_phys_limit = 0;
 
        /*
         * Register the kernel text, kernel data, initrd, and initial
@@ -192,7 +202,11 @@ void __init arm64_memblock_init(void)
 #ifdef CONFIG_ARCH_ROCKCHIP
        rockchip_ion_reserve_bit64();
 #endif
-       dma_contiguous_reserve(0);
+
+       /* 4GB maximum for 32-bit only capable devices */
+       if (IS_ENABLED(CONFIG_ZONE_DMA))
+               dma_phys_limit = max_zone_dma_phys();
+       dma_contiguous_reserve(dma_phys_limit);
 
        memblock_allow_resize();
        memblock_dump_all();
index 00d315ae1de9715c4428dd2a753132b0288b6209..cf25fa9aa68f555e2f64a77386e225a797a8112d 100644 (file)
@@ -98,19 +98,25 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
-#ifndef CONFIG_ARM64_64K_PAGES
 static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
 #endif
 
-static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+static inline pud_t * __init early_ioremap_pud(unsigned long addr)
 {
        pgd_t *pgd;
-       pud_t *pud;
 
        pgd = pgd_offset_k(addr);
        BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
 
-       pud = pud_offset(pgd, addr);
+       return pud_offset(pgd, addr);
+}
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+       pud_t *pud = early_ioremap_pud(addr);
+
        BUG_ON(pud_none(*pud) || pud_bad(*pud));
 
        return pmd_offset(pud, addr);
@@ -127,13 +133,17 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr)
 
 void __init early_ioremap_init(void)
 {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
+       unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN);
 
-       pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-#ifndef CONFIG_ARM64_64K_PAGES
-       /* need to populate pmd for 4k pagesize only */
+       pgd = pgd_offset_k(addr);
+       pud = pud_offset(pgd, addr);
+       pud_populate(&init_mm, pud, bm_pmd);
+       pmd = pmd_offset(pud, addr);
        pmd_populate_kernel(&init_mm, pmd, bm_pte);
-#endif
+
        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
new file mode 100644 (file)
index 0000000..75e744e
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page_change_data {
+       pgprot_t set_mask;
+       pgprot_t clear_mask;
+};
+
+static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
+                       void *data)
+{
+       struct page_change_data *cdata = data;
+       pte_t pte = *ptep;
+
+       pte = clear_pte_bit(pte, cdata->clear_mask);
+       pte = set_pte_bit(pte, cdata->set_mask);
+
+       set_pte(ptep, pte);
+       return 0;
+}
+
+static int change_memory_common(unsigned long addr, int numpages,
+                               pgprot_t set_mask, pgprot_t clear_mask)
+{
+       unsigned long start = addr;
+       unsigned long size = PAGE_SIZE*numpages;
+       unsigned long end = start + size;
+       int ret;
+       struct page_change_data data;
+
+       if (!IS_ALIGNED(addr, PAGE_SIZE)) {
+               addr &= PAGE_MASK;
+               WARN_ON_ONCE(1);
+       }
+
+       if (!is_module_address(start) || !is_module_address(end - 1))
+               return -EINVAL;
+
+       data.set_mask = set_mask;
+       data.clear_mask = clear_mask;
+
+       ret = apply_to_page_range(&init_mm, start, size, change_page_range,
+                                       &data);
+
+       flush_tlb_kernel_range(start, end);
+       return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+       return change_memory_common(addr, numpages,
+                                       __pgprot(PTE_RDONLY),
+                                       __pgprot(PTE_WRITE));
+}
+EXPORT_SYMBOL_GPL(set_memory_ro);
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+       return change_memory_common(addr, numpages,
+                                       __pgprot(PTE_WRITE),
+                                       __pgprot(PTE_RDONLY));
+}
+EXPORT_SYMBOL_GPL(set_memory_rw);
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+       return change_memory_common(addr, numpages,
+                                       __pgprot(PTE_PXN),
+                                       __pgprot(0));
+}
+EXPORT_SYMBOL_GPL(set_memory_nx);
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+       return change_memory_common(addr, numpages,
+                                       __pgprot(0),
+                                       __pgprot(PTE_PXN));
+}
+EXPORT_SYMBOL_GPL(set_memory_x);
index e832494016157716da981c07097ba0b0d5e3535e..1cf94513b37f48085e9969c4ead47af3e419fbfb 100644 (file)
@@ -69,6 +69,21 @@ ENTRY(cpu_reset)
        ret     x0
 ENDPROC(cpu_reset)
 
+ENTRY(cpu_soft_restart)
+       /* Save address of cpu_reset() and reset address */
+       mov     x19, x0
+       mov     x20, x1
+
+       /* Turn D-cache off */
+       bl      cpu_cache_off
+
+       /* Push out all dirty data, and ensure cache is empty */
+       bl      flush_cache_all
+
+       mov     x0, x20
+       ret     x19
+ENDPROC(cpu_soft_restart)
+
 /*
  *     cpu_do_idle()
  *
@@ -175,7 +190,7 @@ ENDPROC(cpu_do_switch_mm)
 ENTRY(__cpu_setup)
        ic      iallu                           // I+BTB cache invalidate
        tlbi    vmalle1is                       // invalidate I + D TLBs
-       dsb     sy
+       dsb     ish
 
        mov     x0, #3 << 20
        msr     cpacr_el1, x0                   // Enable FP/ASIMD
index 47afd08c90f7fd3f934beafb9398d106099e7f40..fe7e97a1aad9922b619c6511596d09d3cc2034ac 100644 (file)
@@ -30,8 +30,8 @@
 V_FUNCTION_BEGIN(__kernel_getcpu)
   .cfi_startproc
        mfspr   r5,SPRN_USPRG3
-       cmpdi   cr0,r3,0
-       cmpdi   cr1,r4,0
+       cmpwi   cr0,r3,0
+       cmpwi   cr1,r4,0
        clrlwi  r6,r5,16
        rlwinm  r7,r5,16,31-15,31-0
        beq     cr0,1f
index 8b6e4f5288a29cc155fb1a3f20d46c08da38be07..a98afed9348b73f57e27b40dd92db99f38f51a72 100644 (file)
@@ -248,7 +248,7 @@ asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
        struct group_info *group_info;
        int retval;
 
-       if (!capable(CAP_SETGID))
+       if (!may_setgroups())
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;
index 2a46ca720afca5fd6f5e5010698e8eea56748837..2874be9aef0ae7f19efec928c66a4bb0394e0311 100644 (file)
@@ -34,7 +34,7 @@ static inline unsigned int __getcpu(void)
                native_read_tscp(&p);
        } else {
                /* Load per CPU data from GDT */
-               asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+               asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
        }
 
        return p;
index 46727eb37bfe20915242badc965a2b4985bd28b4..6e1aaf73852ac956156df80f4dfcb931633f5269 100644 (file)
@@ -28,6 +28,13 @@ struct user_desc {
        unsigned int  seg_not_present:1;
        unsigned int  useable:1;
 #ifdef __x86_64__
+       /*
+        * Because this bit is not present in 32-bit user code, user
+        * programs can pass uninitialized values here.  Therefore, in
+        * any context in which a user_desc comes from a 32-bit program,
+        * the kernel must act as though lm == 0, regardless of the
+        * actual value.
+        */
        unsigned int  lm:1;
 #endif
 };
index d651082c7cf720a805f653b162343f6b9a984329..7a34e8fe54bd1d8d2f9c98cbde0edd6b38f13ccf 100644 (file)
@@ -65,6 +65,7 @@
 #define EXIT_REASON_EOI_INDUCED         45
 #define EXIT_REASON_EPT_VIOLATION       48
 #define EXIT_REASON_EPT_MISCONFIG       49
+#define EXIT_REASON_INVEPT              50
 #define EXIT_REASON_PREEMPTION_TIMER    52
 #define EXIT_REASON_WBINVD              54
 #define EXIT_REASON_XSETBV              55
index 8aac56bda7dc3b37b8eac0620d3fc7990f50cf95..7185af255fb5237e126a0274b5db7660522cb92c 100644 (file)
@@ -2657,6 +2657,17 @@ static struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
        return uncore_pmu_to_box(uncore_event_to_pmu(event), smp_processor_id());
 }
 
+/*
+ * Using uncore_pmu_event_init pmu event_init callback
+ * as a detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+       return event->pmu->event_init == uncore_pmu_event_init;
+}
+
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {
@@ -2671,13 +2682,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
                return -EINVAL;
 
        n = box->n_events;
-       box->event_list[n] = leader;
-       n++;
+
+       if (is_uncore_event(leader)) {
+               box->event_list[n] = leader;
+               n++;
+       }
+
        if (!dogrp)
                return n;
 
        list_for_each_entry(event, &leader->sibling_list, group_entry) {
-               if (event->state <= PERF_EVENT_STATE_OFF)
+               if (!is_uncore_event(event) ||
+                   event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 
                if (n >= max_count)
index cd6d9a5a42f60dcb93a528853fe65b8773b0832e..c4ff2a9161399b92bce9a148263b79bdfb862338 100644 (file)
@@ -279,7 +279,14 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 static void __init paravirt_ops_setup(void)
 {
        pv_info.name = "KVM";
-       pv_info.paravirt_enabled = 1;
+
+       /*
+        * KVM isn't paravirt in the sense of paravirt_enabled.  A KVM
+        * guest kernel works like a bare metal kernel with additional
+        * features, and paravirt_enabled is about features that are
+        * missing.
+        */
+       pv_info.paravirt_enabled = 0;
 
        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;
index 3dd37ebd591b36db493d449506d33a6b8915841f..41514f56c24138176408fd63def75b14f02a29ac 100644 (file)
@@ -265,7 +265,6 @@ void __init kvmclock_init(void)
 #endif
        kvm_get_preset_lpj();
        clocksource_register_hz(&kvm_clock, NSEC_PER_SEC);
-       pv_info.paravirt_enabled = 1;
        pv_info.name = "KVM";
 
        if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
index f99a242730e95ce185900990305e360444d36b44..7099ab1e075bd30b3b2b9d1458da7e6d61ef0e8c 100644 (file)
@@ -279,24 +279,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        fpu = switch_fpu_prepare(prev_p, next_p, cpu);
 
-       /*
-        * Reload esp0, LDT and the page table pointer:
-        */
+       /* Reload esp0 and ss1. */
        load_sp0(tss, next);
 
-       /*
-        * Switch DS and ES.
-        * This won't pick up thread selector changes, but I guess that is ok.
-        */
-       savesegment(es, prev->es);
-       if (unlikely(next->es | prev->es))
-               loadsegment(es, next->es);
-
-       savesegment(ds, prev->ds);
-       if (unlikely(next->ds | prev->ds))
-               loadsegment(ds, next->ds);
-
-
        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
@@ -305,41 +290,101 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        savesegment(fs, fsindex);
        savesegment(gs, gsindex);
 
+       /*
+        * Load TLS before restoring any segments so that segment loads
+        * reference the correct GDT entries.
+        */
        load_TLS(next, cpu);
 
        /*
-        * Leave lazy mode, flushing any hypercalls made here.
-        * This must be done before restoring TLS segments so
-        * the GDT and LDT are properly updated, and must be
-        * done before math_state_restore, so the TS bit is up
-        * to date.
+        * Leave lazy mode, flushing any hypercalls made here.  This
+        * must be done after loading TLS entries in the GDT but before
+        * loading segments that might reference them, and and it must
+        * be done before math_state_restore, so the TS bit is up to
+        * date.
         */
        arch_end_context_switch(next_p);
 
+       /* Switch DS and ES.
+        *
+        * Reading them only returns the selectors, but writing them (if
+        * nonzero) loads the full descriptor from the GDT or LDT.  The
+        * LDT for next is loaded in switch_mm, and the GDT is loaded
+        * above.
+        *
+        * We therefore need to write new values to the segment
+        * registers on every context switch unless both the new and old
+        * values are zero.
+        *
+        * Note that we don't need to do anything for CS and SS, as
+        * those are saved and restored as part of pt_regs.
+        */
+       savesegment(es, prev->es);
+       if (unlikely(next->es | prev->es))
+               loadsegment(es, next->es);
+
+       savesegment(ds, prev->ds);
+       if (unlikely(next->ds | prev->ds))
+               loadsegment(ds, next->ds);
+
        /*
         * Switch FS and GS.
         *
-        * Segment register != 0 always requires a reload.  Also
-        * reload when it has changed.  When prev process used 64bit
-        * base always reload to avoid an information leak.
+        * These are even more complicated than FS and GS: they have
+        * 64-bit bases are that controlled by arch_prctl.  Those bases
+        * only differ from the values in the GDT or LDT if the selector
+        * is 0.
+        *
+        * Loading the segment register resets the hidden base part of
+        * the register to 0 or the value from the GDT / LDT.  If the
+        * next base address zero, writing 0 to the segment register is
+        * much faster than using wrmsr to explicitly zero the base.
+        *
+        * The thread_struct.fs and thread_struct.gs values are 0
+        * if the fs and gs bases respectively are not overridden
+        * from the values implied by fsindex and gsindex.  They
+        * are nonzero, and store the nonzero base addresses, if
+        * the bases are overridden.
+        *
+        * (fs != 0 && fsindex != 0) || (gs != 0 && gsindex != 0) should
+        * be impossible.
+        *
+        * Therefore we need to reload the segment registers if either
+        * the old or new selector is nonzero, and we need to override
+        * the base address if next thread expects it to be overridden.
+        *
+        * This code is unnecessarily slow in the case where the old and
+        * new indexes are zero and the new base is nonzero -- it will
+        * unnecessarily write 0 to the selector before writing the new
+        * base address.
+        *
+        * Note: This all depends on arch_prctl being the only way that
+        * user code can override the segment base.  Once wrfsbase and
+        * wrgsbase are enabled, most of this code will need to change.
         */
        if (unlikely(fsindex | next->fsindex | prev->fs)) {
                loadsegment(fs, next->fsindex);
+
                /*
-                * Check if the user used a selector != 0; if yes
-                *  clear 64bit base, since overloaded base is always
-                *  mapped to the Null selector
+                * If user code wrote a nonzero value to FS, then it also
+                * cleared the overridden base address.
+                *
+                * XXX: if user code wrote 0 to FS and cleared the base
+                * address itself, we won't notice and we'll incorrectly
+                * restore the prior base address next time we reschdule
+                * the process.
                 */
                if (fsindex)
                        prev->fs = 0;
        }
-       /* when next process has a 64bit base use it */
        if (next->fs)
                wrmsrl(MSR_FS_BASE, next->fs);
        prev->fsindex = fsindex;
 
        if (unlikely(gsindex | next->gsindex | prev->gs)) {
                load_gs_index(next->gsindex);
+
+               /* This works (and fails) the same way as fsindex above. */
                if (gsindex)
                        prev->gs = 0;
        }
index 90fd1195f2761b140eaea95ead6fd23d92445873..3876c04feef9dbe9160ee5d3c3488ef15f6235de 100644 (file)
@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
 static const struct desc_ptr no_idt = {};
-static int reboot_mode;
+static enum reboot_mode reboot_mode;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
@@ -88,11 +88,11 @@ static int __init reboot_setup(char *str)
 
                switch (*str) {
                case 'w':
-                       reboot_mode = 0x1234;
+                       reboot_mode = REBOOT_WARM;
                        break;
 
                case 'c':
-                       reboot_mode = 0;
+                       reboot_mode = REBOOT_COLD;
                        break;
 
 #ifdef CONFIG_SMP
@@ -552,6 +552,7 @@ static void native_machine_emergency_restart(void)
        int i;
        int attempt = 0;
        int orig_reboot_type = reboot_type;
+       unsigned short mode;
 
        if (reboot_emergency)
                emergency_vmx_disable_all();
@@ -559,7 +560,8 @@ static void native_machine_emergency_restart(void)
        tboot_shutdown(TB_SHUTDOWN_REBOOT);
 
        /* Tell the BIOS if we want cold or warm reboot */
-       *((unsigned short *)__va(0x472)) = reboot_mode;
+       mode = reboot_mode == REBOOT_WARM ? 0x1234 : 0;
+       *((unsigned short *)__va(0x472)) = mode;
 
        for (;;) {
                /* Could also try the reset bit in the Hammer NB */
@@ -601,7 +603,7 @@ static void native_machine_emergency_restart(void)
 
                case BOOT_EFI:
                        if (efi_enabled(EFI_RUNTIME_SERVICES))
-                               efi.reset_system(reboot_mode ?
+                               efi.reset_system(reboot_mode == REBOOT_WARM ?
                                                 EFI_RESET_WARM :
                                                 EFI_RESET_COLD,
                                                 EFI_SUCCESS, 0, NULL);
index f7fec09e3e3a83c9fec36dfe2f0c75906b8c23c1..4e942f31b1a7c9401a65fb37af093caab5ad0c2e 100644 (file)
@@ -27,6 +27,37 @@ static int get_free_idx(void)
        return -ESRCH;
 }
 
+static bool tls_desc_okay(const struct user_desc *info)
+{
+       if (LDT_empty(info))
+               return true;
+
+       /*
+        * espfix is required for 16-bit data segments, but espfix
+        * only works for LDT segments.
+        */
+       if (!info->seg_32bit)
+               return false;
+
+       /* Only allow data segments in the TLS array. */
+       if (info->contents > 1)
+               return false;
+
+       /*
+        * Non-present segments with DPL 3 present an interesting attack
+        * surface.  The kernel should handle such segments correctly,
+        * but TLS is very difficult to protect in a sandbox, so prevent
+        * such segments from being created.
+        *
+        * If userspace needs to remove a TLS entry, it can still delete
+        * it outright.
+        */
+       if (info->seg_not_present)
+               return false;
+
+       return true;
+}
+
 static void set_tls_desc(struct task_struct *p, int idx,
                         const struct user_desc *info, int n)
 {
@@ -66,6 +97,9 @@ int do_set_thread_area(struct task_struct *p, int idx,
        if (copy_from_user(&info, u_info, sizeof(info)))
                return -EFAULT;
 
+       if (!tls_desc_okay(&info))
+               return -EINVAL;
+
        if (idx == -1)
                idx = info.entry_number;
 
@@ -192,6 +226,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
 {
        struct user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
        const struct user_desc *info;
+       int i;
 
        if (pos >= GDT_ENTRY_TLS_ENTRIES * sizeof(struct user_desc) ||
            (pos % sizeof(struct user_desc)) != 0 ||
@@ -205,6 +240,10 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
        else
                info = infobuf;
 
+       for (i = 0; i < count / sizeof(struct user_desc); i++)
+               if (!tls_desc_okay(info + i))
+                       return -EINVAL;
+
        set_tls_desc(target,
                     GDT_ENTRY_TLS_MIN + (pos / sizeof(struct user_desc)),
                     info, count / sizeof(struct user_desc));
index a3476bedd2017a0cec2df63ce4befd5c7af82f3c..d969fb91f17b511ab87780d0b5cea217f70d3576 100644 (file)
@@ -6242,6 +6242,12 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_invept(struct kvm_vcpu *vcpu)
+{
+       kvm_queue_exception(vcpu, UD_VECTOR);
+       return 1;
+}
+
 /*
  * The exit handlers return 1 if the exit was handled fully and guest execution
  * may resume.  Otherwise they set the kvm_run parameter to indicate what needs
@@ -6286,6 +6292,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_PAUSE_INSTRUCTION]       = handle_pause,
        [EXIT_REASON_MWAIT_INSTRUCTION]       = handle_invalid_op,
        [EXIT_REASON_MONITOR_INSTRUCTION]     = handle_invalid_op,
+       [EXIT_REASON_INVEPT]                  = handle_invept,
 };
 
 static const int kvm_vmx_max_exit_handlers =
@@ -6512,6 +6519,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
        case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
        case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
        case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
+       case EXIT_REASON_INVEPT:
                /*
                 * VMX instructions trap unconditionally. This allows L1 to
                 * emulate them for its L2 guest, i.e., allows 3-level nesting!
index 431e8754441125a1d446214e9d17b837083e3bb6..ab6ba35a9357a1ed2998ec0422ab11270d6b3b3f 100644 (file)
@@ -117,30 +117,45 @@ subsys_initcall(init_vdso);
 
 struct linux_binprm;
 
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
        unsigned long addr, end;
        unsigned offset;
-       end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;
-       /* This loses some more bits than a modulo, but is cheaper */
-       offset = get_random_int() & (PTRS_PER_PTE - 1);
-       addr = start + (offset << PAGE_SHIFT);
-       if (addr >= end)
-               addr = end;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
 
        /*
-        * page-align it here so that get_unmapped_area doesn't
-        * align it wrongfully again to the next page. addr can come in 4K
-        * unaligned here as a result of stack start randomization.
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
         */
-       addr = PAGE_ALIGN(addr);
        addr = align_vdso_addr(addr);
 
        return addr;
index 6374a29fd783fc3a8fd80ef03093e97666e77db6..2d350aae2eb66185f02bb6ac3b9bf6d741716dff 100644 (file)
@@ -1070,9 +1070,16 @@ int disk_expand_part_tbl(struct gendisk *disk, int partno)
        struct disk_part_tbl *old_ptbl = disk->part_tbl;
        struct disk_part_tbl *new_ptbl;
        int len = old_ptbl ? old_ptbl->len : 0;
-       int target = partno + 1;
+       int i, target;
        size_t size;
-       int i;
+
+       /*
+        * check for int overflow, since we can get here from blkpg_ioctl()
+        * with a user passed 'partno'.
+        */
+       target = partno + 1;
+       if (target < 0)
+               return -EINVAL;
 
        /* disk_max_parts() is zero during initialization, ignore if so */
        if (disk_max_parts(disk) && target > disk_max_parts(disk))
index bf948e134981a6ed47da7ab71d0aff8f8de81f51..6ef6e2ad344e1227dd7d125299f462ea7261a9cc 100644 (file)
@@ -449,6 +449,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
 {
        struct af_alg_completion *completion = req->data;
 
+       if (err == -EINPROGRESS)
+               return;
+
        completion->err = err;
        complete(&completion->completion);
 }
index 2c0f180ab34316d77571e5edc0abbabaaa63f992..a77fca50d5505cff8689081eabe8a0679bea1905 100755 (executable)
@@ -157,4 +157,5 @@ obj-$(CONFIG_IPACK_BUS)             += ipack/
 obj-$(CONFIG_NTB)              += ntb/
 
 obj-$(CONFIG_GATOR)            += gator/
-obj-y                          += headset_observe/
\ No newline at end of file
+obj-y                          += headset_observe/
+obj-$(CONFIG_CORESIGHT)                += coresight/
index 7e35fed9d5bf3517312ed5eab30afa144ff426ce..9d3a90397d16e68bafda9eb4650778b8ef26cef8 100644 (file)
@@ -508,7 +508,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
 
 //             amba_put_disable_pclk(dev);
 
-               if (cid == AMBA_CID)
+               if (cid == AMBA_CID || cid == CORESIGHT_CID)
                        dev->periphid = pid;
 
                if (!dev->periphid)
index 64150a9ffff3fb54668ca7f12a00ff12d2a5777f..9064a2f2760cd77d0284a82a79fd733e1498f164 100644 (file)
@@ -320,6 +320,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
+       { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
+       { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
        { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
        { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
@@ -491,6 +494,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
         * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
        { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
+       { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
index 8401061b4040b7fd3e2e13cfbee9cd51c87b2ce7..38a2389f5b1bc86abc0baae3fda3f9ab20956cd4 100644 (file)
@@ -1501,7 +1501,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
        host_priv->csr_base = csr_base;
 
        irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
-       if (irq < 0) {
+       if (!irq) {
                dev_err(&ofdev->dev, "invalid irq from platform\n");
                goto error_exit_with_cleanup;
        }
index d414331b480e72afc9ac8ac505dee449f1c795d9..558d562f490138413806115b4817640c4fc325ae 100644 (file)
@@ -242,13 +242,15 @@ static ssize_t store_drivers_probe(struct bus_type *bus,
                                   const char *buf, size_t count)
 {
        struct device *dev;
+       int err = -EINVAL;
 
        dev = bus_find_device_by_name(bus, NULL, buf);
        if (!dev)
                return -ENODEV;
-       if (bus_rescan_devices_helper(dev, NULL) != 0)
-               return -EINVAL;
-       return count;
+       if (bus_rescan_devices_helper(dev, NULL) == 0)
+               err = count;
+       put_device(dev);
+       return err;
 }
 
 static struct device *next_device(struct klist_iter *i)
index 0ce39a33b3c2c2faf23746f662d609f22c7cf46b..dd302ea3b599510843f97b3dc79e3d34447b85db 100644 (file)
@@ -10,6 +10,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <asm-generic/dma-coherent.h>
 
 /*
@@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
        return ret;
 }
 EXPORT_SYMBOL(dma_common_mmap);
+
+#ifdef CONFIG_MMU
+/*
+ * remaps an array of PAGE_SIZE pages into another vm_area
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                       unsigned long vm_flags, pgprot_t prot,
+                       const void *caller)
+{
+       struct vm_struct *area;
+
+       area = get_vm_area_caller(size, vm_flags, caller);
+       if (!area)
+               return NULL;
+
+       area->pages = pages;
+
+       if (map_vm_area(area, prot, &pages)) {
+               vunmap(area->addr);
+               return NULL;
+       }
+
+       return area->addr;
+}
+
+/*
+ * remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                       unsigned long vm_flags,
+                       pgprot_t prot, const void *caller)
+{
+       int i;
+       struct page **pages;
+       void *ptr;
+       unsigned long pfn;
+
+       pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
+               pages[i] = pfn_to_page(pfn + i);
+
+       ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+       kfree(pages);
+
+       return ptr;
+}
+
+/*
+ * unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+       struct vm_struct *area = find_vm_area(cpu_addr);
+
+       if (!area || (area->flags & vm_flags) != vm_flags) {
+               WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+               return;
+       }
+
+       unmap_kernel_range((unsigned long)cpu_addr, size);
+       vunmap(cpu_addr);
+}
+#endif
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
new file mode 100644 (file)
index 0000000..4b4bec8
--- /dev/null
@@ -0,0 +1,11 @@
+#
+# Makefile for CoreSight drivers.
+#
+obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
+obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
+obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
+                                          coresight-replicator.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/coresight/coresight-etb10.c
new file mode 100644 (file)
index 0000000..c922d4a
--- /dev/null
@@ -0,0 +1,537 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/seq_file.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define ETB_RAM_DEPTH_REG      0x004
+#define ETB_STATUS_REG         0x00c
+#define ETB_RAM_READ_DATA_REG  0x010
+#define ETB_RAM_READ_POINTER   0x014
+#define ETB_RAM_WRITE_POINTER  0x018
+#define ETB_TRG                        0x01c
+#define ETB_CTL_REG            0x020
+#define ETB_RWD_REG            0x024
+#define ETB_FFSR               0x300
+#define ETB_FFCR               0x304
+#define ETB_ITMISCOP0          0xee0
+#define ETB_ITTRFLINACK                0xee4
+#define ETB_ITTRFLIN           0xee8
+#define ETB_ITATBDATA0         0xeeC
+#define ETB_ITATBCTR2          0xef0
+#define ETB_ITATBCTR1          0xef4
+#define ETB_ITATBCTR0          0xef8
+
+/* register description */
+/* STS - 0x00C */
+#define ETB_STATUS_RAM_FULL    BIT(0)
+/* CTL - 0x020 */
+#define ETB_CTL_CAPT_EN                BIT(0)
+/* FFCR - 0x304 */
+#define ETB_FFCR_EN_FTC                BIT(0)
+#define ETB_FFCR_FON_MAN       BIT(6)
+#define ETB_FFCR_STOP_FI       BIT(12)
+#define ETB_FFCR_STOP_TRIGGER  BIT(13)
+
+#define ETB_FFCR_BIT           6
+#define ETB_FFSR_BIT           1
+#define ETB_FRAME_SIZE_WORDS   4
+
+/**
+ * struct etb_drvdata - specifics associated to an ETB component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @miscdev:   specifics to handle "/dev/xyz.etb" entry.
+ * @clk:       the clock this component is associated to.
+ * @spinlock:  only one at a time pls.
+ * @in_use:    synchronise user space access to etb buffer.
+ * @buf:       area of memory where ETB buffer content gets sent.
+ * @buffer_depth: size of @buf.
+ * @enable:    this ETB is being used.
+ * @trigger_cntr: amount of words to store after a trigger.
+ */
+struct etb_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct coresight_device *csdev;
+       struct miscdevice       miscdev;
+       struct clk              *clk;
+       spinlock_t              spinlock;
+       atomic_t                in_use;
+       u8                      *buf;
+       u32                     buffer_depth;
+       bool                    enable;
+       u32                     trigger_cntr;
+};
+
+static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
+{
+       int ret;
+       u32 depth = 0;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       /* RO registers don't need locking */
+       depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
+
+       clk_disable_unprepare(drvdata->clk);
+       return depth;
+}
+
+static void etb_enable_hw(struct etb_drvdata *drvdata)
+{
+       int i;
+       u32 depth;
+
+       CS_UNLOCK(drvdata->base);
+
+       depth = drvdata->buffer_depth;
+       /* reset write RAM pointer address */
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+       /* clear entire RAM buffer */
+       for (i = 0; i < depth; i++)
+               writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);
+
+       /* reset write RAM pointer address */
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
+       /* reset read RAM pointer address */
+       writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+
+       writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
+       writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
+                      drvdata->base + ETB_FFCR);
+       /* ETB trace capture enable */
+       writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);
+
+       CS_LOCK(drvdata->base);
+}
+
+static int etb_enable(struct coresight_device *csdev)
+{
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
+       unsigned long flags;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       etb_enable_hw(drvdata);
+       drvdata->enable = true;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "ETB enabled\n");
+       return 0;
+}
+
+static void etb_disable_hw(struct etb_drvdata *drvdata)
+{
+       u32 ffcr;
+
+       CS_UNLOCK(drvdata->base);
+
+       ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
+       /* stop formatter when a stop has completed */
+       ffcr |= ETB_FFCR_STOP_FI;
+       writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
+       /* manually generate a flush of the system */
+       ffcr |= ETB_FFCR_FON_MAN;
+       writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
+
+       if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n",
+                       ETB_FFCR);
+       }
+
+       /* disable trace capture */
+       writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
+
+       if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n",
+                       ETB_FFCR);
+       }
+
+       CS_LOCK(drvdata->base);
+}
+
+static void etb_dump_hw(struct etb_drvdata *drvdata)
+{
+       int i;
+       u8 *buf_ptr;
+       u32 read_data, depth;
+       u32 read_ptr, write_ptr;
+       u32 frame_off, frame_endoff;
+
+       CS_UNLOCK(drvdata->base);
+
+       read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+       write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+
+       frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
+       frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
+       if (frame_off) {
+               dev_err(drvdata->dev,
+                       "write_ptr: %lu not aligned to formatter frame size\n",
+                       (unsigned long)write_ptr);
+               dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
+                       (unsigned long)frame_off, (unsigned long)frame_endoff);
+               write_ptr += frame_endoff;
+       }
+
+       if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
+                     & ETB_STATUS_RAM_FULL) == 0)
+               writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
+       else
+               writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+       depth = drvdata->buffer_depth;
+       buf_ptr = drvdata->buf;
+       for (i = 0; i < depth; i++) {
+               read_data = readl_relaxed(drvdata->base +
+                                         ETB_RAM_READ_DATA_REG);
+               *buf_ptr++ = read_data >> 0;
+               *buf_ptr++ = read_data >> 8;
+               *buf_ptr++ = read_data >> 16;
+               *buf_ptr++ = read_data >> 24;
+       }
+
+       if (frame_off) {
+               buf_ptr -= (frame_endoff * 4);
+               for (i = 0; i < frame_endoff; i++) {
+                       *buf_ptr++ = 0x0;
+                       *buf_ptr++ = 0x0;
+                       *buf_ptr++ = 0x0;
+                       *buf_ptr++ = 0x0;
+               }
+       }
+
+       writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void etb_disable(struct coresight_device *csdev)
+{
+       struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       unsigned long flags;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       etb_disable_hw(drvdata);
+       etb_dump_hw(drvdata);
+       drvdata->enable = false;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       dev_info(drvdata->dev, "ETB disabled\n");
+}
+
+static const struct coresight_ops_sink etb_sink_ops = {
+       .enable         = etb_enable,
+       .disable        = etb_disable,
+};
+
+static const struct coresight_ops etb_cs_ops = {
+       .sink_ops       = &etb_sink_ops,
+};
+
+static void etb_dump(struct etb_drvdata *drvdata)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->enable) {
+               etb_disable_hw(drvdata);
+               etb_dump_hw(drvdata);
+               etb_enable_hw(drvdata);
+       }
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "ETB dumped\n");
+}
+
+static int etb_open(struct inode *inode, struct file *file)
+{
+       struct etb_drvdata *drvdata = container_of(file->private_data,
+                                                  struct etb_drvdata, miscdev);
+
+       if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
+               return -EBUSY;
+
+       dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+       return 0;
+}
+
+static ssize_t etb_read(struct file *file, char __user *data,
+                               size_t len, loff_t *ppos)
+{
+       u32 depth;
+       struct etb_drvdata *drvdata = container_of(file->private_data,
+                                                  struct etb_drvdata, miscdev);
+
+       etb_dump(drvdata);
+
+       depth = drvdata->buffer_depth;
+       if (*ppos + len > depth * 4)
+               len = depth * 4 - *ppos;
+
+       if (copy_to_user(data, drvdata->buf + *ppos, len)) {
+               dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+               return -EFAULT;
+       }
+
+       *ppos += len;
+
+       dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
+               __func__, len, (int) (depth * 4 - *ppos));
+       return len;
+}
+
+static int etb_release(struct inode *inode, struct file *file)
+{
+       struct etb_drvdata *drvdata = container_of(file->private_data,
+                                                  struct etb_drvdata, miscdev);
+       atomic_set(&drvdata->in_use, 0);
+
+       dev_dbg(drvdata->dev, "%s: released\n", __func__);
+       return 0;
+}
+
+static const struct file_operations etb_fops = {
+       .owner          = THIS_MODULE,
+       .open           = etb_open,
+       .read           = etb_read,
+       .release        = etb_release,
+       .llseek         = no_llseek,
+};
+
+static ssize_t status_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       int ret;
+       unsigned long flags;
+       u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
+       u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
+       struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               goto out;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       CS_UNLOCK(drvdata->base);
+
+       etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
+       etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
+       etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
+       etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
+       etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
+       etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
+       etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
+       etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
+
+       CS_LOCK(drvdata->base);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       return sprintf(buf,
+                      "Depth:\t\t0x%x\n"
+                      "Status:\t\t0x%x\n"
+                      "RAM read ptr:\t0x%x\n"
+                      "RAM wrt ptr:\t0x%x\n"
+                      "Trigger cnt:\t0x%x\n"
+                      "Control:\t0x%x\n"
+                      "Flush status:\t0x%x\n"
+                      "Flush ctrl:\t0x%x\n",
+                      etb_rdr, etb_sr, etb_rrp, etb_rwp,
+                      etb_trg, etb_cr, etb_ffsr, etb_ffcr);
+out:
+       return -EINVAL;
+}
+static DEVICE_ATTR_RO(status);
+
+static ssize_t trigger_cntr_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val = drvdata->trigger_cntr;
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_cntr_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->trigger_cntr = val;
+       return size;
+}
+static DEVICE_ATTR_RW(trigger_cntr);
+
+static struct attribute *coresight_etb_attrs[] = {
+       &dev_attr_trigger_cntr.attr,
+       &dev_attr_status.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etb);
+
+static int etb_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       int ret;
+       void __iomem *base;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct etb_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               adev->dev.platform_data = pdata;
+       }
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &adev->dev;
+       dev_set_drvdata(dev, drvdata);
+
+       /* validity for the resource is already checked by the AMBA core */
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       drvdata->base = base;
+
+       spin_lock_init(&drvdata->spinlock);
+
+       drvdata->clk = adev->pclk;
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       drvdata->buffer_depth =  etb_get_buffer_depth(drvdata);
+       clk_disable_unprepare(drvdata->clk);
+
+       if (drvdata->buffer_depth < 0)
+               return -EINVAL;
+
+       drvdata->buf = devm_kzalloc(dev,
+                                   drvdata->buffer_depth * 4, GFP_KERNEL);
+       if (!drvdata->buf)
+               return -ENOMEM;
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       desc->type = CORESIGHT_DEV_TYPE_SINK;
+       desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+       desc->ops = &etb_cs_ops;
+       desc->pdata = pdata;
+       desc->dev = dev;
+       desc->groups = coresight_etb_groups;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev))
+               return PTR_ERR(drvdata->csdev);
+
+       drvdata->miscdev.name = pdata->name;
+       drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+       drvdata->miscdev.fops = &etb_fops;
+       ret = misc_register(&drvdata->miscdev);
+       if (ret)
+               goto err_misc_register;
+
+       dev_info(dev, "ETB initialized\n");
+       return 0;
+
+err_misc_register:
+       coresight_unregister(drvdata->csdev);
+       return ret;
+}
+
+static int etb_remove(struct amba_device *adev)
+{
+       struct etb_drvdata *drvdata = amba_get_drvdata(adev);
+
+       misc_deregister(&drvdata->miscdev);
+       coresight_unregister(drvdata->csdev);
+       return 0;
+}
+
+static struct amba_id etb_ids[] = {
+       {
+               .id     = 0x0003b907,
+               .mask   = 0x0003ffff,
+       },
+       { 0, 0},
+};
+
+static struct amba_driver etb_driver = {
+       .drv = {
+               .name   = "coresight-etb10",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = etb_probe,
+       .remove         = etb_remove,
+       .id_table       = etb_ids,
+};
+
+static int __init etb_init(void)
+{
+       return amba_driver_register(&etb_driver);
+}
+module_init(etb_init);
+
+static void __exit etb_exit(void)
+{
+       amba_driver_unregister(&etb_driver);
+}
+module_exit(etb_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
diff --git a/drivers/coresight/coresight-etm-cp14.c b/drivers/coresight/coresight-etm-cp14.c
new file mode 100644 (file)
index 0000000..12a2206
--- /dev/null
@@ -0,0 +1,591 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bug.h>
+#include <asm/hardware/cp14.h>
+
+#include "coresight-etm.h"
+
+int etm_readl_cp14(u32 reg, unsigned int *val)
+{
+       switch (reg) {
+       case ETMCR:
+               *val = etm_read(ETMCR);
+               return 0;
+       case ETMCCR:
+               *val = etm_read(ETMCCR);
+               return 0;
+       case ETMTRIGGER:
+               *val = etm_read(ETMTRIGGER);
+               return 0;
+       case ETMSR:
+               *val = etm_read(ETMSR);
+               return 0;
+       case ETMSCR:
+               *val = etm_read(ETMSCR);
+               return 0;
+       case ETMTSSCR:
+               *val = etm_read(ETMTSSCR);
+               return 0;
+       case ETMTEEVR:
+               *val = etm_read(ETMTEEVR);
+               return 0;
+       case ETMTECR1:
+               *val = etm_read(ETMTECR1);
+               return 0;
+       case ETMFFLR:
+               *val = etm_read(ETMFFLR);
+               return 0;
+       case ETMACVRn(0):
+               *val = etm_read(ETMACVR0);
+               return 0;
+       case ETMACVRn(1):
+               *val = etm_read(ETMACVR1);
+               return 0;
+       case ETMACVRn(2):
+               *val = etm_read(ETMACVR2);
+               return 0;
+       case ETMACVRn(3):
+               *val = etm_read(ETMACVR3);
+               return 0;
+       case ETMACVRn(4):
+               *val = etm_read(ETMACVR4);
+               return 0;
+       case ETMACVRn(5):
+               *val = etm_read(ETMACVR5);
+               return 0;
+       case ETMACVRn(6):
+               *val = etm_read(ETMACVR6);
+               return 0;
+       case ETMACVRn(7):
+               *val = etm_read(ETMACVR7);
+               return 0;
+       case ETMACVRn(8):
+               *val = etm_read(ETMACVR8);
+               return 0;
+       case ETMACVRn(9):
+               *val = etm_read(ETMACVR9);
+               return 0;
+       case ETMACVRn(10):
+               *val = etm_read(ETMACVR10);
+               return 0;
+       case ETMACVRn(11):
+               *val = etm_read(ETMACVR11);
+               return 0;
+       case ETMACVRn(12):
+               *val = etm_read(ETMACVR12);
+               return 0;
+       case ETMACVRn(13):
+               *val = etm_read(ETMACVR13);
+               return 0;
+       case ETMACVRn(14):
+               *val = etm_read(ETMACVR14);
+               return 0;
+       case ETMACVRn(15):
+               *val = etm_read(ETMACVR15);
+               return 0;
+       case ETMACTRn(0):
+               *val = etm_read(ETMACTR0);
+               return 0;
+       case ETMACTRn(1):
+               *val = etm_read(ETMACTR1);
+               return 0;
+       case ETMACTRn(2):
+               *val = etm_read(ETMACTR2);
+               return 0;
+       case ETMACTRn(3):
+               *val = etm_read(ETMACTR3);
+               return 0;
+       case ETMACTRn(4):
+               *val = etm_read(ETMACTR4);
+               return 0;
+       case ETMACTRn(5):
+               *val = etm_read(ETMACTR5);
+               return 0;
+       case ETMACTRn(6):
+               *val = etm_read(ETMACTR6);
+               return 0;
+       case ETMACTRn(7):
+               *val = etm_read(ETMACTR7);
+               return 0;
+       case ETMACTRn(8):
+               *val = etm_read(ETMACTR8);
+               return 0;
+       case ETMACTRn(9):
+               *val = etm_read(ETMACTR9);
+               return 0;
+       case ETMACTRn(10):
+               *val = etm_read(ETMACTR10);
+               return 0;
+       case ETMACTRn(11):
+               *val = etm_read(ETMACTR11);
+               return 0;
+       case ETMACTRn(12):
+               *val = etm_read(ETMACTR12);
+               return 0;
+       case ETMACTRn(13):
+               *val = etm_read(ETMACTR13);
+               return 0;
+       case ETMACTRn(14):
+               *val = etm_read(ETMACTR14);
+               return 0;
+       case ETMACTRn(15):
+               *val = etm_read(ETMACTR15);
+               return 0;
+       case ETMCNTRLDVRn(0):
+               *val = etm_read(ETMCNTRLDVR0);
+               return 0;
+       case ETMCNTRLDVRn(1):
+               *val = etm_read(ETMCNTRLDVR1);
+               return 0;
+       case ETMCNTRLDVRn(2):
+               *val = etm_read(ETMCNTRLDVR2);
+               return 0;
+       case ETMCNTRLDVRn(3):
+               *val = etm_read(ETMCNTRLDVR3);
+               return 0;
+       case ETMCNTENRn(0):
+               *val = etm_read(ETMCNTENR0);
+               return 0;
+       case ETMCNTENRn(1):
+               *val = etm_read(ETMCNTENR1);
+               return 0;
+       case ETMCNTENRn(2):
+               *val = etm_read(ETMCNTENR2);
+               return 0;
+       case ETMCNTENRn(3):
+               *val = etm_read(ETMCNTENR3);
+               return 0;
+       case ETMCNTRLDEVRn(0):
+               *val = etm_read(ETMCNTRLDEVR0);
+               return 0;
+       case ETMCNTRLDEVRn(1):
+               *val = etm_read(ETMCNTRLDEVR1);
+               return 0;
+       case ETMCNTRLDEVRn(2):
+               *val = etm_read(ETMCNTRLDEVR2);
+               return 0;
+       case ETMCNTRLDEVRn(3):
+               *val = etm_read(ETMCNTRLDEVR3);
+               return 0;
+       case ETMCNTVRn(0):
+               *val = etm_read(ETMCNTVR0);
+               return 0;
+       case ETMCNTVRn(1):
+               *val = etm_read(ETMCNTVR1);
+               return 0;
+       case ETMCNTVRn(2):
+               *val = etm_read(ETMCNTVR2);
+               return 0;
+       case ETMCNTVRn(3):
+               *val = etm_read(ETMCNTVR3);
+               return 0;
+       case ETMSQ12EVR:
+               *val = etm_read(ETMSQ12EVR);
+               return 0;
+       case ETMSQ21EVR:
+               *val = etm_read(ETMSQ21EVR);
+               return 0;
+       case ETMSQ23EVR:
+               *val = etm_read(ETMSQ23EVR);
+               return 0;
+       case ETMSQ31EVR:
+               *val = etm_read(ETMSQ31EVR);
+               return 0;
+       case ETMSQ32EVR:
+               *val = etm_read(ETMSQ32EVR);
+               return 0;
+       case ETMSQ13EVR:
+               *val = etm_read(ETMSQ13EVR);
+               return 0;
+       case ETMSQR:
+               *val = etm_read(ETMSQR);
+               return 0;
+       case ETMEXTOUTEVRn(0):
+               *val = etm_read(ETMEXTOUTEVR0);
+               return 0;
+       case ETMEXTOUTEVRn(1):
+               *val = etm_read(ETMEXTOUTEVR1);
+               return 0;
+       case ETMEXTOUTEVRn(2):
+               *val = etm_read(ETMEXTOUTEVR2);
+               return 0;
+       case ETMEXTOUTEVRn(3):
+               *val = etm_read(ETMEXTOUTEVR3);
+               return 0;
+       case ETMCIDCVRn(0):
+               *val = etm_read(ETMCIDCVR0);
+               return 0;
+       case ETMCIDCVRn(1):
+               *val = etm_read(ETMCIDCVR1);
+               return 0;
+       case ETMCIDCVRn(2):
+               *val = etm_read(ETMCIDCVR2);
+               return 0;
+       case ETMCIDCMR:
+               *val = etm_read(ETMCIDCMR);
+               return 0;
+       case ETMIMPSPEC0:
+               *val = etm_read(ETMIMPSPEC0);
+               return 0;
+       case ETMIMPSPEC1:
+               *val = etm_read(ETMIMPSPEC1);
+               return 0;
+       case ETMIMPSPEC2:
+               *val = etm_read(ETMIMPSPEC2);
+               return 0;
+       case ETMIMPSPEC3:
+               *val = etm_read(ETMIMPSPEC3);
+               return 0;
+       case ETMIMPSPEC4:
+               *val = etm_read(ETMIMPSPEC4);
+               return 0;
+       case ETMIMPSPEC5:
+               *val = etm_read(ETMIMPSPEC5);
+               return 0;
+       case ETMIMPSPEC6:
+               *val = etm_read(ETMIMPSPEC6);
+               return 0;
+       case ETMIMPSPEC7:
+               *val = etm_read(ETMIMPSPEC7);
+               return 0;
+       case ETMSYNCFR:
+               *val = etm_read(ETMSYNCFR);
+               return 0;
+       case ETMIDR:
+               *val = etm_read(ETMIDR);
+               return 0;
+       case ETMCCER:
+               *val = etm_read(ETMCCER);
+               return 0;
+       case ETMEXTINSELR:
+               *val = etm_read(ETMEXTINSELR);
+               return 0;
+       case ETMTESSEICR:
+               *val = etm_read(ETMTESSEICR);
+               return 0;
+       case ETMEIBCR:
+               *val = etm_read(ETMEIBCR);
+               return 0;
+       case ETMTSEVR:
+               *val = etm_read(ETMTSEVR);
+               return 0;
+       case ETMAUXCR:
+               *val = etm_read(ETMAUXCR);
+               return 0;
+       case ETMTRACEIDR:
+               *val = etm_read(ETMTRACEIDR);
+               return 0;
+       case ETMVMIDCVR:
+               *val = etm_read(ETMVMIDCVR);
+               return 0;
+       case ETMOSLSR:
+               *val = etm_read(ETMOSLSR);
+               return 0;
+       case ETMOSSRR:
+               *val = etm_read(ETMOSSRR);
+               return 0;
+       case ETMPDCR:
+               *val = etm_read(ETMPDCR);
+               return 0;
+       case ETMPDSR:
+               *val = etm_read(ETMPDSR);
+               return 0;
+       default:
+               *val = 0;
+               return -EINVAL;
+       }
+}
+
+int etm_writel_cp14(u32 reg, u32 val)
+{
+       switch (reg) {
+       case ETMCR:
+               etm_write(val, ETMCR);
+               break;
+       case ETMTRIGGER:
+               etm_write(val, ETMTRIGGER);
+               break;
+       case ETMSR:
+               etm_write(val, ETMSR);
+               break;
+       case ETMTSSCR:
+               etm_write(val, ETMTSSCR);
+               break;
+       case ETMTEEVR:
+               etm_write(val, ETMTEEVR);
+               break;
+       case ETMTECR1:
+               etm_write(val, ETMTECR1);
+               break;
+       case ETMFFLR:
+               etm_write(val, ETMFFLR);
+               break;
+       case ETMACVRn(0):
+               etm_write(val, ETMACVR0);
+               break;
+       case ETMACVRn(1):
+               etm_write(val, ETMACVR1);
+               break;
+       case ETMACVRn(2):
+               etm_write(val, ETMACVR2);
+               break;
+       case ETMACVRn(3):
+               etm_write(val, ETMACVR3);
+               break;
+       case ETMACVRn(4):
+               etm_write(val, ETMACVR4);
+               break;
+       case ETMACVRn(5):
+               etm_write(val, ETMACVR5);
+               break;
+       case ETMACVRn(6):
+               etm_write(val, ETMACVR6);
+               break;
+       case ETMACVRn(7):
+               etm_write(val, ETMACVR7);
+               break;
+       case ETMACVRn(8):
+               etm_write(val, ETMACVR8);
+               break;
+       case ETMACVRn(9):
+               etm_write(val, ETMACVR9);
+               break;
+       case ETMACVRn(10):
+               etm_write(val, ETMACVR10);
+               break;
+       case ETMACVRn(11):
+               etm_write(val, ETMACVR11);
+               break;
+       case ETMACVRn(12):
+               etm_write(val, ETMACVR12);
+               break;
+       case ETMACVRn(13):
+               etm_write(val, ETMACVR13);
+               break;
+       case ETMACVRn(14):
+               etm_write(val, ETMACVR14);
+               break;
+       case ETMACVRn(15):
+               etm_write(val, ETMACVR15);
+               break;
+       case ETMACTRn(0):
+               etm_write(val, ETMACTR0);
+               break;
+       case ETMACTRn(1):
+               etm_write(val, ETMACTR1);
+               break;
+       case ETMACTRn(2):
+               etm_write(val, ETMACTR2);
+               break;
+       case ETMACTRn(3):
+               etm_write(val, ETMACTR3);
+               break;
+       case ETMACTRn(4):
+               etm_write(val, ETMACTR4);
+               break;
+       case ETMACTRn(5):
+               etm_write(val, ETMACTR5);
+               break;
+       case ETMACTRn(6):
+               etm_write(val, ETMACTR6);
+               break;
+       case ETMACTRn(7):
+               etm_write(val, ETMACTR7);
+               break;
+       case ETMACTRn(8):
+               etm_write(val, ETMACTR8);
+               break;
+       case ETMACTRn(9):
+               etm_write(val, ETMACTR9);
+               break;
+       case ETMACTRn(10):
+               etm_write(val, ETMACTR10);
+               break;
+       case ETMACTRn(11):
+               etm_write(val, ETMACTR11);
+               break;
+       case ETMACTRn(12):
+               etm_write(val, ETMACTR12);
+               break;
+       case ETMACTRn(13):
+               etm_write(val, ETMACTR13);
+               break;
+       case ETMACTRn(14):
+               etm_write(val, ETMACTR14);
+               break;
+       case ETMACTRn(15):
+               etm_write(val, ETMACTR15);
+               break;
+       case ETMCNTRLDVRn(0):
+               etm_write(val, ETMCNTRLDVR0);
+               break;
+       case ETMCNTRLDVRn(1):
+               etm_write(val, ETMCNTRLDVR1);
+               break;
+       case ETMCNTRLDVRn(2):
+               etm_write(val, ETMCNTRLDVR2);
+               break;
+       case ETMCNTRLDVRn(3):
+               etm_write(val, ETMCNTRLDVR3);
+               break;
+       case ETMCNTENRn(0):
+               etm_write(val, ETMCNTENR0);
+               break;
+       case ETMCNTENRn(1):
+               etm_write(val, ETMCNTENR1);
+               break;
+       case ETMCNTENRn(2):
+               etm_write(val, ETMCNTENR2);
+               break;
+       case ETMCNTENRn(3):
+               etm_write(val, ETMCNTENR3);
+               break;
+       case ETMCNTRLDEVRn(0):
+               etm_write(val, ETMCNTRLDEVR0);
+               break;
+       case ETMCNTRLDEVRn(1):
+               etm_write(val, ETMCNTRLDEVR1);
+               break;
+       case ETMCNTRLDEVRn(2):
+               etm_write(val, ETMCNTRLDEVR2);
+               break;
+       case ETMCNTRLDEVRn(3):
+               etm_write(val, ETMCNTRLDEVR3);
+               break;
+       case ETMCNTVRn(0):
+               etm_write(val, ETMCNTVR0);
+               break;
+       case ETMCNTVRn(1):
+               etm_write(val, ETMCNTVR1);
+               break;
+       case ETMCNTVRn(2):
+               etm_write(val, ETMCNTVR2);
+               break;
+       case ETMCNTVRn(3):
+               etm_write(val, ETMCNTVR3);
+               break;
+       case ETMSQ12EVR:
+               etm_write(val, ETMSQ12EVR);
+               break;
+       case ETMSQ21EVR:
+               etm_write(val, ETMSQ21EVR);
+               break;
+       case ETMSQ23EVR:
+               etm_write(val, ETMSQ23EVR);
+               break;
+       case ETMSQ31EVR:
+               etm_write(val, ETMSQ31EVR);
+               break;
+       case ETMSQ32EVR:
+               etm_write(val, ETMSQ32EVR);
+               break;
+       case ETMSQ13EVR:
+               etm_write(val, ETMSQ13EVR);
+               break;
+       case ETMSQR:
+               etm_write(val, ETMSQR);
+               break;
+       case ETMEXTOUTEVRn(0):
+               etm_write(val, ETMEXTOUTEVR0);
+               break;
+       case ETMEXTOUTEVRn(1):
+               etm_write(val, ETMEXTOUTEVR1);
+               break;
+       case ETMEXTOUTEVRn(2):
+               etm_write(val, ETMEXTOUTEVR2);
+               break;
+       case ETMEXTOUTEVRn(3):
+               etm_write(val, ETMEXTOUTEVR3);
+               break;
+       case ETMCIDCVRn(0):
+               etm_write(val, ETMCIDCVR0);
+               break;
+       case ETMCIDCVRn(1):
+               etm_write(val, ETMCIDCVR1);
+               break;
+       case ETMCIDCVRn(2):
+               etm_write(val, ETMCIDCVR2);
+               break;
+       case ETMCIDCMR:
+               etm_write(val, ETMCIDCMR);
+               break;
+       case ETMIMPSPEC0:
+               etm_write(val, ETMIMPSPEC0);
+               break;
+       case ETMIMPSPEC1:
+               etm_write(val, ETMIMPSPEC1);
+               break;
+       case ETMIMPSPEC2:
+               etm_write(val, ETMIMPSPEC2);
+               break;
+       case ETMIMPSPEC3:
+               etm_write(val, ETMIMPSPEC3);
+               break;
+       case ETMIMPSPEC4:
+               etm_write(val, ETMIMPSPEC4);
+               break;
+       case ETMIMPSPEC5:
+               etm_write(val, ETMIMPSPEC5);
+               break;
+       case ETMIMPSPEC6:
+               etm_write(val, ETMIMPSPEC6);
+               break;
+       case ETMIMPSPEC7:
+               etm_write(val, ETMIMPSPEC7);
+               break;
+       case ETMSYNCFR:
+               etm_write(val, ETMSYNCFR);
+               break;
+       case ETMEXTINSELR:
+               etm_write(val, ETMEXTINSELR);
+               break;
+       case ETMTESSEICR:
+               etm_write(val, ETMTESSEICR);
+               break;
+       case ETMEIBCR:
+               etm_write(val, ETMEIBCR);
+               break;
+       case ETMTSEVR:
+               etm_write(val, ETMTSEVR);
+               break;
+       case ETMAUXCR:
+               etm_write(val, ETMAUXCR);
+               break;
+       case ETMTRACEIDR:
+               etm_write(val, ETMTRACEIDR);
+               break;
+       case ETMVMIDCVR:
+               etm_write(val, ETMVMIDCVR);
+               break;
+       case ETMOSLAR:
+               etm_write(val, ETMOSLAR);
+               break;
+       case ETMOSSRR:
+               etm_write(val, ETMOSSRR);
+               break;
+       case ETMPDCR:
+               etm_write(val, ETMPDCR);
+               break;
+       case ETMPDSR:
+               etm_write(val, ETMPDSR);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
diff --git a/drivers/coresight/coresight-etm.h b/drivers/coresight/coresight-etm.h
new file mode 100644 (file)
index 0000000..501c5fa
--- /dev/null
@@ -0,0 +1,251 @@
+/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_CORESIGHT_ETM_H
+#define _CORESIGHT_CORESIGHT_ETM_H
+
+#include <linux/spinlock.h>
+#include "coresight-priv.h"
+
+/*
+ * Device registers:
+ * 0x000 - 0x2FC: Trace         registers
+ * 0x300 - 0x314: Management    registers
+ * 0x318 - 0xEFC: Trace         registers
+ *
+ * Coresight registers
+ * 0xF00 - 0xF9C: Management    registers
+ * 0xFA0 - 0xFA4: Management    registers in PFTv1.0
+ *                Trace         registers in PFTv1.1
+ * 0xFA8 - 0xFFC: Management    registers
+ */
+
+/* Trace registers (0x000-0x2FC) */
+#define ETMCR                  0x000
+#define ETMCCR                 0x004
+#define ETMTRIGGER             0x008
+#define ETMSR                  0x010
+#define ETMSCR                 0x014
+#define ETMTSSCR               0x018
+#define ETMTECR2               0x01c
+#define ETMTEEVR               0x020
+#define ETMTECR1               0x024
+#define ETMFFLR                        0x02c
+#define ETMACVRn(n)            (0x040 + (n * 4))
+#define ETMACTRn(n)            (0x080 + (n * 4))
+#define ETMCNTRLDVRn(n)                (0x140 + (n * 4))
+#define ETMCNTENRn(n)          (0x150 + (n * 4))
+#define ETMCNTRLDEVRn(n)       (0x160 + (n * 4))
+#define ETMCNTVRn(n)           (0x170 + (n * 4))
+#define ETMSQ12EVR             0x180
+#define ETMSQ21EVR             0x184
+#define ETMSQ23EVR             0x188
+#define ETMSQ31EVR             0x18c
+#define ETMSQ32EVR             0x190
+#define ETMSQ13EVR             0x194
+#define ETMSQR                 0x19c
+#define ETMEXTOUTEVRn(n)       (0x1a0 + (n * 4))
+#define ETMCIDCVRn(n)          (0x1b0 + (n * 4))
+#define ETMCIDCMR              0x1bc
+#define ETMIMPSPEC0            0x1c0
+#define ETMIMPSPEC1            0x1c4
+#define ETMIMPSPEC2            0x1c8
+#define ETMIMPSPEC3            0x1cc
+#define ETMIMPSPEC4            0x1d0
+#define ETMIMPSPEC5            0x1d4
+#define ETMIMPSPEC6            0x1d8
+#define ETMIMPSPEC7            0x1dc
+#define ETMSYNCFR              0x1e0
+#define ETMIDR                 0x1e4
+#define ETMCCER                        0x1e8
+#define ETMEXTINSELR           0x1ec
+#define ETMTESSEICR            0x1f0
+#define ETMEIBCR               0x1f4
+#define ETMTSEVR               0x1f8
+#define ETMAUXCR               0x1fc
+#define ETMTRACEIDR            0x200
+#define ETMVMIDCVR             0x240
+/* Management registers (0x300-0x314) */
+#define ETMOSLAR               0x300
+#define ETMOSLSR               0x304
+#define ETMOSSRR               0x308
+#define ETMPDCR                        0x310
+#define ETMPDSR                        0x314
+#define ETM_MAX_ADDR_CMP       16
+#define ETM_MAX_CNTR           4
+#define ETM_MAX_CTXID_CMP      3
+
+/* Register definition */
+/* ETMCR - 0x00 */
+#define ETMCR_PWD_DWN          BIT(0)
+#define ETMCR_STALL_MODE       BIT(7)
+#define ETMCR_ETM_PRG          BIT(10)
+#define ETMCR_ETM_EN           BIT(11)
+#define ETMCR_CYC_ACC          BIT(12)
+#define ETMCR_CTXID_SIZE       (BIT(14)|BIT(15))
+#define ETMCR_TIMESTAMP_EN     BIT(28)
+/* ETMCCR - 0x04 */
+#define ETMCCR_FIFOFULL                BIT(23)
+/* ETMPDCR - 0x310 */
+#define ETMPDCR_PWD_UP         BIT(3)
+/* ETMTECR1 - 0x024 */
+#define ETMTECR1_ADDR_COMP_1   BIT(0)
+#define ETMTECR1_INC_EXC       BIT(24)
+#define ETMTECR1_START_STOP    BIT(25)
+/* ETMCCER - 0x1E8 */
+#define ETMCCER_TIMESTAMP      BIT(22)
+
+#define ETM_MODE_EXCLUDE       BIT(0)
+#define ETM_MODE_CYCACC                BIT(1)
+#define ETM_MODE_STALL         BIT(2)
+#define ETM_MODE_TIMESTAMP     BIT(3)
+#define ETM_MODE_CTXID         BIT(4)
+#define ETM_MODE_ALL           0x1f
+
+#define ETM_SQR_MASK           0x3
+#define ETM_TRACEID_MASK       0x3f
+#define ETM_EVENT_MASK         0x1ffff
+#define ETM_SYNC_MASK          0xfff
+#define ETM_ALL_MASK           0xffffffff
+
+#define ETMSR_PROG_BIT         1
+#define ETM_SEQ_STATE_MAX_VAL  (0x2)
+#define PORT_SIZE_MASK         (GENMASK(21, 21) | GENMASK(6, 4))
+
+#define ETM_HARD_WIRE_RES_A    /* Hard wired, always true */   \
+                               ((0x0f << 0)    |               \
+                               /* Resource index A */          \
+                               (0x06 << 4))
+
+#define ETM_ADD_COMP_0         /* Single addr comparator 1 */  \
+                               ((0x00 << 7)    |               \
+                               /* Resource index B */          \
+                               (0x00 << 11))
+
+#define ETM_EVENT_NOT_A                BIT(14) /* NOT(A) */
+
+#define ETM_DEFAULT_EVENT_VAL  (ETM_HARD_WIRE_RES_A    |       \
+                                ETM_ADD_COMP_0         |       \
+                                ETM_EVENT_NOT_A)
+/**
+ * struct etm_drvdata - specifics associated to an ETM component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @clk:       the clock this component is associated to.
+ * @spinlock:  only one at a time pls.
+ * @cpu:       the cpu this component is affined to.
+ * @port_size: port size as reported by ETMCR bit 4-6 and 21.
+ * @arch:      ETM/PTM version number.
+ * @use_cpu14: true if management registers need to be accessed via CP14.
+ * @enable:    is this ETM/PTM currently tracing.
+ * @sticky_enable: true if ETM base configuration has been done.
+ * @boot_enable:true if we should start tracing at boot time.
+ * @os_unlock: true if access to management registers is allowed.
+ * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
+ * @nr_cntr:   Number of counters as found in ETMCCR bit 13-15.
+ * @nr_ext_inp:        Number of external input as found in ETMCCR bit 17-19.
+ * @nr_ext_out:        Number of external output as found in ETMCCR bit 20-22.
+ * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
+ * @etmccr:    value of register ETMCCR.
+ * @etmccer:   value of register ETMCCER.
+ * @traceid:   value of the current ID for this component.
+ * @mode:      controls various modes supported by this ETM/PTM.
+ * @ctrl:      used in conjunction with @mode.
+ * @trigger_event: setting for register ETMTRIGGER.
+ * @startstop_ctrl: setting for register ETMTSSCR.
+ * @enable_event: setting for register ETMTEEVR.
+ * @enable_ctrl1: setting for register ETMTECR1.
+ * @fifofull_level: setting for register ETMFFLR.
+ * @addr_idx:  index for the address comparator selection.
+ * @addr_val:  value for address comparator register.
+ * @addr_acctype: access type for address comparator register.
+ * @addr_type: current status of the comparator register.
+ * @cntr_idx:  index for the counter register selection.
+ * @cntr_rld_val: reload value of a counter register.
+ * @cntr_event:        control for counter enable register.
+ * @cntr_rld_event: value for counter reload event register.
+ * @cntr_val:  counter value register.
+ * @seq_12_event: event causing the transition from 1 to 2.
+ * @seq_21_event: event causing the transition from 2 to 1.
+ * @seq_23_event: event causing the transition from 2 to 3.
+ * @seq_31_event: event causing the transition from 3 to 1.
+ * @seq_32_event: event causing the transition from 3 to 2.
+ * @seq_13_event: event causing the transition from 1 to 3.
+ * @seq_curr_state: current value of the sequencer register.
+ * @ctxid_idx: index for the context ID registers.
+ * @ctxid_val: value for the context ID to trigger on.
+ * @ctxid_mask: mask applicable to all the context IDs.
+ * @sync_freq: Synchronisation frequency.
+ * @timestamp_event: Defines an event that requests the insertion
+                    of a timestamp into the trace stream.
+ */
+struct etm_drvdata {
+       void __iomem                    *base;
+       struct device                   *dev;
+       struct coresight_device         *csdev;
+       struct clk                      *clk;
+       spinlock_t                      spinlock;
+       int                             cpu;
+       int                             port_size;
+       u8                              arch;
+       bool                            use_cp14;
+       bool                            enable;
+       bool                            sticky_enable;
+       bool                            boot_enable;
+       bool                            os_unlock;
+       u8                              nr_addr_cmp;
+       u8                              nr_cntr;
+       u8                              nr_ext_inp;
+       u8                              nr_ext_out;
+       u8                              nr_ctxid_cmp;
+       u32                             etmccr;
+       u32                             etmccer;
+       u32                             traceid;
+       u32                             mode;
+       u32                             ctrl;
+       u32                             trigger_event;
+       u32                             startstop_ctrl;
+       u32                             enable_event;
+       u32                             enable_ctrl1;
+       u32                             fifofull_level;
+       u8                              addr_idx;
+       u32                             addr_val[ETM_MAX_ADDR_CMP];
+       u32                             addr_acctype[ETM_MAX_ADDR_CMP];
+       u32                             addr_type[ETM_MAX_ADDR_CMP];
+       u8                              cntr_idx;
+       u32                             cntr_rld_val[ETM_MAX_CNTR];
+       u32                             cntr_event[ETM_MAX_CNTR];
+       u32                             cntr_rld_event[ETM_MAX_CNTR];
+       u32                             cntr_val[ETM_MAX_CNTR];
+       u32                             seq_12_event;
+       u32                             seq_21_event;
+       u32                             seq_23_event;
+       u32                             seq_31_event;
+       u32                             seq_32_event;
+       u32                             seq_13_event;
+       u32                             seq_curr_state;
+       u8                              ctxid_idx;
+       u32                             ctxid_val[ETM_MAX_CTXID_CMP];
+       u32                             ctxid_mask;
+       u32                             sync_freq;
+       u32                             timestamp_event;
+};
+
+enum etm_addr_type {
+       ETM_ADDR_TYPE_NONE,
+       ETM_ADDR_TYPE_SINGLE,
+       ETM_ADDR_TYPE_RANGE,
+       ETM_ADDR_TYPE_START,
+       ETM_ADDR_TYPE_STOP,
+};
+#endif
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/coresight/coresight-etm3x.c
new file mode 100644 (file)
index 0000000..d9e3ed6
--- /dev/null
@@ -0,0 +1,1928 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/sysfs.h>
+#include <linux/stat.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <asm/sections.h>
+
+#include "coresight-etm.h"
+
+/* When set, tracing is enabled as soon as the driver probes (module param). */
+#ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
+static int boot_enable = 1;
+#else
+static int boot_enable;
+#endif
+module_param_named(
+       boot_enable, boot_enable, int, S_IRUGO
+);
+
+/* The number of ETM/PTM currently registered */
+static int etm_count;
+/* Per-device lookup table -- presumably indexed by CPU number; confirm in probe */
+static struct etm_drvdata *etmdrvdata[NR_CPUS];
+
+/*
+ * Write @val to ETM register @off, either through the CP14 coprocessor
+ * interface or the memory-mapped window, depending on the device's
+ * use_cp14 flag.  A failed CP14 access is logged but not propagated.
+ */
+static inline void etm_writel(struct etm_drvdata *drvdata,
+                             u32 val, u32 off)
+{
+       if (drvdata->use_cp14) {
+               if (etm_writel_cp14(off, val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               writel_relaxed(val, drvdata->base + off);
+       }
+}
+
+/*
+ * Read ETM register @off, either through CP14 or the memory-mapped window,
+ * mirroring etm_writel().
+ *
+ * Fix: initialise @val so a failed CP14 access returns 0 instead of an
+ * uninitialised stack value (previously undefined behaviour on the error
+ * path, since etm_readl_cp14() does not fill @val when it fails).
+ */
+static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
+{
+       u32 val = 0;
+
+       if (drvdata->use_cp14) {
+               if (etm_readl_cp14(off, &val)) {
+                       dev_err(drvdata->dev,
+                               "invalid CP14 access to ETM reg: %#x", off);
+               }
+       } else {
+               val = readl_relaxed(drvdata->base + off);
+       }
+
+       return val;
+}
+
+/*
+ * Memory mapped writes to clear os lock are not supported on some processors
+ * and OS lock must be unlocked before any memory mapped access on such
+ * processors, otherwise memory mapped reads/writes will be invalid.
+ *
+ * @info: actually a struct etm_drvdata *; the void * signature suggests use
+ *        as a cross-call callback -- NOTE(review): confirm against callers.
+ */
+static void etm_os_unlock(void *info)
+{
+       struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
+       /* Writing any value to ETMOSLAR unlocks the trace registers */
+       etm_writel(drvdata, 0x0, ETMOSLAR);
+       isb();
+}
+
+/* Set the ETMCR power-down bit, fencing any outstanding accesses first. */
+static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
+{
+       u32 etmcr;
+
+       /* Ensure pending cp14 accesses complete before setting pwrdwn */
+       mb();
+       isb();
+       etmcr = etm_readl(drvdata, ETMCR);
+       etmcr |= ETMCR_PWD_DWN;
+       etm_writel(drvdata, etmcr, ETMCR);
+}
+
+/* Clear the ETMCR power-down bit, fencing before any further accesses. */
+static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
+{
+       u32 etmcr;
+
+       etmcr = etm_readl(drvdata, ETMCR);
+       etmcr &= ~ETMCR_PWD_DWN;
+       etm_writel(drvdata, etmcr, ETMCR);
+       /* Ensure pwrup completes before subsequent cp14 accesses */
+       mb();
+       isb();
+}
+
+/* Apply power to the trace registers via ETMPDCR (memory-mapped only). */
+static void etm_set_pwrup(struct etm_drvdata *drvdata)
+{
+       u32 etmpdcr;
+
+       etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+       etmpdcr |= ETMPDCR_PWD_UP;
+       writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+       /* Ensure pwrup completes before subsequent cp14 accesses */
+       mb();
+       isb();
+}
+
+/* Remove power from the trace registers via ETMPDCR (memory-mapped only). */
+static void etm_clr_pwrup(struct etm_drvdata *drvdata)
+{
+       u32 etmpdcr;
+
+       /* Ensure pending cp14 accesses complete before clearing pwrup */
+       mb();
+       isb();
+       etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
+       etmpdcr &= ~ETMPDCR_PWD_UP;
+       writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
+}
+
+/**
+ * coresight_timeout_etm - loop until a bit has changed to a specific state.
+ * @drvdata: etm's private data structure.
+ * @offset: address of a register, starting from @addr.
+ * @position: the position of the bit of interest.
+ * @value: the value the bit should have.
+ *
+ * Basically the same as @coresight_timeout except for the register access
+ * method where we have to account for CP14 configurations.
+ *
+ * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
+ * TIMEOUT_US has elapsed, whichever happens first.
+ */
+
+static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
+                                 int position, int value)
+{
+       int i;
+       u32 val;
+
+       for (i = TIMEOUT_US; i > 0; i--) {
+               val = etm_readl(drvdata, offset);
+               /* Waiting on the bit to go from 0 to 1 */
+               if (value) {
+                       if (val & BIT(position))
+                               return 0;
+               /* Waiting on the bit to go from 1 to 0 */
+               } else {
+                       if (!(val & BIT(position)))
+                               return 0;
+               }
+
+               /*
+                * Delay is arbitrary - the specification doesn't say how long
+                * we are expected to wait.  Extra check required to make sure
+                * we don't wait needlessly on the last iteration.
+                */
+               if (i - 1)
+                       udelay(1);
+       }
+
+       return -EAGAIN;
+}
+
+
+/* Put the ETM into programming mode and wait for ETMSR to confirm it. */
+static void etm_set_prog(struct etm_drvdata *drvdata)
+{
+       u32 etmcr;
+
+       etmcr = etm_readl(drvdata, ETMCR);
+       etmcr |= ETMCR_ETM_PRG;
+       etm_writel(drvdata, etmcr, ETMCR);
+       /*
+        * Recommended by spec for cp14 accesses to ensure etmcr write is
+        * complete before polling etmsr
+        */
+       isb();
+       if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n", ETMSR);
+       }
+}
+
+/* Take the ETM out of programming mode and wait for ETMSR to confirm it. */
+static void etm_clr_prog(struct etm_drvdata *drvdata)
+{
+       u32 etmcr;
+
+       etmcr = etm_readl(drvdata, ETMCR);
+       etmcr &= ~ETMCR_ETM_PRG;
+       etm_writel(drvdata, etmcr, ETMCR);
+       /*
+        * Recommended by spec for cp14 accesses to ensure etmcr write is
+        * complete before polling etmsr
+        */
+       isb();
+       if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n", ETMSR);
+       }
+}
+
+/*
+ * Reset the software copy of the trace configuration to benign defaults:
+ * events to ETM_DEFAULT_EVENT_VAL, counters/sequencer/context-ID state to 0.
+ * Only touches drvdata fields; nothing is written to the hardware here.
+ */
+static void etm_set_default(struct etm_drvdata *drvdata)
+{
+       int i;
+
+       drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->enable_event = ETM_HARD_WIRE_RES_A;
+
+       drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
+       drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
+
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               drvdata->cntr_rld_val[i] = 0x0;
+               drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
+               drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
+               drvdata->cntr_val[i] = 0x0;
+       }
+
+       drvdata->seq_curr_state = 0x0;
+       drvdata->ctxid_idx = 0x0;
+       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+               drvdata->ctxid_val[i] = 0x0;
+       drvdata->ctxid_mask = 0x0;
+}
+
+/*
+ * Program the full software configuration into the ETM and start tracing.
+ * @info is a struct etm_drvdata *; the void * signature matches the
+ * smp_call_function_single() callback used in etm_enable(), so this runs
+ * on the CPU that owns the ETM.  Register order is deliberate: power-up,
+ * OS-unlock, then programming mode before any config register is touched.
+ */
+static void etm_enable_hw(void *info)
+{
+       int i;
+       u32 etmcr;
+       struct etm_drvdata *drvdata = info;
+
+       CS_UNLOCK(drvdata->base);
+
+       /* Turn engine on */
+       etm_clr_pwrdwn(drvdata);
+       /* Apply power to trace registers */
+       etm_set_pwrup(drvdata);
+       /* Make sure all registers are accessible */
+       etm_os_unlock(drvdata);
+
+       etm_set_prog(drvdata);
+
+       /* Preserve only the power-down and programming bits of ETMCR */
+       etmcr = etm_readl(drvdata, ETMCR);
+       etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
+       etmcr |= drvdata->port_size;
+       etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
+       etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
+       etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
+       etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
+       etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
+       etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
+       for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+               etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
+               etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
+       }
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
+               etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
+               etm_writel(drvdata, drvdata->cntr_rld_event[i],
+                          ETMCNTRLDEVRn(i));
+               etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
+       }
+       etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
+       etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
+       etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
+       etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
+       etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
+       etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
+       etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
+       for (i = 0; i < drvdata->nr_ext_out; i++)
+               etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
+       for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
+               etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
+       etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
+       etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
+       /* No external input selected */
+       etm_writel(drvdata, 0x0, ETMEXTINSELR);
+       etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
+       /* No auxiliary control selected */
+       etm_writel(drvdata, 0x0, ETMAUXCR);
+       etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
+       /* No VMID comparator value selected */
+       etm_writel(drvdata, 0x0, ETMVMIDCVR);
+
+       /* Ensures trace output is enabled from this ETM */
+       etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
+
+       etm_clr_prog(drvdata);
+       CS_LOCK(drvdata->base);
+
+       dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
+}
+
+/*
+ * Lock-free trace-ID read: returns the cached value when the ETM is not
+ * enabled, otherwise reads ETMTRACEIDR directly (caller must guarantee
+ * the register window is accessible).
+ */
+static int etm_trace_id_simple(struct etm_drvdata *drvdata)
+{
+       if (!drvdata->enable)
+               return drvdata->traceid;
+
+       return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+}
+
+/*
+ * coresight source op: report this ETM's trace ID.  When the device is
+ * enabled the ID is read back from hardware under clock + spinlock;
+ * returns -1 if the clock cannot be enabled.
+ */
+static int etm_trace_id(struct coresight_device *csdev)
+{
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       unsigned long flags;
+       int trace_id = -1;
+
+       if (!drvdata->enable)
+               return drvdata->traceid;
+
+       if (clk_prepare_enable(drvdata->clk))
+               goto out;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       CS_UNLOCK(drvdata->base);
+       trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+       CS_LOCK(drvdata->base);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       clk_disable_unprepare(drvdata->clk);
+out:
+       return trace_id;
+}
+
+/*
+ * coresight source op: turn tracing on.  The hardware is programmed via a
+ * cross-call so register writes happen on the owning CPU; clock stays
+ * enabled for as long as tracing is active (released in etm_disable()).
+ * Returns 0 on success or a negative errno.
+ */
+static int etm_enable(struct coresight_device *csdev)
+{
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               goto err_clk;
+
+       spin_lock(&drvdata->spinlock);
+
+       /*
+        * Configure the ETM only if the CPU is online.  If it isn't online
+        * hw configuration will take place when 'CPU_STARTING' is received
+        * in @etm_cpu_callback.
+        */
+       if (cpu_online(drvdata->cpu)) {
+               ret = smp_call_function_single(drvdata->cpu,
+                                              etm_enable_hw, drvdata, 1);
+               if (ret)
+                       goto err;
+       }
+
+       drvdata->enable = true;
+       drvdata->sticky_enable = true;
+
+       spin_unlock(&drvdata->spinlock);
+
+       dev_info(drvdata->dev, "ETM tracing enabled\n");
+       return 0;
+err:
+       spin_unlock(&drvdata->spinlock);
+       clk_disable_unprepare(drvdata->clk);
+err_clk:
+       return ret;
+}
+
+/*
+ * Stop tracing on the owning CPU (cross-call callback, @info is the
+ * etm_drvdata).  Sequencer and counter state are read back into drvdata
+ * before power-down so post-trace analysis can see them.
+ */
+static void etm_disable_hw(void *info)
+{
+       int i;
+       struct etm_drvdata *drvdata = info;
+
+       CS_UNLOCK(drvdata->base);
+       etm_set_prog(drvdata);
+
+       /* Program trace enable to low by using always false event */
+       etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
+
+       /* Read back sequencer and counters for post trace analysis */
+       drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+
+       for (i = 0; i < drvdata->nr_cntr; i++)
+               drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
+
+       etm_set_pwrdwn(drvdata);
+       CS_LOCK(drvdata->base);
+
+       dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
+}
+
+/*
+ * coresight source op: turn tracing off and release the clock taken in
+ * etm_enable().
+ */
+static void etm_disable(struct coresight_device *csdev)
+{
+       struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       /*
+        * Taking hotplug lock here protects from clocks getting disabled
+        * with tracing being left on (crash scenario) if user disable occurs
+        * after cpu online mask indicates the cpu is offline but before the
+        * DYING hotplug callback is serviced by the ETM driver.
+        */
+       get_online_cpus();
+       spin_lock(&drvdata->spinlock);
+
+       /*
+        * Executing etm_disable_hw on the cpu whose ETM is being disabled
+        * ensures that register writes occur when cpu is powered.
+        */
+       smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
+       drvdata->enable = false;
+
+       spin_unlock(&drvdata->spinlock);
+       put_online_cpus();
+
+       clk_disable_unprepare(drvdata->clk);
+
+       dev_info(drvdata->dev, "ETM tracing disabled\n");
+}
+
+/* CoreSight framework hooks: this driver is a trace source. */
+static const struct coresight_ops_source etm_source_ops = {
+       .trace_id       = etm_trace_id,
+       .enable         = etm_enable,
+       .disable        = etm_disable,
+};
+
+static const struct coresight_ops etm_cs_ops = {
+       .source_ops     = &etm_source_ops,
+};
+
+/* sysfs: number of address comparator pairs implemented (read-only). */
+static ssize_t nr_addr_cmp_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_addr_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_addr_cmp);
+
+/*
+ * sysfs: number of counters implemented (read-only).
+ * Style fix: the first declaration was fused onto the opening-brace line,
+ * unlike every sibling accessor; split it out for consistency.
+ */
+static ssize_t nr_cntr_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_cntr;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_cntr);
+
+/* sysfs: number of context-ID comparators implemented (read-only). */
+static ssize_t nr_ctxid_cmp_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->nr_ctxid_cmp;
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(nr_ctxid_cmp);
+
+/* sysfs: read the live ETM status register under clock and spinlock. */
+static ssize_t etmsr_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+       int ret;
+       unsigned long flags, val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       CS_UNLOCK(drvdata->base);
+
+       val = etm_readl(drvdata, ETMSR);
+
+       CS_LOCK(drvdata->base);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       clk_disable_unprepare(drvdata->clk);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+static DEVICE_ATTR_RO(etmsr);
+
+/*
+ * sysfs: writing any non-zero value resets the software trace configuration
+ * (mode, control, address comparators, counters) back to defaults.  The
+ * hardware itself is untouched until the next enable.
+ */
+static ssize_t reset_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
+{
+       int i, ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       if (val) {
+               spin_lock(&drvdata->spinlock);
+               drvdata->mode = ETM_MODE_EXCLUDE;
+               drvdata->ctrl = 0x0;
+               drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
+               drvdata->startstop_ctrl = 0x0;
+               drvdata->addr_idx = 0x0;
+               for (i = 0; i < drvdata->nr_addr_cmp; i++) {
+                       drvdata->addr_val[i] = 0x0;
+                       drvdata->addr_acctype[i] = 0x0;
+                       drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
+               }
+               drvdata->cntr_idx = 0x0;
+
+               etm_set_default(drvdata);
+               spin_unlock(&drvdata->spinlock);
+       }
+
+       return size;
+}
+static DEVICE_ATTR_WO(reset);
+
+/* sysfs: current trace mode flags (ETM_MODE_* bitmask). */
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->mode;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+/*
+ * sysfs: set the trace mode (ETM_MODE_* bitmask) and fold the flags into
+ * the cached ETMCR/ETMTECR1 values.  Stall and timestamp modes are rejected
+ * with -EINVAL when the hardware lacks the capability.
+ *
+ * Fix: the stall/timestamp error paths previously returned -EINVAL while
+ * still holding drvdata->spinlock, deadlocking the next acquirer; unwind
+ * through a common unlock label instead.
+ */
+static ssize_t mode_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->mode = val & ETM_MODE_ALL;
+
+       if (drvdata->mode & ETM_MODE_EXCLUDE)
+               drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
+       else
+               drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
+
+       if (drvdata->mode & ETM_MODE_CYCACC)
+               drvdata->ctrl |= ETMCR_CYC_ACC;
+       else
+               drvdata->ctrl &= ~ETMCR_CYC_ACC;
+
+       if (drvdata->mode & ETM_MODE_STALL) {
+               if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
+                       dev_warn(drvdata->dev, "stall mode not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               drvdata->ctrl |= ETMCR_STALL_MODE;
+       } else
+               drvdata->ctrl &= ~ETMCR_STALL_MODE;
+
+       if (drvdata->mode & ETM_MODE_TIMESTAMP) {
+               if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
+                       dev_warn(drvdata->dev, "timestamp not supported\n");
+                       ret = -EINVAL;
+                       goto err_unlock;
+               }
+               drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
+       } else
+               drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
+
+       if (drvdata->mode & ETM_MODE_CTXID)
+               drvdata->ctrl |= ETMCR_CTXID_SIZE;
+       else
+               drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+
+err_unlock:
+       spin_unlock(&drvdata->spinlock);
+       return ret;
+}
+static DEVICE_ATTR_RW(mode);
+
+/* sysfs: trigger event resource selector (hex, masked to ETM_EVENT_MASK). */
+static ssize_t trigger_event_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->trigger_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_event_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->trigger_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(trigger_event);
+
+/* sysfs: trace-enable event resource selector (hex, masked). */
+static ssize_t enable_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->enable_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t enable_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->enable_event = val & ETM_EVENT_MASK;
+
+       return size;
+}
+static DEVICE_ATTR_RW(enable_event);
+
+/* sysfs: FIFO-full threshold written to ETMFFLR on enable (unvalidated). */
+static ssize_t fifofull_level_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->fifofull_level;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t fifofull_level_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->fifofull_level = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(fifofull_level);
+
+/* sysfs: select which address comparator the other addr_* files act on. */
+static ssize_t addr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->addr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* Index must address an implemented comparator */
+       if (val >= drvdata->nr_addr_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       drvdata->addr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_idx);
+
+/*
+ * sysfs: program/read the selected comparator as a single-address match.
+ * Rejected (-EINVAL) when the slot is already used as another type.
+ */
+static ssize_t addr_single_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       val = drvdata->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_single_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EINVAL;
+       }
+
+       drvdata->addr_val[idx] = val;
+       drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_single);
+
+/*
+ * sysfs: program/read a comparator pair as an address range.  The index
+ * must be even (ranges use comparators idx and idx+1) and both slots must
+ * be free or already of RANGE type; otherwise -EPERM.
+ */
+static ssize_t addr_range_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val1 = drvdata->addr_val[idx];
+       val2 = drvdata->addr_val[idx + 1];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx %#lx\n", val1, val2);
+}
+
+static ssize_t addr_range_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       u8 idx;
+       unsigned long val1, val2;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
+               return -EINVAL;
+       /* Lower address comparator cannot have a higher address value */
+       if (val1 > val2)
+               return -EINVAL;
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (idx % 2 != 0) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+       if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
+              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
+             (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
+              drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       drvdata->addr_val[idx] = val1;
+       drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
+       drvdata->addr_val[idx + 1] = val2;
+       drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
+       /* Include this comparator pair in the trace-enable region selection */
+       drvdata->enable_ctrl1 |= (1 << (idx/2));
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_range);
+
+/*
+ * sysfs: program/read the selected comparator as a trace-start address.
+ * -EPERM if the slot is in use as a different comparator type.
+ */
+static ssize_t addr_start_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = drvdata->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_start_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       drvdata->addr_val[idx] = val;
+       drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
+       /* Mark comparator idx as a start resource in ETMTSSCR */
+       drvdata->startstop_ctrl |= (1 << idx);
+       /* BIT(25): start/stop control enable -- presumably ETMTECR1_START_STOP;
+        * NOTE(review): addr_stop_store uses the named constant, unify? */
+       drvdata->enable_ctrl1 |= BIT(25);
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_start);
+
+/*
+ * sysfs: program/read the selected comparator as a trace-stop address.
+ * -EPERM if the slot is in use as a different comparator type.
+ */
+static ssize_t addr_stop_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       u8 idx;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       val = drvdata->addr_val[idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_stop_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       u8 idx;
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       idx = drvdata->addr_idx;
+       if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
+             drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
+               spin_unlock(&drvdata->spinlock);
+               return -EPERM;
+       }
+
+       drvdata->addr_val[idx] = val;
+       drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
+       /* Stop resources occupy the upper half of ETMTSSCR */
+       drvdata->startstop_ctrl |= (1 << (idx + 16));
+       drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_stop);
+
+/* sysfs: access-type control for the selected address comparator (ETMACTRn). */
+static ssize_t addr_acctype_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       val = drvdata->addr_acctype[drvdata->addr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t addr_acctype_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->addr_acctype[drvdata->addr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(addr_acctype);
+
+/* sysfs: select which counter the other cntr_* files act on. */
+static ssize_t cntr_idx_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->cntr_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_idx_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* Index must address an implemented counter */
+       if (val >= drvdata->nr_cntr)
+               return -EINVAL;
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       drvdata->cntr_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_idx);
+
+static ssize_t cntr_rld_val_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       val = drvdata->cntr_rld_val[drvdata->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_val_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_val);
+
+static ssize_t cntr_event_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       val = drvdata->cntr_event[drvdata->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_event_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_event);
+
+static ssize_t cntr_rld_event_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       val = drvdata->cntr_rld_event[drvdata->cntr_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t cntr_rld_event_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_rld_event);
+
+/*
+ * cntr_val: dump the value of every implemented counter, one line per
+ * counter.  When tracing is disabled the software-staged values are
+ * reported under the lock; when enabled the live hardware registers are
+ * read instead.
+ *
+ * Fix: the original passed 'buf' (not 'buf + ret') to sprintf() on every
+ * loop iteration, so each counter line overwrote the previous one while
+ * 'ret' still accumulated the total length - the returned size did not
+ * match the buffer contents.  Write at the running offset instead.
+ */
+static ssize_t cntr_val_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       int i, ret = 0;
+       u32 val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       if (!drvdata->enable) {
+               spin_lock(&drvdata->spinlock);
+               for (i = 0; i < drvdata->nr_cntr; i++)
+                       ret += sprintf(buf + ret, "counter %d: %x\n",
+                                      i, drvdata->cntr_val[i]);
+               spin_unlock(&drvdata->spinlock);
+               return ret;
+       }
+
+       for (i = 0; i < drvdata->nr_cntr; i++) {
+               val = etm_readl(drvdata, ETMCNTVRn(i));
+               ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
+       }
+
+       return ret;
+}
+
+/* Stage a value for the counter selected by cntr_idx (hex input). */
+static ssize_t cntr_val_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* Lock so cntr_idx can't change between read and write. */
+       spin_lock(&drvdata->spinlock);
+       drvdata->cntr_val[drvdata->cntr_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(cntr_val);
+
+/*
+ * Sequencer transition events: seq_XY_event holds the event that moves
+ * the 3-state sequencer from state X to state Y.  Values are masked
+ * with ETM_EVENT_MASK on store.
+ *
+ * NOTE(review): unlike the cntr_* stores above, these stores write the
+ * drvdata field without taking the spinlock - presumably acceptable for
+ * a single word write, but worth confirming against the enable path.
+ */
+static ssize_t seq_12_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_12_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_12_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_12_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_12_event);
+
+static ssize_t seq_21_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_21_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_21_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_21_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_21_event);
+
+static ssize_t seq_23_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_23_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_23_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_23_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_23_event);
+
+static ssize_t seq_31_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_31_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_31_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_31_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_31_event);
+
+static ssize_t seq_32_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_32_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_32_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_32_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_32_event);
+
+static ssize_t seq_13_event_show(struct device *dev,
+                                struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->seq_13_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_13_event_store(struct device *dev,
+                                 struct device_attribute *attr,
+                                 const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->seq_13_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(seq_13_event);
+
+/*
+ * seq_curr_state: current sequencer state.  When tracing is disabled
+ * the software copy is returned; when enabled the live ETMSQR register
+ * is read with the clock on, the coresight lane unlocked and the
+ * spinlock held (irqsave, since the CPU-hotplug notifier also takes
+ * this lock).
+ */
+static ssize_t seq_curr_state_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       int ret;
+       unsigned long val, flags;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       if (!drvdata->enable) {
+               val = drvdata->seq_curr_state;
+               goto out;
+       }
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       CS_UNLOCK(drvdata->base);
+       val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
+       CS_LOCK(drvdata->base);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       clk_disable_unprepare(drvdata->clk);
+out:
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t seq_curr_state_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* The sequencer only has states 0..ETM_SEQ_STATE_MAX_VAL. */
+       if (val > ETM_SEQ_STATE_MAX_VAL)
+               return -EINVAL;
+
+       drvdata->seq_curr_state = val;
+
+       return size;
+}
+static DEVICE_ATTR_RW(seq_curr_state);
+
+/*
+ * Context-ID comparator attributes: ctxid_idx selects one of the
+ * nr_ctxid_cmp comparators, ctxid_val stages its match value and
+ * ctxid_mask the (shared) mask.
+ */
+static ssize_t ctxid_idx_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->ctxid_idx;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_idx_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       /* Reject indexes beyond what the hardware implements. */
+       if (val >= drvdata->nr_ctxid_cmp)
+               return -EINVAL;
+
+       /*
+        * Use spinlock to ensure index doesn't change while it gets
+        * dereferenced multiple times within a spinlock block elsewhere.
+        */
+       spin_lock(&drvdata->spinlock);
+       drvdata->ctxid_idx = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_idx);
+
+static ssize_t ctxid_val_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       spin_lock(&drvdata->spinlock);
+       val = drvdata->ctxid_val[drvdata->ctxid_idx];
+       spin_unlock(&drvdata->spinlock);
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_val_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       spin_lock(&drvdata->spinlock);
+       drvdata->ctxid_val[drvdata->ctxid_idx] = val;
+       spin_unlock(&drvdata->spinlock);
+
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_val);
+
+static ssize_t ctxid_mask_show(struct device *dev,
+                              struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->ctxid_mask;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t ctxid_mask_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->ctxid_mask = val;
+       return size;
+}
+static DEVICE_ATTR_RW(ctxid_mask);
+
+/* sync_freq: staged synchronisation frequency, masked with ETM_SYNC_MASK. */
+static ssize_t sync_freq_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->sync_freq;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t sync_freq_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->sync_freq = val & ETM_SYNC_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(sync_freq);
+
+/* timestamp_event: event that triggers timestamp insertion (masked). */
+static ssize_t timestamp_event_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       val = drvdata->timestamp_event;
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t timestamp_event_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->timestamp_event = val & ETM_EVENT_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(timestamp_event);
+
+/*
+ * status: one-shot debug dump of the main configuration/ID registers.
+ * The clock must be on and the coresight lane unlocked for the live
+ * register reads; cached etmccr/etmccer values are mixed in.
+ */
+static ssize_t status_show(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+       int ret;
+       unsigned long flags;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+
+       CS_UNLOCK(drvdata->base);
+       ret = sprintf(buf,
+                     "ETMCCR: 0x%08x\n"
+                     "ETMCCER: 0x%08x\n"
+                     "ETMSCR: 0x%08x\n"
+                     "ETMIDR: 0x%08x\n"
+                     "ETMCR: 0x%08x\n"
+                     "ETMTRACEIDR: 0x%08x\n"
+                     "Enable event: 0x%08x\n"
+                     "Enable start/stop: 0x%08x\n"
+                     "Enable control: CR1 0x%08x CR2 0x%08x\n"
+                     "CPU affinity: %d\n",
+                     drvdata->etmccr, drvdata->etmccer,
+                     etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
+                     etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
+                     etm_readl(drvdata, ETMTEEVR),
+                     etm_readl(drvdata, ETMTSSCR),
+                     etm_readl(drvdata, ETMTECR1),
+                     etm_readl(drvdata, ETMTECR2),
+                     drvdata->cpu);
+       CS_LOCK(drvdata->base);
+
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       clk_disable_unprepare(drvdata->clk);
+
+       return ret;
+}
+static DEVICE_ATTR_RO(status);
+
+/*
+ * traceid: the trace source ID this ETM stamps on its output.  Reads
+ * the live ETMTRACEIDR when tracing is enabled, the staged value
+ * otherwise; stores only update the staged value (masked).
+ */
+static ssize_t traceid_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       int ret;
+       unsigned long val, flags;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       if (!drvdata->enable) {
+               val = drvdata->traceid;
+               goto out;
+       }
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       CS_UNLOCK(drvdata->base);
+
+       val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
+
+       CS_LOCK(drvdata->base);
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       clk_disable_unprepare(drvdata->clk);
+out:
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t traceid_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->traceid = val & ETM_TRACEID_MASK;
+       return size;
+}
+static DEVICE_ATTR_RW(traceid);
+
+/*
+ * Every sysfs attribute defined above, exported as one group and handed
+ * to the coresight core via desc->groups at registration time.
+ */
+static struct attribute *coresight_etm_attrs[] = {
+       &dev_attr_nr_addr_cmp.attr,
+       &dev_attr_nr_cntr.attr,
+       &dev_attr_nr_ctxid_cmp.attr,
+       &dev_attr_etmsr.attr,
+       &dev_attr_reset.attr,
+       &dev_attr_mode.attr,
+       &dev_attr_trigger_event.attr,
+       &dev_attr_enable_event.attr,
+       &dev_attr_fifofull_level.attr,
+       &dev_attr_addr_idx.attr,
+       &dev_attr_addr_single.attr,
+       &dev_attr_addr_range.attr,
+       &dev_attr_addr_start.attr,
+       &dev_attr_addr_stop.attr,
+       &dev_attr_addr_acctype.attr,
+       &dev_attr_cntr_idx.attr,
+       &dev_attr_cntr_rld_val.attr,
+       &dev_attr_cntr_event.attr,
+       &dev_attr_cntr_rld_event.attr,
+       &dev_attr_cntr_val.attr,
+       &dev_attr_seq_12_event.attr,
+       &dev_attr_seq_21_event.attr,
+       &dev_attr_seq_23_event.attr,
+       &dev_attr_seq_31_event.attr,
+       &dev_attr_seq_32_event.attr,
+       &dev_attr_seq_13_event.attr,
+       &dev_attr_seq_curr_state.attr,
+       &dev_attr_ctxid_idx.attr,
+       &dev_attr_ctxid_val.attr,
+       &dev_attr_ctxid_mask.attr,
+       &dev_attr_sync_freq.attr,
+       &dev_attr_timestamp_event.attr,
+       &dev_attr_status.attr,
+       &dev_attr_traceid.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etm);
+
+/*
+ * CPU hotplug notifier: keeps per-CPU ETMs coherent across hotplug.
+ * CPU_STARTING (on the incoming CPU) re-does the OS-unlock and
+ * re-programs the hardware if tracing was logically enabled;
+ * CPU_ONLINE honours a pending boot_enable request; CPU_DYING tears the
+ * hardware down without touching the logical enable state.
+ */
+static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
+                           void *hcpu)
+{
+       unsigned int cpu = (unsigned long)hcpu;
+
+       /* No ETM probed for this CPU - nothing to do. */
+       if (!etmdrvdata[cpu])
+               goto out;
+
+       switch (action & (~CPU_TASKS_FROZEN)) {
+       case CPU_STARTING:
+               spin_lock(&etmdrvdata[cpu]->spinlock);
+               if (!etmdrvdata[cpu]->os_unlock) {
+                       etm_os_unlock(etmdrvdata[cpu]);
+                       etmdrvdata[cpu]->os_unlock = true;
+               }
+
+               if (etmdrvdata[cpu]->enable)
+                       etm_enable_hw(etmdrvdata[cpu]);
+               spin_unlock(&etmdrvdata[cpu]->spinlock);
+               break;
+
+       case CPU_ONLINE:
+               if (etmdrvdata[cpu]->boot_enable &&
+                   !etmdrvdata[cpu]->sticky_enable)
+                       coresight_enable(etmdrvdata[cpu]->csdev);
+               break;
+
+       case CPU_DYING:
+               spin_lock(&etmdrvdata[cpu]->spinlock);
+               if (etmdrvdata[cpu]->enable)
+                       etm_disable_hw(etmdrvdata[cpu]);
+               spin_unlock(&etmdrvdata[cpu]->spinlock);
+               break;
+       }
+out:
+       return NOTIFY_OK;
+}
+
+static struct notifier_block etm_cpu_notifier = {
+       .notifier_call = etm_cpu_callback,
+};
+
+/*
+ * etm_arch_supported - check whether this driver knows the trace macrocell
+ * @arch: architecture version field decoded from ETMIDR
+ *
+ * Returns true for the ETMv3.3/v3.5 and PFTv1.0/v1.1 variants handled
+ * by this driver, false for anything else.
+ */
+static bool etm_arch_supported(u8 arch)
+{
+       switch (arch) {
+       case ETM_ARCH_V3_3:
+       case ETM_ARCH_V3_5:
+       case PFT_ARCH_V1_0:
+       case PFT_ARCH_V1_1:
+               return true;
+       default:
+               return false;
+       }
+}
+
+/*
+ * etm_init_arch_data - discover hardware capabilities.
+ * Runs on the CPU the ETM is affine to (via smp_call_function_single from
+ * the probe path).  Powers the macrocell up, reads the ID/configuration
+ * registers into drvdata and powers it back down.
+ */
+static void etm_init_arch_data(void *info)
+{
+       u32 etmidr;
+       u32 etmccr;
+       struct etm_drvdata *drvdata = info;
+
+       CS_UNLOCK(drvdata->base);
+
+       /* First dummy read */
+       (void)etm_readl(drvdata, ETMPDSR);
+       /* Provide power to ETM: ETMPDCR[3] == 1 */
+       etm_set_pwrup(drvdata);
+       /*
+        * Clear power down bit since when this bit is set writes to
+        * certain registers might be ignored.
+        */
+       etm_clr_pwrdwn(drvdata);
+       /*
+        * Set prog bit. It will be set from reset but this is included to
+        * ensure it is set
+        */
+       etm_set_prog(drvdata);
+
+       /* Find all capabilities */
+       etmidr = etm_readl(drvdata, ETMIDR);
+       drvdata->arch = BMVAL(etmidr, 4, 11);
+       drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
+
+       drvdata->etmccer = etm_readl(drvdata, ETMCCER);
+       etmccr = etm_readl(drvdata, ETMCCR);
+       drvdata->etmccr = etmccr;
+       /* ETMCCR[3:0] counts comparator *pairs*, hence the * 2. */
+       drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
+       drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
+       drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
+       drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
+       drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
+
+       /* Done probing - power the macrocell back down. */
+       etm_set_pwrdwn(drvdata);
+       etm_clr_pwrup(drvdata);
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * etm_init_default_data - seed a sane default configuration: cycle
+ * accurate + timestamped tracing of the kernel text range (_stext to
+ * _etext) via address comparator pair 0/1, when the hardware has at
+ * least one pair.
+ */
+static void etm_init_default_data(struct etm_drvdata *drvdata)
+{
+       /* Monotonic counter shared by all probed ETMs in this module. */
+       static int etm3x_traceid;
+
+       u32 flags = (1 << 0 | /* instruction execute*/
+                    3 << 3 | /* ARM instruction */
+                    0 << 5 | /* No data value comparison */
+                    0 << 7 | /* No exact mach */
+                    0 << 8 | /* Ignore context ID */
+                    0 << 10); /* Security ignored */
+
+       /*
+        * Initial configuration only - guarantees sources handled by
+        * this driver have a unique ID at startup time but not between
+        * all other types of sources.  For that we lean on the core
+        * framework.
+        */
+       drvdata->traceid = etm3x_traceid++;
+       drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
+       drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
+       if (drvdata->nr_addr_cmp >= 2) {
+               drvdata->addr_val[0] = (u32) _stext;
+               drvdata->addr_val[1] = (u32) _etext;
+               drvdata->addr_acctype[0] = flags;
+               drvdata->addr_acctype[1] = flags;
+               drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
+               drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
+       }
+
+       etm_set_default(drvdata);
+}
+
+/*
+ * etm_probe - AMBA probe for one per-CPU ETM/PTM.
+ *
+ * Maps the component, discovers its architecture on the affine CPU,
+ * seeds default configuration and registers it with the coresight core.
+ * The first successful probe also installs the CPU hotplug notifier.
+ *
+ * NOTE(review): on the error path etmdrvdata[drvdata->cpu] is left
+ * pointing at soon-to-be-freed devm memory - presumably harmless while
+ * the notifier is also unregistered, but worth confirming.
+ */
+static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       int ret;
+       void __iomem *base;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct etm_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+
+               adev->dev.platform_data = pdata;
+               /* "arm,cp14" selects CP14 access over memory-mapped I/O. */
+               drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
+       }
+
+       drvdata->dev = &adev->dev;
+       dev_set_drvdata(dev, drvdata);
+
+       /* Validity for the resource is already checked by the AMBA core */
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       drvdata->base = base;
+
+       spin_lock_init(&drvdata->spinlock);
+
+       drvdata->clk = adev->pclk;
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       /* Without DT data, assume affinity with CPU0. */
+       drvdata->cpu = pdata ? pdata->cpu : 0;
+
+       /* Keep the affine CPU from going away during init. */
+       get_online_cpus();
+       etmdrvdata[drvdata->cpu] = drvdata;
+
+       if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
+               drvdata->os_unlock = true;
+
+       if (smp_call_function_single(drvdata->cpu,
+                                    etm_init_arch_data,  drvdata, 1))
+               dev_err(dev, "ETM arch init failed\n");
+
+       /* Install the hotplug notifier once, on the first probe. */
+       if (!etm_count++)
+               register_hotcpu_notifier(&etm_cpu_notifier);
+
+       put_online_cpus();
+
+       if (etm_arch_supported(drvdata->arch) == false) {
+               ret = -EINVAL;
+               goto err_arch_supported;
+       }
+       etm_init_default_data(drvdata);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       desc->type = CORESIGHT_DEV_TYPE_SOURCE;
+       desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
+       desc->ops = &etm_cs_ops;
+       desc->pdata = pdata;
+       desc->dev = dev;
+       desc->groups = coresight_etm_groups;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev)) {
+               ret = PTR_ERR(drvdata->csdev);
+               goto err_arch_supported;
+       }
+
+       dev_info(dev, "ETM initialized\n");
+
+       /* Honour the "boot_enable" module parameter. */
+       if (boot_enable) {
+               coresight_enable(drvdata->csdev);
+               drvdata->boot_enable = true;
+       }
+
+       return 0;
+
+err_arch_supported:
+       clk_disable_unprepare(drvdata->clk);
+       if (--etm_count == 0)
+               unregister_hotcpu_notifier(&etm_cpu_notifier);
+       return ret;
+}
+
+/*
+ * etm_remove - undo etm_probe: unregister from the coresight core and,
+ * when the last ETM goes away, drop the hotplug notifier.  Memory and
+ * the iomap are devm-managed.
+ */
+static int etm_remove(struct amba_device *adev)
+{
+       struct etm_drvdata *drvdata = amba_get_drvdata(adev);
+
+       coresight_unregister(drvdata->csdev);
+       if (--etm_count == 0)
+               unregister_hotcpu_notifier(&etm_cpu_notifier);
+
+       return 0;
+}
+
+/* AMBA peripheral IDs of the trace macrocells this driver binds to. */
+static struct amba_id etm_ids[] = {
+       {       /* ETM 3.3 */
+               .id     = 0x0003b921,
+               .mask   = 0x0003ffff,
+       },
+       {       /* ETM 3.5 */
+               .id     = 0x0003b956,
+               .mask   = 0x0003ffff,
+       },
+       {       /* PTM 1.0 */
+               .id     = 0x0003b950,
+               .mask   = 0x0003ffff,
+       },
+       {       /* PTM 1.1 */
+               .id     = 0x0003b95f,
+               .mask   = 0x0003ffff,
+       },
+       { 0, 0},
+};
+
+static struct amba_driver etm_driver = {
+       .drv = {
+               .name   = "coresight-etm3x",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = etm_probe,
+       .remove         = etm_remove,
+       .id_table       = etm_ids,
+};
+
+int __init etm_init(void)
+{
+       return amba_driver_register(&etm_driver);
+}
+module_init(etm_init);
+
+void __exit etm_exit(void)
+{
+       amba_driver_unregister(&etm_driver);
+}
+module_exit(etm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
new file mode 100644 (file)
index 0000000..2108edf
--- /dev/null
@@ -0,0 +1,268 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define FUNNEL_FUNCTL          0x000
+#define FUNNEL_PRICTL          0x004
+
+#define FUNNEL_HOLDTIME_MASK   0xf00
+#define FUNNEL_HOLDTIME_SHFT   0x8
+#define FUNNEL_HOLDTIME                (0x7 << FUNNEL_HOLDTIME_SHFT)
+
+/**
+ * struct funnel_drvdata - specifics associated to a funnel component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @clk:       the clock this component is associated to.
+ * @priority:  port selection order.
+ */
+struct funnel_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct coresight_device *csdev;
+       struct clk              *clk;
+       unsigned long           priority;
+};
+
+/*
+ * funnel_enable_hw - program the funnel to accept trace on @port.
+ * Sets the hold time field and the per-port enable bit in FUNCTL,
+ * then applies the configured port priority via PRICTL.
+ * Caller must hold a reference on the funnel clock.
+ */
+static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
+{
+       u32 functl;
+
+       CS_UNLOCK(drvdata->base);
+
+       functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+       functl &= ~FUNNEL_HOLDTIME_MASK;
+       functl |= FUNNEL_HOLDTIME;
+       functl |= (1 << port);
+       writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+       writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
+
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * funnel_enable - coresight link-ops enable hook.
+ * Turns the clock on for the duration of the trace session and enables
+ * the requested input port. @outport is unused: a funnel has one output.
+ * Returns 0 on success or the clk_prepare_enable() error.
+ */
+static int funnel_enable(struct coresight_device *csdev, int inport,
+                        int outport)
+{
+       struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       funnel_enable_hw(drvdata, inport);
+
+       dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
+       return 0;
+}
+
+/* Clear the per-port enable bit for @inport in FUNCTL. */
+static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
+{
+       u32 functl;
+
+       CS_UNLOCK(drvdata->base);
+
+       functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+       functl &= ~(1 << inport);
+       writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
+
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * funnel_disable - coresight link-ops disable hook.
+ * Disables @inport and releases the clock reference taken by
+ * funnel_enable(). @outport is unused.
+ */
+static void funnel_disable(struct coresight_device *csdev, int inport,
+                          int outport)
+{
+       struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       funnel_disable_hw(drvdata, inport);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
+}
+
+static const struct coresight_ops_link funnel_link_ops = {
+       .enable         = funnel_enable,
+       .disable        = funnel_disable,
+};
+
+static const struct coresight_ops funnel_cs_ops = {
+       .link_ops       = &funnel_link_ops,
+};
+
+/* sysfs: show the cached port priority value (hex). */
+static ssize_t priority_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val = drvdata->priority;
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+/*
+ * sysfs: store a new priority value (parsed as hex).  It is only
+ * written to FUNNEL_PRICTL on the next funnel_enable_hw() call.
+ */
+static ssize_t priority_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->priority = val;
+       return size;
+}
+static DEVICE_ATTR_RW(priority);
+
+/* Read the live FUNCTL register (requires an enabled clock). */
+static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
+{
+       u32 functl;
+
+       CS_UNLOCK(drvdata->base);
+       functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
+       CS_LOCK(drvdata->base);
+
+       return functl;
+}
+
+/*
+ * sysfs: dump the raw FUNCTL register.  The clock is enabled just for
+ * the duration of the read so the access does not hang the bus.
+ */
+static ssize_t funnel_ctrl_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
+{
+       int ret;
+       u32 val;
+       struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       val = get_funnel_ctrl_hw(drvdata);
+       clk_disable_unprepare(drvdata->clk);
+
+       return sprintf(buf, "%#x\n", val);
+}
+static DEVICE_ATTR_RO(funnel_ctrl);
+
+static struct attribute *coresight_funnel_attrs[] = {
+       &dev_attr_funnel_ctrl.attr,
+       &dev_attr_priority.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_funnel);
+
+/*
+ * funnel_probe - discover and register a funnel.
+ * Platform data (port connections) is built from the device tree when
+ * present; all allocations are devm-managed so no explicit error
+ * unwinding is needed.  The clock comes from the AMBA bus (pclk).
+ */
+static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       void __iomem *base;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct funnel_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               adev->dev.platform_data = pdata;
+       }
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &adev->dev;
+       dev_set_drvdata(dev, drvdata);
+
+       /* Validity for the resource is already checked by the AMBA core */
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       drvdata->base = base;
+
+       drvdata->clk = adev->pclk;
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       /* a funnel merges several inputs into one output stream */
+       desc->type = CORESIGHT_DEV_TYPE_LINK;
+       desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
+       desc->ops = &funnel_cs_ops;
+       desc->pdata = pdata;
+       desc->dev = dev;
+       desc->groups = coresight_funnel_groups;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev))
+               return PTR_ERR(drvdata->csdev);
+
+       dev_info(dev, "FUNNEL initialized\n");
+       return 0;
+}
+
+/* Undo funnel_probe: devm frees the rest. */
+static int funnel_remove(struct amba_device *adev)
+{
+       struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
+
+       coresight_unregister(drvdata->csdev);
+       return 0;
+}
+
+static struct amba_id funnel_ids[] = {
+       {
+               .id     = 0x0003b908,
+               .mask   = 0x0003ffff,
+       },
+       { 0, 0},
+};
+
+static struct amba_driver funnel_driver = {
+       .drv = {
+               .name   = "coresight-funnel",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = funnel_probe,
+       .remove         = funnel_remove,
+       .id_table       = funnel_ids,
+};
+
+static int __init funnel_init(void)
+{
+       return amba_driver_register(&funnel_driver);
+}
+module_init(funnel_init);
+
+static void __exit funnel_exit(void)
+{
+       amba_driver_unregister(&funnel_driver);
+}
+module_exit(funnel_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
new file mode 100644 (file)
index 0000000..7b3372f
--- /dev/null
@@ -0,0 +1,63 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _CORESIGHT_PRIV_H
+#define _CORESIGHT_PRIV_H
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/coresight.h>
+
+/*
+ * Coresight management registers (0xf00-0xfcc)
+ * 0xfa0 - 0xfa4: Management   registers in PFTv1.0
+ *               Trace         registers in PFTv1.1
+ */
+#define CORESIGHT_ITCTRL       0xf00
+#define CORESIGHT_CLAIMSET     0xfa0
+#define CORESIGHT_CLAIMCLR     0xfa4
+#define CORESIGHT_LAR          0xfb0
+#define CORESIGHT_LSR          0xfb4
+#define CORESIGHT_AUTHSTATUS   0xfb8
+#define CORESIGHT_DEVID                0xfc8
+#define CORESIGHT_DEVTYPE      0xfcc
+
+#define TIMEOUT_US             100
+#define BMVAL(val, lsb, msb)   ((val & GENMASK(msb, lsb)) >> lsb)
+
+/*
+ * Re-engage the software lock by writing a non-key value to the Lock
+ * Access Register.  The do/while(0) wrapper is a relic of the macro
+ * form of these helpers; it has no effect in an inline function.
+ */
+static inline void CS_LOCK(void __iomem *addr)
+{
+       do {
+               /* Wait for things to settle */
+               mb();
+               writel_relaxed(0x0, addr + CORESIGHT_LAR);
+       } while (0);
+}
+
+/* Write the unlock key to LAR so management registers become writable. */
+static inline void CS_UNLOCK(void __iomem *addr)
+{
+       do {
+               writel_relaxed(CORESIGHT_UNLOCK, addr + CORESIGHT_LAR);
+               /* Make sure everyone has seen this */
+               mb();
+       } while (0);
+}
+
+#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
+extern int etm_readl_cp14(u32 off, unsigned int *val);
+extern int etm_writel_cp14(u32 off, u32 val);
+#else
+static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
+/* parameter order fixed to match the extern declaration above */
+static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
+#endif
+
+#endif
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/coresight/coresight-replicator.c
new file mode 100644 (file)
index 0000000..a2dfcf9
--- /dev/null
@@ -0,0 +1,137 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+
+#include "coresight-priv.h"
+
+/**
+ * struct replicator_drvdata - specifics associated to a replicator component
+ * @dev:       the device entity associated with this component
+ * @csdev:     component vitals needed by the framework
+ */
+struct replicator_drvdata {
+       struct device           *dev;
+       struct coresight_device *csdev;
+};
+
+/*
+ * replicator_enable - link-ops enable hook.  The non-programmable
+ * replicator duplicates its input unconditionally, so there is no
+ * hardware to touch; only log the event.  @inport/@outport unused.
+ */
+static int replicator_enable(struct coresight_device *csdev, int inport,
+                            int outport)
+{
+       struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       dev_info(drvdata->dev, "REPLICATOR enabled\n");
+       return 0;
+}
+
+/* link-ops disable hook: nothing to program, log only. */
+static void replicator_disable(struct coresight_device *csdev, int inport,
+                              int outport)
+{
+       struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       dev_info(drvdata->dev, "REPLICATOR disabled\n");
+}
+
+static const struct coresight_ops_link replicator_link_ops = {
+       .enable         = replicator_enable,
+       .disable        = replicator_disable,
+};
+
+static const struct coresight_ops replicator_cs_ops = {
+       .link_ops       = &replicator_link_ops,
+};
+
+/*
+ * replicator_probe - register a replicator with the coresight framework.
+ * Platform data (port connections) is built from the device tree when
+ * present; allocations are devm-managed.  The device is registered as
+ * a LINK with SPLIT subtype: one input duplicated to two outputs.
+ */
+static int replicator_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct replicator_drvdata *drvdata;
+       struct coresight_desc *desc;
+       struct device_node *np = pdev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               pdev->dev.platform_data = pdata;
+       }
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &pdev->dev;
+       platform_set_drvdata(pdev, drvdata);
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       desc->type = CORESIGHT_DEV_TYPE_LINK;
+       /* this is a LINK device: use the link member of the subtype union,
+        * not sink_subtype (same storage, but semantically wrong member) */
+       desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+       desc->ops = &replicator_cs_ops;
+       desc->pdata = pdev->dev.platform_data;
+       desc->dev = &pdev->dev;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev))
+               return PTR_ERR(drvdata->csdev);
+
+       dev_info(dev, "REPLICATOR initialized\n");
+       return 0;
+}
+
+/* Undo replicator_probe: devm frees the rest. */
+static int replicator_remove(struct platform_device *pdev)
+{
+       struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
+
+       coresight_unregister(drvdata->csdev);
+       return 0;
+}
+
+/* const: the driver core only reads the match table */
+static const struct of_device_id replicator_match[] = {
+       {.compatible = "arm,coresight-replicator"},
+       {}
+};
+
+static struct platform_driver replicator_driver = {
+       .probe          = replicator_probe,
+       .remove         = replicator_remove,
+       .driver         = {
+               .name   = "coresight-replicator",
+               .of_match_table = replicator_match,
+       },
+};
+
+static int __init replicator_init(void)
+{
+       return platform_driver_register(&replicator_driver);
+}
+module_init(replicator_init);
+
+static void __exit replicator_exit(void)
+{
+       platform_driver_unregister(&replicator_driver);
+}
+module_exit(replicator_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
new file mode 100644 (file)
index 0000000..ce2c293
--- /dev/null
@@ -0,0 +1,776 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define TMC_RSZ                        0x004
+#define TMC_STS                        0x00c
+#define TMC_RRD                        0x010
+#define TMC_RRP                        0x014
+#define TMC_RWP                        0x018
+#define TMC_TRG                        0x01c
+#define TMC_CTL                        0x020
+#define TMC_RWD                        0x024
+#define TMC_MODE               0x028
+#define TMC_LBUFLEVEL          0x02c
+#define TMC_CBUFLEVEL          0x030
+#define TMC_BUFWM              0x034
+#define TMC_RRPHI              0x038
+#define TMC_RWPHI              0x03c
+#define TMC_AXICTL             0x110
+#define TMC_DBALO              0x118
+#define TMC_DBAHI              0x11c
+#define TMC_FFSR               0x300
+#define TMC_FFCR               0x304
+#define TMC_PSCR               0x308
+#define TMC_ITMISCOP0          0xee0
+#define TMC_ITTRFLIN           0xee8
+#define TMC_ITATBDATA0         0xeec
+#define TMC_ITATBCTR2          0xef0
+#define TMC_ITATBCTR1          0xef4
+#define TMC_ITATBCTR0          0xef8
+
+/* register description */
+/* TMC_CTL - 0x020 */
+#define TMC_CTL_CAPT_EN                BIT(0)
+/* TMC_STS - 0x00C */
+#define TMC_STS_TRIGGERED      BIT(1)
+/* TMC_AXICTL - 0x110 */
+#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
+#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
+#define TMC_AXICTL_SCT_GAT_MODE        BIT(7)
+#define TMC_AXICTL_WR_BURST_LEN 0xF00
+/* TMC_FFCR - 0x304 */
+#define TMC_FFCR_EN_FMT                BIT(0)
+#define TMC_FFCR_EN_TI         BIT(1)
+#define TMC_FFCR_FON_FLIN      BIT(4)
+#define TMC_FFCR_FON_TRIG_EVT  BIT(5)
+#define TMC_FFCR_FLUSHMAN      BIT(6)
+#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
+#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
+
+#define TMC_STS_TRIGGERED_BIT  2
+#define TMC_FFCR_FLUSHMAN_BIT  6
+
+enum tmc_config_type {
+       TMC_CONFIG_TYPE_ETB,
+       TMC_CONFIG_TYPE_ETR,
+       TMC_CONFIG_TYPE_ETF,
+};
+
+enum tmc_mode {
+       TMC_MODE_CIRCULAR_BUFFER,
+       TMC_MODE_SOFTWARE_FIFO,
+       TMC_MODE_HARDWARE_FIFO,
+};
+
+enum tmc_mem_intf_width {
+       TMC_MEM_INTF_WIDTH_32BITS       = 0x2,
+       TMC_MEM_INTF_WIDTH_64BITS       = 0x3,
+       TMC_MEM_INTF_WIDTH_128BITS      = 0x4,
+       TMC_MEM_INTF_WIDTH_256BITS      = 0x5,
+};
+
+/**
+ * struct tmc_drvdata - specifics associated to an TMC component
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @miscdev:   specifics to handle "/dev/xyz.tmc" entry.
+ * @clk:       the clock this component is associated to.
+ * @spinlock:  only one at a time pls.
+ * @read_count:        manages preparation of buffer for reading.
+ * @reading:   true while user space is consuming the trace buffer.
+ * @buf:       area of memory where trace data get sent.
+ * @paddr:     DMA start location in RAM.
+ * @vaddr:     virtual representation of @paddr.
+ * @size:      @buf size.
+ * @enable:    this TMC is being used.
+ * @config_type: TMC variant, must be of type @tmc_config_type.
+ * @trigger_cntr: amount of words to store after a trigger.
+ */
+struct tmc_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct coresight_device *csdev;
+       struct miscdevice       miscdev;
+       struct clk              *clk;
+       spinlock_t              spinlock;
+       int                     read_count;
+       bool                    reading;
+       char                    *buf;
+       dma_addr_t              paddr;
+       void __iomem            *vaddr;
+       u32                     size;
+       bool                    enable;
+       enum tmc_config_type    config_type;
+       u32                     trigger_cntr;
+};
+
+/*
+ * Poll bit TMC_STS_TRIGGERED_BIT (bit 2) of TMC_STS until it reads 1.
+ * NOTE(review): the macro name says "TRIGGERED" but TMC_STS_TRIGGERED
+ * is BIT(1) while this polls bit 2 — presumably the TMCReady flag;
+ * confirm against the TMC TRM and consider renaming the macro.
+ */
+static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
+{
+       /* Ensure formatter, unformatter and hardware fifo are empty */
+       if (coresight_timeout(drvdata->base,
+                             TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n",
+                       TMC_STS);
+       }
+}
+
+/*
+ * Request a manual flush with StopOnFl set, wait for the FlushMan bit
+ * to self-clear, then wait for the TMC to drain completely.
+ */
+static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
+{
+       u32 ffcr;
+
+       ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
+       ffcr |= TMC_FFCR_STOP_ON_FLUSH;
+       writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+       ffcr |= TMC_FFCR_FLUSHMAN;
+       writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
+       /* Ensure flush completes */
+       if (coresight_timeout(drvdata->base,
+                             TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
+               dev_err(drvdata->dev,
+                       "timeout observed when probing at offset %#x\n",
+                       TMC_FFCR);
+       }
+
+       tmc_wait_for_ready(drvdata);
+}
+
+/* Start trace capture (CTL.TraceCaptEn). Caller must hold CS_UNLOCK. */
+static void tmc_enable_hw(struct tmc_drvdata *drvdata)
+{
+       writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
+}
+
+/* Stop trace capture. Caller must hold CS_UNLOCK. */
+static void tmc_disable_hw(struct tmc_drvdata *drvdata)
+{
+       writel_relaxed(0x0, drvdata->base + TMC_CTL);
+}
+
+/* Configure the TMC as an ETB: circular buffer mode, formatter and
+ * trigger/flush events enabled, then start capture. */
+static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
+{
+       /* Zero out the memory to help with debug */
+       memset(drvdata->buf, 0, drvdata->size);
+
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+                      TMC_FFCR_TRIGON_TRIGIN,
+                      drvdata->base + TMC_FFCR);
+
+       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/* Configure the TMC as an ETR: program buffer size (in 32-bit words),
+ * AXI burst/protection control and the DMA base address, then start. */
+static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
+{
+       u32 axictl;
+
+       /* Zero out the memory to help with debug */
+       memset(drvdata->vaddr, 0, drvdata->size);
+
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
+       writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
+
+       /* NOTE(review): AXICTL is written three times back to back; the
+        * intermediate writes look collapsible into one — confirm there
+        * is no hardware ordering requirement before simplifying. */
+       axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
+       axictl |= TMC_AXICTL_WR_BURST_LEN;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+       axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+       axictl = (axictl &
+                 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
+                 TMC_AXICTL_PROT_CTL_B1;
+       writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
+
+       /* DBAHI is forced to 0: the DMA buffer must live below 4GB */
+       writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
+       writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
+                      TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
+                      TMC_FFCR_TRIGON_TRIGIN,
+                      drvdata->base + TMC_FFCR);
+       writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/* Configure the TMC as an ETF: hardware FIFO mode (link), no watermark. */
+static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
+       writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
+                      drvdata->base + TMC_FFCR);
+       writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
+       tmc_enable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * tmc_enable - common enable path for all TMC configurations.
+ * Takes the clock, then under the spinlock refuses to start while the
+ * buffer is being read (-EBUSY) and dispatches to the ETB/ETR/ETF
+ * programming routine depending on @config_type (ETF honours @mode).
+ */
+static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+{
+       int ret;
+       unsigned long flags;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading) {
+               spin_unlock_irqrestore(&drvdata->spinlock, flags);
+               clk_disable_unprepare(drvdata->clk);
+               return -EBUSY;
+       }
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+               tmc_etb_enable_hw(drvdata);
+       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               tmc_etr_enable_hw(drvdata);
+       } else {
+               if (mode == TMC_MODE_CIRCULAR_BUFFER)
+                       tmc_etb_enable_hw(drvdata);
+               else
+                       tmc_etf_enable_hw(drvdata);
+       }
+       drvdata->enable = true;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC enabled\n");
+       return 0;
+}
+
+/* sink-ops enable: used when the TMC terminates the trace path. */
+static int tmc_enable_sink(struct coresight_device *csdev)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
+}
+
+/* link-ops enable: ETF in FIFO mode passing trace through. */
+static int tmc_enable_link(struct coresight_device *csdev, int inport,
+                          int outport)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
+}
+
+/*
+ * tmc_etb_dump_hw - drain the ETB RAM into drvdata->buf.
+ * Reads TMC_RRD in groups of @memwords (derived from the memory
+ * interface width in DEVID[10:8]) until the 0xFFFFFFFF "empty" marker
+ * is returned.  A bound check against drvdata->size guards the copy:
+ * without it a device that never returns the marker would overrun the
+ * capture buffer.
+ */
+static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
+{
+       enum tmc_mem_intf_width memwidth;
+       u8 memwords;
+       char *bufp;
+       u32 read_data;
+       int i;
+
+       memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
+       if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
+               memwords = 1;
+       else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
+               memwords = 2;
+       else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
+               memwords = 4;
+       else
+               memwords = 8;
+
+       bufp = drvdata->buf;
+       while (1) {
+               for (i = 0; i < memwords; i++) {
+                       read_data = readl_relaxed(drvdata->base + TMC_RRD);
+                       if (read_data == 0xFFFFFFFF)
+                               return;
+                       /* never write past the allocated capture buffer */
+                       if (bufp + 4 > drvdata->buf + drvdata->size)
+                               return;
+                       memcpy(bufp, &read_data, 4);
+                       bufp += 4;
+               }
+       }
+}
+
+/* ETB stop path: flush, copy the RAM contents out, then halt capture. */
+static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       tmc_etb_dump_hw(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * Work out where valid data starts in the ETR DMA buffer.  When STS
+ * bit 0 is set (presumably the Full flag — confirm against the TMC
+ * TRM) the buffer has wrapped and the oldest data sits at the write
+ * pointer; otherwise data starts at the beginning of the buffer.
+ */
+static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
+{
+       u32 rwp, val;
+
+       rwp = readl_relaxed(drvdata->base + TMC_RWP);
+       val = readl_relaxed(drvdata->base + TMC_STS);
+
+       /* How much memory do we still have */
+       if (val & BIT(0))
+               drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
+       else
+               drvdata->buf = drvdata->vaddr;
+}
+
+/* ETR stop path: flush, locate the data window, then halt capture. */
+static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       tmc_etr_dump_hw(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/* ETF stop path: nothing to dump, the FIFO only passes trace through. */
+static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       tmc_flush_and_stop(drvdata);
+       tmc_disable_hw(drvdata);
+
+       CS_LOCK(drvdata->base);
+}
+
+/*
+ * tmc_disable - common disable path for all TMC configurations.
+ * When a read is in progress the hardware was already stopped by
+ * tmc_read_prepare(), so only the bookkeeping is updated; otherwise
+ * the configuration-specific stop routine runs.  The clock reference
+ * taken by tmc_enable() is dropped either way.
+ */
+static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (drvdata->reading)
+               goto out;
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+               tmc_etb_disable_hw(drvdata);
+       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               tmc_etr_disable_hw(drvdata);
+       } else {
+               if (mode == TMC_MODE_CIRCULAR_BUFFER)
+                       tmc_etb_disable_hw(drvdata);
+               else
+                       tmc_etf_disable_hw(drvdata);
+       }
+out:
+       drvdata->enable = false;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       dev_info(drvdata->dev, "TMC disabled\n");
+}
+
+/* sink-ops disable counterpart of tmc_enable_sink(). */
+static void tmc_disable_sink(struct coresight_device *csdev)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
+}
+
+/* link-ops disable counterpart of tmc_enable_link(). */
+static void tmc_disable_link(struct coresight_device *csdev, int inport,
+                            int outport)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
+}
+
+static const struct coresight_ops_sink tmc_sink_ops = {
+       .enable         = tmc_enable_sink,
+       .disable        = tmc_disable_sink,
+};
+
+static const struct coresight_ops_link tmc_link_ops = {
+       .enable         = tmc_enable_link,
+       .disable        = tmc_disable_link,
+};
+
+static const struct coresight_ops tmc_etb_cs_ops = {
+       .sink_ops       = &tmc_sink_ops,
+};
+
+static const struct coresight_ops tmc_etr_cs_ops = {
+       .sink_ops       = &tmc_sink_ops,
+};
+
+static const struct coresight_ops tmc_etf_cs_ops = {
+       .sink_ops       = &tmc_sink_ops,
+       .link_ops       = &tmc_link_ops,
+};
+
+/*
+ * tmc_read_prepare - stop capture so user space can read a stable
+ * buffer snapshot.  If the TMC is not enabled there is nothing to
+ * stop.  An ETF running in FIFO mode has no buffer to expose, hence
+ * -ENODEV.  Sets @reading so enable/disable know a read is in flight.
+ */
+static int tmc_read_prepare(struct tmc_drvdata *drvdata)
+{
+       int ret;
+       unsigned long flags;
+       enum tmc_mode mode;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (!drvdata->enable)
+               goto out;
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+               tmc_etb_disable_hw(drvdata);
+       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               tmc_etr_disable_hw(drvdata);
+       } else {
+               /* ETF: only readable when configured as a circular buffer */
+               mode = readl_relaxed(drvdata->base + TMC_MODE);
+               if (mode == TMC_MODE_CIRCULAR_BUFFER) {
+                       tmc_etb_disable_hw(drvdata);
+               } else {
+                       ret = -ENODEV;
+                       goto err;
+               }
+       }
+out:
+       drvdata->reading = true;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC read start\n");
+       return 0;
+err:
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+       return ret;
+}
+
+/*
+ * tmc_read_unprepare - resume capture after the last reader is gone,
+ * mirroring tmc_read_prepare().  Clears @reading in all cases.
+ */
+static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
+{
+       unsigned long flags;
+       enum tmc_mode mode;
+
+       spin_lock_irqsave(&drvdata->spinlock, flags);
+       if (!drvdata->enable)
+               goto out;
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+               tmc_etb_enable_hw(drvdata);
+       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               tmc_etr_enable_hw(drvdata);
+       } else {
+               mode = readl_relaxed(drvdata->base + TMC_MODE);
+               if (mode == TMC_MODE_CIRCULAR_BUFFER)
+                       tmc_etb_enable_hw(drvdata);
+       }
+out:
+       drvdata->reading = false;
+       spin_unlock_irqrestore(&drvdata->spinlock, flags);
+
+       dev_info(drvdata->dev, "TMC read end\n");
+}
+
+/*
+ * tmc_open - the first opener stops capture and snapshots the trace
+ * buffer; concurrent openers share that snapshot via read_count.
+ * On prepare failure the count is rolled back so a later open retries
+ * and a later release does not unprepare a buffer never prepared.
+ */
+static int tmc_open(struct inode *inode, struct file *file)
+{
+       struct tmc_drvdata *drvdata = container_of(file->private_data,
+                                                  struct tmc_drvdata, miscdev);
+       int ret = 0;
+
+       if (drvdata->read_count++)
+               goto out;
+
+       ret = tmc_read_prepare(drvdata);
+       if (ret) {
+               drvdata->read_count--;
+               return ret;
+       }
+out:
+       nonseekable_open(inode, file);
+
+       dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
+       return 0;
+}
+
+/*
+ * tmc_read - copy trace data to user space.
+ * @len is clamped to what remains of the buffer.  For an ETR the
+ * snapshot may start mid-buffer (wrap), so the read pointer is wrapped
+ * back into [vaddr, vaddr + size) and a read never crosses the end of
+ * the DMA buffer in one call.  Returns bytes copied, 0 at EOF or
+ * -EFAULT if the user buffer is bad.
+ */
+static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
+                       loff_t *ppos)
+{
+       struct tmc_drvdata *drvdata = container_of(file->private_data,
+                                                  struct tmc_drvdata, miscdev);
+       char *bufp = drvdata->buf + *ppos;
+
+       if (*ppos + len > drvdata->size)
+               len = drvdata->size - *ppos;
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               if (bufp == (char *)(drvdata->vaddr + drvdata->size))
+                       bufp = drvdata->vaddr;
+               else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
+                       bufp -= drvdata->size;
+               if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
+                       len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
+       }
+
+       if (copy_to_user(data, bufp, len)) {
+               dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
+               return -EFAULT;
+       }
+
+       *ppos += len;
+
+       /* %zu: len is size_t — %d is undefined for it on 64-bit builds */
+       dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
+               __func__, len, (int) (drvdata->size - *ppos));
+       return len;
+}
+
+static int tmc_release(struct inode *inode, struct file *file)
+{
+       struct tmc_drvdata *drvdata = container_of(file->private_data,
+                                                  struct tmc_drvdata, miscdev);
+
+       if (--drvdata->read_count) {
+               if (drvdata->read_count < 0) {
+                       dev_err(drvdata->dev, "mismatched close\n");
+                       drvdata->read_count = 0;
+               }
+               goto out;
+       }
+
+       tmc_read_unprepare(drvdata);
+out:
+       dev_dbg(drvdata->dev, "%s: released\n", __func__);
+       return 0;
+}
+
+static const struct file_operations tmc_fops = {
+       .owner          = THIS_MODULE,
+       .open           = tmc_open,
+       .read           = tmc_read,
+       .release        = tmc_release,
+       .llseek         = no_llseek,
+};
+
+static ssize_t trigger_cntr_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+       unsigned long val = drvdata->trigger_cntr;
+
+       return sprintf(buf, "%#lx\n", val);
+}
+
+static ssize_t trigger_cntr_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
+
+       ret = kstrtoul(buf, 16, &val);
+       if (ret)
+               return ret;
+
+       drvdata->trigger_cntr = val;
+       return size;
+}
+static DEVICE_ATTR_RW(trigger_cntr);
+
+static struct attribute *coresight_etb_attrs[] = {
+       &dev_attr_trigger_cntr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etb);
+
+static struct attribute *coresight_etr_attrs[] = {
+       &dev_attr_trigger_cntr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etr);
+
+static struct attribute *coresight_etf_attrs[] = {
+       &dev_attr_trigger_cntr.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_etf);
+
+static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       int ret = 0;
+       u32 devid;
+       void __iomem *base;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct tmc_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               adev->dev.platform_data = pdata;
+       }
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &adev->dev;
+       dev_set_drvdata(dev, drvdata);
+
+       /* Validity for the resource is already checked by the AMBA core */
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       drvdata->base = base;
+
+       spin_lock_init(&drvdata->spinlock);
+
+       drvdata->clk = adev->pclk;
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
+       drvdata->config_type = BMVAL(devid, 6, 7);
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               if (np)
+                       ret = of_property_read_u32(np,
+                                                  "arm,buffer-size",
+                                                  &drvdata->size);
+               if (ret)
+                       drvdata->size = SZ_1M;
+       } else {
+               drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
+       }
+
+       clk_disable_unprepare(drvdata->clk);
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
+                                               &drvdata->paddr, GFP_KERNEL);
+               if (!drvdata->vaddr)
+                       return -ENOMEM;
+
+               memset(drvdata->vaddr, 0, drvdata->size);
+               drvdata->buf = drvdata->vaddr;
+       } else {
+               drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
+               if (!drvdata->buf)
+                       return -ENOMEM;
+       }
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc) {
+               ret = -ENOMEM;
+               goto err_devm_kzalloc;
+       }
+
+       desc->pdata = pdata;
+       desc->dev = dev;
+       desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
+
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
+               desc->type = CORESIGHT_DEV_TYPE_SINK;
+               desc->ops = &tmc_etb_cs_ops;
+               desc->groups = coresight_etb_groups;
+       } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
+               desc->type = CORESIGHT_DEV_TYPE_SINK;
+               desc->ops = &tmc_etr_cs_ops;
+               desc->groups = coresight_etr_groups;
+       } else {
+               desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
+               desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
+               desc->ops = &tmc_etf_cs_ops;
+               desc->groups = coresight_etf_groups;
+       }
+
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev)) {
+               ret = PTR_ERR(drvdata->csdev);
+               goto err_devm_kzalloc;
+       }
+
+       drvdata->miscdev.name = pdata->name;
+       drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
+       drvdata->miscdev.fops = &tmc_fops;
+       ret = misc_register(&drvdata->miscdev);
+       if (ret)
+               goto err_misc_register;
+
+       dev_info(dev, "TMC initialized\n");
+       return 0;
+
+err_misc_register:
+       coresight_unregister(drvdata->csdev);
+err_devm_kzalloc:
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+               dma_free_coherent(dev, drvdata->size,
+                               &drvdata->paddr, GFP_KERNEL);
+       return ret;
+}
+
+static int tmc_remove(struct amba_device *adev)
+{
+       struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
+
+       misc_deregister(&drvdata->miscdev);
+       coresight_unregister(drvdata->csdev);
+       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
+               dma_free_coherent(drvdata->dev, drvdata->size,
+                                 &drvdata->paddr, GFP_KERNEL);
+
+       return 0;
+}
+
+static struct amba_id tmc_ids[] = {
+       {
+               .id     = 0x0003b961,
+               .mask   = 0x0003ffff,
+       },
+       { 0, 0},
+};
+
+static struct amba_driver tmc_driver = {
+       .drv = {
+               .name   = "coresight-tmc",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = tmc_probe,
+       .remove         = tmc_remove,
+       .id_table       = tmc_ids,
+};
+
+static int __init tmc_init(void)
+{
+       return amba_driver_register(&tmc_driver);
+}
+module_init(tmc_init);
+
+static void __exit tmc_exit(void)
+{
+       amba_driver_unregister(&tmc_driver);
+}
+module_exit(tmc_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
new file mode 100644 (file)
index 0000000..ae10108
--- /dev/null
@@ -0,0 +1,217 @@
+/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/amba/bus.h>
+
+#include "coresight-priv.h"
+
+#define TPIU_SUPP_PORTSZ       0x000
+#define TPIU_CURR_PORTSZ       0x004
+#define TPIU_SUPP_TRIGMODES    0x100
+#define TPIU_TRIG_CNTRVAL      0x104
+#define TPIU_TRIG_MULT         0x108
+#define TPIU_SUPP_TESTPATM     0x200
+#define TPIU_CURR_TESTPATM     0x204
+#define TPIU_TEST_PATREPCNTR   0x208
+#define TPIU_FFSR              0x300
+#define TPIU_FFCR              0x304
+#define TPIU_FSYNC_CNTR                0x308
+#define TPIU_EXTCTL_INPORT     0x400
+#define TPIU_EXTCTL_OUTPORT    0x404
+#define TPIU_ITTRFLINACK       0xee4
+#define TPIU_ITTRFLIN          0xee8
+#define TPIU_ITATBDATA0                0xeec
+#define TPIU_ITATBCTR2         0xef0
+#define TPIU_ITATBCTR1         0xef4
+#define TPIU_ITATBCTR0         0xef8
+
+/** register definition **/
+/* FFCR - 0x304 */
+#define FFCR_FON_MAN           BIT(6)
+
+/**
+ * @base:      memory mapped base address for this component.
+ * @dev:       the device entity associated to this component.
+ * @csdev:     component vitals needed by the framework.
+ * @clk:       the clock this component is associated to.
+ */
+struct tpiu_drvdata {
+       void __iomem            *base;
+       struct device           *dev;
+       struct coresight_device *csdev;
+       struct clk              *clk;
+};
+
+static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       /* TODO: fill this up */
+
+       CS_LOCK(drvdata->base);
+}
+
+static int tpiu_enable(struct coresight_device *csdev)
+{
+       struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+       int ret;
+
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       tpiu_enable_hw(drvdata);
+
+       dev_info(drvdata->dev, "TPIU enabled\n");
+       return 0;
+}
+
+static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
+{
+       CS_UNLOCK(drvdata->base);
+
+       /* Clear formatter control register. */
+       writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
+       /* Generate manual flush */
+       writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
+
+       CS_LOCK(drvdata->base);
+}
+
+static void tpiu_disable(struct coresight_device *csdev)
+{
+       struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
+
+       tpiu_disable_hw(drvdata);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       dev_info(drvdata->dev, "TPIU disabled\n");
+}
+
+static const struct coresight_ops_sink tpiu_sink_ops = {
+       .enable         = tpiu_enable,
+       .disable        = tpiu_disable,
+};
+
+static const struct coresight_ops tpiu_cs_ops = {
+       .sink_ops       = &tpiu_sink_ops,
+};
+
+static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
+{
+       int ret;
+       void __iomem *base;
+       struct device *dev = &adev->dev;
+       struct coresight_platform_data *pdata = NULL;
+       struct tpiu_drvdata *drvdata;
+       struct resource *res = &adev->res;
+       struct coresight_desc *desc;
+       struct device_node *np = adev->dev.of_node;
+
+       if (np) {
+               pdata = of_get_coresight_platform_data(dev, np);
+               if (IS_ERR(pdata))
+                       return PTR_ERR(pdata);
+               adev->dev.platform_data = pdata;
+       }
+
+       drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
+       if (!drvdata)
+               return -ENOMEM;
+
+       drvdata->dev = &adev->dev;
+       dev_set_drvdata(dev, drvdata);
+
+       /* Validity for the resource is already checked by the AMBA core */
+       base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(base))
+               return PTR_ERR(base);
+
+       drvdata->base = base;
+
+       drvdata->clk = adev->pclk;
+       ret = clk_prepare_enable(drvdata->clk);
+       if (ret)
+               return ret;
+
+       /* Disable tpiu to support older devices */
+       tpiu_disable_hw(drvdata);
+
+       clk_disable_unprepare(drvdata->clk);
+
+       desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       desc->type = CORESIGHT_DEV_TYPE_SINK;
+       desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
+       desc->ops = &tpiu_cs_ops;
+       desc->pdata = pdata;
+       desc->dev = dev;
+       drvdata->csdev = coresight_register(desc);
+       if (IS_ERR(drvdata->csdev))
+               return PTR_ERR(drvdata->csdev);
+
+       dev_info(dev, "TPIU initialized\n");
+       return 0;
+}
+
+static int tpiu_remove(struct amba_device *adev)
+{
+       struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
+
+       coresight_unregister(drvdata->csdev);
+       return 0;
+}
+
+static struct amba_id tpiu_ids[] = {
+       {
+               .id     = 0x0003b912,
+               .mask   = 0x0003ffff,
+       },
+       { 0, 0},
+};
+
+static struct amba_driver tpiu_driver = {
+       .drv = {
+               .name   = "coresight-tpiu",
+               .owner  = THIS_MODULE,
+       },
+       .probe          = tpiu_probe,
+       .remove         = tpiu_remove,
+       .id_table       = tpiu_ids,
+};
+
+static int __init tpiu_init(void)
+{
+       return amba_driver_register(&tpiu_driver);
+}
+module_init(tpiu_init);
+
+static void __exit tpiu_exit(void)
+{
+       amba_driver_unregister(&tpiu_driver);
+}
+module_exit(tpiu_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
new file mode 100644 (file)
index 0000000..6e0181f
--- /dev/null
@@ -0,0 +1,717 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/clk.h>
+#include <linux/coresight.h>
+#include <linux/of_platform.h>
+#include <linux/delay.h>
+
+#include "coresight-priv.h"
+
+static DEFINE_MUTEX(coresight_mutex);
+
+static int coresight_id_match(struct device *dev, void *data)
+{
+       int trace_id, i_trace_id;
+       struct coresight_device *csdev, *i_csdev;
+
+       csdev = data;
+       i_csdev = to_coresight_device(dev);
+
+       /*
+        * No need to care about oneself and components that are not
+        * sources or not enabled
+        */
+       if (i_csdev == csdev || !i_csdev->enable ||
+           i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
+               return 0;
+
+       /* Get the source ID for both components */
+       trace_id = source_ops(csdev)->trace_id(csdev);
+       i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
+
+       /* All you need is one */
+       if (trace_id == i_trace_id)
+               return 1;
+
+       return 0;
+}
+
+static int coresight_source_is_unique(struct coresight_device *csdev)
+{
+       int trace_id = source_ops(csdev)->trace_id(csdev);
+
+       /* this shouldn't happen */
+       if (trace_id < 0)
+               return 0;
+
+       return !bus_for_each_dev(&coresight_bustype, NULL,
+                                csdev, coresight_id_match);
+}
+
+static int coresight_find_link_inport(struct coresight_device *csdev)
+{
+       int i;
+       struct coresight_device *parent;
+       struct coresight_connection *conn;
+
+       parent = container_of(csdev->path_link.next,
+                             struct coresight_device, path_link);
+
+       for (i = 0; i < parent->nr_outport; i++) {
+               conn = &parent->conns[i];
+               if (conn->child_dev == csdev)
+                       return conn->child_port;
+       }
+
+       dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
+               dev_name(&parent->dev), dev_name(&csdev->dev));
+
+       return 0;
+}
+
+static int coresight_find_link_outport(struct coresight_device *csdev)
+{
+       int i;
+       struct coresight_device *child;
+       struct coresight_connection *conn;
+
+       child = container_of(csdev->path_link.prev,
+                            struct coresight_device, path_link);
+
+       for (i = 0; i < csdev->nr_outport; i++) {
+               conn = &csdev->conns[i];
+               if (conn->child_dev == child)
+                       return conn->outport;
+       }
+
+       dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
+               dev_name(&csdev->dev), dev_name(&child->dev));
+
+       return 0;
+}
+
+static int coresight_enable_sink(struct coresight_device *csdev)
+{
+       int ret;
+
+       if (!csdev->enable) {
+               if (sink_ops(csdev)->enable) {
+                       ret = sink_ops(csdev)->enable(csdev);
+                       if (ret)
+                               return ret;
+               }
+               csdev->enable = true;
+       }
+
+       atomic_inc(csdev->refcnt);
+
+       return 0;
+}
+
+static void coresight_disable_sink(struct coresight_device *csdev)
+{
+       if (atomic_dec_return(csdev->refcnt) == 0) {
+               if (sink_ops(csdev)->disable) {
+                       sink_ops(csdev)->disable(csdev);
+                       csdev->enable = false;
+               }
+       }
+}
+
+static int coresight_enable_link(struct coresight_device *csdev)
+{
+       int ret;
+       int link_subtype;
+       int refport, inport, outport;
+
+       inport = coresight_find_link_inport(csdev);
+       outport = coresight_find_link_outport(csdev);
+       link_subtype = csdev->subtype.link_subtype;
+
+       if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+               refport = inport;
+       else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+               refport = outport;
+       else
+               refport = 0;
+
+       if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
+               if (link_ops(csdev)->enable) {
+                       ret = link_ops(csdev)->enable(csdev, inport, outport);
+                       if (ret)
+                               return ret;
+               }
+       }
+
+       csdev->enable = true;
+
+       return 0;
+}
+
+static void coresight_disable_link(struct coresight_device *csdev)
+{
+       int i, nr_conns;
+       int link_subtype;
+       int refport, inport, outport;
+
+       inport = coresight_find_link_inport(csdev);
+       outport = coresight_find_link_outport(csdev);
+       link_subtype = csdev->subtype.link_subtype;
+
+       if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
+               refport = inport;
+               nr_conns = csdev->nr_inport;
+       } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
+               refport = outport;
+               nr_conns = csdev->nr_outport;
+       } else {
+               refport = 0;
+               nr_conns = 1;
+       }
+
+       if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
+               if (link_ops(csdev)->disable)
+                       link_ops(csdev)->disable(csdev, inport, outport);
+       }
+
+       for (i = 0; i < nr_conns; i++)
+               if (atomic_read(&csdev->refcnt[i]) != 0)
+                       return;
+
+       csdev->enable = false;
+}
+
+static int coresight_enable_source(struct coresight_device *csdev)
+{
+       int ret;
+
+       if (!coresight_source_is_unique(csdev)) {
+               dev_warn(&csdev->dev, "traceID %d not unique\n",
+                        source_ops(csdev)->trace_id(csdev));
+               return -EINVAL;
+       }
+
+       if (!csdev->enable) {
+               if (source_ops(csdev)->enable) {
+                       ret = source_ops(csdev)->enable(csdev);
+                       if (ret)
+                               return ret;
+               }
+               csdev->enable = true;
+       }
+
+       atomic_inc(csdev->refcnt);
+
+       return 0;
+}
+
+static void coresight_disable_source(struct coresight_device *csdev)
+{
+       if (atomic_dec_return(csdev->refcnt) == 0) {
+               if (source_ops(csdev)->disable) {
+                       source_ops(csdev)->disable(csdev);
+                       csdev->enable = false;
+               }
+       }
+}
+
+static int coresight_enable_path(struct list_head *path)
+{
+       int ret = 0;
+       struct coresight_device *cd;
+
+       list_for_each_entry(cd, path, path_link) {
+               if (cd == list_first_entry(path, struct coresight_device,
+                                          path_link)) {
+                       ret = coresight_enable_sink(cd);
+               } else if (list_is_last(&cd->path_link, path)) {
+                       /*
+                        * Don't enable the source just yet - this needs to
+                        * happen at the very end when all links and sink
+                        * along the path have been configured properly.
+                        */
+                       ;
+               } else {
+                       ret = coresight_enable_link(cd);
+               }
+               if (ret)
+                       goto err;
+       }
+
+       return 0;
+err:
+       list_for_each_entry_continue_reverse(cd, path, path_link) {
+               if (cd == list_first_entry(path, struct coresight_device,
+                                          path_link)) {
+                       coresight_disable_sink(cd);
+               } else if (list_is_last(&cd->path_link, path)) {
+                       ;
+               } else {
+                       coresight_disable_link(cd);
+               }
+       }
+
+       return ret;
+}
+
+static int coresight_disable_path(struct list_head *path)
+{
+       struct coresight_device *cd;
+
+       list_for_each_entry_reverse(cd, path, path_link) {
+               if (cd == list_first_entry(path, struct coresight_device,
+                                          path_link)) {
+                       coresight_disable_sink(cd);
+               } else if (list_is_last(&cd->path_link, path)) {
+                       /*
+                        * The source has already been stopped, no need
+                        * to do it again here.
+                        */
+                       ;
+               } else {
+                       coresight_disable_link(cd);
+               }
+       }
+
+       return 0;
+}
+
+static int coresight_build_paths(struct coresight_device *csdev,
+                                struct list_head *path,
+                                bool enable)
+{
+       int i, ret = -EINVAL;
+       struct coresight_connection *conn;
+
+       list_add(&csdev->path_link, path);
+
+       if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) {
+               if (enable)
+                       ret = coresight_enable_path(path);
+               else
+                       ret = coresight_disable_path(path);
+       } else {
+               for (i = 0; i < csdev->nr_outport; i++) {
+                       conn = &csdev->conns[i];
+                       if (coresight_build_paths(conn->child_dev,
+                                                   path, enable) == 0)
+                               ret = 0;
+               }
+       }
+
+       if (list_first_entry(path, struct coresight_device, path_link) != csdev)
+               dev_err(&csdev->dev, "wrong device in %s\n", __func__);
+
+       list_del(&csdev->path_link);
+
+       return ret;
+}
+
+int coresight_enable(struct coresight_device *csdev)
+{
+       int ret = 0;
+       LIST_HEAD(path);
+
+       mutex_lock(&coresight_mutex);
+       if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+               ret = -EINVAL;
+               dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+               goto out;
+       }
+       if (csdev->enable)
+               goto out;
+
+       if (coresight_build_paths(csdev, &path, true)) {
+               dev_err(&csdev->dev, "building path(s) failed\n");
+               goto out;
+       }
+
+       if (coresight_enable_source(csdev))
+               dev_err(&csdev->dev, "source enable failed\n");
+out:
+       mutex_unlock(&coresight_mutex);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(coresight_enable);
+
+void coresight_disable(struct coresight_device *csdev)
+{
+       LIST_HEAD(path);
+
+       mutex_lock(&coresight_mutex);
+       if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
+               dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
+               goto out;
+       }
+       if (!csdev->enable)
+               goto out;
+
+       coresight_disable_source(csdev);
+       if (coresight_build_paths(csdev, &path, false))
+               dev_err(&csdev->dev, "releasing path(s) failed\n");
+
+out:
+       mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_disable);
+
+static ssize_t enable_sink_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct coresight_device *csdev = to_coresight_device(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
+}
+
+static ssize_t enable_sink_store(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t size)
+{
+       int ret;
+       unsigned long val;
+       struct coresight_device *csdev = to_coresight_device(dev);
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       if (val)
+               csdev->activated = true;
+       else
+               csdev->activated = false;
+
+       return size;
+
+}
+static DEVICE_ATTR_RW(enable_sink);
+
+static ssize_t enable_source_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct coresight_device *csdev = to_coresight_device(dev);
+
+       return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
+}
+
+static ssize_t enable_source_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+       int ret = 0;
+       unsigned long val;
+       struct coresight_device *csdev = to_coresight_device(dev);
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret)
+               return ret;
+
+       if (val) {
+               ret = coresight_enable(csdev);
+               if (ret)
+                       return ret;
+       } else {
+               coresight_disable(csdev);
+       }
+
+       return size;
+}
+static DEVICE_ATTR_RW(enable_source);
+
+static struct attribute *coresight_sink_attrs[] = {
+       &dev_attr_enable_sink.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_sink);
+
+static struct attribute *coresight_source_attrs[] = {
+       &dev_attr_enable_source.attr,
+       NULL,
+};
+ATTRIBUTE_GROUPS(coresight_source);
+
+static struct device_type coresight_dev_type[] = {
+       {
+               .name = "none",
+       },
+       {
+               .name = "sink",
+               .groups = coresight_sink_groups,
+       },
+       {
+               .name = "link",
+       },
+       {
+               .name = "linksink",
+               .groups = coresight_sink_groups,
+       },
+       {
+               .name = "source",
+               .groups = coresight_source_groups,
+       },
+};
+
+static void coresight_device_release(struct device *dev)
+{
+       struct coresight_device *csdev = to_coresight_device(dev);
+
+       kfree(csdev);
+}
+
+static int coresight_orphan_match(struct device *dev, void *data)
+{
+       int i;
+       bool still_orphan = false;
+       struct coresight_device *csdev, *i_csdev;
+       struct coresight_connection *conn;
+
+       csdev = data;
+       i_csdev = to_coresight_device(dev);
+
+       /* No need to check oneself */
+       if (csdev == i_csdev)
+               return 0;
+
+       /* Move on to another component if no connection is orphan */
+       if (!i_csdev->orphan)
+               return 0;
+       /*
+        * Cycle through all the connections of that component.  If we find
+        * an orphan connection whose name matches @csdev, link it.
+        */
+       for (i = 0; i < i_csdev->nr_outport; i++)       {
+               conn = &i_csdev->conns[i];
+
+               /* We have found at least one orphan connection */
+               if (conn->child_dev == NULL) {
+                       /* Does it match this newly added device? */
+                       if (!strcmp(dev_name(&csdev->dev), conn->child_name))
+                               conn->child_dev = csdev;
+               } else {
+                       /* Too bad, this component still has an orphan */
+                       still_orphan = true;
+               }
+       }
+
+       i_csdev->orphan = still_orphan;
+
+       /*
+        * Returning '0' ensures that all known components on the
+        * bus will be checked.
+        */
+       return 0;
+}
+
+static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
+{
+       /*
+        * No need to check for a return value as orphan connection(s)
+        * are hooked-up with each newly added component.
+        */
+       bus_for_each_dev(&coresight_bustype, NULL,
+                                csdev, coresight_orphan_match);
+}
+
+
+static int coresight_name_match(struct device *dev, void *data)
+{
+       char *to_match;
+       struct coresight_device *i_csdev;
+
+       to_match = data;
+       i_csdev = to_coresight_device(dev);
+
+       if (!strcmp(to_match, dev_name(&i_csdev->dev)))
+               return 1;
+
+       return 0;
+}
+
+static void coresight_fixup_device_conns(struct coresight_device *csdev)
+{
+       int i;
+       struct device *dev = NULL;
+       struct coresight_connection *conn;
+
+       for (i = 0; i < csdev->nr_outport; i++) {
+               conn = &csdev->conns[i];
+               dev = bus_find_device(&coresight_bustype, NULL,
+                                     (void *)conn->child_name,
+                                     coresight_name_match);
+
+               if (dev) {
+                       conn->child_dev = to_coresight_device(dev);
+               } else {
+                       csdev->orphan = true;
+                       conn->child_dev = NULL;
+               }
+       }
+}
+
+/**
+ * coresight_timeout - loop until a bit has changed to a specific state.
+ * @addr: base address of the area of interest.
+ * @offset: address of a register, starting from @addr.
+ * @position: the position of the bit of interest.
+ * @value: the value the bit should have.
+ *
+ * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
+ * TIMEOUT_US has elapsed, whichever happens first.
+ */
+
+int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
+{
+       int i;
+       u32 val;
+
+       for (i = TIMEOUT_US; i > 0; i--) {
+               val = __raw_readl(addr + offset);
+               /* waiting on the bit to go from 0 to 1 */
+               if (value) {
+                       if (val & BIT(position))
+                               return 0;
+               /* waiting on the bit to go from 1 to 0 */
+               } else {
+                       if (!(val & BIT(position)))
+                               return 0;
+               }
+
+               /*
+                * Delay is arbitrary - the specification doesn't say how long
+                * we are expected to wait.  Extra check required to make sure
+                * we don't wait needlessly on the last iteration.
+                */
+               if (i - 1)
+                       udelay(1);
+       }
+
+       return -EAGAIN;
+}
+
+struct bus_type coresight_bustype = {
+       .name   = "coresight",
+};
+
+static int __init coresight_init(void)
+{
+       return bus_register(&coresight_bustype);
+}
+postcore_initcall(coresight_init);
+
+struct coresight_device *coresight_register(struct coresight_desc *desc)
+{
+       int i;
+       int ret;
+       int link_subtype;
+       int nr_refcnts = 1;
+       atomic_t *refcnts = NULL;
+       struct coresight_device *csdev;
+       struct coresight_connection *conns;
+
+       csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
+       if (!csdev) {
+               ret = -ENOMEM;
+               goto err_kzalloc_csdev;
+       }
+
+       if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
+           desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
+               link_subtype = desc->subtype.link_subtype;
+
+               if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
+                       nr_refcnts = desc->pdata->nr_inport;
+               else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
+                       nr_refcnts = desc->pdata->nr_outport;
+       }
+
+       refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
+       if (!refcnts) {
+               ret = -ENOMEM;
+               goto err_kzalloc_refcnts;
+       }
+
+       csdev->refcnt = refcnts;
+
+       csdev->nr_inport = desc->pdata->nr_inport;
+       csdev->nr_outport = desc->pdata->nr_outport;
+       conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
+       if (!conns) {
+               ret = -ENOMEM;
+               goto err_kzalloc_conns;
+       }
+
+       for (i = 0; i < csdev->nr_outport; i++) {
+               conns[i].outport = desc->pdata->outports[i];
+               conns[i].child_name = desc->pdata->child_names[i];
+               conns[i].child_port = desc->pdata->child_ports[i];
+       }
+
+       csdev->conns = conns;
+
+       csdev->type = desc->type;
+       csdev->subtype = desc->subtype;
+       csdev->ops = desc->ops;
+       csdev->orphan = false;
+
+       csdev->dev.type = &coresight_dev_type[desc->type];
+       csdev->dev.groups = desc->groups;
+       csdev->dev.parent = desc->dev;
+       csdev->dev.release = coresight_device_release;
+       csdev->dev.bus = &coresight_bustype;
+       dev_set_name(&csdev->dev, "%s", desc->pdata->name);
+
+       ret = device_register(&csdev->dev);
+       if (ret)
+               goto err_device_register;
+
+       mutex_lock(&coresight_mutex);
+
+       coresight_fixup_device_conns(csdev);
+       coresight_fixup_orphan_conns(csdev);
+
+       mutex_unlock(&coresight_mutex);
+
+       return csdev;
+
+err_device_register:
+       kfree(conns);
+err_kzalloc_conns:
+       kfree(refcnts);
+err_kzalloc_refcnts:
+       kfree(csdev);
+err_kzalloc_csdev:
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(coresight_register);
+
+void coresight_unregister(struct coresight_device *csdev)
+{
+       mutex_lock(&coresight_mutex);
+
+       kfree(csdev->conns);
+       device_unregister(&csdev->dev);
+
+       mutex_unlock(&coresight_mutex);
+}
+EXPORT_SYMBOL_GPL(coresight_unregister);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
new file mode 100644 (file)
index 0000000..5030c07
--- /dev/null
@@ -0,0 +1,204 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/amba/bus.h>
+#include <linux/coresight.h>
+#include <asm/smp_plat.h>
+
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+       return dev->of_node == data;
+}
+
+static struct device *
+of_coresight_get_endpoint_device(struct device_node *endpoint)
+{
+       struct device *dev = NULL;
+
+       /*
+        * If we have a non-configurable replicator, it will be found on the
+        * platform bus.
+        */
+       dev = bus_find_device(&platform_bus_type, NULL,
+                             endpoint, of_dev_node_match);
+       if (dev)
+               return dev;
+
+       /*
+        * We have a configurable component - circle through the AMBA bus
+        * looking for the device that matches the endpoint node.
+        */
+       return bus_find_device(&amba_bustype, NULL,
+                              endpoint, of_dev_node_match);
+}
+
+static struct device_node *of_get_coresight_endpoint(
+               const struct device_node *parent, struct device_node *prev)
+{
+       struct device_node *node = of_graph_get_next_endpoint(parent, prev);
+
+       of_node_put(prev);
+       return node;
+}
+
+static void of_coresight_get_ports(struct device_node *node,
+                                  int *nr_inport, int *nr_outport)
+{
+       struct device_node *ep = NULL;
+       int in = 0, out = 0;
+
+       do {
+               ep = of_get_coresight_endpoint(node, ep);
+               if (!ep)
+                       break;
+
+               if (of_property_read_bool(ep, "slave-mode"))
+                       in++;
+               else
+                       out++;
+
+       } while (ep);
+
+       *nr_inport = in;
+       *nr_outport = out;
+}
+
+static int of_coresight_alloc_memory(struct device *dev,
+                       struct coresight_platform_data *pdata)
+{
+       /* List of output port on this component */
+       pdata->outports = devm_kzalloc(dev, pdata->nr_outport *
+                                      sizeof(*pdata->outports),
+                                      GFP_KERNEL);
+       if (!pdata->outports)
+               return -ENOMEM;
+
+       /* Children connected to this component via @outport */
+       pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
+                                         sizeof(*pdata->child_names),
+                                         GFP_KERNEL);
+       if (!pdata->child_names)
+               return -ENOMEM;
+
+       /* Port number on the child this component is connected to */
+       pdata->child_ports = devm_kzalloc(dev, pdata->nr_outport *
+                                         sizeof(*pdata->child_ports),
+                                         GFP_KERNEL);
+       if (!pdata->child_ports)
+               return -ENOMEM;
+
+       return 0;
+}
+
+struct coresight_platform_data *of_get_coresight_platform_data(
+                               struct device *dev, struct device_node *node)
+{
+       int i = 0, ret = 0;
+       struct coresight_platform_data *pdata;
+       struct of_endpoint endpoint, rendpoint;
+       struct device *rdev;
+       struct device_node *cpu;
+       struct device_node *ep = NULL;
+       struct device_node *rparent = NULL;
+       struct device_node *rport = NULL;
+
+       pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               return ERR_PTR(-ENOMEM);
+
+       /* Use device name as debugfs handle */
+       pdata->name = dev_name(dev);
+
+       /* Get the number of input and output port for this component */
+       of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
+
+       if (pdata->nr_outport) {
+               ret = of_coresight_alloc_memory(dev, pdata);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               /* Iterate through each port to discover topology */
+               do {
+                       /* Get a handle on a port */
+                       ep = of_get_coresight_endpoint(node, ep);
+                       if (!ep)
+                               break;
+
+                       /*
+                        * No need to deal with input ports, as processing
+                        * for output ports will deal with them.
+                        */
+                       if (of_find_property(ep, "slave-mode", NULL))
+                               continue;
+
+                       /* Get a handle on the local endpoint */
+                       ret = of_graph_parse_endpoint(ep, &endpoint);
+
+                       if (ret)
+                               continue;
+
+                       /* The local out port number */
+                       pdata->outports[i] = endpoint.id;
+
+                       /*
+                        * Get a handle on the remote port and parent
+                        * attached to it.
+                        */
+                       rparent = of_graph_get_remote_port_parent(ep);
+                       rport = of_graph_get_remote_port(ep);
+
+                       if (!rparent || !rport)
+                               continue;
+
+                       if (of_graph_parse_endpoint(rport, &rendpoint))
+                               continue;
+
+                       rdev = of_coresight_get_endpoint_device(rparent);
+                       if (!rdev)
+                               continue;
+
+                       pdata->child_names[i] = dev_name(rdev);
+                       pdata->child_ports[i] = rendpoint.id;
+
+                       i++;
+               } while (ep);
+       }
+
+       /* Affinity defaults to CPU0 */
+       pdata->cpu = 0;
+       cpu = of_parse_phandle(node, "cpu", 0);
+       if (cpu) {
+               const u32 *mpidr;
+               int len, index;
+
+               mpidr = of_get_property(cpu, "reg", &len);
+               if (mpidr && len == 4) {
+                       index = get_logical_index(be32_to_cpup(mpidr));
+                       if (index != -EINVAL)
+                               pdata->cpu = index;
+               }
+       }
+
+       return pdata;
+}
+EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
index 1956eea7df166313a719a23abb3dfe9e07e59848..f77778e9e463eabe8b7e0d3e69ffac281674b9f1 100644 (file)
@@ -117,6 +117,7 @@ struct cpufreq_interactive_tunables {
        /* End time of touchboost pulse in ktime converted to usecs */
        u64 touchboostpulse_endtime;
 #endif
+       bool boosted;
        /*
         * Max additional time to wait in idle, beyond timer_rate, at speeds
         * above minimum before wakeup to reduce speed, or -1 if unnecessary.
@@ -356,7 +357,6 @@ static void cpufreq_interactive_timer(unsigned long data)
        unsigned int loadadjfreq;
        unsigned int index;
        unsigned long flags;
-       bool boosted;
 
        if (!down_read_trylock(&pcpu->enable_sem))
                return;
@@ -376,14 +376,14 @@ static void cpufreq_interactive_timer(unsigned long data)
        do_div(cputime_speedadj, delta_time);
        loadadjfreq = (unsigned int)cputime_speedadj * 100;
        cpu_load = loadadjfreq / pcpu->target_freq;
-       boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
+       tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
 
 #ifdef CONFIG_ARCH_ROCKCHIP
        pcpu->target_freq = pcpu->policy->cur;
-       boosted |= now < tunables->touchboostpulse_endtime;
+       tunables->boosted |= now < tunables->touchboostpulse_endtime;
 #endif
 
-       if (cpu_load >= tunables->go_hispeed_load || boosted) {
+       if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
 #ifdef CONFIG_ARCH_ROCKCHIP
                if (now < tunables->touchboostpulse_endtime) {
                        new_freq = choose_freq(pcpu, loadadjfreq);
@@ -451,7 +451,7 @@ static void cpufreq_interactive_timer(unsigned long data)
         * (or the indefinite boost is turned off).
         */
 
-       if (!boosted || new_freq > tunables->hispeed_freq) {
+       if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
                pcpu->floor_freq = new_freq;
                pcpu->floor_validate_time = now;
        }
@@ -609,19 +609,21 @@ static int cpufreq_interactive_speedchange_task(void *data)
        return 0;
 }
 
-static void cpufreq_interactive_boost(void)
+static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
 {
        int i;
        int anyboost = 0;
        unsigned long flags[2];
        struct cpufreq_interactive_cpuinfo *pcpu;
-       struct cpufreq_interactive_tunables *tunables;
+
+       tunables->boosted = true;
 
        spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
 
        for_each_online_cpu(i) {
                pcpu = &per_cpu(cpuinfo, i);
-               tunables = pcpu->policy->governor_data;
+               if (tunables != pcpu->policy->governor_data)
+                       continue;
 
                spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
                if (pcpu->target_freq < tunables->hispeed_freq) {
@@ -934,7 +936,8 @@ static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
 
        if (tunables->boost_val) {
                trace_cpufreq_interactive_boost("on");
-               cpufreq_interactive_boost();
+               if (!tunables->boosted)
+                       cpufreq_interactive_boost(tunables);
        } else {
                tunables->boostpulse_endtime = ktime_to_us(ktime_get());
                trace_cpufreq_interactive_unboost("off");
@@ -956,7 +959,8 @@ static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
        tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
                tunables->boostpulse_duration_val;
        trace_cpufreq_interactive_boost("pulse");
-       cpufreq_interactive_boost();
+       if (!tunables->boosted)
+               cpufreq_interactive_boost(tunables);
        return count;
 }
 
index 842d7ba83101b224fc62bae095a8065307f00112..9625ce7ed5f8dc3c2a5bf46cc70a39b2a88fedd8 100644 (file)
@@ -31,26 +31,20 @@ config CPU_IDLE_GOV_MENU
 config ARCH_NEEDS_CPU_IDLE_COUPLED
        def_bool n
 
-config OF_IDLE_STATES
-        bool "Idle states DT support"
-       depends on ARM || ARM64
-       default n
-       help
-        Allows the CPU idle framework to initialize CPU idle drivers
-        state data by using DT provided nodes compliant with idle states
-        device tree bindings.
+config DT_IDLE_STATES
+       bool
 
 if CPU_IDLE
 
+menu "ARM64 CPU Idle Drivers"
+depends on ARM64
+source "drivers/cpuidle/Kconfig.arm64"
+endmenu
+
 config CPU_IDLE_CALXEDA
        bool "CPU Idle Driver for Calxeda processors"
        depends on ARCH_HIGHBANK
        help
          Select this to enable cpuidle on Calxeda processors.
 
-menu "ARM64 CPU Idle Drivers"
-depends on ARM64
-source "drivers/cpuidle/Kconfig.arm64"
-endmenu
-
 endif
index b83612c67e6de58cc5e10937c1a0445818ed839b..d0a08ed1b2ee62823e3f457a1ebf4905a41332f4 100644 (file)
@@ -4,10 +4,11 @@
 
 config ARM64_CPUIDLE
        bool "Generic ARM64 CPU idle Driver"
-       select OF_IDLE_STATES
+       select ARM64_CPU_SUSPEND
+       select DT_IDLE_STATES
        help
-         Select this to enable generic cpuidle driver for ARM v8.
+         Select this to enable generic cpuidle driver for ARM64.
          It provides a generic idle driver whose idle states are configured
          at run-time through DT nodes. The CPUidle suspend backend is
-         initialized by the device tree parsing code on matching the entry
-         method to the respective CPU operations.
+         initialized by calling the CPU operations init idle hook
+         provided by architecture code.
index 2d97bcfecd00cfa02f4239960abb5f37e5a6639c..0bd32cd03f0af843d2917dbce105419a7cdae0eb 100644 (file)
@@ -5,7 +5,7 @@
 obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
 obj-$(CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED) += coupled.o
 obj-$(CONFIG_BIG_LITTLE) += arm_big_little.o
-obj-$(CONFIG_OF_IDLE_STATES)             += of_idle_states.o
+obj-$(CONFIG_DT_IDLE_STATES)             += dt_idle_states.o
 
 obj-$(CONFIG_CPU_IDLE_CALXEDA) += cpuidle-calxeda.o
 obj-$(CONFIG_ARCH_KIRKWOOD) += cpuidle-kirkwood.o
index 2cfde6ce3086389e2b3091b3e4b70257b9ec8592..50997ea942fce58975db9f7d933183671f6e740f 100644 (file)
@@ -9,6 +9,8 @@
  * published by the Free Software Foundation.
  */
 
+#define pr_fmt(fmt) "CPUidle arm64: " fmt
+
 #include <linux/cpuidle.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_pm.h>
 #include <linux/module.h>
 #include <linux/of.h>
 
-#include <asm/psci.h>
+#include <asm/cpuidle.h>
 #include <asm/suspend.h>
 
-#include "of_idle_states.h"
-
-typedef int (*suspend_init_fn)(struct cpuidle_driver *,
-                              struct device_node *[]);
-
-struct cpu_suspend_ops {
-       const char *id;
-       suspend_init_fn init_fn;
-};
-
-static const struct cpu_suspend_ops suspend_operations[] __initconst = {
-       {"arm,psci", psci_dt_register_idle_states},
-       {}
-};
-
-static __init const struct cpu_suspend_ops *get_suspend_ops(const char *str)
-{
-       int i;
-
-       if (!str)
-               return NULL;
-
-       for (i = 0; suspend_operations[i].id; i++)
-               if (!strcmp(suspend_operations[i].id, str))
-                       return &suspend_operations[i];
-
-       return NULL;
-}
+#include "dt_idle_states.h"
 
 /*
- * arm_enter_idle_state - Programs CPU to enter the specified state
+ * arm64_enter_idle_state - Programs CPU to enter the specified state
  *
- * @dev: cpuidle device
- * @drv: cpuidle driver
- * @idx: state index
+ * dev: cpuidle device
+ * drv: cpuidle driver
+ * idx: state index
  *
  * Called from the CPUidle framework to program the device to the
  * specified target state selected by the governor.
  */
-static int arm_enter_idle_state(struct cpuidle_device *dev,
-                               struct cpuidle_driver *drv, int idx)
+static int arm64_enter_idle_state(struct cpuidle_device *dev,
+                                 struct cpuidle_driver *drv, int idx)
 {
        int ret;
 
@@ -68,30 +43,47 @@ static int arm_enter_idle_state(struct cpuidle_device *dev,
                return idx;
        }
 
-       cpu_pm_enter();
-       /*
-        * Pass idle state index to cpu_suspend which in turn will call
-        * the CPU ops suspend protocol with idle index as a parameter.
-        *
-        * Some states would not require context to be saved and flushed
-        * to DRAM, so calling cpu_suspend would not be stricly necessary.
-        * When power domains specifications for ARM CPUs are finalized then
-        * this code can be optimized to prevent saving registers if not
-        * needed.
-        */
-       ret = cpu_suspend(idx);
+       ret = cpu_pm_enter();
+       if (!ret) {
+               /*
+                * Pass idle state index to cpu_suspend which in turn will
+                * call the CPU ops suspend protocol with idle index as a
+                * parameter.
+                */
+               ret = cpu_suspend(idx);
 
-       cpu_pm_exit();
+               cpu_pm_exit();
+       }
 
        return ret ? -1 : idx;
 }
 
-struct cpuidle_driver arm64_idle_driver = {
+static struct cpuidle_driver arm64_idle_driver = {
        .name = "arm64_idle",
        .owner = THIS_MODULE,
+       /*
+        * State at index 0 is standby wfi and considered standard
+        * on all ARM platforms. If in some platforms simple wfi
+        * can't be used as "state 0", DT bindings must be implemented
+        * to work around this issue and allow installing a special
+        * handler for idle state index 0.
+        */
+       .states[0] = {
+               .enter                  = arm64_enter_idle_state,
+               .exit_latency           = 1,
+               .target_residency       = 1,
+               .power_usage            = UINT_MAX,
+               .flags                  = CPUIDLE_FLAG_TIME_VALID,
+               .name                   = "WFI",
+               .desc                   = "ARM64 WFI",
+       }
 };
 
-static struct device_node *state_nodes[CPUIDLE_STATE_MAX] __initdata;
+static const struct of_device_id arm64_idle_state_match[] __initconst = {
+       { .compatible = "arm,idle-state",
+         .data = arm64_enter_idle_state },
+       { },
+};
 
 /*
  * arm64_idle_init
@@ -102,58 +94,40 @@ static struct device_node *state_nodes[CPUIDLE_STATE_MAX] __initdata;
  */
 static int __init arm64_idle_init(void)
 {
-       int i, ret;
-       const char *entry_method;
-       struct device_node *idle_states_node;
-       const struct cpu_suspend_ops *suspend_init;
+       int cpu, ret;
        struct cpuidle_driver *drv = &arm64_idle_driver;
 
-       idle_states_node = of_find_node_by_path("/cpus/idle-states");
-       if (!idle_states_node)
-               return -ENOENT;
-
-       if (of_property_read_string(idle_states_node, "entry-method",
-                                   &entry_method)) {
-               pr_warn(" * %s missing entry-method property\n",
-                           idle_states_node->full_name);
-               of_node_put(idle_states_node);
-               return -EOPNOTSUPP;
-       }
-
-       suspend_init = get_suspend_ops(entry_method);
-       if (!suspend_init) {
-               pr_warn("Missing suspend initializer\n");
-               of_node_put(idle_states_node);
-               return -EOPNOTSUPP;
-       }
-
        /*
-        * State at index 0 is standby wfi and considered standard
-        * on all ARM platforms. If in some platforms simple wfi
-        * can't be used as "state 0", DT bindings must be implemented
-        * to work around this issue and allow installing a special
-        * handler for idle state index 0.
+        * Initialize idle states data, starting at index 1.
+        * This driver is DT only, if no DT idle states are detected (ret == 0)
+        * let the driver initialization fail accordingly since there is no
+        * reason to initialize the idle driver if only wfi is supported.
         */
-       drv->states[0].exit_latency = 1;
-       drv->states[0].target_residency = 1;
-       drv->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
-       strncpy(drv->states[0].name, "ARM WFI", CPUIDLE_NAME_LEN);
-       strncpy(drv->states[0].desc, "ARM WFI", CPUIDLE_DESC_LEN);
+       ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
+       if (ret <= 0) {
+               if (ret)
+                       pr_err("failed to initialize idle states\n");
+               return ret ? : -ENODEV;
+       }
 
-       drv->cpumask = (struct cpumask *) cpu_possible_mask;
        /*
-        * Start at index 1, request idle state nodes to be filled
+        * Call arch CPU operations in order to initialize
+        * idle states suspend back-end specific data
         */
-       ret = of_init_idle_driver(drv, state_nodes, 1, true);
-       if (ret)
-               return ret;
-
-       if (suspend_init->init_fn(drv, state_nodes))
-               return -EOPNOTSUPP;
+       for_each_possible_cpu(cpu) {
+               ret = cpu_init_idle(cpu);
+               if (ret) {
+                       pr_err("CPU %d failed to init idle CPU ops\n", cpu);
+                       return ret;
+               }
+       }
 
-       for (i = 0; i < drv->state_count; i++)
-               drv->states[i].enter = arm_enter_idle_state;
+       ret = cpuidle_register(drv, NULL);
+       if (ret) {
+               pr_err("failed to register cpuidle driver\n");
+               return ret;
+       }
 
-       return cpuidle_register(drv, NULL);
+       return 0;
 }
 device_initcall(arm64_idle_init);
diff --git a/drivers/cpuidle/dt_idle_states.c b/drivers/cpuidle/dt_idle_states.c
new file mode 100644 (file)
index 0000000..52f4d11
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * DT idle states parsing code.
+ *
+ * Copyright (C) 2014 ARM Ltd.
+ * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "DT idle-states: " fmt
+
+#include <linux/cpuidle.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "dt_idle_states.h"
+
+static int init_state_node(struct cpuidle_state *idle_state,
+                          const struct of_device_id *matches,
+                          struct device_node *state_node)
+{
+       int err;
+       const struct of_device_id *match_id;
+
+       match_id = of_match_node(matches, state_node);
+       if (!match_id)
+               return -ENODEV;
+       /*
+        * CPUidle drivers are expected to initialize the const void *data
+        * pointer of the passed in struct of_device_id array to the idle
+        * state enter function.
+        */
+       idle_state->enter = match_id->data;
+
+       err = of_property_read_u32(state_node, "wakeup-latency-us",
+                                  &idle_state->exit_latency);
+       if (err) {
+               u32 entry_latency, exit_latency;
+
+               err = of_property_read_u32(state_node, "entry-latency-us",
+                                          &entry_latency);
+               if (err) {
+                       pr_debug(" * %s missing entry-latency-us property\n",
+                                state_node->full_name);
+                       return -EINVAL;
+               }
+
+               err = of_property_read_u32(state_node, "exit-latency-us",
+                                          &exit_latency);
+               if (err) {
+                       pr_debug(" * %s missing exit-latency-us property\n",
+                                state_node->full_name);
+                       return -EINVAL;
+               }
+               /*
+                * If wakeup-latency-us is missing, default to entry+exit
+                * latencies as defined in idle states bindings
+                */
+               idle_state->exit_latency = entry_latency + exit_latency;
+       }
+
+       err = of_property_read_u32(state_node, "min-residency-us",
+                                  &idle_state->target_residency);
+       if (err) {
+               pr_debug(" * %s missing min-residency-us property\n",
+                            state_node->full_name);
+               return -EINVAL;
+       }
+
+       idle_state->flags = CPUIDLE_FLAG_TIME_VALID;
+       if (of_property_read_bool(state_node, "local-timer-stop"))
+               idle_state->flags |= CPUIDLE_FLAG_TIMER_STOP;
+       /*
+        * TODO:
+        *      replace with kstrdup and pointer assignment when name
+        *      and desc become string pointers
+        */
+       strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN - 1);
+       strncpy(idle_state->desc, state_node->name, CPUIDLE_DESC_LEN - 1);
+       return 0;
+}
+
+/*
+ * Check that the idle state is uniform across all CPUs in the CPUidle driver
+ * cpumask
+ */
+static bool idle_state_valid(struct device_node *state_node, unsigned int idx,
+                            const cpumask_t *cpumask)
+{
+       int cpu;
+       struct device_node *cpu_node, *curr_state_node;
+       bool valid = true;
+
+       /*
+        * Compare idle state phandles for index idx on all CPUs in the
+        * CPUidle driver cpumask. Start from next logical cpu following
+        * cpumask_first(cpumask) since that's the CPU state_node was
+        * retrieved from. If a mismatch is found bail out straight
+        * away since we certainly hit a firmware misconfiguration.
+        */
+       for (cpu = cpumask_next(cpumask_first(cpumask), cpumask);
+            cpu < nr_cpu_ids; cpu = cpumask_next(cpu, cpumask)) {
+               cpu_node = of_cpu_device_node_get(cpu);
+               curr_state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+                                                  idx);
+               if (state_node != curr_state_node)
+                       valid = false;
+
+               of_node_put(curr_state_node);
+               of_node_put(cpu_node);
+               if (!valid)
+                       break;
+       }
+
+       return valid;
+}
+
+/**
+ * dt_init_idle_driver() - Parse the DT idle states and initialize the
+ *                        idle driver states array
+ * @drv:         Pointer to CPU idle driver to be initialized
+ * @matches:     Array of of_device_id match structures to search in for
+ *               compatible idle state nodes. The data pointer for each valid
+ *               struct of_device_id entry in the matches array must point to
+ *               a function with the following signature, that corresponds to
+ *               the CPUidle state enter function signature:
+ *
+ *               int (*)(struct cpuidle_device *dev,
+ *                       struct cpuidle_driver *drv,
+ *                       int index);
+ *
+ * @start_idx:    First idle state index to be initialized
+ *
+ * If DT idle states are detected and are valid the state count and states
+ * array entries in the cpuidle driver are initialized accordingly starting
+ * from index start_idx.
+ *
+ * Return: number of valid DT idle states parsed, <0 on failure
+ */
+int dt_init_idle_driver(struct cpuidle_driver *drv,
+                       const struct of_device_id *matches,
+                       unsigned int start_idx)
+{
+       struct cpuidle_state *idle_state;
+       struct device_node *state_node, *cpu_node;
+       int i, err = 0;
+       const cpumask_t *cpumask;
+       unsigned int state_idx = start_idx;
+
+       if (state_idx >= CPUIDLE_STATE_MAX)
+               return -EINVAL;
+       /*
+        * We get the idle states for the first logical cpu in the
+        * driver mask (or cpu_possible_mask if the driver cpumask is not set)
+        * and we check through idle_state_valid() if they are uniform
+        * across CPUs, otherwise we hit a firmware misconfiguration.
+        */
+       cpumask = drv->cpumask ? : cpu_possible_mask;
+       cpu_node = of_cpu_device_node_get(cpumask_first(cpumask));
+
+       for (i = 0; ; i++) {
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+               if (!state_node)
+                       break;
+
+               if (!idle_state_valid(state_node, i, cpumask)) {
+                       pr_warn("%s idle state not valid, bailing out\n",
+                               state_node->full_name);
+                       err = -EINVAL;
+                       break;
+               }
+
+               if (state_idx == CPUIDLE_STATE_MAX) {
+                       pr_warn("State index reached static CPU idle driver states array size\n");
+                       break;
+               }
+
+               idle_state = &drv->states[state_idx++];
+               err = init_state_node(idle_state, matches, state_node);
+               if (err) {
+                       pr_err("Parsing idle state node %s failed with err %d\n",
+                              state_node->full_name, err);
+                       err = -EINVAL;
+                       break;
+               }
+               of_node_put(state_node);
+       }
+
+       of_node_put(state_node);
+       of_node_put(cpu_node);
+       if (err)
+               return err;
+       /*
+        * Update the driver state count only if some valid DT idle states
+        * were detected
+        */
+       if (i)
+               drv->state_count = state_idx;
+
+       /*
+        * Return the number of present and valid DT idle states, which can
+        * also be 0 on platforms with missing DT idle states or legacy DT
+        * configuration predating the DT idle states bindings.
+        */
+       return i;
+}
+EXPORT_SYMBOL_GPL(dt_init_idle_driver);
diff --git a/drivers/cpuidle/dt_idle_states.h b/drivers/cpuidle/dt_idle_states.h
new file mode 100644 (file)
index 0000000..4818134
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef __DT_IDLE_STATES
+#define __DT_IDLE_STATES
+
+int dt_init_idle_driver(struct cpuidle_driver *drv,
+                       const struct of_device_id *matches,
+                       unsigned int start_idx);
+#endif
diff --git a/drivers/cpuidle/of_idle_states.c b/drivers/cpuidle/of_idle_states.c
deleted file mode 100644 (file)
index eceb1b4..0000000
+++ /dev/null
@@ -1,274 +0,0 @@
-/*
- * OF idle states parsing code.
- *
- * Copyright (C) 2014 ARM Ltd.
- * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/cpuidle.h>
-#include <linux/cpumask.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/list_sort.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-#include "of_idle_states.h"
-
-struct state_elem {
-       struct list_head list;
-       struct device_node *node;
-       int val;
-};
-
-static struct list_head head __initdata = LIST_HEAD_INIT(head);
-
-static bool __init state_cpu_valid(struct device_node *state_node,
-                                  struct device_node *cpu_node)
-{
-       int i = 0;
-       struct device_node *cpu_state;
-
-       while ((cpu_state = of_parse_phandle(cpu_node,
-                                            "cpu-idle-states", i++))) {
-               if (cpu_state && state_node == cpu_state) {
-                       of_node_put(cpu_state);
-                       return true;
-               }
-               of_node_put(cpu_state);
-       }
-       return false;
-}
-
-static bool __init state_cpus_valid(const cpumask_t *cpus,
-                                   struct device_node *state_node)
-{
-       int cpu;
-       struct device_node *cpu_node;
-
-       /*
-        * Check if state is valid on driver cpumask cpus
-        */
-       for_each_cpu(cpu, cpus) {
-               cpu_node = of_get_cpu_node(cpu, NULL);
-
-               if (!cpu_node) {
-                       pr_err("Missing device node for CPU %d\n", cpu);
-                       return false;
-               }
-
-               if (!state_cpu_valid(state_node, cpu_node))
-                       return false;
-       }
-
-       return true;
-}
-
-static int __init state_cmp(void *priv, struct list_head *a,
-                           struct list_head *b)
-{
-       struct state_elem *ela, *elb;
-
-       ela = container_of(a, struct state_elem, list);
-       elb = container_of(b, struct state_elem, list);
-
-       return ela->val - elb->val;
-}
-
-static int __init add_state_node(cpumask_t *cpumask,
-                                struct device_node *state_node)
-{
-       struct state_elem *el;
-       u32 val;
-
-       pr_debug(" * %s...\n", state_node->full_name);
-
-       if (!state_cpus_valid(cpumask, state_node))
-               return -EINVAL;
-       /*
-        * Parse just the value required to sort the states.
-        */
-       if (of_property_read_u32(state_node, "min-residency-us",
-                                &val)) {
-               pr_debug(" * %s missing min-residency-us property\n",
-                        state_node->full_name);
-               return -EINVAL;
-       }
-
-       el = kmalloc(sizeof(*el), GFP_KERNEL);
-       if (!el) {
-               pr_err("%s failed to allocate memory\n", __func__);
-               return -ENOMEM;
-       }
-
-       el->node = state_node;
-       el->val = val;
-       list_add_tail(&el->list, &head);
-
-       return 0;
-}
-
-static void __init init_state_node(struct cpuidle_driver *drv,
-                                  struct device_node *state_node,
-                                  int *cnt)
-{
-       struct cpuidle_state *idle_state;
-
-       pr_debug(" * %s...\n", state_node->full_name);
-
-       idle_state = &drv->states[*cnt];
-
-       if (of_property_read_u32(state_node, "exit-latency-us",
-                                &idle_state->exit_latency)) {
-               pr_debug(" * %s missing exit-latency-us property\n",
-                            state_node->full_name);
-               return;
-       }
-
-       if (of_property_read_u32(state_node, "min-residency-us",
-                                &idle_state->target_residency)) {
-               pr_debug(" * %s missing min-residency-us property\n",
-                            state_node->full_name);
-               return;
-       }
-       /*
-        * It is unknown to the idle driver if and when the tick_device
-        * loses context when the CPU enters the idle states. To solve
-        * this issue the tick device must be linked to a power domain
-        * so that the idle driver can check on which states the device
-        * loses its context. Current code takes the conservative choice
-        * of defining the idle state as one where the tick device always
-        * loses its context. On platforms where tick device never loses
-        * its context (ie it is not a C3STOP device) this turns into
-        * a nop. On platforms where the tick device does lose context in some
-        * states, this code can be optimized, when power domain specifications
-        * for ARM CPUs are finalized.
-        */
-       idle_state->flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
-
-       strncpy(idle_state->name, state_node->name, CPUIDLE_NAME_LEN);
-       strncpy(idle_state->desc, state_node->name, CPUIDLE_NAME_LEN);
-
-       (*cnt)++;
-}
-
-static int __init init_idle_states(struct cpuidle_driver *drv,
-                                  struct device_node *state_nodes[],
-                                  unsigned int start_idx, bool init_nodes)
-{
-       struct state_elem *el;
-       struct list_head *curr, *tmp;
-       unsigned int cnt = start_idx;
-
-       list_for_each_entry(el, &head, list) {
-               /*
-                * Check if the init function has to fill the
-                * state_nodes array on behalf of the CPUidle driver.
-                */
-               if (init_nodes)
-                       state_nodes[cnt] = el->node;
-               /*
-                * cnt is updated on return if a state was added.
-                */
-               init_state_node(drv, el->node, &cnt);
-
-               if (cnt == CPUIDLE_STATE_MAX) {
-                       pr_warn("State index reached static CPU idle state limit\n");
-                       break;
-               }
-       }
-
-       drv->state_count = cnt;
-
-       list_for_each_safe(curr, tmp, &head) {
-               list_del(curr);
-               kfree(container_of(curr, struct state_elem, list));
-       }
-
-       /*
-        * If no idle states are detected, return an error and let the idle
-        * driver initialization fail accordingly.
-        */
-       return (cnt > start_idx) ? 0 : -ENODATA;
-}
-
-static void __init add_idle_states(struct cpuidle_driver *drv,
-                                  struct device_node *idle_states)
-{
-       struct device_node *state_node;
-
-       for_each_child_of_node(idle_states, state_node) {
-               if ((!of_device_is_compatible(state_node, "arm,idle-state"))) {
-                       pr_warn(" * %s: children of /cpus/idle-states must be \"arm,idle-state\" compatible\n",
-                                    state_node->full_name);
-                       continue;
-               }
-               /*
-                * If memory allocation fails, better bail out.
-                * Initialized nodes are freed at initialization
-                * completion in of_init_idle_driver().
-                */
-               if ((add_state_node(drv->cpumask, state_node) == -ENOMEM))
-                       break;
-       }
-       /*
-        * Sort the states list before initializing the CPUidle driver
-        * states array.
-        */
-       list_sort(NULL, &head, state_cmp);
-}
-
-/*
- * of_init_idle_driver - Parse the DT idle states and initialize the
- *                      idle driver states array
- *
- * @drv:         Pointer to CPU idle driver to be initialized
- * @state_nodes:  Array of struct device_nodes to be initialized if
- *               init_nodes == true. Must be sized CPUIDLE_STATE_MAX
- * @start_idx:    First idle state index to be initialized
- * @init_nodes:   Boolean to request device nodes initialization
- *
- * Returns:
- *     0 on success
- *     <0 on failure
- *
- *     On success the states array in the cpuidle driver contains
- *     initialized entries in the states array, starting from index start_idx.
- *     If init_nodes == true, on success the state_nodes array is initialized
- *     with idle state DT node pointers, starting from index start_idx,
- *     in a 1:1 relation with the idle driver states array.
- */
-int __init of_init_idle_driver(struct cpuidle_driver *drv,
-                              struct device_node *state_nodes[],
-                              unsigned int start_idx, bool init_nodes)
-{
-       struct device_node *idle_states_node;
-       int ret;
-
-       if (start_idx >= CPUIDLE_STATE_MAX) {
-               pr_warn("State index exceeds static CPU idle driver states array size\n");
-               return -EINVAL;
-       }
-
-       if (WARN(init_nodes && !state_nodes,
-               "Requested nodes stashing in an invalid nodes container\n"))
-               return -EINVAL;
-
-       idle_states_node = of_find_node_by_path("/cpus/idle-states");
-       if (!idle_states_node)
-               return -ENOENT;
-
-       add_idle_states(drv, idle_states_node);
-
-       ret = init_idle_states(drv, state_nodes, start_idx, init_nodes);
-
-       of_node_put(idle_states_node);
-
-       return ret;
-}
diff --git a/drivers/cpuidle/of_idle_states.h b/drivers/cpuidle/of_idle_states.h
deleted file mode 100644 (file)
index 049f94f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef __OF_IDLE_STATES
-#define __OF_IDLE_STATES
-
-int __init of_init_idle_driver(struct cpuidle_driver *drv,
-                              struct device_node *state_nodes[],
-                              unsigned int start_idx,
-                              bool init_nodes);
-#endif
index 08e8e18b3f85597371d110900ab122d2d3062347..f5d1dc5b5563b7fe466c27ddd4a10936cdbfe63b 100644 (file)
@@ -1097,6 +1097,17 @@ bool intel_lvds_init(struct drm_device *dev)
        int pipe;
        u8 pin;
 
+       /*
+        * Unlock registers and just leave them unlocked. Do this before
+        * checking quirk lists to avoid bogus WARNINGs.
+        */
+       if (HAS_PCH_SPLIT(dev)) {
+               I915_WRITE(PCH_PP_CONTROL,
+                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+       } else {
+               I915_WRITE(PP_CONTROL,
+                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+       }
        if (!intel_lvds_supported(dev))
                return false;
 
@@ -1280,17 +1291,6 @@ out:
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
                      lvds_encoder->is_dual_link ? "dual" : "single");
 
-       /*
-        * Unlock registers and just
-        * leave them unlocked
-        */
-       if (HAS_PCH_SPLIT(dev)) {
-               I915_WRITE(PCH_PP_CONTROL,
-                          I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
-       } else {
-               I915_WRITE(PP_CONTROL,
-                          I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
-       }
        lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
index 1113e8f691372bd7ba1c4ff8356ebd559fe7bf68..2c3c4c58a765032cdcc04f60f3f7d8f32b20cef0 100644 (file)
@@ -666,6 +666,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
 
        /* Get associated drm_crtc: */
        drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
+       if (!drmcrtc)
+               return -EINVAL;
 
        /* Helper routine in DRM core does all the work: */
        return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
index 81d0e6e1f754101479d95dc5c756498fe3bbc720..2bd798a7d9aa7b7638160061a9f1fd376bbefc16 100644 (file)
@@ -1687,6 +1687,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
index a1e431f830e31468239ee994e75248f3823011b8..45c593dbf5cdcfcef39a3440a232c74c38597b32 100644 (file)
 #define USB_DEVICE_ID_KYE_GPEN_560     0x5003
 #define USB_DEVICE_ID_KYE_EASYPEN_I405X        0x5010
 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X       0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2     0x501a
 #define USB_DEVICE_ID_KYE_EASYPEN_M610X        0x5013
 
 #define USB_VENDOR_ID_LABTEC           0x1020
index 0f9950e8239a75b9bf22d1d22f6cc3e0e295cc77..66763546df157d66fe89d24f443ec85e1379dd8c 100644 (file)
@@ -316,6 +316,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                               USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+       { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+                              USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+         HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
        { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
                USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
          HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
index 843f2dd55200a4c92d53652d84472800f2f4f35f..973eed788cc6fb348972e959fd57851425aa0e63 100644 (file)
@@ -303,6 +303,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
                }
                break;
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
                if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
                        rdesc = mousepen_i608x_rdesc_fixed;
                        *rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -383,6 +384,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
        switch (id->product) {
        case USB_DEVICE_ID_KYE_EASYPEN_I405X:
        case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+       case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
        case USB_DEVICE_ID_KYE_EASYPEN_M610X:
                ret = kye_tablet_enable(hdev);
                if (ret) {
@@ -405,6 +407,8 @@ static const struct hid_device_id kye_devices[] = {
                                USB_DEVICE_ID_KYE_EASYPEN_I405X) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+                               USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
        { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
                                USB_DEVICE_ID_KYE_EASYPEN_M610X) },
        { }
index d4f1e3bee5909eed76861acff408b582985462af..264ddc4a0118521c0d8d3656a1d0967d0a59ae85 100644 (file)
@@ -35,6 +35,8 @@ static struct class *pyra_class;
 static void profile_activated(struct pyra_device *pyra,
                unsigned int new_profile)
 {
+       if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return;
        pyra->actual_profile = new_profile;
        pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
 }
@@ -236,9 +238,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
        if (off != 0 || count != PYRA_SIZE_SETTINGS)
                return -EINVAL;
 
-       mutex_lock(&pyra->pyra_lock);
-
        settings = (struct pyra_settings const *)buf;
+       if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+               return -EINVAL;
+
+       mutex_lock(&pyra->pyra_lock);
 
        retval = pyra_set_settings(usb_dev, settings);
        if (retval) {
index 2b1799a3b212d73e1bc9100be6092923a4bc8cb2..469daa04dadbeffa3c98c44f5c0dbf03350c0494 100644 (file)
@@ -134,6 +134,7 @@ struct i2c_hid {
                                                   * descriptor. */
        unsigned int            bufsize;        /* i2c buffer size */
        char                    *inbuf;         /* Input buffer */
+       char                    *rawbuf;        /* Raw Input buffer */
        char                    *cmdbuf;        /* Command buffer */
        char                    *argsbuf;       /* Command arguments buffer */
 
@@ -340,7 +341,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
        int ret, ret_size;
-       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+       int size = ihid->bufsize;
 
        ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
        if (ret != size) {
@@ -471,9 +472,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
 static void i2c_hid_free_buffers(struct i2c_hid *ihid)
 {
        kfree(ihid->inbuf);
+       kfree(ihid->rawbuf);
        kfree(ihid->argsbuf);
        kfree(ihid->cmdbuf);
        ihid->inbuf = NULL;
+       ihid->rawbuf = NULL;
        ihid->cmdbuf = NULL;
        ihid->argsbuf = NULL;
        ihid->bufsize = 0;
@@ -489,10 +492,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
                       report_size; /* report */
 
        ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+       ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
        ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
        ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
 
-       if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+       if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
                i2c_hid_free_buffers(ihid);
                return -ENOMEM;
        }
@@ -519,12 +523,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
 
        ret = i2c_hid_get_report(client,
                        report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
-                       report_number, ihid->inbuf, ask_count);
+                       report_number, ihid->rawbuf, ask_count);
 
        if (ret < 0)
                return ret;
 
-       ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8);
+       ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
 
        if (ret_count <= 2)
                return 0;
@@ -533,7 +537,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
 
        /* The query buffer contains the size, dropping it in the reply */
        count = min(count, ret_count - 2);
-       memcpy(buf, ihid->inbuf + 2, count);
+       memcpy(buf, ihid->rawbuf + 2, count);
 
        return count;
 }
index 0db9a67278ba21d3d574b3f209311d5f54fecbac..5b46a79dcb1f755f7711ffa9c85392e1f409b4db 100644 (file)
@@ -110,6 +110,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_LTS1, HID_QUIRK_NO_INIT_REPORTS },
index cf20e06a88e18b5569aaf17a08909b0fb8363645..09f29e92095abf44565216f459dc3da00e1ec3e0 100644 (file)
@@ -414,11 +414,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
        if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
                if (msg->flags & I2C_M_IGNORE_NAK)
                        return msg->len;
-               if (stop) {
-                       w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
-                       w |= DAVINCI_I2C_MDR_STP;
-                       davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
-               }
+               w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
+               w |= DAVINCI_I2C_MDR_STP;
+               davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
                return -EREMOTEIO;
        }
        return -EIO;
index b06be8e3bb76e5d82d5f4a40c86535354edd5287..7645924f9f8b3909da1a065b77deb0c82bc0f4d7 100644 (file)
@@ -928,14 +928,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                if (stat & OMAP_I2C_STAT_NACK) {
                        err |= OMAP_I2C_STAT_NACK;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
-                       break;
                }
 
                if (stat & OMAP_I2C_STAT_AL) {
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
-                       break;
                }
 
                /*
@@ -960,11 +958,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
                        if (dev->fifo_size)
                                num_bytes = dev->buf_len;
 
-                       omap_i2c_receive_data(dev, num_bytes, true);
-
-                       if (dev->errata & I2C_OMAP_ERRATA_I207)
+                       if (dev->errata & I2C_OMAP_ERRATA_I207) {
                                i2c_omap_errata_i207(dev, stat);
+                               num_bytes = (omap_i2c_read_reg(dev,
+                                       OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
+                       }
 
+                       omap_i2c_receive_data(dev, num_bytes, true);
                        omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
                        continue;
                }
index 6771e3c94801881520929b965a0bb897688819db..db4e10d4c7f545d9ad1e4a8914868046b47785f9 100644 (file)
@@ -1796,7 +1796,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
        struct dma_pte *first_pte = NULL, *pte = NULL;
        phys_addr_t uninitialized_var(pteval);
        int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
-       unsigned long sg_res;
+       unsigned long sg_res = 0;
        unsigned int largepage_lvl = 0;
        unsigned long lvl_pages = 0;
 
@@ -1807,10 +1807,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
        prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
 
-       if (sg)
-               sg_res = 0;
-       else {
-               sg_res = nr_pages + 1;
+       if (!sg) {
+               sg_res = nr_pages;
                pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
        }
 
index 5a2c75499824f417a2e6bfd22f1b2c15e5135b1d..a79cbd6038f6ec5d79d5dc32b64f0933425f90f2 100644 (file)
@@ -883,7 +883,6 @@ void bitmap_unplug(struct bitmap *bitmap)
 {
        unsigned long i;
        int dirty, need_write;
-       int wait = 0;
 
        if (!bitmap || !bitmap->storage.filemap ||
            test_bit(BITMAP_STALE, &bitmap->flags))
@@ -901,16 +900,13 @@ void bitmap_unplug(struct bitmap *bitmap)
                        clear_page_attr(bitmap, i, BITMAP_PAGE_PENDING);
                        write_page(bitmap, bitmap->storage.filemap[i], 0);
                }
-               if (dirty)
-                       wait = 1;
-       }
-       if (wait) { /* if any writes were performed, we need to wait on them */
-               if (bitmap->storage.file)
-                       wait_event(bitmap->write_wait,
-                                  atomic_read(&bitmap->pending_writes)==0);
-               else
-                       md_super_wait(bitmap->mddev);
        }
+       if (bitmap->storage.file)
+               wait_event(bitmap->write_wait,
+                          atomic_read(&bitmap->pending_writes)==0);
+       else
+               md_super_wait(bitmap->mddev);
+
        if (test_bit(BITMAP_WRITE_ERROR, &bitmap->flags))
                bitmap_file_kick(bitmap);
 }
index c9b4ca9e0696312d174b0122d62fe354fb0ab0a9..e855a190270d59bba68ae6851f45cf90ab7b0e17 100644 (file)
@@ -529,6 +529,19 @@ static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
                end_io(&b->bio, r);
 }
 
+static void inline_endio(struct bio *bio, int error)
+{
+       bio_end_io_t *end_fn = bio->bi_private;
+
+       /*
+        * Reset the bio to free any attached resources
+        * (e.g. bio integrity profiles).
+        */
+       bio_reset(bio);
+
+       end_fn(bio, error);
+}
+
 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
                           bio_end_io_t *end_io)
 {
@@ -540,7 +553,12 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
        b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
        b->bio.bi_sector = block << b->c->sectors_per_block_bits;
        b->bio.bi_bdev = b->c->bdev;
-       b->bio.bi_end_io = end_io;
+       b->bio.bi_end_io = inline_endio;
+       /*
+        * Use of .bi_private isn't a problem here because
+        * the dm_buffer's inline bio is local to bufio.
+        */
+       b->bio.bi_private = end_io;
 
        /*
         * We assume that if len >= PAGE_SIZE ptr is page-aligned.
index afb419e514bf53f2ed1f1173c31471c5fac3e1f5..056d09c33af14a104c8f16b95cc7c06e251f4bb0 100644 (file)
@@ -493,7 +493,9 @@ static int sm_bootstrap_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count
 {
        struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
 
-       return smm->ll.nr_blocks;
+       *count = smm->ll.nr_blocks;
+
+       return 0;
 }
 
 static int sm_bootstrap_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
index cae4f46838517472f4f705267ed9b3e93a1999c1..b280216de31b6927a1a1e5d499cd6d8dbd17ad0c 100644 (file)
@@ -2139,7 +2139,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
                ret = smiapp_set_compose(subdev, fh, sel);
                break;
        default:
-               BUG();
+               ret = -EINVAL;
        }
 
        mutex_unlock(&sensor->mutex);
index 15e1463e5e1334258a101b6b584d10fd4f3495a1..17fe83e81ea40744a8c1b7d2dadfbfaf4d44c092 100644 (file)
@@ -263,6 +263,17 @@ static int tc6393xb_ohci_disable(struct platform_device *dev)
        return 0;
 }
 
+static int tc6393xb_ohci_suspend(struct platform_device *dev)
+{
+       struct tc6393xb_platform_data *tcpd = dev_get_platdata(dev->dev.parent);
+
+       /* We can't properly store/restore OHCI state, so fail here */
+       if (tcpd->resume_restore)
+               return -EBUSY;
+
+       return tc6393xb_ohci_disable(dev);
+}
+
 static int tc6393xb_fb_enable(struct platform_device *dev)
 {
        struct tc6393xb *tc6393xb = dev_get_drvdata(dev->dev.parent);
@@ -403,7 +414,7 @@ static struct mfd_cell tc6393xb_cells[] = {
                .num_resources = ARRAY_SIZE(tc6393xb_ohci_resources),
                .resources = tc6393xb_ohci_resources,
                .enable = tc6393xb_ohci_enable,
-               .suspend = tc6393xb_ohci_disable,
+               .suspend = tc6393xb_ohci_suspend,
                .resume = tc6393xb_ohci_enable,
                .disable = tc6393xb_ohci_disable,
        },
index a5eb95bc4717bc478829bfd075f95a29b18e6061..6df820652d6d85096a9cf44da5272dae3953d582 100755 (executable)
@@ -257,7 +257,7 @@ static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
        int ret;
        struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
 
-       ret = snprintf(buf, PAGE_SIZE, "%d",
+       ret = snprintf(buf, PAGE_SIZE, "%d\n",
                       get_disk_ro(dev_to_disk(dev)) ^
                       md->read_only);
        mmc_blk_put(md);
index ec2c2dc1c1ca06ad89144289f2996ad112526425..2a1b6e037e1a1ced496ac446ca07d6037ef61d0e 100644 (file)
@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
        ubi_assert(!vol->updating && !vol->changing_leb);
        vol->updating = 1;
 
+       vol->upd_buf = vmalloc(ubi->leb_size);
+       if (!vol->upd_buf)
+               return -ENOMEM;
+
        err = set_update_marker(ubi, vol);
        if (err)
                return err;
@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
                err = clear_update_marker(ubi, vol, 0);
                if (err)
                        return err;
+
+               vfree(vol->upd_buf);
                vol->updating = 0;
                return 0;
        }
 
-       vol->upd_buf = vmalloc(ubi->leb_size);
-       if (!vol->upd_buf)
-               return -ENOMEM;
-
        vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
                               vol->usable_leb_size);
        vol->upd_bytes = bytes;
index c95bfb183c62b185f2f6cc17b5d84245bdd2d13d..49e570abe58b01b07a3e78e71d0fede9fd99306a 100644 (file)
@@ -1209,7 +1209,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
-               kmem_cache_free(ubi_wl_entry_slab, e1);
                if (e2)
                        kmem_cache_free(ubi_wl_entry_slab, e2);
                goto out_ro;
@@ -1223,10 +1222,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
-               if (err) {
-                       kmem_cache_free(ubi_wl_entry_slab, e2);
+               if (err)
                        goto out_ro;
-               }
        }
 
        dbg_wl("done");
@@ -1262,10 +1259,9 @@ out_not_moved:
 
        ubi_free_vid_hdr(ubi, vid_hdr);
        err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
-       if (err) {
-               kmem_cache_free(ubi_wl_entry_slab, e2);
+       if (err)
                goto out_ro;
-       }
+
        mutex_unlock(&ubi->move_mutex);
        return 0;
 
index a0f647f92bf55c7388034f9fbd0a377cc1225901..3a220d2f2ee1b47763eefbe73e7944e94debb056 100644 (file)
@@ -727,7 +727,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        dev->cmd_buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL);
        if (!dev->cmd_buf) {
                err = -ENOMEM;
-               goto lbl_set_intf_data;
+               goto lbl_free_candev;
        }
 
        dev->udev = usb_dev;
@@ -766,7 +766,7 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        err = register_candev(netdev);
        if (err) {
                dev_err(&intf->dev, "couldn't register CAN device: %d\n", err);
-               goto lbl_free_cmd_buf;
+               goto lbl_restore_intf_data;
        }
 
        if (dev->prev_siblings)
@@ -779,14 +779,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
        if (dev->adapter->dev_init) {
                err = dev->adapter->dev_init(dev);
                if (err)
-                       goto lbl_free_cmd_buf;
+                       goto lbl_unregister_candev;
        }
 
        /* set bus off */
        if (dev->adapter->dev_set_bus) {
                err = dev->adapter->dev_set_bus(dev, 0);
                if (err)
-                       goto lbl_free_cmd_buf;
+                       goto lbl_unregister_candev;
        }
 
        /* get device number early */
@@ -798,11 +798,14 @@ static int peak_usb_create_dev(struct peak_usb_adapter *peak_usb_adapter,
 
        return 0;
 
-lbl_free_cmd_buf:
-       kfree(dev->cmd_buf);
+lbl_unregister_candev:
+       unregister_candev(netdev);
 
-lbl_set_intf_data:
+lbl_restore_intf_data:
        usb_set_intfdata(intf, dev->prev_siblings);
+       kfree(dev->cmd_buf);
+
+lbl_free_candev:
        free_candev(netdev);
 
        return err;
index 263dd921edc42342bba78ce9f2885862f0fc3087..f7f796a2c50bc036f7817b6fab070615e1795d37 100644 (file)
@@ -333,8 +333,6 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
        if (!(dev->state & PCAN_USB_STATE_CONNECTED))
                return 0;
 
-       memset(req_addr, '\0', req_size);
-
        req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER;
 
        switch (req_id) {
@@ -345,6 +343,7 @@ static int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id,
        default:
                p = usb_rcvctrlpipe(dev->udev, 0);
                req_type |= USB_DIR_IN;
+               memset(req_addr, '\0', req_size);
                break;
        }
 
index 3de4069f020e35da50f4e547c5b79edbeb9a44e9..5501cad30cfa94207deaf0b2a5bbcfb01fa7cc89 100644 (file)
@@ -8392,7 +8392,8 @@ static int tg3_init_rings(struct tg3 *tp)
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
 
-               if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
+               if (tnapi->prodring.rx_std &&
+                   tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
                        tg3_free_rings(tp);
                        return -ENOMEM;
                }
index 4d3c8122e2aa92975911146f51c73f08a77255fa..3f342fbe9ccfeb0cc3460b2cdc0ccf5f764d6eb9 100644 (file)
@@ -1584,6 +1584,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
+
+       igb_setup_link(&adapter->hw);
 }
 
 /**
index f8821ce2780219cb63cb86a0fcfc6bf88fa30cfe..8b6c9237eda4a36b09bcd53a1d95d2a27f5f4978 100644 (file)
 /* Various constants */
 
 /* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS                16
+#define MVNETA_TXDONE_COAL_PKTS                1
 #define MVNETA_RX_COAL_PKTS            32
 #define MVNETA_RX_COAL_USEC            100
 
index 1157f028a90f341a50c300c40ba8e967d9fb2e01..6cc808865e9538abbe74996c01bf104d85795fd2 100644 (file)
@@ -1207,7 +1207,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 
        switch (op) {
        case RES_OP_RESERVE:
-               count = get_param_l(&in_param);
+               count = get_param_l(&in_param) & 0xffffff;
                align = get_param_h(&in_param);
                err = __mlx4_qp_reserve_range(dev, count, align, &base);
                if (err)
index 65fe929529a8f9b303858a329b7892d9f9c1cc27..3bfd0b88016e1144a6f9f816f0d66e1c323eccb2 100644 (file)
@@ -225,13 +225,7 @@ ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
        } else {
                switch (queue_type) {
                case AR5K_TX_QUEUE_DATA:
-                       for (queue = AR5K_TX_QUEUE_ID_DATA_MIN;
-                               ah->ah_txq[queue].tqi_type !=
-                               AR5K_TX_QUEUE_INACTIVE; queue++) {
-
-                               if (queue > AR5K_TX_QUEUE_ID_DATA_MAX)
-                                       return -EINVAL;
-                       }
+                       queue = queue_info->tqi_subtype;
                        break;
                case AR5K_TX_QUEUE_UAPSD:
                        queue = AR5K_TX_QUEUE_ID_UAPSD;
index ae3034374bc4ca1b064fe54d7a6ce6b5ed43b706..d7d9e311089f9a3b2e4f03f4804df8485712e3c6 100644 (file)
 #define AH_WOW_BEACON_MISS             BIT(3)
 
 enum ath_hw_txq_subtype {
-       ATH_TXQ_AC_BE = 0,
-       ATH_TXQ_AC_BK = 1,
+       ATH_TXQ_AC_BK = 0,
+       ATH_TXQ_AC_BE = 1,
        ATH_TXQ_AC_VI = 2,
        ATH_TXQ_AC_VO = 3,
 };
index 566109a40fb3887e6236b3c56d00e8ebcc70354d..941b08b71308238a7a9546270c143e9b544dd864 100644 (file)
@@ -311,14 +311,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
                q = ATH9K_NUM_TX_QUEUES - 3;
                break;
        case ATH9K_TX_QUEUE_DATA:
-               for (q = 0; q < ATH9K_NUM_TX_QUEUES; q++)
-                       if (ah->txq[q].tqi_type ==
-                           ATH9K_TX_QUEUE_INACTIVE)
-                               break;
-               if (q == ATH9K_NUM_TX_QUEUES) {
-                       ath_err(common, "No available TX queue\n");
-                       return -1;
-               }
+               q = qinfo->tqi_subtype;
                break;
        default:
                ath_err(common, "Invalid TX queue type: %u\n", type);
index 82e0f1fd22542ef95fa1c68119201c1cbe10940b..a1db958df4a4b6f517599ecb123dffcd283dae9d 100644 (file)
@@ -459,9 +459,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                len = skb_frag_size(frag);
                offset = frag->page_offset;
 
-               /* Data must not cross a page boundary. */
-               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
-
                /* Skip unused frames from start of page */
                page += offset >> PAGE_SHIFT;
                offset &= ~PAGE_MASK;
@@ -469,8 +466,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
                while (len > 0) {
                        unsigned long bytes;
 
-                       BUG_ON(offset >= PAGE_SIZE);
-
                        bytes = PAGE_SIZE - offset;
                        if (bytes > len)
                                bytes = len;
index 90733929f4f0cdce09831889dbc8840042e83cf9..aa4d7e3a3050fe5534886d0f59f85239dc6cd34d 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/cpu.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
@@ -1799,3 +1800,154 @@ int of_device_is_stdout_path(struct device_node *dn)
        return of_stdout == dn;
 }
 EXPORT_SYMBOL_GPL(of_device_is_stdout_path);
+
+/**
+ * of_graph_parse_endpoint() - parse common endpoint node properties
+ * @node: pointer to endpoint device_node
+ * @endpoint: pointer to the OF endpoint data structure
+ *
+ * The caller should hold a reference to @node.
+ */
+int of_graph_parse_endpoint(const struct device_node *node,
+                           struct of_endpoint *endpoint)
+{
+       struct device_node *port_node = of_get_parent(node);
+
+       WARN_ONCE(!port_node, "%s(): endpoint %s has no parent node\n",
+                 __func__, node->full_name);
+
+       memset(endpoint, 0, sizeof(*endpoint));
+
+       endpoint->local_node = node;
+       /*
+        * It doesn't matter whether the two calls below succeed.
+        * If they don't then the default value 0 is used.
+        */
+       of_property_read_u32(port_node, "reg", &endpoint->port);
+       of_property_read_u32(node, "reg", &endpoint->id);
+
+       of_node_put(port_node);
+
+       return 0;
+}
+EXPORT_SYMBOL(of_graph_parse_endpoint);
+
+/**
+ * of_graph_get_next_endpoint() - get next endpoint node
+ * @parent: pointer to the parent device node
+ * @prev: previous endpoint node, or NULL to get first
+ *
+ * Return: An 'endpoint' node pointer with refcount incremented. Refcount
+ * of the passed @prev node is not decremented, the caller have to use
+ * of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+                                       struct device_node *prev)
+{
+       struct device_node *endpoint;
+       struct device_node *port;
+
+       if (!parent)
+               return NULL;
+
+       /*
+        * Start by locating the port node. If no previous endpoint is specified
+        * search for the first port node, otherwise get the previous endpoint
+        * parent port node.
+        */
+       if (!prev) {
+               struct device_node *node;
+
+               node = of_get_child_by_name(parent, "ports");
+               if (node)
+                       parent = node;
+
+               port = of_get_child_by_name(parent, "port");
+               of_node_put(node);
+
+               if (!port) {
+                       pr_err("%s(): no port node found in %s\n",
+                              __func__, parent->full_name);
+                       return NULL;
+               }
+       } else {
+               port = of_get_parent(prev);
+               if (WARN_ONCE(!port, "%s(): endpoint %s has no parent node\n",
+                             __func__, prev->full_name))
+                       return NULL;
+
+               /*
+                * Avoid dropping prev node refcount to 0 when getting the next
+                * child below.
+                */
+               of_node_get(prev);
+       }
+
+       while (1) {
+               /*
+                * Now that we have a port node, get the next endpoint by
+                * getting the next child. If the previous endpoint is NULL this
+                * will return the first child.
+                */
+               endpoint = of_get_next_child(port, prev);
+               if (endpoint) {
+                       of_node_put(port);
+                       return endpoint;
+               }
+
+               /* No more endpoints under this port, try the next one. */
+               prev = NULL;
+
+               do {
+                       port = of_get_next_child(parent, port);
+                       if (!port)
+                               return NULL;
+               } while (of_node_cmp(port->name, "port"));
+       }
+}
+EXPORT_SYMBOL(of_graph_get_next_endpoint);
+
+/**
+ * of_graph_get_remote_port_parent() - get remote port's parent node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote device node associated with remote endpoint node linked
+ *        to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port_parent(
+                              const struct device_node *node)
+{
+       struct device_node *np;
+       unsigned int depth;
+
+       /* Get remote endpoint node. */
+       np = of_parse_phandle(node, "remote-endpoint", 0);
+
+       /* Walk 3 levels up only if there is 'ports' node. */
+       for (depth = 3; depth && np; depth--) {
+               np = of_get_next_parent(np);
+               if (depth == 2 && of_node_cmp(np->name, "ports"))
+                       break;
+       }
+       return np;
+}
+EXPORT_SYMBOL(of_graph_get_remote_port_parent);
+
+/**
+ * of_graph_get_remote_port() - get remote port node
+ * @node: pointer to a local endpoint device_node
+ *
+ * Return: Remote port node associated with remote endpoint node linked
+ *        to @node. Use of_node_put() on it when done.
+ */
+struct device_node *of_graph_get_remote_port(const struct device_node *node)
+{
+       struct device_node *np;
+
+       /* Get remote endpoint node. */
+       np = of_parse_phandle(node, "remote-endpoint", 0);
+       if (!np)
+               return NULL;
+       return of_get_next_parent(np);
+}
+EXPORT_SYMBOL(of_graph_get_remote_port);
index ea37072e8bf2d5789d03b57f0c703e91ec45f5ab..034a4d2964d6f741ba469793114ac885757547f9 100644 (file)
@@ -210,14 +210,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
                res->flags |= IORESOURCE_SIZEALIGN;
                if (res->flags & IORESOURCE_IO) {
                        l &= PCI_BASE_ADDRESS_IO_MASK;
+                       sz &= PCI_BASE_ADDRESS_IO_MASK;
                        mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
                } else {
                        l &= PCI_BASE_ADDRESS_MEM_MASK;
+                       sz &= PCI_BASE_ADDRESS_MEM_MASK;
                        mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
                }
        } else {
                res->flags |= (l & IORESOURCE_ROM_ENABLE);
                l &= PCI_ROM_ADDRESS_MASK;
+               sz &= PCI_ROM_ADDRESS_MASK;
                mask = (u32)PCI_ROM_ADDRESS_MASK;
        }
 
index 4956c99ed90e5f5bcd9fcedb651e22450ee196a1..78b4fe84524587fca74d5bc38c06c58816de8ecf 100644 (file)
@@ -933,7 +933,7 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance,
        abort_fr->abort_mfi_phys_addr_hi = 0;
 
        cmd->sync_cmd = 1;
-       cmd->cmd_status = 0xFF;
+       cmd->cmd_status = ENODATA;
 
        instance->instancet->issue_dcmd(instance, cmd);
 
index aaa0df1f2fc616d12dda0c29e125049c38b45a58..4522aac4449b7e9e8216bf6ac05703c2f4f02770 100644 (file)
@@ -537,11 +537,15 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
                              unsigned int old)
 {
        struct s3c24xx_uart_port *ourport = to_ourport(port);
+       int timeout = 10000;
 
        ourport->pm_level = level;
 
        switch (level) {
        case 3:
+               while (--timeout && !s3c24xx_serial_txempty_nofifo(port))
+                       udelay(100);
+
                if (!IS_ERR(ourport->baudclk))
                        clk_disable_unprepare(ourport->baudclk);
 
index 1e71f918eb9fbbfbdf979826a4d01f022d0f8101..2800776b2e915730d4bc56f2f8e427ffdf94beb7 100644 (file)
@@ -1087,10 +1087,11 @@ next_desc:
        } else {
                control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0);
                data_interface = usb_ifnum_to_if(usb_dev, (data_interface_num = union_header->bSlaveInterface0));
-               if (!control_interface || !data_interface) {
-                       dev_dbg(&intf->dev, "no interfaces\n");
-                       return -ENODEV;
-               }
+       }
+
+       if (!control_interface || !data_interface) {
+               dev_dbg(&intf->dev, "no interfaces\n");
+               return -ENODEV;
        }
 
        if (data_interface_num != call_interface_num)
@@ -1365,6 +1366,7 @@ alloc_fail8:
                                &dev_attr_wCountryCodes);
                device_remove_file(&acm->control->dev,
                                &dev_attr_iCountryCodeRelDate);
+               kfree(acm->country_codes);
        }
        device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
 alloc_fail7:
index 9d3c245850af64400003a2a4674057dab9cb3a76..a5b53bc08c3f6da7bda90dda1ebd724407b623bd 100644 (file)
@@ -136,13 +136,13 @@ void adf_modeinfo_to_fb_videomode(const struct drm_mode_modeinfo *mode,
        vmode->vsync_len = mode->vsync_end - mode->vsync_start;
 
        vmode->sync = 0;
-       if (mode->flags | DRM_MODE_FLAG_PHSYNC)
+       if (mode->flags & DRM_MODE_FLAG_PHSYNC)
                vmode->sync |= FB_SYNC_HOR_HIGH_ACT;
-       if (mode->flags | DRM_MODE_FLAG_PVSYNC)
+       if (mode->flags & DRM_MODE_FLAG_PVSYNC)
                vmode->sync |= FB_SYNC_VERT_HIGH_ACT;
-       if (mode->flags | DRM_MODE_FLAG_PCSYNC)
+       if (mode->flags & DRM_MODE_FLAG_PCSYNC)
                vmode->sync |= FB_SYNC_COMP_HIGH_ACT;
-       if (mode->flags | DRM_MODE_FLAG_BCAST)
+       if (mode->flags & DRM_MODE_FLAG_BCAST)
                vmode->sync |= FB_SYNC_BROADCAST;
 
        vmode->vmode = 0;
index 1d94316f0ea46616ceda930896af210d7ad68284..301b08496478b958ac889853814e4638fe32569a 100644 (file)
@@ -390,7 +390,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
 
        /* NOTE: We use dev_addr here, not paddr! */
        if (is_xen_swiotlb_buffer(dev_addr)) {
-               swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
+               swiotlb_tbl_unmap_single(hwdev, dev_addr, size, dir);
                return;
        }
 
index f26f38ccd1942bb8c27fddb05d25995c9a4e3e1b..019fc5a68a145d81507916adeeecab6e7241db28 100644 (file)
@@ -1843,6 +1843,14 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_node *delayed_node;
        int ret = 0;
 
+       /*
+        * we don't do delayed inode updates during log recovery because it
+        * leads to enospc problems.  This means we also can't do
+        * delayed inode refs
+        */
+       if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+               return -EAGAIN;
+
        delayed_node = btrfs_get_or_create_delayed_node(inode);
        if (IS_ERR(delayed_node))
                return PTR_ERR(delayed_node);
index abecce3993542adf75463b2f681189787b0ea74b..7360f03ddbe1656d067912afe7414c64dbe86d71 100644 (file)
@@ -3857,12 +3857,6 @@ again:
                if (ret)
                        break;
 
-               /* opt_discard */
-               if (btrfs_test_opt(root, DISCARD))
-                       ret = btrfs_error_discard_extent(root, start,
-                                                        end + 1 - start,
-                                                        NULL);
-
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                btrfs_error_unpin_extent_range(root, start, end);
                cond_resched();
index bbafa05519daa69b0fb05dc5d5583cbfb4c6fa56..f99c71e40f8b86a91a73c4ace7a17f933c4de464 100644 (file)
@@ -5277,7 +5277,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        update_global_block_rsv(fs_info);
 }
 
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+                             const bool return_free_space)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
@@ -5301,7 +5302,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 
                if (start < cache->last_byte_to_unpin) {
                        len = min(len, cache->last_byte_to_unpin - start);
-                       btrfs_add_free_space(cache, start, len);
+                       if (return_free_space)
+                               btrfs_add_free_space(cache, start, len);
                }
 
                start += len;
@@ -5364,7 +5366,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                                                   end + 1 - start, NULL);
 
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
-               unpin_extent_range(root, start, end);
+               unpin_extent_range(root, start, end, true);
                cond_resched();
        }
 
@@ -8564,7 +8566,7 @@ out:
 
 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 {
-       return unpin_extent_range(root, start, end);
+       return unpin_extent_range(root, start, end, false);
 }
 
 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
index a4a7a1a8da95c4c1e7571d99e0d58a7b5209f4ee..0a3809500599e8cd71671ab46ca2f0f5e06c0eaa 100644 (file)
@@ -263,8 +263,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
        if (!em)
                goto out;
 
-       if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
-               list_move(&em->list, &tree->modified_extents);
        em->generation = gen;
        clear_bit(EXTENT_FLAG_PINNED, &em->flags);
        em->mod_start = em->start;
index f71ec125290db7da87355f444f7308826ee1c034..1da2446bf6b003a1d636b852e107b908ed50ac1f 100644 (file)
@@ -2102,7 +2102,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
                        break;
                case 2:
                        dst[dst_byte_offset++] |= (src_byte);
-                       dst[dst_byte_offset] = 0;
                        current_bit_offset = 0;
                        break;
                }
index a7abbea2c09638ef8c190555ec466834c0c06edf..9ff3664bb3ea460d139982f49d65378b4f16bf58 100644 (file)
@@ -196,23 +196,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 {
        int rc = 0;
        struct ecryptfs_crypt_stat *crypt_stat = NULL;
-       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
        struct dentry *ecryptfs_dentry = file->f_path.dentry;
        /* Private value of ecryptfs_dentry allocated in
         * ecryptfs_lookup() */
        struct ecryptfs_file_info *file_info;
 
-       mount_crypt_stat = &ecryptfs_superblock_to_private(
-               ecryptfs_dentry->d_sb)->mount_crypt_stat;
-       if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-           && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
-               || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
-               || (file->f_flags & O_APPEND))) {
-               printk(KERN_WARNING "Mount has encrypted view enabled; "
-                      "files may only be read\n");
-               rc = -EPERM;
-               goto out;
-       }
        /* Released in ecryptfs_release or end of function if failure */
        file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
        ecryptfs_set_file_private(file, file_info);
index e924cf45aad9559533214814cccbb05aa7a06b44..329a9cc2b2ebe56d2646924b8955da6f342e74bc 100644 (file)
@@ -494,6 +494,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 {
        struct super_block *s;
        struct ecryptfs_sb_info *sbi;
+       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
        struct ecryptfs_dentry_info *root_info;
        const char *err = "Getting sb failed";
        struct inode *inode;
@@ -512,6 +513,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
                err = "Error parsing options";
                goto out;
        }
+       mount_crypt_stat = &sbi->mount_crypt_stat;
 
        s = sget(fs_type, NULL, set_anon_super, flags, NULL);
        if (IS_ERR(s)) {
@@ -558,11 +560,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 
        /**
         * Set the POSIX ACL flag based on whether they're enabled in the lower
-        * mount. Force a read-only eCryptfs mount if the lower mount is ro.
-        * Allow a ro eCryptfs mount even when the lower mount is rw.
+        * mount.
         */
        s->s_flags = flags & ~MS_POSIXACL;
-       s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
+       s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+
+       /**
+        * Force a read-only eCryptfs mount when:
+        *   1) The lower mount is ro
+        *   2) The ecryptfs_encrypted_view mount option is specified
+        */
+       if (path.dentry->d_sb->s_flags & MS_RDONLY ||
+           mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+               s->s_flags |= MS_RDONLY;
 
        s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
        s->s_blocksize = path.dentry->d_sb->s_blocksize;
index 288534920fe5cc4960f99ae636777344c5d42db6..20d6697bd6386560a679dda5e8b8592a65d63798 100644 (file)
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
                                sb->s_blocksize - offset : towrite;
 
                tmp_bh.b_state = 0;
+               tmp_bh.b_size = sb->s_blocksize;
                err = ext2_get_block(inode, blk, &tmp_bh, 1);
                if (err < 0)
                        goto out;
index 556af9eff33692ef523e6a39b6a5805f392847c7..62d426d1d5e3495c5fe7d21d2349d75d73e7ccf2 100644 (file)
@@ -470,12 +470,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
         * write_inode()
         */
        spin_lock(&inode->i_lock);
-       /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
-       if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
-               inode->i_state &= ~I_DIRTY_PAGES;
+
        dirty = inode->i_state & I_DIRTY;
-       inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+       inode->i_state &= ~I_DIRTY;
+
+       /*
+        * Paired with smp_mb() in __mark_inode_dirty().  This allows
+        * __mark_inode_dirty() to test i_state without grabbing i_lock -
+        * either they see the I_DIRTY bits cleared or we see the dirtied
+        * inode.
+        *
+        * I_DIRTY_PAGES is always cleared together above even if @mapping
+        * still has dirty pages.  The flag is reinstated after smp_mb() if
+        * necessary.  This guarantees that either __mark_inode_dirty()
+        * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+        */
+       smp_mb();
+
+       if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+               inode->i_state |= I_DIRTY_PAGES;
+
        spin_unlock(&inode->i_lock);
+
        /* Don't write the inode if only I_DIRTY_PAGES was set */
        if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                int err = write_inode(inode, wbc);
@@ -1146,12 +1162,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
        }
 
        /*
-        * make sure that changes are seen by all cpus before we test i_state
-        * -- mikulas
+        * Paired with smp_mb() in __writeback_single_inode() for the
+        * following lockless i_state test.  See there for details.
         */
        smp_mb();
 
-       /* avoid the locking if we can */
        if ((inode->i_state & flags) == flags)
                return;
 
index f488bbae541ac8d5db4eb7e963c33452ebb3e937..735d7522a3a911f19af593d6b5f7d366d6cf448d 100644 (file)
@@ -30,6 +30,7 @@ struct rock_state {
        int cont_size;
        int cont_extent;
        int cont_offset;
+       int cont_loops;
        struct inode *inode;
 };
 
@@ -73,6 +74,9 @@ static void init_rock_state(struct rock_state *rs, struct inode *inode)
        rs->inode = inode;
 }
 
+/* Maximum number of Rock Ridge continuation entries */
+#define RR_MAX_CE_ENTRIES 32
+
 /*
  * Returns 0 if the caller should continue scanning, 1 if the scan must end
  * and -ve on error.
@@ -105,6 +109,8 @@ static int rock_continue(struct rock_state *rs)
                        goto out;
                }
                ret = -EIO;
+               if (++rs->cont_loops >= RR_MAX_CE_ENTRIES)
+                       goto out;
                bh = sb_bread(rs->inode->i_sb, rs->cont_extent);
                if (bh) {
                        memcpy(rs->buffer, bh->b_data + rs->cont_offset,
@@ -356,6 +362,9 @@ repeat:
                        rs.cont_size = isonum_733(rr->u.CE.size);
                        break;
                case SIG('E', 'R'):
+                       /* Invalid length of ER tag id? */
+                       if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+                               goto out;
                        ISOFS_SB(inode->i_sb)->s_rock = 1;
                        printk(KERN_DEBUG "ISO 9660 Extensions: ");
                        {
index 154822397780a3ce3d0d962e81b1042ed193f68e..d0244c8ba09c8e8d53aba8dab6bcb367f2829b07 100644 (file)
@@ -1342,6 +1342,9 @@ SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
                goto dput_and_out;
        if (!check_mnt(mnt))
                goto dput_and_out;
+       retval = -EPERM;
+       if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
+               goto dput_and_out;
 
        retval = do_umount(mnt, flags);
 dput_and_out:
@@ -1816,7 +1819,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
        }
        if ((mnt->mnt.mnt_flags & MNT_LOCK_NODEV) &&
            !(mnt_flags & MNT_NODEV)) {
-               return -EPERM;
+               /* Was the nodev implicitly added in mount? */
+               if ((mnt->mnt_ns->user_ns != &init_user_ns) &&
+                   !(sb->s_type->fs_flags & FS_USERNS_DEV_MOUNT)) {
+                       mnt_flags |= MNT_NODEV;
+               } else {
+                       return -EPERM;
+               }
        }
        if ((mnt->mnt.mnt_flags & MNT_LOCK_NOSUID) &&
            !(mnt_flags & MNT_NOSUID)) {
index 60426ccb3b6561e25b050f50139df908a6b251fe..2f970de02b1629c649f591281bc24e5810ebd870 100644 (file)
@@ -448,7 +448,6 @@ static long __ncp_ioctl(struct inode *inode, unsigned int cmd, unsigned long arg
                                                result = -EIO;
                                        }
                                }
-                               result = 0;
                        }
                        mutex_unlock(&server->root_setup_lock);
 
index a4eaa40e7bdb50f9e1eadc87d006736fb20bfc6d..86390c3a95dbb1c99c1f7f8f14ff7557b5f36a14 100644 (file)
@@ -6418,6 +6418,9 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
 
        dprintk("--> %s\n", __func__);
 
+       /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
+       pnfs_get_layout_hdr(NFS_I(inode)->layout);
+
        lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags);
        if (!lgp->args.layout.pages) {
                nfs4_layoutget_release(lgp);
@@ -6430,9 +6433,6 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags)
        lgp->res.seq_res.sr_slot = NULL;
        nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0);
 
-       /* nfs4_layoutget_release calls pnfs_put_layout_hdr */
-       pnfs_get_layout_hdr(NFS_I(inode)->layout);
-
        task = rpc_run_task(&task_setup_data);
        if (IS_ERR(task))
                return ERR_CAST(task);
index 836307ae1f08008d6bd4faae3135d4df0665b00e..4a58afa99654e088792144addc9dfed0a36ba9a2 100644 (file)
@@ -1200,15 +1200,14 @@ static int copy_cred(struct svc_cred *target, struct svc_cred *source)
        return 0;
 }
 
-static long long
+static int
 compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
 {
-       long long res;
-
-       res = o1->len - o2->len;
-       if (res)
-               return res;
-       return (long long)memcmp(o1->data, o2->data, o1->len);
+       if (o1->len < o2->len)
+               return -1;
+       if (o1->len > o2->len)
+               return 1;
+       return memcmp(o1->data, o2->data, o1->len);
 }
 
 static int same_name(const char *n1, const char *n2)
@@ -1365,7 +1364,7 @@ add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
 static struct nfs4_client *
 find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
 {
-       long long cmp;
+       int cmp;
        struct rb_node *node = root->rb_node;
        struct nfs4_client *clp;
 
index 9b45f0666cfcf4b8f84ad776188ec4c86015ffec..acf179d7615f2e32ac84ab387d9148fa66ff5088 100644 (file)
@@ -1743,6 +1743,9 @@ static __be32 nfsd4_encode_components_esc(char sep, char *components,
                }
                else
                        end++;
+               if (found_esc)
+                       end = next;
+
                str = end;
        }
        *pp = p;
index 2e1372efbb00ede1c899ac8d9941a2ca1bac86aa..587d699bdc2c79f4ffd24dfa4bb65f7491af8de4 100644 (file)
@@ -49,6 +49,8 @@ struct nilfs_iget_args {
        int for_gc;
 };
 
+static int nilfs_iget_test(struct inode *inode, void *opaque);
+
 void nilfs_inode_add_blocks(struct inode *inode, int n)
 {
        struct nilfs_root *root = NILFS_I(inode)->i_root;
@@ -347,6 +349,17 @@ const struct address_space_operations nilfs_aops = {
        .is_partially_uptodate  = block_is_partially_uptodate,
 };
 
+static int nilfs_insert_inode_locked(struct inode *inode,
+                                    struct nilfs_root *root,
+                                    unsigned long ino)
+{
+       struct nilfs_iget_args args = {
+               .ino = ino, .root = root, .cno = 0, .for_gc = 0
+       };
+
+       return insert_inode_locked4(inode, ino, nilfs_iget_test, &args);
+}
+
 struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
 {
        struct super_block *sb = dir->i_sb;
@@ -382,7 +395,7 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
                err = nilfs_bmap_read(ii->i_bmap, NULL);
                if (err < 0)
-                       goto failed_bmap;
+                       goto failed_after_creation;
 
                set_bit(NILFS_I_BMAP, &ii->i_state);
                /* No lock is needed; iget() ensures it. */
@@ -398,21 +411,24 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode)
        spin_lock(&nilfs->ns_next_gen_lock);
        inode->i_generation = nilfs->ns_next_generation++;
        spin_unlock(&nilfs->ns_next_gen_lock);
-       insert_inode_hash(inode);
+       if (nilfs_insert_inode_locked(inode, root, ino) < 0) {
+               err = -EIO;
+               goto failed_after_creation;
+       }
 
        err = nilfs_init_acl(inode, dir);
        if (unlikely(err))
-               goto failed_acl; /* never occur. When supporting
+               goto failed_after_creation; /* never occur. When supporting
                                    nilfs_init_acl(), proper cancellation of
                                    above jobs should be considered */
 
        return inode;
 
- failed_acl:
- failed_bmap:
+ failed_after_creation:
        clear_nlink(inode);
+       unlock_new_inode(inode);
        iput(inode);  /* raw_inode will be deleted through
-                        generic_delete_inode() */
+                        nilfs_evict_inode() */
        goto failed;
 
  failed_ifile_create_inode:
@@ -460,8 +476,8 @@ int nilfs_read_inode_common(struct inode *inode,
        inode->i_atime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
        inode->i_ctime.tv_nsec = le32_to_cpu(raw_inode->i_ctime_nsec);
        inode->i_mtime.tv_nsec = le32_to_cpu(raw_inode->i_mtime_nsec);
-       if (inode->i_nlink == 0 && inode->i_mode == 0)
-               return -EINVAL; /* this inode is deleted */
+       if (inode->i_nlink == 0)
+               return -ESTALE; /* this inode is deleted */
 
        inode->i_blocks = le64_to_cpu(raw_inode->i_blocks);
        ii->i_flags = le32_to_cpu(raw_inode->i_flags);
index 9de78f08989edcd9ab0dc18b3aaca3a65535f11a..0f84b257932c2351569db8fc87432dfb9bcfebe9 100644 (file)
@@ -51,9 +51,11 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode)
        int err = nilfs_add_link(dentry, inode);
        if (!err) {
                d_instantiate(dentry, inode);
+               unlock_new_inode(inode);
                return 0;
        }
        inode_dec_link_count(inode);
+       unlock_new_inode(inode);
        iput(inode);
        return err;
 }
@@ -182,6 +184,7 @@ out:
 out_fail:
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
+       unlock_new_inode(inode);
        iput(inode);
        goto out;
 }
@@ -201,11 +204,15 @@ static int nilfs_link(struct dentry *old_dentry, struct inode *dir,
        inode_inc_link_count(inode);
        ihold(inode);
 
-       err = nilfs_add_nondir(dentry, inode);
-       if (!err)
+       err = nilfs_add_link(dentry, inode);
+       if (!err) {
+               d_instantiate(dentry, inode);
                err = nilfs_transaction_commit(dir->i_sb);
-       else
+       } else {
+               inode_dec_link_count(inode);
+               iput(inode);
                nilfs_transaction_abort(dir->i_sb);
+       }
 
        return err;
 }
@@ -243,6 +250,7 @@ static int nilfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
        nilfs_mark_inode_dirty(inode);
        d_instantiate(dentry, inode);
+       unlock_new_inode(inode);
 out:
        if (!err)
                err = nilfs_transaction_commit(dir->i_sb);
@@ -255,6 +263,7 @@ out_fail:
        drop_nlink(inode);
        drop_nlink(inode);
        nilfs_mark_inode_dirty(inode);
+       unlock_new_inode(inode);
        iput(inode);
 out_dir:
        drop_nlink(dir);
index 20dfec72e90330a9b613e2a78ee5b59874870925..f998c6009ad4943b5af65e9a35d8b749a15587bf 100644 (file)
@@ -917,7 +917,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
        }
 }
 
-static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
 {
        int i;
 
@@ -938,7 +938,11 @@ static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
                page_cache_release(wc->w_target_page);
        }
        ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
+}
 
+static void ocfs2_free_write_ctxt(struct ocfs2_write_ctxt *wc)
+{
+       ocfs2_unlock_pages(wc);
        brelse(wc->w_di_bh);
        kfree(wc);
 }
@@ -2060,11 +2064,19 @@ out_write_size:
        di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
        ocfs2_journal_dirty(handle, wc->w_di_bh);
 
+       /* unlock pages before dealloc since it needs acquiring j_trans_barrier
+        * lock, or it will cause a deadlock since journal commit threads holds
+        * this lock and will ask for the page lock when flushing the data.
+        * put it here to preserve the unlock order.
+        */
+       ocfs2_unlock_pages(wc);
+
        ocfs2_commit_trans(osb, handle);
 
        ocfs2_run_deallocs(osb, &wc->w_dealloc);
 
-       ocfs2_free_write_ctxt(wc);
+       brelse(wc->w_di_bh);
+       kfree(wc);
 
        return copied;
 }
index 040b030ef6c0ac915aa0329dfb015a92f1f76893..fb426d0bd322e2ab627fc390fe1f1f9c6fdb65ab 100644 (file)
@@ -2647,6 +2647,57 @@ static const struct file_operations proc_projid_map_operations = {
        .llseek         = seq_lseek,
        .release        = proc_id_map_release,
 };
+
+static int proc_setgroups_open(struct inode *inode, struct file *file)
+{
+       struct user_namespace *ns = NULL;
+       struct task_struct *task;
+       int ret;
+
+       ret = -ESRCH;
+       task = get_proc_task(inode);
+       if (task) {
+               rcu_read_lock();
+               ns = get_user_ns(task_cred_xxx(task, user_ns));
+               rcu_read_unlock();
+               put_task_struct(task);
+       }
+       if (!ns)
+               goto err;
+
+       if (file->f_mode & FMODE_WRITE) {
+               ret = -EACCES;
+               if (!ns_capable(ns, CAP_SYS_ADMIN))
+                       goto err_put_ns;
+       }
+
+       ret = single_open(file, &proc_setgroups_show, ns);
+       if (ret)
+               goto err_put_ns;
+
+       return 0;
+err_put_ns:
+       put_user_ns(ns);
+err:
+       return ret;
+}
+
+static int proc_setgroups_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *seq = file->private_data;
+       struct user_namespace *ns = seq->private;
+       int ret = single_release(inode, file);
+       put_user_ns(ns);
+       return ret;
+}
+
+static const struct file_operations proc_setgroups_operations = {
+       .open           = proc_setgroups_open,
+       .write          = proc_setgroups_write,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = proc_setgroups_release,
+};
 #endif /* CONFIG_USER_NS */
 
 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
@@ -2755,6 +2806,7 @@ static const struct pid_entry tgid_base_stuff[] = {
        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
        REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+       REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
 #endif
 #ifdef CONFIG_CHECKPOINT_RESTORE
        REG("timers",     S_IRUGO, proc_timers_operations),
@@ -3108,6 +3160,7 @@ static const struct pid_entry tid_base_stuff[] = {
        REG("uid_map",    S_IRUGO|S_IWUSR, proc_uid_map_operations),
        REG("gid_map",    S_IRUGO|S_IWUSR, proc_gid_map_operations),
        REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
+       REG("setgroups",  S_IRUGO|S_IWUSR, proc_setgroups_operations),
 #endif
 };
 
index 058f17f0b466aaee343cc76adc654cdd15f8d6ec..c5684c92266ee0209d6a925d5b3830dab551638a 100644 (file)
@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
 MODULE_PARM_DESC(mem_size,
                "size of reserved RAM used to store oops/panic logs");
 
+static unsigned int mem_type;
+module_param(mem_type, uint, 0600);
+MODULE_PARM_DESC(mem_type,
+               "set to 1 to try to use unbuffered memory (default 0)");
+
 static int dump_oops = 1;
 module_param(dump_oops, int, 0600);
 MODULE_PARM_DESC(dump_oops,
@@ -79,6 +84,7 @@ struct ramoops_context {
        struct persistent_ram_zone *fprz;
        phys_addr_t phys_addr;
        unsigned long size;
+       unsigned int memtype;
        size_t record_size;
        size_t console_size;
        size_t ftrace_size;
@@ -331,7 +337,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
                size_t sz = cxt->record_size;
 
                cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
-                                                 &cxt->ecc_info);
+                                                 &cxt->ecc_info,
+                                                 cxt->memtype);
                if (IS_ERR(cxt->przs[i])) {
                        err = PTR_ERR(cxt->przs[i]);
                        dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -361,7 +368,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
                return -ENOMEM;
        }
 
-       *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info);
+       *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
        if (IS_ERR(*prz)) {
                int err = PTR_ERR(*prz);
 
@@ -417,6 +424,7 @@ static int ramoops_probe(struct platform_device *pdev)
        cxt->dump_read_cnt = 0;
        cxt->size = pdata->mem_size;
        cxt->phys_addr = pdata->mem_address;
+       cxt->memtype = pdata->mem_type;
        cxt->record_size = pdata->record_size;
        cxt->console_size = pdata->console_size;
        cxt->ftrace_size = pdata->ftrace_size;
@@ -547,6 +555,7 @@ static void ramoops_register_dummy(void)
 
        dummy_data->mem_size = mem_size;
        dummy_data->mem_address = mem_address;
+       dummy_data->mem_type = 0;
        dummy_data->record_size = record_size;
        dummy_data->console_size = ramoops_console_size;
        dummy_data->ftrace_size = ramoops_ftrace_size;
index 59337326e288fda783d4ff3aa8894cd1cd0253b1..6ff97553331b8d67192e8c1dba31cef75d3f6eec 100644 (file)
@@ -333,7 +333,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
        persistent_ram_update_header_ecc(prz);
 }
 
-static void *persistent_ram_vmap(phys_addr_t start, size_t size)
+static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+               unsigned int memtype)
 {
        struct page **pages;
        phys_addr_t page_start;
@@ -345,7 +346,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
        page_start = start - offset_in_page(start);
        page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
 
-       prot = pgprot_noncached(PAGE_KERNEL);
+       if (memtype)
+               prot = pgprot_noncached(PAGE_KERNEL);
+       else
+               prot = pgprot_writecombine(PAGE_KERNEL);
 
        pages = kmalloc(sizeof(struct page *) * page_count, GFP_KERNEL);
        if (!pages) {
@@ -364,27 +368,35 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
        return vaddr;
 }
 
-static void *persistent_ram_iomap(phys_addr_t start, size_t size)
+static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+               unsigned int memtype)
 {
+       void *va;
+
        if (!request_mem_region(start, size, "persistent_ram")) {
                pr_err("request mem region (0x%llx@0x%llx) failed\n",
                        (unsigned long long)size, (unsigned long long)start);
                return NULL;
        }
 
-       return ioremap(start, size);
+       if (memtype)
+               va = ioremap(start, size);
+       else
+               va = ioremap_wc(start, size);
+
+       return va;
 }
 
 static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
-               struct persistent_ram_zone *prz)
+               struct persistent_ram_zone *prz, int memtype)
 {
        prz->paddr = start;
        prz->size = size;
 
        if (pfn_valid(start >> PAGE_SHIFT))
-               prz->vaddr = persistent_ram_vmap(start, size);
+               prz->vaddr = persistent_ram_vmap(start, size, memtype);
        else
-               prz->vaddr = persistent_ram_iomap(start, size);
+               prz->vaddr = persistent_ram_iomap(start, size, memtype);
 
        if (!prz->vaddr) {
                pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -452,7 +464,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
 }
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
-                       u32 sig, struct persistent_ram_ecc_info *ecc_info)
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+                       unsigned int memtype)
 {
        struct persistent_ram_zone *prz;
        int ret = -ENOMEM;
@@ -463,7 +476,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
                goto err;
        }
 
-       ret = persistent_ram_buffer_map(start, size, prz);
+       ret = persistent_ram_buffer_map(start, size, prz, memtype);
        if (ret)
                goto err;
 
index d7c6dbe4194bb33bbe37930c63cf23377e5d9343..d89f324bc38797220f6afcd09962e63d2c70c1e4 100644 (file)
@@ -80,11 +80,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        struct inode *inode = page->mapping->host;
        struct buffer_head *bh = NULL;
        unsigned char *symlink;
-       int err = -EIO;
+       int err;
        unsigned char *p = kmap(page);
        struct udf_inode_info *iinfo;
        uint32_t pos;
 
+       /* We don't support symlinks longer than one block */
+       if (inode->i_size > inode->i_sb->s_blocksize) {
+               err = -ENAMETOOLONG;
+               goto out_unmap;
+       }
+
        iinfo = UDF_I(inode);
        pos = udf_block_map(inode, 0);
 
@@ -94,8 +100,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        } else {
                bh = sb_bread(inode->i_sb, pos);
 
-               if (!bh)
-                       goto out;
+               if (!bh) {
+                       err = -EIO;
+                       goto out_unlock_inode;
+               }
 
                symlink = bh->b_data;
        }
@@ -109,9 +117,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        unlock_page(page);
        return 0;
 
-out:
+out_unlock_inode:
        up_read(&iinfo->i_data_sem);
        SetPageError(page);
+out_unmap:
        kunmap(page);
        unlock_page(page);
        return err;
index de8bf89940f8dc3a7bc27a877067d3885cf646fa..a9fd248f5d482c15be819abf48ccc5fd7e78788c 100644 (file)
@@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                           void *cpu_addr, dma_addr_t dma_addr, size_t size);
 
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+                       unsigned long vm_flags,
+                       pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+                       unsigned long vm_flags, pgprot_t prot,
+                       const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
 /**
  * dma_mmap_attrs - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
index 43ec7e247a8086972ae7ef0e87efde66dd4ac0e7..aafb89c3a3e6e878d0e10b75ebdf984e918132d1 100644 (file)
@@ -23,6 +23,7 @@
 
 #define AMBA_NR_IRQS   2
 #define AMBA_CID       0xb105f00d
+#define CORESIGHT_CID  0xb105900d
 
 struct clk;
 
index c1dde8e00d2508a1b356b026a5f00be246495324..be3a228f389b326503b26c5cd086f7f9dd235a6a 100644 (file)
 #define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
 #endif
 
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l)          (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l))
+#define GENMASK_ULL(h, l)      (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l))
+
 extern unsigned int __sw_hweight8(unsigned int w);
 extern unsigned int __sw_hweight16(unsigned int w);
 extern unsigned int __sw_hweight32(unsigned int w);
index ae1193bcf074b8d13808932d879548dfd7f1420e..3abe1e9a1bde591297766b71e8d57d57a8c7b47c 100644 (file)
@@ -61,6 +61,11 @@ enum clock_event_mode {
  */
 #define CLOCK_EVT_FEAT_DYNIRQ          0x000020
 
+/*
+ * Clockevent device is based on a hrtimer for broadcast
+ */
+#define CLOCK_EVT_FEAT_HRTIMER         0x000080
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:     Assigned by the framework to be called by the low
@@ -82,6 +87,7 @@ enum clock_event_mode {
  * @name:              ptr to clock event name
  * @rating:            variable to rate clock event devices
  * @irq:               IRQ number (only for non CPU local devices)
+ * @bound_on:          Bound on CPU
  * @cpumask:           cpumask to indicate for which CPUs this device works
  * @list:              list head for the management code
  * @owner:             module reference
@@ -112,6 +118,7 @@ struct clock_event_device {
        const char              *name;
        int                     rating;
        int                     irq;
+       int                     bound_on;
        const struct cpumask    *cpumask;
        struct list_head        list;
        struct module           *owner;
@@ -178,15 +185,17 @@ extern int tick_receive_broadcast(void);
 #endif
 
 #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT)
+extern void tick_setup_hrtimer_broadcast(void);
 extern int tick_check_broadcast_expired(void);
 #else
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
-extern void clockevents_notify(unsigned long reason, void *arg);
+extern int clockevents_notify(unsigned long reason, void *arg);
 #else
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 #endif
 
 #else /* CONFIG_GENERIC_CLOCKEVENTS_BUILD */
@@ -194,8 +203,9 @@ static inline void clockevents_notify(unsigned long reason, void *arg) {}
 static inline void clockevents_suspend(void) {}
 static inline void clockevents_resume(void) {}
 
-static inline void clockevents_notify(unsigned long reason, void *arg) {}
+static inline int clockevents_notify(unsigned long reason, void *arg) { return 0; }
 static inline int tick_check_broadcast_expired(void) { return 0; }
+static inline void tick_setup_hrtimer_broadcast(void) {};
 
 #endif
 
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
new file mode 100644 (file)
index 0000000..bdde419
--- /dev/null
@@ -0,0 +1,263 @@
+/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_CORESIGHT_H
+#define _LINUX_CORESIGHT_H
+
+#include <linux/device.h>
+
+/* Peripheral id registers (0xFD0-0xFEC) */
+#define CORESIGHT_PERIPHIDR4   0xfd0
+#define CORESIGHT_PERIPHIDR5   0xfd4
+#define CORESIGHT_PERIPHIDR6   0xfd8
+#define CORESIGHT_PERIPHIDR7   0xfdC
+#define CORESIGHT_PERIPHIDR0   0xfe0
+#define CORESIGHT_PERIPHIDR1   0xfe4
+#define CORESIGHT_PERIPHIDR2   0xfe8
+#define CORESIGHT_PERIPHIDR3   0xfeC
+/* Component id registers (0xFF0-0xFFC) */
+#define CORESIGHT_COMPIDR0     0xff0
+#define CORESIGHT_COMPIDR1     0xff4
+#define CORESIGHT_COMPIDR2     0xff8
+#define CORESIGHT_COMPIDR3     0xffC
+
+#define ETM_ARCH_V3_3          0x23
+#define ETM_ARCH_V3_5          0x25
+#define PFT_ARCH_V1_0          0x30
+#define PFT_ARCH_V1_1          0x31
+
+#define CORESIGHT_UNLOCK       0xc5acce55
+
+extern struct bus_type coresight_bustype;
+
+enum coresight_dev_type {
+       CORESIGHT_DEV_TYPE_NONE,
+       CORESIGHT_DEV_TYPE_SINK,
+       CORESIGHT_DEV_TYPE_LINK,
+       CORESIGHT_DEV_TYPE_LINKSINK,
+       CORESIGHT_DEV_TYPE_SOURCE,
+};
+
+enum coresight_dev_subtype_sink {
+       CORESIGHT_DEV_SUBTYPE_SINK_NONE,
+       CORESIGHT_DEV_SUBTYPE_SINK_PORT,
+       CORESIGHT_DEV_SUBTYPE_SINK_BUFFER,
+};
+
+enum coresight_dev_subtype_link {
+       CORESIGHT_DEV_SUBTYPE_LINK_NONE,
+       CORESIGHT_DEV_SUBTYPE_LINK_MERG,
+       CORESIGHT_DEV_SUBTYPE_LINK_SPLIT,
+       CORESIGHT_DEV_SUBTYPE_LINK_FIFO,
+};
+
+enum coresight_dev_subtype_source {
+       CORESIGHT_DEV_SUBTYPE_SOURCE_NONE,
+       CORESIGHT_DEV_SUBTYPE_SOURCE_PROC,
+       CORESIGHT_DEV_SUBTYPE_SOURCE_BUS,
+       CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
+};
+
+/**
+ * struct coresight_dev_subtype - further characterisation of a type
+ * @sink_subtype:      type of sink this component is, as defined
+                       by @coresight_dev_subtype_sink.
+ * @link_subtype:      type of link this component is, as defined
+                       by @coresight_dev_subtype_link.
+ * @source_subtype:    type of source this component is, as defined
+                       by @coresight_dev_subtype_source.
+ */
+struct coresight_dev_subtype {
+       enum coresight_dev_subtype_sink sink_subtype;
+       enum coresight_dev_subtype_link link_subtype;
+       enum coresight_dev_subtype_source source_subtype;
+};
+
+/**
+ * struct coresight_platform_data - data harvested from the DT specification
+ * @cpu:       the CPU a source belongs to. Only applicable for ETM/PTMs.
+ * @name:      name of the component as shown under sysfs.
+ * @nr_inport: number of input ports for this component.
+ * @outports:  list of remote enpoint port number.
+ * @child_names:name of all child components connected to this device.
+ * @child_ports:child component port number the current component is
+               connected  to.
+ * @nr_outport:        number of output ports for this component.
+ * @clk:       The clock this component is associated to.
+ */
+struct coresight_platform_data {
+       int cpu;
+       const char *name;
+       int nr_inport;
+       int *outports;
+       const char **child_names;
+       int *child_ports;
+       int nr_outport;
+       struct clk *clk;
+};
+
+/**
+ * struct coresight_desc - description of a component required from drivers
+ * @type:      as defined by @coresight_dev_type.
+ * @subtype:   as defined by @coresight_dev_subtype.
+ * @ops:       generic operations for this component, as defined
+               by @coresight_ops.
+ * @pdata:     platform data collected from DT.
+ * @dev:       The device entity associated to this component.
+ * @groups     :operations specific to this component. These will end up
+               in the component's sysfs sub-directory.
+ */
+struct coresight_desc {
+       enum coresight_dev_type type;
+       struct coresight_dev_subtype subtype;
+       const struct coresight_ops *ops;
+       struct coresight_platform_data *pdata;
+       struct device *dev;
+       const struct attribute_group **groups;
+};
+
+/**
+ * struct coresight_connection - representation of a single connection
+ * @ref_count: keeping count a port' references.
+ * @outport:   a connection's output port number.
+ * @chid_name: remote component's name.
+ * @child_port:        remote component's port number @output is connected to.
+ * @child_dev: a @coresight_device representation of the component
+               connected to @outport.
+ */
+struct coresight_connection {
+       int outport;
+       const char *child_name;
+       int child_port;
+       struct coresight_device *child_dev;
+};
+
+/**
+ * struct coresight_device - representation of a device as used by the framework
+ * @nr_inport: number of input port associated to this component.
+ * @nr_outport:        number of output port associated to this component.
+ * @type:      as defined by @coresight_dev_type.
+ * @subtype:   as defined by @coresight_dev_subtype.
+ * @ops:       generic operations for this component, as defined
+               by @coresight_ops.
+ * @dev:       The device entity associated to this component.
+ * @refcnt:    keep track of what is in use.
+ * @path_link: link of current component into the path being enabled.
+ * @orphan:    true if the component has connections that haven't been linked.
+ * @enable:    'true' if component is currently part of an active path.
+ * @activated: 'true' only if a _sink_ has been activated.  A sink can be
+               activated but not yet enabled.  Enabling for a _sink_
+               happens when a source has been selected for that it.
+ */
+struct coresight_device {
+       struct coresight_connection *conns;
+       int nr_inport;
+       int nr_outport;
+       enum coresight_dev_type type;
+       struct coresight_dev_subtype subtype;
+       const struct coresight_ops *ops;
+       struct device dev;
+       atomic_t *refcnt;
+       struct list_head path_link;
+       bool orphan;
+       bool enable;    /* true only if configured as part of a path */
+       bool activated; /* true only if a sink is part of a path */
+};
+
+#define to_coresight_device(d) container_of(d, struct coresight_device, dev)
+
+#define source_ops(csdev)      csdev->ops->source_ops
+#define sink_ops(csdev)                csdev->ops->sink_ops
+#define link_ops(csdev)                csdev->ops->link_ops
+
+#define CORESIGHT_DEBUGFS_ENTRY(__name, __entry_name,                  \
+                                __mode, __get, __set, __fmt)           \
+DEFINE_SIMPLE_ATTRIBUTE(__name ## _ops, __get, __set, __fmt);          \
+static const struct coresight_ops_entry __name ## _entry = {           \
+       .name = __entry_name,                                           \
+       .mode = __mode,                                                 \
+       .ops  = &__name ## _ops                                         \
+}
+
+/**
+ * struct coresight_ops_sink - basic operations for a sink
+ * Operations available for sinks
+ * @enable:    enables the sink.
+ * @disable:   disables the sink.
+ */
+struct coresight_ops_sink {
+       int (*enable)(struct coresight_device *csdev);
+       void (*disable)(struct coresight_device *csdev);
+};
+
+/**
+ * struct coresight_ops_link - basic operations for a link
+ * Operations available for links.
+ * @enable:    enables flow between iport and oport.
+ * @disable:   disables flow between iport and oport.
+ */
+struct coresight_ops_link {
+       int (*enable)(struct coresight_device *csdev, int iport, int oport);
+       void (*disable)(struct coresight_device *csdev, int iport, int oport);
+};
+
+/**
+ * struct coresight_ops_source - basic operations for a source
+ * Operations available for sources.
+ * @trace_id:  returns the value of the component's trace ID as known
+               to the HW.
+ * @enable:    enables tracing from a source.
+ * @disable:   disables tracing for a source.
+ */
+struct coresight_ops_source {
+       int (*trace_id)(struct coresight_device *csdev);
+       int (*enable)(struct coresight_device *csdev);
+       void (*disable)(struct coresight_device *csdev);
+};
+
+struct coresight_ops {
+       const struct coresight_ops_sink *sink_ops;
+       const struct coresight_ops_link *link_ops;
+       const struct coresight_ops_source *source_ops;
+};
+
+#ifdef CONFIG_CORESIGHT
+extern struct coresight_device *
+coresight_register(struct coresight_desc *desc);
+extern void coresight_unregister(struct coresight_device *csdev);
+extern int coresight_enable(struct coresight_device *csdev);
+extern void coresight_disable(struct coresight_device *csdev);
+extern int coresight_is_bit_set(u32 val, int position, int value);
+extern int coresight_timeout(void __iomem *addr, u32 offset,
+                            int position, int value);
+#ifdef CONFIG_OF
+extern struct coresight_platform_data *of_get_coresight_platform_data(
+                               struct device *dev, struct device_node *node);
+#endif
+#else
+static inline struct coresight_device *
+coresight_register(struct coresight_desc *desc) { return NULL; }
+static inline void coresight_unregister(struct coresight_device *csdev) {}
+static inline int
+coresight_enable(struct coresight_device *csdev) { return -ENOSYS; }
+static inline void coresight_disable(struct coresight_device *csdev) {}
+static inline int coresight_is_bit_set(u32 val, int position, int value)
+                                        { return 0; }
+static inline int coresight_timeout(void __iomem *addr, u32 offset,
+                                    int position, int value) { return 1; }
+#ifdef CONFIG_OF
+static inline struct coresight_platform_data *of_get_coresight_platform_data(
+       struct device *dev, struct device_node *node) { return NULL; }
+#endif
+#endif
+
+#endif
index 04421e82536596c5aeb7a28b35e142556c821598..6c58dd7cb9ace20c8a30a429490cac03df022fcf 100644 (file)
@@ -68,6 +68,7 @@ extern void groups_free(struct group_info *);
 extern int set_current_groups(struct group_info *);
 extern int set_groups(struct cred *, struct group_info *);
 extern int groups_search(const struct group_info *, kgid_t);
+extern bool may_setgroups(void);
 
 /* access the groups "array" with this macro */
 #define GROUP_AT(gi, i) \
index 2f4c5168633e383a318b37e7df56ff4859c9cf42..92039aa2fff31b5bb0435ec801511181255e4d99 100644 (file)
@@ -48,7 +48,11 @@ struct bus_attribute {
 };
 
 #define BUS_ATTR(_name, _mode, _show, _store)  \
-struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
+       struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define BUS_ATTR_RW(_name) \
+       struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+       struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
 
 extern int __must_check bus_create_file(struct bus_type *,
                                        struct bus_attribute *);
@@ -254,9 +258,14 @@ struct driver_attribute {
                         size_t count);
 };
 
-#define DRIVER_ATTR(_name, _mode, _show, _store)       \
-struct driver_attribute driver_attr_##_name =          \
-       __ATTR(_name, _mode, _show, _store)
+#define DRIVER_ATTR(_name, _mode, _show, _store) \
+       struct driver_attribute driver_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DRIVER_ATTR_RW(_name) \
+       struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+       struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+       struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
 
 extern int __must_check driver_create_file(struct device_driver *driver,
                                        const struct driver_attribute *attr);
@@ -407,8 +416,12 @@ struct class_attribute {
                                 const struct class_attribute *attr);
 };
 
-#define CLASS_ATTR(_name, _mode, _show, _store)                        \
-struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define CLASS_ATTR(_name, _mode, _show, _store) \
+       struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define CLASS_ATTR_RW(_name) \
+       struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+       struct class_attribute class_attr_##_name = __ATTR_RO(_name)
 
 extern int __must_check class_create_file(struct class *class,
                                          const struct class_attribute *attr);
@@ -416,7 +429,6 @@ extern void class_remove_file(struct class *class,
                              const struct class_attribute *attr);
 
 /* Simple class attribute that is just a static string */
-
 struct class_attribute_string {
        struct class_attribute attr;
        char *str;
@@ -505,6 +517,12 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr,
 
 #define DEVICE_ATTR(_name, _mode, _show, _store) \
        struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+#define DEVICE_ATTR_RW(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name) \
+       struct device_attribute dev_attr_##_name = __ATTR_WO(_name)
 #define DEVICE_ULONG_ATTR(_name, _mode, _var) \
        struct dev_ext_attribute dev_attr_##_name = \
                { __ATTR(_name, _mode, device_show_ulong, device_store_ulong), &(_var) }
index f8d41cb1cbe0a4afb726779f4c0ca05ff9a2a926..9e8986ae700c936bb7442d5e7a73a867fece3a7c 100644 (file)
@@ -106,6 +106,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo,
 extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data);
 
+extern unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+               unsigned long size, unsigned long start, unsigned int nr,
+               void *data);
+
 extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data);
 
@@ -113,6 +117,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
                int min_alloc_order, int nid);
 extern struct gen_pool *dev_get_gen_pool(struct device *dev);
 
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+                       size_t size);
+
 #ifdef CONFIG_OF
 extern struct gen_pool *of_get_named_gen_pool(struct device_node *np,
        const char *propname, int index);
index ec4e37ce33c1fddffca2a08e72b1a7bd0cf0ab8a..2122edf525adc9fd6cf9d4d8bfbe871db2ff88e1 100644 (file)
@@ -1636,7 +1636,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
 #if VM_GROWSUP
 extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
 #else
-  #define expand_upwards(vma, address) do { } while (0)
+  #define expand_upwards(vma, address) (0)
 #endif
 
 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
index 901b7435e890333027362b1ea89ba58b79578768..192c783c2ec65373e7dae5f8d9a395dd27f27c49 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef _LINUX_OF_DEVICE_H
 #define _LINUX_OF_DEVICE_H
 
+#include <linux/cpu.h>
 #include <linux/platform_device.h>
 #include <linux/of_platform.h> /* temporary until merge */
 
@@ -43,6 +44,15 @@ static inline void of_device_node_put(struct device *dev)
        of_node_put(dev->of_node);
 }
 
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+       struct device *cpu_dev;
+       cpu_dev = get_cpu_device(cpu);
+       if (!cpu_dev)
+               return NULL;
+       return of_node_get(cpu_dev->of_node);
+}
+
 #else /* CONFIG_OF_DEVICE */
 
 static inline int of_driver_match_device(struct device *dev,
@@ -67,6 +77,11 @@ static inline const struct of_device_id *of_match_device(
 {
        return NULL;
 }
+
+static inline struct device_node *of_cpu_device_node_get(int cpu)
+{
+       return NULL;
+}
 #endif /* CONFIG_OF_DEVICE */
 
 #endif /* _LINUX_OF_DEVICE_H */
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h
new file mode 100644 (file)
index 0000000..56e0507
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * OF graph binding parsing helpers
+ *
+ * Copyright (C) 2012 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ *
+ * Copyright (C) 2012 Renesas Electronics Corp.
+ * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_OF_GRAPH_H
+#define __LINUX_OF_GRAPH_H
+
+/**
+ * struct of_endpoint - the OF graph endpoint data structure
+ * @port: identifier (value of reg property) of a port this endpoint belongs to
+ * @id: identifier (value of reg property) of this endpoint
+ * @local_node: pointer to device_node of this endpoint
+ */
+struct of_endpoint {
+       unsigned int port;
+       unsigned int id;
+       const struct device_node *local_node;
+};
+
+#ifdef CONFIG_OF
+int of_graph_parse_endpoint(const struct device_node *node,
+                               struct of_endpoint *endpoint);
+struct device_node *of_graph_get_next_endpoint(const struct device_node *parent,
+                                       struct device_node *previous);
+struct device_node *of_graph_get_remote_port_parent(
+                                       const struct device_node *node);
+struct device_node *of_graph_get_remote_port(const struct device_node *node);
+#else
+
+static inline int of_graph_parse_endpoint(const struct device_node *node,
+                                       struct of_endpoint *endpoint)
+{
+       return -ENOSYS;
+}
+static inline struct device_node *of_graph_get_next_endpoint(
+                                       const struct device_node *parent,
+                                       struct device_node *previous)
+{
+       return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port_parent(
+                                       const struct device_node *node)
+{
+       return NULL;
+}
+
+static inline struct device_node *of_graph_get_remote_port(
+                                       const struct device_node *node)
+{
+       return NULL;
+}
+
+#endif /* CONFIG_OF */
+
+#endif /* __LINUX_OF_GRAPH_H */
index 9e370618352a1bd86ccc6ced2d5ae3af0cc371cb..17baad3a0c57c178ba35f2fc71b67bda7aee946f 100644 (file)
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
 };
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
-                       u32 sig, struct persistent_ram_ecc_info *ecc_info);
+                       u32 sig, struct persistent_ram_ecc_info *ecc_info,
+                       unsigned int memtype);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
@@ -78,6 +79,7 @@ void ramoops_console_write_buf(const char *buf, size_t size);
 struct ramoops_platform_data {
        unsigned long   mem_size;
        unsigned long   mem_address;
+       unsigned int    mem_type;
        unsigned long   record_size;
        unsigned long   console_size;
        unsigned long   ftrace_size;
index 23b36304cd881bb77bfb0ca092b0e4346b88d87d..2ca9ed7cfc9b621d693f43ced731f1946b5121a4 100644 (file)
 #define SYS_HALT       0x0002  /* Notify of system halt */
 #define SYS_POWER_OFF  0x0003  /* Notify of system power off */
 
+enum reboot_mode {
+       REBOOT_COLD = 0,
+       REBOOT_WARM,
+};
+
 extern int register_reboot_notifier(struct notifier_block *);
 extern int unregister_reboot_notifier(struct notifier_block *);
 
index e2cee22f578a6b24d7eb39c1ad298f15115bcb2d..7dd65cbfcdb375ad4895a684f4a600bb73f5c0de 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/list.h>
 #include <linux/lockdep.h>
 #include <linux/kobject_ns.h>
+#include <linux/stat.h>
 #include <linux/atomic.h>
 
 struct kobject;
@@ -68,17 +69,25 @@ struct attribute_group {
  * for examples..
  */
 
-#define __ATTR(_name,_mode,_show,_store) { \
-       .attr = {.name = __stringify(_name), .mode = _mode },   \
-       .show   = _show,                                        \
-       .store  = _store,                                       \
+#define __ATTR(_name,_mode,_show,_store) {                             \
+       .attr = {.name = __stringify(_name), .mode = _mode },           \
+       .show   = _show,                                                \
+       .store  = _store,                                               \
 }
 
-#define __ATTR_RO(_name) { \
-       .attr   = { .name = __stringify(_name), .mode = 0444 }, \
-       .show   = _name##_show,                                 \
+#define __ATTR_RO(_name) {                                             \
+       .attr   = { .name = __stringify(_name), .mode = S_IRUGO },      \
+       .show   = _name##_show,                                         \
 }
 
+#define __ATTR_WO(_name) {                                             \
+       .attr   = { .name = __stringify(_name), .mode = S_IWUSR },      \
+       .store  = _name##_store,                                        \
+}
+
+#define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO),            \
+                        _name##_show, _name##_store)
+
 #define __ATTR_NULL { .attr = { .name = NULL } }
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -92,6 +101,18 @@ struct attribute_group {
 #define __ATTR_IGNORE_LOCKDEP  __ATTR
 #endif
 
+#define __ATTRIBUTE_GROUPS(_name)                              \
+static const struct attribute_group *_name##_groups[] = {      \
+       &_name##_group,                                         \
+       NULL,                                                   \
+}
+
+#define ATTRIBUTE_GROUPS(_name)                                        \
+static const struct attribute_group _name##_group = {          \
+       .attrs = _name##_attrs,                                 \
+};                                                             \
+__ATTRIBUTE_GROUPS(_name)
+
 #define attr_name(_attr) (_attr).attr.name
 
 struct file;
@@ -121,6 +142,36 @@ struct bin_attribute {
  */
 #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr)
 
+/* macros to create static binary attributes easier */
+#define __BIN_ATTR(_name, _mode, _read, _write, _size) {               \
+       .attr = { .name = __stringify(_name), .mode = _mode },          \
+       .read   = _read,                                                \
+       .write  = _write,                                               \
+       .size   = _size,                                                \
+}
+
+#define __BIN_ATTR_RO(_name, _size) {                                  \
+       .attr   = { .name = __stringify(_name), .mode = S_IRUGO },      \
+       .read   = _name##_read,                                         \
+       .size   = _size,                                                \
+}
+
+#define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name,                  \
+                                  (S_IWUSR | S_IRUGO), _name##_read,   \
+                                  _name##_write)
+
+#define __BIN_ATTR_NULL __ATTR_NULL
+
+#define BIN_ATTR(_name, _mode, _read, _write, _size)                   \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR(_name, _mode, _read,        \
+                                       _write, _size)
+
+#define BIN_ATTR_RO(_name, _size)                                      \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_RO(_name, _size)
+
+#define BIN_ATTR_RW(_name, _size)                                      \
+struct bin_attribute bin_attr_##_name = __BIN_ATTR_RW(_name, _size)
+
 struct sysfs_ops {
        ssize_t (*show)(struct kobject *, struct attribute *,char *);
        ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
index 14105c26a83618da5d91ae09331fb83ba8f8ad6b..a37081cf59da637dd6b8d81ead7073485c475d73 100644 (file)
@@ -17,6 +17,10 @@ struct uid_gid_map { /* 64 bytes -- 1 cache line */
        } extent[UID_GID_MAP_MAX_EXTENTS];
 };
 
+#define USERNS_SETGROUPS_ALLOWED 1UL
+
+#define USERNS_INIT_FLAGS USERNS_SETGROUPS_ALLOWED
+
 struct user_namespace {
        struct uid_gid_map      uid_map;
        struct uid_gid_map      gid_map;
@@ -27,6 +31,7 @@ struct user_namespace {
        kuid_t                  owner;
        kgid_t                  group;
        unsigned int            proc_inum;
+       unsigned long           flags;
        bool                    may_mount_sysfs;
        bool                    may_mount_proc;
 };
@@ -59,6 +64,9 @@ extern struct seq_operations proc_projid_seq_operations;
 extern ssize_t proc_uid_map_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t proc_gid_map_write(struct file *, const char __user *, size_t, loff_t *);
 extern ssize_t proc_projid_map_write(struct file *, const char __user *, size_t, loff_t *);
+extern ssize_t proc_setgroups_write(struct file *, const char __user *, size_t, loff_t *);
+extern int proc_setgroups_show(struct seq_file *m, void *v);
+extern bool userns_may_setgroups(const struct user_namespace *ns);
 #else
 
 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
@@ -83,6 +91,10 @@ static inline void put_user_ns(struct user_namespace *ns)
 {
 }
 
+static inline bool userns_may_setgroups(const struct user_namespace *ns)
+{
+       return true;
+}
 #endif
 
 void update_mnt_policy(struct user_namespace *userns);
index 3f63ea6464cae60eb55acb1bbed1cb10af2fdedf..7bf4d519c20fbf96fa730758aa22cf5389399886 100644 (file)
@@ -6887,11 +6887,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (move_group) {
                synchronize_rcu();
-               perf_install_in_context(ctx, group_leader, event->cpu);
+               perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_install_in_context(ctx, sibling, event->cpu);
+                       perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
                }
        }
index 6b2588dd04ff20fb89995394f9c530a2613fbb83..67b4ba30475fbc2b902e5a6226aeac2d2aaa5803 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/slab.h>
 #include <linux/security.h>
 #include <linux/syscalls.h>
+#include <linux/user_namespace.h>
 #include <asm/uaccess.h>
 
 /* init to 2 - one for init_task, one to ensure it is never freed */
@@ -223,6 +224,14 @@ out:
        return i;
 }
 
+bool may_setgroups(void)
+{
+       struct user_namespace *user_ns = current_user_ns();
+
+       return ns_capable(user_ns, CAP_SETGID) &&
+               userns_may_setgroups(user_ns);
+}
+
 /*
  *     SMP: Our groups are copy-on-write. We can set them safely
  *     without another task interfering.
@@ -233,7 +242,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
        struct group_info *group_info;
        int retval;
 
-       if (!nsown_capable(CAP_SETGID))
+       if (!may_setgroups())
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;
index 0eb6d8e8b1da7ffd3d5a750698395d0889190b73..3cdba517360050c24bbb00ad97c13e63c9492a43 100644 (file)
@@ -335,6 +335,8 @@ out:
 
 out_unlock:
        spin_unlock_irq(&pidmap_lock);
+       put_pid_ns(ns);
+
 out_free:
        while (++i <= ns->level)
                free_pidmap(pid->numbers + i);
index ff7d9d2ab504ec69da98156f3df39a9b5a868478..a64e0de74c0d0bed8645db0da1be7322ca5316df 100644 (file)
@@ -3,7 +3,10 @@ obj-y += timeconv.o posix-clock.o alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD)                += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS)              += tick-common.o
-obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)    += tick-broadcast.o
+ifeq ($(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST),y)
+ obj-y                                         += tick-broadcast.o
+ obj-$(CONFIG_TICK_ONESHOT)                    += tick-broadcast-hrtimer.o
+endif
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT)                     += tick-sched.o
 obj-$(CONFIG_TIMER_STATS)                      += timer_stats.o
index 58e8430165b52d81930c0ab4e32f527783da193b..378613bebb1352369f2a59a1ee1d4647a81c722a 100644 (file)
@@ -434,18 +434,45 @@ void clockevents_resume(void)
 #ifdef CONFIG_GENERIC_CLOCKEVENTS
 /**
  * clockevents_notify - notification about relevant events
+ * Returns 0 on success, any other value on error
  */
-void clockevents_notify(unsigned long reason, void *arg)
+int clockevents_notify(unsigned long reason, void *arg)
 {
        struct clock_event_device *dev, *tmp;
        unsigned long flags;
-       int cpu;
+       int cpu, ret = 0;
 
        raw_spin_lock_irqsave(&clockevents_lock, flags);
-       tick_notify(reason, arg);
 
        switch (reason) {
+       case CLOCK_EVT_NOTIFY_BROADCAST_ON:
+       case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
+       case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
+               tick_broadcast_on_off(reason, arg);
+               break;
+
+       case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
+       case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
+               ret = tick_broadcast_oneshot_control(reason);
+               break;
+
+       case CLOCK_EVT_NOTIFY_CPU_DYING:
+               tick_handover_do_timer(arg);
+               break;
+
+       case CLOCK_EVT_NOTIFY_SUSPEND:
+               tick_suspend();
+               tick_suspend_broadcast();
+               break;
+
+       case CLOCK_EVT_NOTIFY_RESUME:
+               tick_resume();
+               break;
+
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
+               tick_shutdown_broadcast_oneshot(arg);
+               tick_shutdown_broadcast(arg);
+               tick_shutdown(arg);
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
@@ -469,6 +496,7 @@ void clockevents_notify(unsigned long reason, void *arg)
                break;
        }
        raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 #endif
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c
new file mode 100644 (file)
index 0000000..eb682d5
--- /dev/null
@@ -0,0 +1,106 @@
+/*
+ * linux/kernel/time/tick-broadcast-hrtimer.c
+ * This file emulates a local clock event device
+ * via a pseudo clock device.
+ */
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/hrtimer.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/profile.h>
+#include <linux/clockchips.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/module.h>
+
+#include "tick-internal.h"
+
+static struct hrtimer bctimer;
+
+static void bc_set_mode(enum clock_event_mode mode,
+                       struct clock_event_device *bc)
+{
+       switch (mode) {
+       case CLOCK_EVT_MODE_SHUTDOWN:
+               /*
+                * Note, we cannot cancel the timer here as we might
+                * run into the following live lock scenario:
+                *
+                * cpu 0                cpu1
+                * lock(broadcast_lock);
+                *                      hrtimer_interrupt()
+                *                      bc_handler()
+                *                         tick_handle_oneshot_broadcast();
+                *                          lock(broadcast_lock);
+                * hrtimer_cancel()
+                *  wait_for_callback()
+                */
+               hrtimer_try_to_cancel(&bctimer);
+               break;
+       default:
+               break;
+       }
+}
+
+/*
+ * This is called from the guts of the broadcast code when the cpu
+ * which is about to enter idle has the earliest broadcast timer event.
+ */
+static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
+{
+       /*
+        * We try to cancel the timer first. If the callback is on
+        * flight on some other cpu then we let it handle it. If we
+        * were able to cancel the timer nothing can rearm it as we
+        * own broadcast_lock.
+        *
+        * However we can also be called from the event handler of
+        * ce_broadcast_hrtimer itself when it expires. We cannot
+        * restart the timer because we are in the callback, but we
+        * can set the expiry time and let the callback return
+        * HRTIMER_RESTART.
+        */
+       if (hrtimer_try_to_cancel(&bctimer) >= 0) {
+               hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+               /* Bind the "device" to the cpu */
+               bc->bound_on = smp_processor_id();
+       } else if (bc->bound_on == smp_processor_id()) {
+               hrtimer_set_expires(&bctimer, expires);
+       }
+       return 0;
+}
+
+static struct clock_event_device ce_broadcast_hrtimer = {
+       .set_mode               = bc_set_mode,
+       .set_next_ktime         = bc_set_next,
+       .features               = CLOCK_EVT_FEAT_ONESHOT |
+                                 CLOCK_EVT_FEAT_KTIME |
+                                 CLOCK_EVT_FEAT_HRTIMER,
+       .rating                 = 0,
+       .bound_on               = -1,
+       .min_delta_ns           = 1,
+       .max_delta_ns           = KTIME_MAX,
+       .min_delta_ticks        = 1,
+       .max_delta_ticks        = ULONG_MAX,
+       .mult                   = 1,
+       .shift                  = 0,
+       .cpumask                = cpu_all_mask,
+};
+
+static enum hrtimer_restart bc_handler(struct hrtimer *t)
+{
+       ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
+
+       if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+               return HRTIMER_NORESTART;
+
+       return HRTIMER_RESTART;
+}
+
+void tick_setup_hrtimer_broadcast(void)
+{
+       hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+       bctimer.function = bc_handler;
+       clockevents_register_device(&ce_broadcast_hrtimer);
+}
index 19ee339a1d0dd7e104bf25d6c6783eaf03df9e05..16f4830a5498acaa4f5ad768f73b47f7abc09994 100644 (file)
@@ -626,24 +626,61 @@ again:
        raw_spin_unlock(&tick_broadcast_lock);
 }
 
+static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
+{
+       if (!(bc->features & CLOCK_EVT_FEAT_HRTIMER))
+               return 0;
+       if (bc->next_event.tv64 == KTIME_MAX)
+               return 0;
+       return bc->bound_on == cpu ? -EBUSY : 0;
+}
+
+static void broadcast_shutdown_local(struct clock_event_device *bc,
+                                    struct clock_event_device *dev)
+{
+       /*
+        * For hrtimer based broadcasting we cannot shutdown the cpu
+        * local device if our own event is the first one to expire or
+        * if we own the broadcast timer.
+        */
+       if (bc->features & CLOCK_EVT_FEAT_HRTIMER) {
+               if (broadcast_needs_cpu(bc, smp_processor_id()))
+                       return;
+               if (dev->next_event.tv64 < bc->next_event.tv64)
+                       return;
+       }
+       clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+}
+
+static void broadcast_move_bc(int deadcpu)
+{
+       struct clock_event_device *bc = tick_broadcast_device.evtdev;
+
+       if (!bc || !broadcast_needs_cpu(bc, deadcpu))
+               return;
+       /* This moves the broadcast assignment to this cpu */
+       clockevents_program_event(bc, bc->next_event, 1);
+}
+
 /*
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
+ * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
  */
-void tick_broadcast_oneshot_control(unsigned long reason)
+int tick_broadcast_oneshot_control(unsigned long reason)
 {
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        ktime_t now;
-       int cpu;
+       int cpu, ret = 0;
 
        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
-               return;
+               return 0;
 
        /*
         * We are called with preemtion disabled from the depth of the
@@ -654,7 +691,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        dev = td->evtdev;
 
        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
-               return;
+               return 0;
 
        bc = tick_broadcast_device.evtdev;
 
@@ -662,7 +699,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
-                       clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
+                       broadcast_shutdown_local(bc, dev);
                        /*
                         * We only reprogram the broadcast timer if we
                         * did not mark ourself in the force mask and
@@ -675,6 +712,16 @@ void tick_broadcast_oneshot_control(unsigned long reason)
                            dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
                }
+               /*
+                * If the current CPU owns the hrtimer broadcast
+                * mechanism, it cannot go deep idle and we remove the
+                * CPU from the broadcast mask. We don't have to go
+                * through the EXIT path as the local timer is not
+                * shutdown.
+                */
+               ret = broadcast_needs_cpu(bc, cpu);
+               if (ret)
+                       cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
@@ -742,6 +789,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
        }
 out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
+       return ret;
 }
 
 /*
@@ -848,6 +896,8 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
        cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
        cpumask_clear_cpu(cpu, tick_broadcast_force_mask);
 
+       broadcast_move_bc(cpu);
+
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
index 086216c433fa3d0423dd83d5cf085557570c73ca..21711bc2bc25260a9ddea86bd52b1d4c38afdc29 100644 (file)
@@ -303,7 +303,7 @@ out_bc:
  *
  * Called with interrupts disabled.
  */
-static void tick_handover_do_timer(int *cpup)
+void tick_handover_do_timer(int *cpup)
 {
        if (*cpup == tick_do_timer_cpu) {
                int cpu = cpumask_first(cpu_online_mask);
@@ -320,7 +320,7 @@ static void tick_handover_do_timer(int *cpup)
  * access the hardware device itself.
  * We just set the mode and remove it from the lists.
  */
-static void tick_shutdown(unsigned int *cpup)
+void tick_shutdown(unsigned int *cpup)
 {
        struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
        struct clock_event_device *dev = td->evtdev;
@@ -341,7 +341,7 @@ static void tick_shutdown(unsigned int *cpup)
        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_suspend(void)
+void tick_suspend(void)
 {
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        unsigned long flags;
@@ -351,7 +351,7 @@ static void tick_suspend(void)
        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-static void tick_resume(void)
+void tick_resume(void)
 {
        struct tick_device *td = &__get_cpu_var(tick_cpu_device);
        unsigned long flags;
@@ -369,45 +369,6 @@ static void tick_resume(void)
        raw_spin_unlock_irqrestore(&tick_device_lock, flags);
 }
 
-void tick_notify(unsigned long reason, void *dev)
-{
-       switch (reason) {
-
-       case CLOCK_EVT_NOTIFY_BROADCAST_ON:
-       case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
-       case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
-               tick_broadcast_on_off(reason, dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
-       case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
-               tick_broadcast_oneshot_control(reason);
-               break;
-
-       case CLOCK_EVT_NOTIFY_CPU_DYING:
-               tick_handover_do_timer(dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_CPU_DEAD:
-               tick_shutdown_broadcast_oneshot(dev);
-               tick_shutdown_broadcast(dev);
-               tick_shutdown(dev);
-               break;
-
-       case CLOCK_EVT_NOTIFY_SUSPEND:
-               tick_suspend();
-               tick_suspend_broadcast();
-               break;
-
-       case CLOCK_EVT_NOTIFY_RESUME:
-               tick_resume();
-               break;
-
-       default:
-               break;
-       }
-}
-
 /**
  * tick_init - initialize the tick control
  */
index 60742fe6f63de49e3e1d5e8c87dadfb7c7198e11..c85edb18d68e4a848e90d9d2888e923020c6210f 100644 (file)
@@ -18,8 +18,11 @@ extern int tick_do_timer_cpu __read_mostly;
 
 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
 extern void tick_handle_periodic(struct clock_event_device *dev);
-extern void tick_notify(unsigned long reason, void *dev);
 extern void tick_check_new_device(struct clock_event_device *dev);
+extern void tick_handover_do_timer(int *cpup);
+extern void tick_shutdown(unsigned int *cpup);
+extern void tick_suspend(void);
+extern void tick_resume(void);
 
 extern void clockevents_shutdown(struct clock_event_device *dev);
 
@@ -36,7 +39,7 @@ extern int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *));
 extern void tick_resume_oneshot(void);
 # ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 extern void tick_broadcast_setup_oneshot(struct clock_event_device *bc);
-extern void tick_broadcast_oneshot_control(unsigned long reason);
+extern int tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
@@ -48,7 +51,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
        BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_broadcast_oneshot_active(void) { return 0; }
@@ -77,7 +80,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
        BUG();
 }
-static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
+static inline int tick_broadcast_oneshot_control(unsigned long reason) { return 0; }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
 static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
@@ -142,6 +145,8 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
        return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
+
 #endif
 
 extern void do_timer(unsigned long ticks);
index f6c83d7ef0006fffe3cc9811b736c513c1b8f6f1..d58cc4d8f0d1fa95c7ec0120cb408a9b4ad859e5 100644 (file)
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist)
        struct group_info *group_info;
        int retval;
 
-       if (!nsown_capable(CAP_SETGID))
+       if (!may_setgroups())
                return -EPERM;
        if ((unsigned)gidsetsize > NGROUPS_MAX)
                return -EINVAL;
index 69b4c3d48cdee20fc94be4d2869e2e330ba0a7b2..6bbef5604101cf52f6d052f7de96e373920847b5 100644 (file)
@@ -51,6 +51,7 @@ struct user_namespace init_user_ns = {
        .owner = GLOBAL_ROOT_UID,
        .group = GLOBAL_ROOT_GID,
        .proc_inum = PROC_USER_INIT_INO,
+       .flags = USERNS_INIT_FLAGS,
        .may_mount_sysfs = true,
        .may_mount_proc = true,
 };
index 9bea1d7dd21fac61c2084cb5bc295c1980c2573e..3f2fb33d291aa8739ab09a2f63d090695787c765 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/fs_struct.h>
 
 static struct kmem_cache *user_ns_cachep __read_mostly;
+static DEFINE_MUTEX(userns_state_mutex);
 
 static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
@@ -99,6 +100,11 @@ int create_user_ns(struct cred *new)
        ns->owner = owner;
        ns->group = group;
 
+       /* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
+       mutex_lock(&userns_state_mutex);
+       ns->flags = parent_ns->flags;
+       mutex_unlock(&userns_state_mutex);
+
        set_cred_user_ns(new, ns);
 
        update_mnt_policy(ns);
@@ -577,9 +583,6 @@ static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent
        return false;
 }
 
-
-static DEFINE_MUTEX(id_map_mutex);
-
 static ssize_t map_write(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos,
                         int cap_setid,
@@ -596,7 +599,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        ssize_t ret = -EINVAL;
 
        /*
-        * The id_map_mutex serializes all writes to any given map.
+        * The userns_state_mutex serializes all writes to any given map.
         *
         * Any map is only ever written once.
         *
@@ -614,7 +617,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
         * order and smp_rmb() is guaranteed that we don't have crazy
         * architectures returning stale data.
         */
-       mutex_lock(&id_map_mutex);
+       mutex_lock(&userns_state_mutex);
 
        ret = -EPERM;
        /* Only allow one successful write to the map */
@@ -741,7 +744,7 @@ static ssize_t map_write(struct file *file, const char __user *buf,
        *ppos = count;
        ret = count;
 out:
-       mutex_unlock(&id_map_mutex);
+       mutex_unlock(&userns_state_mutex);
        if (page)
                free_page(page);
        return ret;
@@ -800,17 +803,21 @@ static bool new_idmap_permitted(const struct file *file,
                                struct user_namespace *ns, int cap_setid,
                                struct uid_gid_map *new_map)
 {
-       /* Allow mapping to your own filesystem ids */
-       if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1)) {
+       const struct cred *cred = file->f_cred;
+       /* Don't allow mappings that would allow anything that wouldn't
+        * be allowed without the establishment of unprivileged mappings.
+        */
+       if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
+           uid_eq(ns->owner, cred->euid)) {
                u32 id = new_map->extent[0].lower_first;
                if (cap_setid == CAP_SETUID) {
                        kuid_t uid = make_kuid(ns->parent, id);
-                       if (uid_eq(uid, file->f_cred->fsuid))
+                       if (uid_eq(uid, cred->euid))
                                return true;
-               }
-               else if (cap_setid == CAP_SETGID) {
+               } else if (cap_setid == CAP_SETGID) {
                        kgid_t gid = make_kgid(ns->parent, id);
-                       if (gid_eq(gid, file->f_cred->fsgid))
+                       if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
+                           gid_eq(gid, cred->egid))
                                return true;
                }
        }
@@ -830,6 +837,100 @@ static bool new_idmap_permitted(const struct file *file,
        return false;
 }
 
+int proc_setgroups_show(struct seq_file *seq, void *v)
+{
+       struct user_namespace *ns = seq->private;
+       unsigned long userns_flags = ACCESS_ONCE(ns->flags);
+
+       seq_printf(seq, "%s\n",
+                  (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
+                  "allow" : "deny");
+       return 0;
+}
+
+ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
+                            size_t count, loff_t *ppos)
+{
+       struct seq_file *seq = file->private_data;
+       struct user_namespace *ns = seq->private;
+       char kbuf[8], *pos;
+       bool setgroups_allowed;
+       ssize_t ret;
+
+       /* Only allow a very narrow range of strings to be written */
+       ret = -EINVAL;
+       if ((*ppos != 0) || (count >= sizeof(kbuf)))
+               goto out;
+
+       /* What was written? */
+       ret = -EFAULT;
+       if (copy_from_user(kbuf, buf, count))
+               goto out;
+       kbuf[count] = '\0';
+       pos = kbuf;
+
+       /* What is being requested? */
+       ret = -EINVAL;
+       if (strncmp(pos, "allow", 5) == 0) {
+               pos += 5;
+               setgroups_allowed = true;
+       }
+       else if (strncmp(pos, "deny", 4) == 0) {
+               pos += 4;
+               setgroups_allowed = false;
+       }
+       else
+               goto out;
+
+       /* Verify there is not trailing junk on the line */
+       pos = skip_spaces(pos);
+       if (*pos != '\0')
+               goto out;
+
+       ret = -EPERM;
+       mutex_lock(&userns_state_mutex);
+       if (setgroups_allowed) {
+               /* Enabling setgroups after setgroups has been disabled
+                * is not allowed.
+                */
+               if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
+                       goto out_unlock;
+       } else {
+               /* Permanently disabling setgroups after setgroups has
+                * been enabled by writing the gid_map is not allowed.
+                */
+               if (ns->gid_map.nr_extents != 0)
+                       goto out_unlock;
+               ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
+       }
+       mutex_unlock(&userns_state_mutex);
+
+       /* Report a successful write */
+       *ppos = count;
+       ret = count;
+out:
+       return ret;
+out_unlock:
+       mutex_unlock(&userns_state_mutex);
+       goto out;
+}
+
+bool userns_may_setgroups(const struct user_namespace *ns)
+{
+       bool allowed;
+
+       mutex_lock(&userns_state_mutex);
+       /* It is not safe to use setgroups until a gid mapping in
+        * the user namespace has been established.
+        */
+       allowed = ns->gid_map.nr_extents != 0;
+       /* Is setgroups allowed? */
+       allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
+       mutex_unlock(&userns_state_mutex);
+
+       return allowed;
+}
+
 static void *userns_get(struct task_struct *task)
 {
        struct user_namespace *user_ns;
index 2a39bf62d8c1b71d5862098991b7fb71abbc9faf..9e9c46c267db7e8fc9ac7b27f65f0644ffba12ac 100644 (file)
@@ -373,6 +373,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool,
 }
 EXPORT_SYMBOL(gen_pool_for_each_chunk);
 
+/**
+ * addr_in_gen_pool - checks if an address falls within the range of a pool
+ * @pool:      the generic memory pool
+ * @start:     start address
+ * @size:      size of the region
+ *
+ * Check if the range of addresses falls within the specified pool. Returns
+ * true if the entire range is contained in the pool and false otherwise.
+ */
+bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+                       size_t size)
+{
+       bool found = false;
+       unsigned long end = start + size;
+       struct gen_pool_chunk *chunk;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
+               if (start >= chunk->start_addr && start <= chunk->end_addr) {
+                       if (end <= chunk->end_addr) {
+                               found = true;
+                               break;
+                       }
+               }
+       }
+       rcu_read_unlock();
+       return found;
+}
+
 /**
  * gen_pool_avail - get available free space of the pool
  * @pool: pool to get available free space
@@ -451,6 +480,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
 }
 EXPORT_SYMBOL(gen_pool_first_fit);
 
+/**
+ * gen_pool_first_fit_order_align - find the first available region
+ * of memory matching the size requirement. The region will be aligned
+ * to the order of the size specified.
+ * @map: The address to base the search on
+ * @size: The bitmap size in bits
+ * @start: The bitnumber to start searching at
+ * @nr: The number of zeroed bits we're looking for
+ * @data: additional data - unused
+ */
+unsigned long gen_pool_first_fit_order_align(unsigned long *map,
+               unsigned long size, unsigned long start,
+               unsigned int nr, void *data)
+{
+       unsigned long align_mask = roundup_pow_of_two(nr) - 1;
+
+       return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
+}
+EXPORT_SYMBOL(gen_pool_first_fit_order_align);
+
 /**
  * gen_pool_best_fit - find the best fitting region of memory
  * macthing the size requirement (no alignment constraint)
diff --git a/linaro/configs/booting-test.conf b/linaro/configs/booting-test.conf
new file mode 100644 (file)
index 0000000..7592b21
--- /dev/null
@@ -0,0 +1,66 @@
+CONFIG_UNUSED_SYMBOLS=y
+CONFIG_HEADERS_CHECK=y
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_DEBUG_SHIRQ=y
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_SELFTEST=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE=400
+CONFIG_DEBUG_KMEMLEAK_TEST=m
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_PI_LIST=y
+CONFIG_RT_MUTEX_TESTER=y
+CONFIG_DEBUG_LOCKDEP=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_DEBUG_KOBJECT=y
+CONFIG_DEBUG_HIGHMEM=y
+CONFIG_DEBUG_BUGVERBOSE=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_WRITECOUNT=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_LIST=y
+CONFIG_TEST_LIST_SORT=y
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_DEBUG_CREDENTIALS=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_FAULT_INJECTION=y
+CONFIG_TRACER_MAX_TRACE=y
+CONFIG_GENERIC_TRACER=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_FUNCTION_GRAPH_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_TRACER_SNAPSHOT=y
+CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
+CONFIG_STACK_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_DYNAMIC_FTRACE=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_FTRACE_MCOUNT_RECORD=y
+CONFIG_FTRACE_SELFTEST=y
+CONFIG_FTRACE_STARTUP_TEST=y
+CONFIG_EVENT_TRACE_TEST_SYSCALLS=y
+CONFIG_RING_BUFFER_BENCHMARK=y
+CONFIG_RING_BUFFER_STARTUP_TEST=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DMA_API_DEBUG=y
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_TEST_STRING_HELPERS=y
+CONFIG_TEST_KSTRTOX=y
+CONFIG_STRICT_DEVMEM=y
+CONFIG_OLD_MCOUNT=y
+CONFIG_DEBUG_USER=y
+
index eea1a9dfac38fc84a56a72f8161ae1fe7ddb8260..2e53312b89073b95344175d9201444b9ffef19eb 100644 (file)
@@ -232,8 +232,6 @@ static ssize_t stable_pages_required_show(struct device *dev,
                        bdi_cap_stable_pages_required(bdi) ? 1 : 0);
 }
 
-#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)
-
 static struct device_attribute bdi_dev_attrs[] = {
        __ATTR_RW(read_ahead_kb),
        __ATTR_RW(min_ratio),
index 1b24bdcb3197492674b0d6d0c128f9928fb977b1..a55036a684873c9b51a10847d3bf63b2e7f79c71 100644 (file)
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
                  the (older) page from frontswap
                 */
                inc_frontswap_failed_stores();
-               if (dup)
+               if (dup) {
                        __frontswap_clear(sis, offset);
+                       frontswap_ops->invalidate_page(type, offset);
+               }
        }
        if (frontswap_writethrough_enabled)
                /* report failure so swap also writes to swap device */
index b5edd6ee71d831fdd869bff1666839838d574443..1df7bd48cdae095a80d37e8546679e126ff41c54 100644 (file)
@@ -836,20 +836,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                if (!pte_file(pte)) {
                        swp_entry_t entry = pte_to_swp_entry(pte);
 
-                       if (swap_duplicate(entry) < 0)
-                               return entry.val;
-
-                       /* make sure dst_mm is on swapoff's mmlist. */
-                       if (unlikely(list_empty(&dst_mm->mmlist))) {
-                               spin_lock(&mmlist_lock);
-                               if (list_empty(&dst_mm->mmlist))
-                                       list_add(&dst_mm->mmlist,
-                                                &src_mm->mmlist);
-                               spin_unlock(&mmlist_lock);
-                       }
-                       if (likely(!non_swap_entry(entry)))
+                       if (likely(!non_swap_entry(entry))) {
+                               if (swap_duplicate(entry) < 0)
+                                       return entry.val;
+
+                               /* make sure dst_mm is on swapoff's mmlist. */
+                               if (unlikely(list_empty(&dst_mm->mmlist))) {
+                                       spin_lock(&mmlist_lock);
+                                       if (list_empty(&dst_mm->mmlist))
+                                               list_add(&dst_mm->mmlist,
+                                                        &src_mm->mmlist);
+                                       spin_unlock(&mmlist_lock);
+                               }
                                rss[MM_SWAPENTS]++;
-                       else if (is_migration_entry(entry)) {
+                       } else if (is_migration_entry(entry)) {
                                page = migration_entry_to_page(entry);
 
                                if (PageAnon(page))
@@ -3202,7 +3202,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (prev && prev->vm_end == address)
                        return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
 
-               expand_downwards(vma, address - PAGE_SIZE);
+               return expand_downwards(vma, address - PAGE_SIZE);
        }
        if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
                struct vm_area_struct *next = vma->vm_next;
@@ -3211,7 +3211,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
                if (next && next->vm_start == address + PAGE_SIZE)
                        return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
 
-               expand_upwards(vma, address + PAGE_SIZE);
+               return expand_upwards(vma, address + PAGE_SIZE);
        }
        return 0;
 }
index 2e768e37c2a78ba4e56b5c26c7c72829cc271996..fa5cb89348bad2b034e3f2f9d82cac8e06390018 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2063,14 +2063,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
 {
        struct mm_struct *mm = vma->vm_mm;
        struct rlimit *rlim = current->signal->rlim;
-       unsigned long new_start;
+       unsigned long new_start, actual_size;
 
        /* address space limit tests */
        if (!may_expand_vm(mm, grow))
                return -ENOMEM;
 
        /* Stack limit test */
-       if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+       actual_size = size;
+       if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+               actual_size -= PAGE_SIZE;
+       if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
                return -ENOMEM;
 
        /* mlock limit tests */
index ec826b383b625b2840b31af6722bbf61f4d05f2c..13a4a39a1f4b4a05eae84e17c8a16be6abeaebbc 100644 (file)
@@ -2674,18 +2674,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
                return false;
 
        /*
-        * There is a potential race between when kswapd checks its watermarks
-        * and a process gets throttled. There is also a potential race if
-        * processes get throttled, kswapd wakes, a large process exits therby
-        * balancing the zones that causes kswapd to miss a wakeup. If kswapd
-        * is going to sleep, no process should be sleeping on pfmemalloc_wait
-        * so wake them now if necessary. If necessary, processes will wake
-        * kswapd and get throttled again
+        * The throttled processes are normally woken up in balance_pgdat() as
+        * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+        * race between when kswapd checks the watermarks and a process gets
+        * throttled. There is also a potential race if processes get
+        * throttled, kswapd wakes, a large process exits thereby balancing the
+        * zones, which causes kswapd to exit balance_pgdat() before reaching
+        * the wake up checks. If kswapd is going to sleep, no process should
+        * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+        * the wake up is premature, processes will wake kswapd and get
+        * throttled again. The difference from wake ups in balance_pgdat() is
+        * that here we are under prepare_to_wait().
         */
-       if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
-               wake_up(&pgdat->pfmemalloc_wait);
-               return false;
-       }
+       if (waitqueue_active(&pgdat->pfmemalloc_wait))
+               wake_up_all(&pgdat->pfmemalloc_wait);
 
        return pgdat_balanced(pgdat, order, classzone_idx);
 }
index ae43dd807bb2b21a03ced8c874461467837b5303..25c4dd563a7985f46bc97d20297b1f2e0cb593d8 100644 (file)
@@ -1318,6 +1318,7 @@ static int do_setlink(const struct sk_buff *skb,
                        goto errout;
                }
                if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
+                       put_net(net);
                        err = -EPERM;
                        goto errout;
                }
index 08b13803d617d0e241f0395a58e6e16fa7c79cec..05e05968c5dd1c566e6139098dcb98fe8a2c2dfd 100644 (file)
@@ -1534,7 +1534,9 @@ int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
-                   !(ifp->flags&IFA_F_TENTATIVE) &&
+                   (!(ifp->flags&IFA_F_TENTATIVE) ||
+                    (ipv6_use_optimistic_addr(ifp->idev) &&
+                     ifp->flags&IFA_F_OPTIMISTIC)) &&
                    (dev == NULL || ifp->idev->dev == dev ||
                     !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
                        rcu_read_unlock_bh();
index 6c20f4731f1a0322e9dd1416e670b2d3e1bcd515..65156a73b3f3bdc1f9f96c74fe663327a4a54185 100644 (file)
@@ -512,11 +512,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
 
                skb->protocol = gre_proto;
                /* WCCP version 1 and 2 protocol decoding.
-                * - Change protocol to IP
+                * - Change protocol to IPv6
                 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
                 */
                if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
-                       skb->protocol = htons(ETH_P_IP);
+                       skb->protocol = htons(ETH_P_IPV6);
                        if ((*(h + offset) & 0xF0) != 0x40)
                                offset += 4;
                }
index 67059b88fea5f28619c97ad2ca8c150840045055..635d0972b688c3a08c2b79fc1099dc27d041140e 100644 (file)
@@ -607,7 +607,7 @@ void ieee80211_free_sta_keys(struct ieee80211_local *local,
        int i;
 
        mutex_lock(&local->key_mtx);
-       for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
+       for (i = 0; i < ARRAY_SIZE(sta->gtk); i++) {
                key = key_mtx_dereference(local, sta->gtk[i]);
                if (!key)
                        continue;
index 85bc6d498b46f59b76658881cf8e55aac2df6e34..9299a38c372e803332788d8e73a2a20e9602ab49 100644 (file)
@@ -1585,14 +1585,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
-               goto out;
-
        if (is_multicast_ether_addr(hdr->addr1)) {
                rx->local->dot11MulticastReceivedFrameCount++;
-               goto out;
+               goto out_no_led;
        }
 
+       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+               goto out;
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -1683,9 +1683,10 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        status->rx_flags |= IEEE80211_RX_FRAGMENTED;
 
  out:
+       ieee80211_led_rx(rx->local);
+ out_no_led:
        if (rx->sta)
                rx->sta->rx_packets++;
-       ieee80211_led_rx(rx->local);
        return RX_CONTINUE;
 }
 
index b6f5fc3127b944bcc68766f815e36f4af0e34a3e..73b8ca51ba14ffa3fecf138bf2953b7de2581489 100644 (file)
@@ -413,12 +413,12 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        sk = chunk->skb->sk;
 
        /* Allocate the new skb.  */
-       nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
+       nskb = alloc_skb(packet->size + MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                goto nomem;
 
        /* Make sure the outbound skb has enough header room reserved. */
-       skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);
+       skb_reserve(nskb, packet->overhead + MAX_HEADER);
 
        /* Set the owning socket so that we know where to get the
         * destination IP address.
index 4305b2f2ec5eb7f8432c2c6dd7daf983b59f6d24..8c0e07b7a70b06dfb322825e23a2ab1a216d7f57 100755 (executable)
@@ -1750,7 +1750,7 @@ sub dump_struct($$) {
        # strip kmemcheck_bitfield_{begin,end}.*;
        $members =~ s/kmemcheck_bitfield_.*?;//gos;
        # strip attributes
-       $members =~ s/__aligned\s*\(.+\)//gos;
+       $members =~ s/__aligned\s*\([^;]*\)//gos;
 
        create_parameterlist($members, ';', $file);
        check_sections($file, $declaration_name, "struct", $sectcheck, $struct_actual, $nested);
index 9e1e005c75967d497831fb67146020aeed4bc4a8..c4c8df4b214d9f0bc2f941e0144526b3260a7a1f 100644 (file)
@@ -1018,10 +1018,13 @@ static int __init init_encrypted(void)
        ret = encrypted_shash_alloc();
        if (ret < 0)
                return ret;
+       ret = aes_get_sizes();
+       if (ret < 0)
+               goto out;
        ret = register_key_type(&key_type_encrypted);
        if (ret < 0)
                goto out;
-       return aes_get_sizes();
+       return 0;
 out:
        encrypted_shash_release();
        return ret;
index aeefec74a0618a6b9ee4d1662f12b0fe28623aa0..83a0f9b4452b2e984d9ee034aa4a574a8b29b601 100644 (file)
@@ -327,8 +327,10 @@ int snd_hda_get_sub_nodes(struct hda_codec *codec, hda_nid_t nid,
        unsigned int parm;
 
        parm = snd_hda_param_read(codec, nid, AC_PAR_NODE_COUNT);
-       if (parm == -1)
+       if (parm == -1) {
+               *start_id = 0;
                return 0;
+       }
        *start_id = (parm >> 16) & 0x7fff;
        return (int)(parm & 0x7fff);
 }
index 5dd4c4af9c9f900955c16901c1a11b69d6a9e857..4ae5767a2cf5cac96008b925ed451f9db2b34cf0 100644 (file)
@@ -573,9 +573,9 @@ static void stac_store_hints(struct hda_codec *codec)
                        spec->gpio_mask;
        }
        if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
-               spec->gpio_mask &= spec->gpio_mask;
-       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
                spec->gpio_dir &= spec->gpio_mask;
+       if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+               spec->gpio_data &= spec->gpio_mask;
        if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
                spec->eapd_mask &= spec->gpio_mask;
        if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
index 76bfeb3c3e30cb56b040270567835d432667f3b7..be8de7ce1cda00bd0bd0f15d7c76d3d2126586d9 100644 (file)
@@ -1364,8 +1364,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
        {"STENL Mux", "Sidetone Left", "DMICL"},
        {"STENR Mux", "Sidetone Right", "ADCR"},
        {"STENR Mux", "Sidetone Right", "DMICR"},
-       {"DACL", "NULL", "STENL Mux"},
-       {"DACR", "NULL", "STENL Mux"},
+       {"DACL", NULL, "STENL Mux"},
+       {"DACR", NULL, "STENL Mux"},
 
        {"AIFINL", NULL, "SHDN"},
        {"AIFINR", NULL, "SHDN"},
index 4068f24912322b5e53f7ad90f5e4d562b5360cef..bb3878c9625fcb0bb7e2ead98ffc6d89e2580a13 100644 (file)
@@ -176,6 +176,13 @@ static int _process_sigma_firmware(struct device *dev,
                goto done;
        }
 
+       if (ssfw_head->version != 1) {
+               dev_err(dev,
+                       "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
+                       ssfw_head->version);
+               goto done;
+       }
+
        crc = crc32(0, fw->data + sizeof(*ssfw_head),
                        fw->size - sizeof(*ssfw_head));
        pr_debug("%s: crc=%x\n", __func__, crc);
index 593a3ea12d4c3cca61caa6976d94847478a0c6dc..489a9abf112b1431769fd4822fc4362b0e005c4e 100644 (file)
@@ -263,6 +263,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
        snd_soc_dai_set_dma_data(dai, substream, NULL);
 }
 
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+                         struct snd_soc_dai *dai)
+{
+       struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+       if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+               i2s_write_reg(dev->i2s_base, TXFFR, 1);
+       else
+               i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+       return 0;
+}
+
 static int dw_i2s_trigger(struct snd_pcm_substream *substream,
                int cmd, struct snd_soc_dai *dai)
 {
@@ -294,6 +307,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
        .startup        = dw_i2s_startup,
        .shutdown       = dw_i2s_shutdown,
        .hw_params      = dw_i2s_hw_params,
+       .prepare        = dw_i2s_prepare,
        .trigger        = dw_i2s_trigger,
 };
 
index 8e01fa4991c519a07b9f7c8dda22acac1d38cb77..93249133aeec42bcca4ec169c90e08fbf595c0bd 100644 (file)
@@ -364,6 +364,8 @@ static void snd_usbmidi_error_timer(unsigned long data)
                if (in && in->error_resubmit) {
                        in->error_resubmit = 0;
                        for (j = 0; j < INPUT_URBS; ++j) {
+                               if (atomic_read(&in->urbs[j]->use_count))
+                                       continue;
                                in->urbs[j]->dev = umidi->dev;
                                snd_usbmidi_submit_urb(in->urbs[j], GFP_ATOMIC);
                        }
index 0339d464791a27f4a7c5f9b2b9a835be62726082..4df31b0f94a3731b0c1ccb40ba81d6b5c52c732a 100644 (file)
@@ -322,8 +322,11 @@ static struct usbmix_name_map hercules_usb51_map[] = {
        { 0 }                           /* terminator */
 };
 
-static const struct usbmix_name_map kef_x300a_map[] = {
-       { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
+ * when anything attempts to access FU 10 (control)
+ */
+static const struct usbmix_name_map scms_usb3318_map[] = {
+       { 10, NULL },
        { 0 }
 };
 
@@ -415,8 +418,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = ebox44_map,
        },
        {
+               /* KEF X300A */
                .id = USB_ID(0x27ac, 0x1000),
-               .map = kef_x300a_map,
+               .map = scms_usb3318_map,
+       },
+       {
+               /* Arcam rPAC */
+               .id = USB_ID(0x25c4, 0x0003),
+               .map = scms_usb3318_map,
        },
        { 0 } /* terminator */
 };
index 14c2fe20aa628cc53fc969d69ebc9466ca4274f2..20764e01df168f2cb8ec7b6a62740aa297365e50 100644 (file)
@@ -34,6 +34,7 @@ struct events_stats {
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
+       u32 nr_unordered_events;
 };
 
 enum hist_column {
index e392202b96bc2fc4291b55cd9678090da74cb579..6f593a704ea5621a167ae7d6c784662a2e4233f0 100644 (file)
@@ -656,8 +656,7 @@ static int perf_session_queue_event(struct perf_session *s, union perf_event *ev
                return -ETIME;
 
        if (timestamp < s->ordered_samples.last_flush) {
-               printf("Warning: Timestamp below last timeslice flush\n");
-               return -EINVAL;
+               s->stats.nr_unordered_events++;
        }
 
        if (!list_empty(sc)) {
@@ -1057,6 +1056,8 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
                            session->stats.nr_unprocessable_samples);
        }
+       if (session->stats.nr_unordered_events != 0)
+               ui__warning("%u out of order events recorded.\n", session->stats.nr_unordered_events);
 }
 
 #define session_done() (*(volatile int *)(&session_done))
index 1b3ff2fda4d0e410102b0ec663ec360d37fe3e97..517785052f1c37a4c368810c5c88d35119c9cab0 100644 (file)
@@ -6,6 +6,8 @@
 #include <sys/types.h>
 #include <sys/mount.h>
 #include <sys/wait.h>
+#include <sys/vfs.h>
+#include <sys/statvfs.h>
 #include <stdlib.h>
 #include <unistd.h>
 #include <fcntl.h>
 # define CLONE_NEWPID 0x20000000
 #endif
 
+#ifndef MS_REC
+# define MS_REC 16384
+#endif
 #ifndef MS_RELATIME
-#define MS_RELATIME (1 << 21)
+# define MS_RELATIME (1 << 21)
 #endif
 #ifndef MS_STRICTATIME
-#define MS_STRICTATIME (1 << 24)
+# define MS_STRICTATIME (1 << 24)
 #endif
 
 static void die(char *fmt, ...)
@@ -48,17 +53,14 @@ static void die(char *fmt, ...)
        exit(EXIT_FAILURE);
 }
 
-static void write_file(char *filename, char *fmt, ...)
+static void vmaybe_write_file(bool enoent_ok, char *filename, char *fmt, va_list ap)
 {
        char buf[4096];
        int fd;
        ssize_t written;
        int buf_len;
-       va_list ap;
 
-       va_start(ap, fmt);
        buf_len = vsnprintf(buf, sizeof(buf), fmt, ap);
-       va_end(ap);
        if (buf_len < 0) {
                die("vsnprintf failed: %s\n",
                    strerror(errno));
@@ -69,6 +71,8 @@ static void write_file(char *filename, char *fmt, ...)
 
        fd = open(filename, O_WRONLY);
        if (fd < 0) {
+               if ((errno == ENOENT) && enoent_ok)
+                       return;
                die("open of %s failed: %s\n",
                    filename, strerror(errno));
        }
@@ -87,6 +91,65 @@ static void write_file(char *filename, char *fmt, ...)
        }
 }
 
+static void maybe_write_file(char *filename, char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       vmaybe_write_file(true, filename, fmt, ap);
+       va_end(ap);
+
+}
+
+static void write_file(char *filename, char *fmt, ...)
+{
+       va_list ap;
+
+       va_start(ap, fmt);
+       vmaybe_write_file(false, filename, fmt, ap);
+       va_end(ap);
+
+}
+
+static int read_mnt_flags(const char *path)
+{
+       int ret;
+       struct statvfs stat;
+       int mnt_flags;
+
+       ret = statvfs(path, &stat);
+       if (ret != 0) {
+               die("statvfs of %s failed: %s\n",
+                       path, strerror(errno));
+       }
+       if (stat.f_flag & ~(ST_RDONLY | ST_NOSUID | ST_NODEV | \
+                       ST_NOEXEC | ST_NOATIME | ST_NODIRATIME | ST_RELATIME | \
+                       ST_SYNCHRONOUS | ST_MANDLOCK)) {
+               die("Unrecognized mount flags\n");
+       }
+       mnt_flags = 0;
+       if (stat.f_flag & ST_RDONLY)
+               mnt_flags |= MS_RDONLY;
+       if (stat.f_flag & ST_NOSUID)
+               mnt_flags |= MS_NOSUID;
+       if (stat.f_flag & ST_NODEV)
+               mnt_flags |= MS_NODEV;
+       if (stat.f_flag & ST_NOEXEC)
+               mnt_flags |= MS_NOEXEC;
+       if (stat.f_flag & ST_NOATIME)
+               mnt_flags |= MS_NOATIME;
+       if (stat.f_flag & ST_NODIRATIME)
+               mnt_flags |= MS_NODIRATIME;
+       if (stat.f_flag & ST_RELATIME)
+               mnt_flags |= MS_RELATIME;
+       if (stat.f_flag & ST_SYNCHRONOUS)
+               mnt_flags |= MS_SYNCHRONOUS;
+       if (stat.f_flag & ST_MANDLOCK)
+               mnt_flags |= ST_MANDLOCK;
+
+       return mnt_flags;
+}
+
 static void create_and_enter_userns(void)
 {
        uid_t uid;
@@ -100,13 +163,10 @@ static void create_and_enter_userns(void)
                        strerror(errno));
        }
 
+       maybe_write_file("/proc/self/setgroups", "deny");
        write_file("/proc/self/uid_map", "0 %d 1", uid);
        write_file("/proc/self/gid_map", "0 %d 1", gid);
 
-       if (setgroups(0, NULL) != 0) {
-               die("setgroups failed: %s\n",
-                       strerror(errno));
-       }
        if (setgid(0) != 0) {
                die ("setgid(0) failed %s\n",
                        strerror(errno));
@@ -118,7 +178,8 @@ static void create_and_enter_userns(void)
 }
 
 static
-bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
+bool test_unpriv_remount(const char *fstype, const char *mount_options,
+                        int mount_flags, int remount_flags, int invalid_flags)
 {
        pid_t child;
 
@@ -151,9 +212,11 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
                        strerror(errno));
        }
 
-       if (mount("testing", "/tmp", "ramfs", mount_flags, NULL) != 0) {
-               die("mount of /tmp failed: %s\n",
-                       strerror(errno));
+       if (mount("testing", "/tmp", fstype, mount_flags, mount_options) != 0) {
+               die("mount of %s with options '%s' on /tmp failed: %s\n",
+                   fstype,
+                   mount_options? mount_options : "",
+                   strerror(errno));
        }
 
        create_and_enter_userns();
@@ -181,62 +244,127 @@ bool test_unpriv_remount(int mount_flags, int remount_flags, int invalid_flags)
 
 static bool test_unpriv_remount_simple(int mount_flags)
 {
-       return test_unpriv_remount(mount_flags, mount_flags, 0);
+       return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags, 0);
 }
 
 static bool test_unpriv_remount_atime(int mount_flags, int invalid_flags)
 {
-       return test_unpriv_remount(mount_flags, mount_flags, invalid_flags);
+       return test_unpriv_remount("ramfs", NULL, mount_flags, mount_flags,
+                                  invalid_flags);
+}
+
+static bool test_priv_mount_unpriv_remount(void)
+{
+       pid_t child;
+       int ret;
+       const char *orig_path = "/dev";
+       const char *dest_path = "/tmp";
+       int orig_mnt_flags, remount_mnt_flags;
+
+       child = fork();
+       if (child == -1) {
+               die("fork failed: %s\n",
+                       strerror(errno));
+       }
+       if (child != 0) { /* parent */
+               pid_t pid;
+               int status;
+               pid = waitpid(child, &status, 0);
+               if (pid == -1) {
+                       die("waitpid failed: %s\n",
+                               strerror(errno));
+               }
+               if (pid != child) {
+                       die("waited for %d got %d\n",
+                               child, pid);
+               }
+               if (!WIFEXITED(status)) {
+                       die("child did not terminate cleanly\n");
+               }
+               return WEXITSTATUS(status) == EXIT_SUCCESS ? true : false;
+       }
+
+       orig_mnt_flags = read_mnt_flags(orig_path);
+
+       create_and_enter_userns();
+       ret = unshare(CLONE_NEWNS);
+       if (ret != 0) {
+               die("unshare(CLONE_NEWNS) failed: %s\n",
+                       strerror(errno));
+       }
+
+       ret = mount(orig_path, dest_path, "bind", MS_BIND | MS_REC, NULL);
+       if (ret != 0) {
+               die("recursive bind mount of %s onto %s failed: %s\n",
+                       orig_path, dest_path, strerror(errno));
+       }
+
+       ret = mount(dest_path, dest_path, "none",
+                   MS_REMOUNT | MS_BIND | orig_mnt_flags , NULL);
+       if (ret != 0) {
+               /* system("cat /proc/self/mounts"); */
+               die("remount of /tmp failed: %s\n",
+                   strerror(errno));
+       }
+
+       remount_mnt_flags = read_mnt_flags(dest_path);
+       if (orig_mnt_flags != remount_mnt_flags) {
+               die("Mount flags unexpectedly changed during remount of %s originally mounted on %s\n",
+                       dest_path, orig_path);
+       }
+       exit(EXIT_SUCCESS);
 }
 
 int main(int argc, char **argv)
 {
-       if (!test_unpriv_remount_simple(MS_RDONLY|MS_NODEV)) {
+       if (!test_unpriv_remount_simple(MS_RDONLY)) {
                die("MS_RDONLY malfunctions\n");
        }
-       if (!test_unpriv_remount_simple(MS_NODEV)) {
+       if (!test_unpriv_remount("devpts", "newinstance", MS_NODEV, MS_NODEV, 0)) {
                die("MS_NODEV malfunctions\n");
        }
-       if (!test_unpriv_remount_simple(MS_NOSUID|MS_NODEV)) {
+       if (!test_unpriv_remount_simple(MS_NOSUID)) {
                die("MS_NOSUID malfunctions\n");
        }
-       if (!test_unpriv_remount_simple(MS_NOEXEC|MS_NODEV)) {
+       if (!test_unpriv_remount_simple(MS_NOEXEC)) {
                die("MS_NOEXEC malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODEV,
-                                      MS_NOATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_RELATIME,
+                                      MS_NOATIME))
        {
                die("MS_RELATIME malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODEV,
-                                      MS_NOATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_STRICTATIME,
+                                      MS_NOATIME))
        {
                die("MS_STRICTATIME malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODEV,
-                                      MS_STRICTATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_NOATIME,
+                                      MS_STRICTATIME))
        {
-               die("MS_RELATIME malfunctions\n");
+               die("MS_NOATIME malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME|MS_NODEV,
-                                      MS_NOATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_RELATIME|MS_NODIRATIME,
+                                      MS_NOATIME))
        {
-               die("MS_RELATIME malfunctions\n");
+               die("MS_RELATIME|MS_NODIRATIME malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME|MS_NODEV,
-                                      MS_NOATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_STRICTATIME|MS_NODIRATIME,
+                                      MS_NOATIME))
        {
-               die("MS_RELATIME malfunctions\n");
+               die("MS_STRICTATIME|MS_NODIRATIME malfunctions\n");
        }
-       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME|MS_NODEV,
-                                      MS_STRICTATIME|MS_NODEV))
+       if (!test_unpriv_remount_atime(MS_NOATIME|MS_NODIRATIME,
+                                      MS_STRICTATIME))
        {
-               die("MS_RELATIME malfunctions\n");
+               die("MS_NOATIME|MS_DIRATIME malfunctions\n");
        }
-       if (!test_unpriv_remount(MS_STRICTATIME|MS_NODEV, MS_NODEV,
-                                MS_NOATIME|MS_NODEV))
+       if (!test_unpriv_remount("ramfs", NULL, MS_STRICTATIME, 0, MS_NOATIME))
        {
                die("Default atime malfunctions\n");
        }
+       if (!test_priv_mount_unpriv_remount()) {
+               die("Mount flags unexpectedly changed after remount\n");
+       }
        return EXIT_SUCCESS;
 }