Merge branch 'akpm' (patches from Andrew)
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Sep 2015 01:19:42 +0000 (18:19 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 11 Sep 2015 01:19:42 +0000 (18:19 -0700)
Merge third patch-bomb from Andrew Morton:

 - even more of the rest of MM

 - lib/ updates

 - checkpatch updates

 - small changes to a few scruffy filesystems

 - kmod fixes/cleanups

 - kexec updates

 - a dma-mapping cleanup series from hch

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (81 commits)
  dma-mapping: consolidate dma_set_mask
  dma-mapping: consolidate dma_supported
  dma-mapping: consolidate dma_mapping_error
  dma-mapping: consolidate dma_{alloc,free}_noncoherent
  dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent}
  mm: use vma_is_anonymous() in create_huge_pmd() and wp_huge_pmd()
  mm: make sure all file VMAs have ->vm_ops set
  mm, mpx: add "vm_flags_t vm_flags" arg to do_mmap_pgoff()
  mm: mark most vm_operations_struct const
  namei: fix warning while make xmldocs caused by namei.c
  ipc: convert invalid scenarios to use WARN_ON
  zlib_deflate/deftree: remove bi_reverse()
  lib/decompress_unlzma: Do a NULL check for pointer
  lib/decompressors: use real out buf size for gunzip with kernel
  fs/affs: make root lookup from blkdev logical size
  sysctl: fix int -> unsigned long assignments in INT_MIN case
  kexec: export KERNEL_IMAGE_SIZE to vmcoreinfo
  kexec: align crash_notes allocation to make it be inside one physical page
  kexec: remove unnecessary test in kimage_alloc_crash_control_pages()
  kexec: split kexec_load syscall from kexec core code
  ...

263 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt [new file with mode: 0644]
Documentation/devicetree/bindings/soc/qcom/qcom,smd.txt
Documentation/features/vm/THP/arch-support.txt
Documentation/virtual/kvm/api.txt
MAINTAINERS
arch/arm/boot/dts/exynos3250-monk.dts
arch/arm/boot/dts/exynos3250-rinato.dts
arch/arm/boot/dts/exynos3250.dtsi
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos4212.dtsi
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/exynos4412-odroidu3.dts
arch/arm/boot/dts/exynos4412-origen.dts
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos4412.dtsi
arch/arm/boot/dts/exynos5250-arndale.dts
arch/arm/boot/dts/exynos5250-smdk5250.dts
arch/arm/boot/dts/exynos5250-snow.dts
arch/arm/boot/dts/exynos5250-spring.dts
arch/arm/boot/dts/exynos5250.dtsi
arch/arm/boot/dts/exynos5422-cpus.dtsi [new file with mode: 0644]
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
arch/arm/boot/dts/qcom-apq8064-cm-qs600.dts
arch/arm/boot/dts/qcom-apq8064-ifc6410.dts
arch/arm/boot/dts/qcom-apq8074-dragonboard.dts
arch/arm/boot/dts/qcom-apq8084-ifc6540.dts
arch/arm/boot/dts/qcom-apq8084-mtp.dts
arch/arm/boot/dts/qcom-apq8084.dtsi
arch/arm/boot/dts/qcom-ipq8064-ap148.dts
arch/arm/boot/dts/qcom-ipq8064.dtsi
arch/arm/boot/dts/qcom-msm8660-surf.dts
arch/arm/boot/dts/qcom-msm8660.dtsi
arch/arm/boot/dts/qcom-msm8960-cdp.dts
arch/arm/boot/dts/qcom-msm8960.dtsi
arch/arm/boot/dts/qcom-msm8974-sony-xperia-honami.dts
arch/arm/boot/dts/qcom-msm8974.dtsi
arch/arm/configs/exynos_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/xen/page.h
arch/arm/kvm/arm.c
arch/arm/kvm/guest.c
arch/arm/kvm/interrupts.S
arch/arm/kvm/reset.c
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/exynos.c
arch/arm/xen/enlighten.c
arch/arm/xen/mm.c
arch/arm64/include/asm/hw_breakpoint.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/hw_breakpoint.c
arch/arm64/kvm/Makefile
arch/arm64/kvm/debug.c [new file with mode: 0644]
arch/arm64/kvm/guest.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp.S
arch/arm64/kvm/reset.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/sys_regs.h
arch/arm64/kvm/sys_regs_generic_v8.c
arch/arm64/kvm/trace.h
arch/hexagon/include/uapi/asm/signal.h
arch/hexagon/kernel/time.c
arch/metag/kernel/irq.c
arch/microblaze/include/uapi/asm/elf.h
arch/nios2/boot/dts/10m50_devboard.dts [new file with mode: 0755]
arch/nios2/configs/10m50_defconfig [new file with mode: 0755]
arch/nios2/kernel/misaligned.c
arch/nios2/kernel/time.c
arch/powerpc/include/asm/kvm_book3s.h
arch/powerpc/include/asm/kvm_book3s_asm.h
arch/powerpc/include/asm/kvm_booke.h
arch/powerpc/include/asm/kvm_host.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kvm/Kconfig
arch/powerpc/kvm/book3s.c
arch/powerpc/kvm/book3s_32_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu_host.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_emulate.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_builtin.c
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rm_xics.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_paired_singles.c
arch/powerpc/kvm/book3s_segment.S
arch/powerpc/kvm/book3s_xics.c
arch/powerpc/kvm/booke.c
arch/powerpc/kvm/e500_mmu.c
arch/powerpc/kvm/powerpc.c
arch/x86/include/asm/xen/page.h
arch/x86/kvm/emulate.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
arch/x86/xen/mmu.c
arch/x86/xen/smp.c
drivers/base/property.c
drivers/block/virtio_blk.c
drivers/block/xen-blkfront.c
drivers/clk/samsung/clk-exynos4.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/exynos-cpufreq.c [deleted file]
drivers/cpufreq/exynos-cpufreq.h [deleted file]
drivers/cpufreq/exynos4x12-cpufreq.c [deleted file]
drivers/cpufreq/exynos5250-cpufreq.c [deleted file]
drivers/input/misc/xen-kbdfront.c
drivers/mtd/nand/Makefile
drivers/mtd/spi-nor/spi-nor.c
drivers/net/dsa/bcm_sf2.c
drivers/net/dsa/bcm_sf2.h
drivers/net/dsa/mv88e6171.c
drivers/net/ethernet/altera/altera_tse_main.c
drivers/net/ethernet/cavium/liquidio/lio_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/ethoc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/jme.c
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/synopsys/Kconfig
drivers/net/phy/Kconfig
drivers/net/phy/Makefile
drivers/net/phy/fixed_phy.c
drivers/net/phy/microchip.c [new file with mode: 0644]
drivers/net/usb/lan78xx.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/vxlan.c
drivers/net/wan/sbni.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/netback.c
drivers/net/xen-netfront.c
drivers/pwm/Kconfig
drivers/pwm/Makefile
drivers/pwm/core.c
drivers/pwm/pwm-atmel-hlcdc.c
drivers/pwm/pwm-atmel-tcb.c
drivers/pwm/pwm-atmel.c
drivers/pwm/pwm-bcm-kona.c
drivers/pwm/pwm-ep93xx.c
drivers/pwm/pwm-imx.c
drivers/pwm/pwm-lpc18xx-sct.c [new file with mode: 0644]
drivers/pwm/pwm-mxs.c
drivers/pwm/pwm-pca9685.c
drivers/pwm/pwm-renesas-tpu.c
drivers/pwm/pwm-rockchip.c
drivers/pwm/pwm-tegra.c
drivers/pwm/pwm-tiecap.c
drivers/pwm/pwm-tiehrpwm.c
drivers/pwm/sysfs.c
drivers/reset/reset-ath79.c
drivers/scsi/xen-scsifront.c
drivers/soc/qcom/smd.c
drivers/soc/qcom/smem.c
drivers/tty/hvc/hvc_xen.c
drivers/tty/serial/Kconfig
drivers/tty/serial/amba-pl011.c
drivers/video/fbdev/xen-fbfront.c
drivers/virtio/virtio_balloon.c
drivers/virtio/virtio_mmio.c
drivers/xen/balloon.c
drivers/xen/biomerge.c
drivers/xen/events/events_base.c
drivers/xen/events/events_fifo.c
drivers/xen/gntalloc.c
drivers/xen/manage.c
drivers/xen/privcmd.c
drivers/xen/swiotlb-xen.c
drivers/xen/tmem.c
drivers/xen/xenbus/xenbus_client.c
drivers/xen/xenbus/xenbus_dev_backend.c
drivers/xen/xenbus/xenbus_probe.c
drivers/xen/xlate_mmu.c
fs/cifs/cifs_ioctl.h [new file with mode: 0644]
fs/cifs/cifsfs.h
fs/cifs/cifspdu.h
fs/cifs/cifssmb.c
fs/cifs/ioctl.c
fs/cifs/smb2pdu.c
fs/cifs/transport.c
fs/dax.c
fs/ufs/balloc.c
include/kvm/arm_arch_timer.h
include/kvm/arm_vgic.h
include/linux/amba/serial.h
include/linux/irqchip/arm-gic-v3.h
include/linux/irqchip/arm-gic.h
include/linux/kvm_host.h
include/linux/microchipphy.h [new file with mode: 0644]
include/linux/netlink.h
include/linux/pwm.h
include/linux/reset.h
include/net/fib_rules.h
include/net/mac80211.h
include/net/netfilter/br_netfilter.h
include/net/netfilter/nf_conntrack.h
include/net/netfilter/nf_tables.h
include/trace/events/kvm.h
include/uapi/linux/elf-em.h
include/uapi/linux/if_ether.h
include/uapi/linux/kvm.h
include/uapi/xen/privcmd.h
include/xen/page.h
include/xen/xen-ops.h
kernel/bpf/syscall.c
kernel/bpf/verifier.c
net/bridge/br_netlink.c
net/bridge/br_vlan.c
net/core/fib_rules.c
net/decnet/dn_rules.c
net/ipv4/fib_rules.c
net/ipv4/ipmr.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_output.c
net/ipv6/fib6_rules.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/mac80211/cfg.c
net/mac80211/mlme.c
net/mac80211/rate.c
net/mac80211/tdls.c
net/mac80211/vht.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/ipset/ip_set_hash_netnet.c
net/netfilter/ipset/ip_set_hash_netportnet.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_synproxy_core.c
net/netfilter/nfnetlink.c
net/netfilter/nfnetlink_queue_core.c
net/netfilter/xt_CT.c
net/netlink/af_netlink.c
net/openvswitch/Kconfig
net/openvswitch/Makefile
net/openvswitch/conntrack.h
net/rds/connection.c
net/rfkill/core.c
net/sctp/protocol.c
net/switchdev/switchdev.c
net/tipc/bcast.c
net/wireless/reg.c
virt/kvm/arm/arch_timer.c
virt/kvm/arm/vgic-v2.c
virt/kvm/arm/vgic-v3.c
virt/kvm/arm/vgic.c
virt/kvm/irqchip.c
virt/kvm/kvm_main.c

index abba93f9d64a5c3244d1a2ff710a837d1d243848..1d6008d51b5552f4be9a71f4bc4e568b004fb1eb 100644 (file)
@@ -490,4 +490,31 @@ X!Ilib/fonts/fonts.c
 !Edrivers/hsi/hsi.c
   </chapter>
 
+  <chapter id="pwm">
+    <title>Pulse-Width Modulation (PWM)</title>
+    <para>
+      Pulse-width modulation is a modulation technique primarily used to
+      control power supplied to electrical devices.
+    </para>
+    <para>
+      The PWM framework provides an abstraction for providers and consumers
+      of PWM signals. A controller that provides one or more PWM signals is
+      registered as <structname>struct pwm_chip</structname>. Providers are
+      expected to embed this structure in a driver-specific structure. This
+      structure contains fields that describe a particular chip.
+    </para>
+    <para>
+      A chip exposes one or more PWM signal sources, each of which is exposed
+      as a <structname>struct pwm_device</structname>. Operations can be
+      performed on PWM devices to control the period, duty cycle, polarity
+      and active state of the signal.
+    </para>
+    <para>
+      Note that PWM devices are exclusive resources: they can only be
+      used by one consumer at a time.
+    </para>
+!Iinclude/linux/pwm.h
+!Edrivers/pwm/core.c
+  </chapter>
+
 </book>
diff --git a/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt b/Documentation/devicetree/bindings/pwm/lpc1850-sct-pwm.txt
new file mode 100644 (file)
index 0000000..36e49d4
--- /dev/null
@@ -0,0 +1,20 @@
+* NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
+
+Required properties:
+  - compatible: Should be "nxp,lpc1850-sct-pwm"
+  - reg: Should contain physical base address and length of pwm registers.
+  - clocks: Must contain an entry for each entry in clock-names.
+    See ../clock/clock-bindings.txt for details.
+  - clock-names: Must include the following entries.
+    - pwm: PWM operating clock.
+  - #pwm-cells: Should be 3. See pwm.txt in this directory for the description
+    of the cells format.
+
+Example:
+  pwm: pwm@40000000 {
+    compatible = "nxp,lpc1850-sct-pwm";
+    reg = <0x40000000 0x1000>;
+    clocks = <&ccu1 CLK_CPU_SCT>;
+    clock-names = "pwm";
+    #pwm-cells = <3>;
+  };
index f65c76db98599253fbf068d89f1fb32e914b4377..97d9b3e1bf399d60a7aadbb8114b704924eb458f 100644 (file)
@@ -37,6 +37,12 @@ The edge is described by the following properties:
        Definition: the identifier of the remote processor in the smd channel
                    allocation table
 
+- qcom,remote-pid:
+       Usage: optional
+       Value type: <u32>
+       Definition: the identifier for the remote processor as known by the rest
+                   of the system.
+
 = SMD DEVICES
 
 In turn, subnodes of the "edges" represent devices tied to SMD channels on that
index 972d02c2a74ccd0ecad5554152ece286f9307b0b..df384e3e845f7b22f121427c6a0c2d352d7648ce 100644 (file)
@@ -20,7 +20,7 @@
     |        ia64: | TODO |
     |        m32r: |  ..  |
     |        m68k: |  ..  |
-    |       metag: |  ..  |
+    |       metag: | TODO |
     |  microblaze: |  ..  |
     |        mips: |  ok  |
     |     mn10300: |  ..  |
index a4ebcb712375478e9bb60a7652d49f4d46b9c978..d9ecceea5a02960615fb25fe74f08e6a8a5e0232 100644 (file)
@@ -2671,7 +2671,7 @@ handled.
 4.87 KVM_SET_GUEST_DEBUG
 
 Capability: KVM_CAP_SET_GUEST_DEBUG
-Architectures: x86, s390, ppc
+Architectures: x86, s390, ppc, arm64
 Type: vcpu ioctl
 Parameters: struct kvm_guest_debug (in)
 Returns: 0 on success; -1 on error
@@ -2693,8 +2693,8 @@ when running. Common control bits are:
 The top 16 bits of the control field are architecture specific control
 flags which can include the following:
 
-  - KVM_GUESTDBG_USE_SW_BP:     using software breakpoints [x86]
-  - KVM_GUESTDBG_USE_HW_BP:     using hardware breakpoints [x86, s390]
+  - KVM_GUESTDBG_USE_SW_BP:     using software breakpoints [x86, arm64]
+  - KVM_GUESTDBG_USE_HW_BP:     using hardware breakpoints [x86, s390, arm64]
   - KVM_GUESTDBG_INJECT_DB:     inject DB type exception [x86]
   - KVM_GUESTDBG_INJECT_BP:     inject BP type exception [x86]
   - KVM_GUESTDBG_EXIT_PENDING:  trigger an immediate guest exit [s390]
@@ -2709,6 +2709,11 @@ updated to the correct (supplied) values.
 The second part of the structure is architecture specific and
 typically contains a set of debug registers.
 
+For arm64 the number of debug registers is implementation defined and
+can be determined by querying the KVM_CAP_GUEST_DEBUG_HW_BPS and
+KVM_CAP_GUEST_DEBUG_HW_WPS capabilities which return a positive number
+indicating the number of supported registers.
+
 When debug events exit the main run loop with the reason
 KVM_EXIT_DEBUG with the kvm_debug_exit_arch part of the kvm_run
 structure containing architecture specific debug information.
@@ -3111,11 +3116,13 @@ data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
 where kvm expects application code to place the data for the next
 KVM_RUN invocation (KVM_EXIT_IO_IN).  Data format is a packed array.
 
+               /* KVM_EXIT_DEBUG */
                struct {
                        struct kvm_debug_exit_arch arch;
                } debug;
 
-Unused.
+If the exit_reason is KVM_EXIT_DEBUG, then a vcpu is processing a debug event
+for which architecture specific information is returned.
 
                /* KVM_EXIT_MMIO */
                struct {
index 59e2c0bc428ff0de5a2d8f22ea693bf615376dae..310da4295c7026e27698e9f8b980adcc1893b774 100644 (file)
@@ -7377,7 +7377,7 @@ F:        drivers/scsi/nsp32*
 NIOS2 ARCHITECTURE
 M:     Ley Foon Tan <lftan@altera.com>
 L:     nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
-T:     git git://git.rocketboards.org/linux-socfpga-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lftan/nios2.git
 S:     Maintained
 F:     arch/nios2/
 
index a5863acc5fff36aa28e1be183a85fc37883cd186..540a0adf2be6dcc94da5b487958cacca4348996d 100644 (file)
                min-microvolt = <1100000>;
                max-microvolt = <2700000>;
        };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       cooling-maps {
+                               map0 {
+                                       /* Correspond to 500MHz at freq_table */
+                                       cooling-device = <&cpu0 5 5>;
+                               };
+                               map1 {
+                                       /* Correspond to 200MHz at freq_table */
+                                       cooling-device = <&cpu0 8 8>;
+                               };
+                       };
+               };
+       };
 };
 
 &adc {
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &exynos_usbphy {
        status = "okay";
 };
index baa9b2f52009eebffbe1a856afb8bbca551ded2f..41a5fafb9aa93a5393cc7b0930f376041f3a5fbb 100644 (file)
                min-microvolt = <1100000>;
                max-microvolt = <2700000>;
        };
+
+       thermal-zones {
+               cpu_thermal: cpu-thermal {
+                       cooling-maps {
+                               map0 {
+                                       /* Corresponds to 500MHz */
+                                       cooling-device = <&cpu0 5 5>;
+                               };
+                               map1 {
+                                       /* Corresponds to 200MHz */
+                                       cooling-device = <&cpu0 8 8>;
+                               };
+                       };
+               };
+       };
 };
 
 &adc {
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &exynos_usbphy {
        status = "okay";
 };
index 2db99433e17fdad0299b672ae87e49b6444a457e..033def482fc3d71693c48bd5a942eda4a7833bbf 100644 (file)
                        compatible = "arm,cortex-a7";
                        reg = <0>;
                        clock-frequency = <1000000000>;
+                       clocks = <&cmu CLK_ARM_CLK>;
+                       clock-names = "cpu";
+                       #cooling-cells = <2>;
+
+                       operating-points = <
+                               1000000 1150000
+                               900000  1112500
+                               800000  1075000
+                               700000  1037500
+                               600000  1000000
+                               500000  962500
+                               400000  925000
+                               300000  887500
+                               200000  850000
+                               100000  850000
+                       >;
                };
 
                cpu1: cpu@1 {
index b0d52b1a646af0cb1154f7216a81da9b3e64ef6a..98c0a368b7778dc3b13ec9e07d476e5eebff9cbb 100644 (file)
                clocks = <&clock CLK_JPEG>;
                clock-names = "jpeg";
                power-domains = <&pd_cam>;
+               iommus = <&sysmmu_jpeg>;
        };
 
        hdmi: hdmi@12D00000 {
index d9c8efeef208d8d6cf5b39f34506083020d7b0fe..538901123d37ea594e682d6d066b2c3b3bd0064b 100644 (file)
@@ -30,6 +30,9 @@
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA00>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-names = "cpu";
+                       operating-points-v2 = <&cpu0_opp_table>;
                        cooling-min-level = <13>;
                        cooling-max-level = <7>;
                        #cooling-cells = <2>; /* min followed by max */
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA01>;
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+       };
+
+       cpu0_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <200000000>;
+                       opp-microvolt = <900000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp01 {
+                       opp-hz = /bits/ 64 <300000000>;
+                       opp-microvolt = <900000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp02 {
+                       opp-hz = /bits/ 64 <400000000>;
+                       opp-microvolt = <925000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp03 {
+                       opp-hz = /bits/ 64 <500000000>;
+                       opp-microvolt = <950000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp04 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <975000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp05 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = <987500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp06 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <1000000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp07 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = <1037500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp08 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <1087500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp09 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1137500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp10 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1187500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp11 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1250000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp12 {
+                       opp-hz = /bits/ 64 <1400000000>;
+                       opp-microvolt = <1287500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp13 {
+                       opp-hz = /bits/ 64 <1500000000>;
+                       opp-microvolt = <1350000>;
+                       clock-latency-ns = <200000>;
+                       turbo-mode;
                };
        };
 };
index ca7d168d1dd62004aa45db808741b3458ae1da61..db52841297a5744d29b86fc167b43dcf1c6a1db6 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 /* RSTN signal for eMMC */
 &sd1_cd {
        samsung,pin-pud = <0>;
index 44684e57ead1e6a60e86731e67f7f5bc0141adfc..8632f35c6c26892fcbea54b71d41cbed29820458 100644 (file)
@@ -13,6 +13,7 @@
 
 /dts-v1/;
 #include "exynos4412-odroid-common.dtsi"
+#include <dt-bindings/gpio/gpio.h>
 
 / {
        model = "Hardkernel ODROID-U3 board based on Exynos4412";
                "Speakers", "SPKL",
                "Speakers", "SPKR";
 };
+
+&spi_1 {
+       pinctrl-names = "default";
+       pinctrl-0 = <&spi1_bus>;
+       cs-gpios = <&gpb 5 GPIO_ACTIVE_HIGH>;
+       status = "okay";
+};
index 84c76310b31288d8542c2a8d1a0965b9a705bca7..9d528af68c1a45ba8b35b4d8c8100abb27afecae 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &fimd {
        pinctrl-0 = <&lcd_clk &lcd_data24 &pwm1_out>;
        pinctrl-names = "default";
index 8848400590184c4ff309564f3788a17a2bf4e9ad..2a1ebb76ebe0084af6ff07a8617df2050675af0c 100644 (file)
        status = "okay";
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &csis_0 {
        status = "okay";
        vddcore-supply = <&ldo8_reg>;
index b78ada70bd051d6ff3cc2bdd5cbb26859fd644eb..ca0e3c15977f13febd2ae550949ae704ecbf33cb 100644 (file)
@@ -30,6 +30,9 @@
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA00>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-names = "cpu";
+                       operating-points-v2 = <&cpu0_opp_table>;
                        cooling-min-level = <13>;
                        cooling-max-level = <7>;
                        #cooling-cells = <2>; /* min followed by max */
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA01>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
 
                cpu@A02 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA02>;
+                       operating-points-v2 = <&cpu0_opp_table>;
                };
 
                cpu@A03 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0xA03>;
+                       operating-points-v2 = <&cpu0_opp_table>;
+               };
+       };
+
+       cpu0_opp_table: opp_table0 {
+               compatible = "operating-points-v2";
+               opp-shared;
+
+               opp00 {
+                       opp-hz = /bits/ 64 <200000000>;
+                       opp-microvolt = <900000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp01 {
+                       opp-hz = /bits/ 64 <300000000>;
+                       opp-microvolt = <900000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp02 {
+                       opp-hz = /bits/ 64 <400000000>;
+                       opp-microvolt = <925000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp03 {
+                       opp-hz = /bits/ 64 <500000000>;
+                       opp-microvolt = <950000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp04 {
+                       opp-hz = /bits/ 64 <600000000>;
+                       opp-microvolt = <975000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp05 {
+                       opp-hz = /bits/ 64 <700000000>;
+                       opp-microvolt = <987500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp06 {
+                       opp-hz = /bits/ 64 <800000000>;
+                       opp-microvolt = <1000000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp07 {
+                       opp-hz = /bits/ 64 <900000000>;
+                       opp-microvolt = <1037500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp08 {
+                       opp-hz = /bits/ 64 <1000000000>;
+                       opp-microvolt = <1087500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp09 {
+                       opp-hz = /bits/ 64 <1100000000>;
+                       opp-microvolt = <1137500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp10 {
+                       opp-hz = /bits/ 64 <1200000000>;
+                       opp-microvolt = <1187500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp11 {
+                       opp-hz = /bits/ 64 <1300000000>;
+                       opp-microvolt = <1250000>;
+                       clock-latency-ns = <200000>;
+               };
+               opp12 {
+                       opp-hz = /bits/ 64 <1400000000>;
+                       opp-microvolt = <1287500>;
+                       clock-latency-ns = <200000>;
+               };
+               opp13 {
+                       opp-hz = /bits/ 64 <1500000000>;
+                       opp-microvolt = <1350000>;
+                       clock-latency-ns = <200000>;
+                       turbo-mode;
                };
        };
 
index 7e728a1b55590abe15e89d109ad0433a86785f17..db3f65f3eb45995d840a7ddc60284b0ff6e85457 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &dp {
        status = "okay";
        samsung,color-space = <0>;
index 4fe186d01f8a52b52f9155d76b1496b7d586ed7e..15aea760c1dadee45c631d78c64366cea7739276 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &dp {
        samsung,color-space = <0>;
        samsung,dynamic-range = <0>;
index b7f4122df456b05438b8f719adaa9eb95a5dfb5f..0720caab5511112026a1d53156deae2cf6338345 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
        status = "okay";
        samsung,spi-src-clk = <0>;
        num-cs = <1>;
+       cs-gpios = <&gpa2 5 GPIO_ACTIVE_HIGH>;
 };
 
 &usbdrd_dwc3 {
index d03f9b8d376d082308fa9e06c515f69ae83386c2..c1edd6d038a905dffd68e895cdafae25c967f160 100644 (file)
        };
 };
 
+&cpu0 {
+       cpu0-supply = <&buck2_reg>;
+};
+
 &dp {
        status = "okay";
        pinctrl-names = "default";
index 4a1f88300a281b8248cad85646e98dca6b4d47cf..b24610ea8c2a93619bfe75770b63d1b67b61d680 100644 (file)
                        compatible = "arm,cortex-a15";
                        reg = <0>;
                        clock-frequency = <1700000000>;
+                       clocks = <&clock CLK_ARM_CLK>;
+                       clock-names = "cpu";
+                       clock-latency = <140000>;
+
+                       operating-points = <
+                               1700000 1300000
+                               1600000 1250000
+                               1500000 1225000
+                               1400000 1200000
+                               1300000 1150000
+                               1200000 1125000
+                               1100000 1100000
+                               1000000 1075000
+                                900000 1050000
+                                800000 1025000
+                                700000 1012500
+                                600000 1000000
+                                500000  975000
+                                400000  950000
+                                300000  937500
+                                200000  925000
+                       >;
                        cooling-min-level = <15>;
                        cooling-max-level = <9>;
                        #cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/exynos5422-cpus.dtsi b/arch/arm/boot/dts/exynos5422-cpus.dtsi
new file mode 100644 (file)
index 0000000..b7f60c8
--- /dev/null
@@ -0,0 +1,81 @@
+/*
+ * SAMSUNG EXYNOS5422 SoC cpu device tree source
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ *             http://www.samsung.com
+ *
+ * The only difference between EXYNOS5422 and EXYNOS5800 is cpu ordering. The
+ * EXYNOS5422 is booting from Cortex-A7 core while the EXYNOS5800 is booting
+ * from Cortex-A15 core.
+ *
+ * EXYNOS5422 based board files can include this file to provide cpu ordering
+ * which could boot a cortex-a7 from cpu0.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+&cpu0 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a7";
+       reg = <0x100>;
+       clock-frequency = <1000000000>;
+       cci-control-port = <&cci_control0>;
+};
+
+&cpu1 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a7";
+       reg = <0x101>;
+       clock-frequency = <1000000000>;
+       cci-control-port = <&cci_control0>;
+};
+
+&cpu2 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a7";
+       reg = <0x102>;
+       clock-frequency = <1000000000>;
+       cci-control-port = <&cci_control0>;
+};
+
+&cpu3 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a7";
+       reg = <0x103>;
+       clock-frequency = <1000000000>;
+       cci-control-port = <&cci_control0>;
+};
+
+&cpu4 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a15";
+       reg = <0x0>;
+       clock-frequency = <1800000000>;
+       cci-control-port = <&cci_control1>;
+};
+
+&cpu5 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a15";
+       reg = <0x1>;
+       clock-frequency = <1800000000>;
+       cci-control-port = <&cci_control1>;
+};
+
+&cpu6 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a15";
+       reg = <0x2>;
+       clock-frequency = <1800000000>;
+       cci-control-port = <&cci_control1>;
+};
+
+&cpu7 {
+       device_type = "cpu";
+       compatible = "arm,cortex-a15";
+       reg = <0x3>;
+       clock-frequency = <1800000000>;
+       cci-control-port = <&cci_control1>;
+};
index 1565667e6f699d171aa769a904a5993698c5203f..79ffdfe712aa4a8ad193d4afd671962edfb73646 100644 (file)
@@ -15,6 +15,7 @@
 #include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/sound/samsung-i2s.h>
 #include "exynos5800.dtsi"
+#include "exynos5422-cpus.dtsi"
 #include "exynos5422-cpu-thermal.dtsi"
 
 / {
index 34ccb260f12a9e6be49f3a41f20544e293687ad0..47c0282bdfca7ce11a8ce0e05119f8df229b9a4f 100644 (file)
@@ -4,6 +4,14 @@
        model = "CompuLab CM-QS600";
        compatible = "qcom,apq8064-cm-qs600", "qcom,apq8064";
 
+       aliases {
+               serial0 = &gsbi7_serial;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                pinctrl@800000 {
                        i2c1_pins: i2c1 {
index 88d6655ddaf6dfae95301dfb97d85f46063b05ca..f3100da082b2a3cbe1a1229a1f6be0a21ab4ca5a 100644 (file)
                serial1 = &gsbi6_serial;
        };
 
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                pinctrl@800000 {
                        card_detect: card_detect {
index d484d08163e9415557e17d05e34c567c7e2d9de1..835bdc71c5ba4e7db71642305e541f5e203d4758 100644 (file)
@@ -6,6 +6,14 @@
        model = "Qualcomm APQ8074 Dragonboard";
        compatible = "qcom,apq8074-dragonboard", "qcom,apq8074";
 
+       aliases {
+               serial0 = &blsp1_uart2;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                serial@f991e000 {
                        status = "ok";
index f7725b96612c65475fec7f67445f4afb54c19e81..c9c2b769554f84d91b0a535e98fb1a070cf6b232 100644 (file)
@@ -5,6 +5,14 @@
        model = "Qualcomm APQ8084/IFC6540";
        compatible = "qcom,apq8084-ifc6540", "qcom,apq8084";
 
+       aliases {
+               serial0 = &blsp2_uart2;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                serial@f995e000 {
                        status = "okay";
index cb43acfc5d1d9d0773243d42874d5bd1f5243e4f..3016c7048d446cb5ee1e0810b37986663eb20398 100644 (file)
@@ -5,6 +5,14 @@
        model = "Qualcomm APQ 8084-MTP";
        compatible = "qcom,apq8084-mtp", "qcom,apq8084";
 
+       aliases {
+               serial0 = &blsp2_uart2;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                serial@f995e000 {
                        status = "okay";
index 7084010ee61ba463024a9e4ba5d48caf327e3c05..0554fbd72c40ba78f0c7205cdd050821d8b52b43 100644 (file)
                        interrupts = <0 208 0>;
                };
 
-               serial@f995e000 {
+               blsp2_uart2: serial@f995e000 {
                        compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
                        reg = <0xf995e000 0x1000>;
                        interrupts = <0 114 0x0>;
index 55b2910efd872170ca19a99d867be4a34dd60794..d501382493e3d6eb97f4a4ec3635734240180922 100644 (file)
@@ -4,6 +4,14 @@
        model = "Qualcomm IPQ8064/AP148";
        compatible = "qcom,ipq8064-ap148", "qcom,ipq8064";
 
+       aliases {
+               serial0 = &gsbi4_serial;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        reserved-memory {
                #address-cells = <1>;
                #size-cells = <1>;
index 9f727d8eadf6998561e748e0400e46299417f9b3..fa698635eea0d1f859d57c766c679438a416b55d 100644 (file)
 
                        syscon-tcsr = <&tcsr>;
 
-                       serial@16340000 {
+                       gsbi4_serial: serial@16340000 {
                                compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
                                reg = <0x16340000 0x1000>,
                                      <0x16300000 0x1000>;
index e0883c376248073158153de3341b6bae038c58f9..b17f379e8c2afe3f92fe1f905fbe501fccad9118 100644 (file)
@@ -6,6 +6,14 @@
        model = "Qualcomm MSM8660 SURF";
        compatible = "qcom,msm8660-surf", "qcom,msm8660";
 
+       aliases {
+               serial0 = &gsbi12_serial;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                gsbi@19c00000 {
                        status = "ok";
index ef2fe72b54c91e5b139a38788e5695fe991855cc..e5f7f33aa4677739f9bc1e6d57d969db9b856cf9 100644 (file)
@@ -98,7 +98,7 @@
 
                        syscon-tcsr = <&tcsr>;
 
-                       serial@19c40000 {
+                       gsbi12_serial: serial@19c40000 {
                                compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
                                reg = <0x19c40000 0x1000>,
                                      <0x19c00000 0x1000>;
index fad71d5527b0f80d88e7bc433318d53ca35ec8ca..b72a55462caf1b2aaf99d8a293ad47c79ef7858d 100644 (file)
@@ -6,6 +6,14 @@
        model = "Qualcomm MSM8960 CDP";
        compatible = "qcom,msm8960-cdp", "qcom,msm8960";
 
+       aliases {
+               serial0 = &gsbi5_serial;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        soc {
                gsbi@16400000 {
                        status = "ok";
index 2096a94c9b525faa47df980d751763804f10e7c5..134cd91d68ece1034077c0e36f154756f6143cb6 100644 (file)
 
                        syscon-tcsr = <&tcsr>;
 
-                       serial@16440000 {
+                       gsbi5_serial: serial@16440000 {
                                compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
                                reg = <0x16440000 0x1000>,
                                      <0x16400000 0x1000>;
index 9bc72a3356e45ea7c79d43daf9eae826342e8f52..016f9ad9392a9cbad2702834aff6648add614b7b 100644 (file)
@@ -6,6 +6,14 @@
        model = "Sony Xperia Z1";
        compatible = "sony,xperia-honami", "qcom,msm8974";
 
+       aliases {
+               serial0 = &blsp1_uart2;
+       };
+
+       chosen {
+               stdout-path = "serial0:115200n8";
+       };
+
        memory@0 {
                reg = <0 0x40000000>, <0x40000000 0x40000000>;
                device_type = "memory";
index d7c99b894a491c7011e8a95f3800f5585aeb49e4..ab8e5725046809e53b9ef7ce39b1f6ca33d35dcc 100644 (file)
                        hwlocks = <&tcsr_mutex 3>;
                };
 
-               serial@f991e000 {
+               blsp1_uart2: serial@f991e000 {
                        compatible = "qcom,msm-uartdm-v1.4", "qcom,msm-uartdm";
                        reg = <0xf991e000 0x1000>;
                        interrupts = <0 108 0x0>;
index 3eaf8fbaf60346330215f43bbfd2a28bd4f10857..1ff2bfa2e183f45087571875197888e81e3cc8ad 100644 (file)
@@ -27,6 +27,8 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPUFREQ_DT=y
 CONFIG_CPU_IDLE=y
 CONFIG_ARM_EXYNOS_CPUIDLE=y
 CONFIG_VFP=y
@@ -94,6 +96,7 @@ CONFIG_CHARGER_MAX14577=y
 CONFIG_CHARGER_MAX77693=y
 CONFIG_CHARGER_TPS65090=y
 CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_NTC_THERMISTOR=y
 CONFIG_SENSORS_PWM_FAN=y
 CONFIG_SENSORS_INA2XX=y
 CONFIG_THERMAL=y
@@ -144,6 +147,8 @@ CONFIG_SND=y
 CONFIG_SND_SOC=y
 CONFIG_SND_SOC_SAMSUNG=y
 CONFIG_SND_SOC_SNOW=y
+CONFIG_SND_SOC_ODROIDX2=y
+CONFIG_SND_SIMPLE_CARD=y
 CONFIG_USB=y
 CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
 CONFIG_USB_XHCI_HCD=y
index f84471d5d2db3ed5a3f8137dfa76fce92c851c29..03deb7fb35e8999522baceb89fbae066d978f7e3 100644 (file)
@@ -362,6 +362,7 @@ CONFIG_POWER_RESET_KEYSTONE=y
 CONFIG_POWER_RESET_RMOBILE=y
 CONFIG_SENSORS_LM90=y
 CONFIG_SENSORS_LM95245=y
+CONFIG_SENSORS_NTC_THERMISTOR=m
 CONFIG_THERMAL=y
 CONFIG_CPU_THERMAL=y
 CONFIG_RCAR_THERMAL=y
@@ -410,7 +411,9 @@ CONFIG_REGULATOR_MAX8907=y
 CONFIG_REGULATOR_MAX8973=y
 CONFIG_REGULATOR_MAX77686=y
 CONFIG_REGULATOR_MAX77693=m
+CONFIG_REGULATOR_MAX77802=m
 CONFIG_REGULATOR_PALMAS=y
+CONFIG_REGULATOR_PBIAS=y
 CONFIG_REGULATOR_PWM=m
 CONFIG_REGULATOR_S2MPS11=y
 CONFIG_REGULATOR_S5M8767=y
@@ -509,8 +512,6 @@ CONFIG_USB_CHIPIDEA_HOST=y
 CONFIG_AB8500_USB=y
 CONFIG_KEYSTONE_USB_PHY=y
 CONFIG_OMAP_USB3=y
-CONFIG_SAMSUNG_USB2PHY=y
-CONFIG_SAMSUNG_USB3PHY=y
 CONFIG_USB_GPIO_VBUS=y
 CONFIG_USB_ISP1301=y
 CONFIG_USB_MXS_PHY=y
@@ -635,6 +636,7 @@ CONFIG_EXTCON=y
 CONFIG_TI_AEMIF=y
 CONFIG_IIO=y
 CONFIG_AT91_ADC=m
+CONFIG_EXYNOS_ADC=m
 CONFIG_XILINX_XADC=y
 CONFIG_AK8975=y
 CONFIG_PWM=y
index e896d2c196e63b79365e6f637c7b697c56c48f71..dcba0fa5176e990f8a23333f08e34e192351b407 100644 (file)
@@ -231,4 +231,9 @@ static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
+static inline void kvm_arm_init_debug(void) {}
+static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+
 #endif /* __ARM_KVM_HOST_H__ */
index 98b1084f8282e82101ae86de867cc0c012240aca..127956353b0060fc1a4e6d7edacdcc0a7219e201 100644 (file)
@@ -34,7 +34,19 @@ typedef struct xpaddr {
 unsigned long __pfn_to_mfn(unsigned long pfn);
 extern struct rb_root phys_to_mach;
 
-static inline unsigned long pfn_to_mfn(unsigned long pfn)
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+       return pfn;
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+       return gfn;
+}
+
+/* Pseudo-physical <-> BUS conversion */
+static inline unsigned long pfn_to_bfn(unsigned long pfn)
 {
        unsigned long mfn;
 
@@ -47,16 +59,16 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
        return pfn;
 }
 
-static inline unsigned long mfn_to_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_pfn(unsigned long bfn)
 {
-       return mfn;
+       return bfn;
 }
 
-#define mfn_to_local_pfn(mfn) mfn_to_pfn(mfn)
+#define bfn_to_local_pfn(bfn)  bfn_to_pfn(bfn)
 
-/* VIRT <-> MACHINE conversion */
-#define virt_to_mfn(v)         (pfn_to_mfn(virt_to_pfn(v)))
-#define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)         (pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(m)         (__va(gfn_to_pfn(m) << PAGE_SHIFT))
 
 /* Only used in PV code. But ARM guests are always HVM. */
 static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
@@ -96,7 +108,7 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 
 bool xen_arch_need_swiotlb(struct device *dev,
                           unsigned long pfn,
-                          unsigned long mfn);
+                          unsigned long bfn);
 unsigned long xen_get_swiotlb_free_pages(unsigned int order);
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
index bc738d2b83929e6a05762ea8b73af6ec1ed86837..ce404a5c30628c72533a62e430c6150a54032fea 100644 (file)
@@ -125,6 +125,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        if (ret)
                goto out_free_stage2_pgd;
 
+       kvm_vgic_early_init(kvm);
        kvm_timer_init(kvm);
 
        /* Mark the initial VMID generation invalid */
@@ -249,6 +250,7 @@ out:
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
+       kvm_vgic_vcpu_early_init(vcpu);
 }
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
@@ -278,6 +280,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        /* Set up the timer */
        kvm_timer_vcpu_init(vcpu);
 
+       kvm_arm_reset_debug_ptr(vcpu);
+
        return 0;
 }
 
@@ -301,13 +305,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        kvm_arm_set_running_vcpu(NULL);
 }
 
-int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
-                                       struct kvm_guest_debug *dbg)
-{
-       return -EINVAL;
-}
-
-
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
@@ -528,10 +525,20 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                if (vcpu->arch.pause)
                        vcpu_pause(vcpu);
 
-               kvm_vgic_flush_hwstate(vcpu);
+               /*
+                * Disarming the background timer must be done in a
+                * preemptible context, as this call may sleep.
+                */
                kvm_timer_flush_hwstate(vcpu);
 
+               /*
+                * Preparing the interrupts to be injected also
+                * involves poking the GIC, which must be done in a
+                * non-preemptible context.
+                */
                preempt_disable();
+               kvm_vgic_flush_hwstate(vcpu);
+
                local_irq_disable();
 
                /*
@@ -544,12 +551,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 
                if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) {
                        local_irq_enable();
+                       kvm_vgic_sync_hwstate(vcpu);
                        preempt_enable();
                        kvm_timer_sync_hwstate(vcpu);
-                       kvm_vgic_sync_hwstate(vcpu);
                        continue;
                }
 
+               kvm_arm_setup_debug(vcpu);
+
                /**************************************************************
                 * Enter the guest
                 */
@@ -564,6 +573,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 * Back from guest
                 *************************************************************/
 
+               kvm_arm_clear_debug(vcpu);
+
                /*
                 * We may have taken a host interrupt in HYP mode (ie
                 * while executing the guest). This interrupt is still
@@ -586,11 +597,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
                 */
                kvm_guest_exit();
                trace_kvm_exit(kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
-               preempt_enable();
 
+               kvm_vgic_sync_hwstate(vcpu);
+
+               preempt_enable();
 
                kvm_timer_sync_hwstate(vcpu);
-               kvm_vgic_sync_hwstate(vcpu);
 
                ret = handle_exit(vcpu, run, ret);
        }
@@ -921,6 +933,8 @@ static void cpu_init_hyp_mode(void *dummy)
        vector_ptr = (unsigned long)__kvm_hyp_vector;
 
        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+
+       kvm_arm_init_debug();
 }
 
 static int hyp_init_cpu_notify(struct notifier_block *self,
index d503fbb787d362752b9b6b688b2829e19b675095..96e935bbc38c8b4fd906aeacdc27ca28696b596a 100644 (file)
@@ -290,3 +290,9 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
        return -EINVAL;
 }
+
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       return -EINVAL;
+}
index 568494dbbbb5b169581b656ed5126a0164b5a621..900ef6dd8f727e9a510b79cd0d9e36dbafa26cfa 100644 (file)
@@ -361,10 +361,6 @@ hyp_hvc:
        @ Check syndrome register
        mrc     p15, 4, r1, c5, c2, 0   @ HSR
        lsr     r0, r1, #HSR_EC_SHIFT
-#ifdef CONFIG_VFPv3
-       cmp     r0, #HSR_EC_CP_0_13
-       beq     switch_to_guest_vfp
-#endif
        cmp     r0, #HSR_EC_HVC
        bne     guest_trap              @ Not HVC instr.
 
@@ -378,7 +374,10 @@ hyp_hvc:
        cmp     r2, #0
        bne     guest_trap              @ Guest called HVC
 
-host_switch_to_hyp:
+       /*
+        * Getting here means host called HVC, we shift parameters and branch
+        * to Hyp function.
+        */
        pop     {r0, r1, r2}
 
        /* Check for __hyp_get_vectors */
@@ -409,6 +408,10 @@ guest_trap:
 
        @ Check if we need the fault information
        lsr     r1, r1, #HSR_EC_SHIFT
+#ifdef CONFIG_VFPv3
+       cmp     r1, #HSR_EC_CP_0_13
+       beq     switch_to_guest_vfp
+#endif
        cmp     r1, #HSR_EC_IABT
        mrceq   p15, 4, r2, c6, c0, 2   @ HIFAR
        beq     2f
@@ -477,7 +480,6 @@ guest_trap:
  */
 #ifdef CONFIG_VFPv3
 switch_to_guest_vfp:
-       load_vcpu                       @ Load VCPU pointer to r0
        push    {r3-r7}
 
        @ NEON/VFP used.  Turn on VFP access.
index f558c073c02378a449a05d337d47a8161ae5c51d..eeb85858d6bbe6dff02ac453ea2f19a889cd8810 100644 (file)
@@ -77,7 +77,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        kvm_reset_coprocs(vcpu);
 
        /* Reset arch_timer context */
-       kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
-       return 0;
+       return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
index 4c4858c566d8c6e22a8dba63b967d85708a08516..3a10f1a8317ae7a053ed997da88a06ddd5311b57 100644 (file)
@@ -15,6 +15,7 @@ menuconfig ARCH_EXYNOS
        select ARM_AMBA
        select ARM_GIC
        select COMMON_CLK_SAMSUNG
+       select EXYNOS_THERMAL
        select HAVE_ARM_SCU if SMP
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C2410_WATCHDOG if WATCHDOG
@@ -24,6 +25,7 @@ menuconfig ARCH_EXYNOS
        select PM_GENERIC_DOMAINS if PM
        select S5P_DEV_MFC
        select SRAM
+       select THERMAL
        select MFD_SYSCON
        help
          Support for SAMSUNG EXYNOS SoCs (EXYNOS4/5)
index 5f8ddcdeeacf1117d92313e6cb34608be136a955..1c47aee31e9cc60aeabc8c504b41c76c2379a435 100644 (file)
@@ -225,7 +225,11 @@ static void __init exynos_init_irq(void)
 }
 
 static const struct of_device_id exynos_cpufreq_matches[] = {
+       { .compatible = "samsung,exynos3250", .data = "cpufreq-dt" },
        { .compatible = "samsung,exynos4210", .data = "cpufreq-dt" },
+       { .compatible = "samsung,exynos4212", .data = "cpufreq-dt" },
+       { .compatible = "samsung,exynos4412", .data = "cpufreq-dt" },
+       { .compatible = "samsung,exynos5250", .data = "cpufreq-dt" },
        { /* sentinel */ }
 };
 
index c50c8d33f874075a6228150bbc362ef0aaaf41c1..eeeab074e1542a3567de5dd4d79c5dab72c083da 100644 (file)
@@ -49,35 +49,35 @@ static __read_mostly unsigned int xen_events_irq;
 
 static __initdata struct device_node *xen_node;
 
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
-                              xen_pfn_t *mfn, int nr,
+                              xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid,
                               struct page **pages)
 {
-       return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+       return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                         prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 /* Not used by XENFEAT_auto_translated guests. */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
-                              xen_pfn_t mfn, int nr,
+                              xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
 {
        return -ENOSYS;
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int nr, struct page **pages)
 {
        return xen_xlate_unmap_gfn_range(vma, nr, pages);
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
 
 static void xen_percpu_init(void)
 {
index 03e75fef15b8254483929f1332de033a1eeeac05..6dd911d1f0ac6c0f2c328abbee593d85d5bd5bcd 100644 (file)
@@ -139,9 +139,9 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
 
 bool xen_arch_need_swiotlb(struct device *dev,
                           unsigned long pfn,
-                          unsigned long mfn)
+                          unsigned long bfn)
 {
-       return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
+       return (!hypercall_cflush && (pfn != bfn) && !is_device_dma_coherent(dev));
 }
 
 int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
index 52b484b6aa1a7fec251a72b028f655523c74236a..4c47cb2fbb526f7ae445e4f1b46178ba99934283 100644 (file)
@@ -16,6 +16,8 @@
 #ifndef __ASM_HW_BREAKPOINT_H
 #define __ASM_HW_BREAKPOINT_H
 
+#include <asm/cputype.h>
+
 #ifdef __KERNEL__
 
 struct arch_hw_breakpoint_ctrl {
@@ -132,5 +134,17 @@ static inline void ptrace_hw_copy_thread(struct task_struct *task)
 
 extern struct pmu perf_ops_bp;
 
+/* Determine number of BRP registers available. */
+static inline int get_num_brps(void)
+{
+       return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
+}
+
+/* Determine number of WRP registers available. */
+static inline int get_num_wrps(void)
+{
+       return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
+}
+
 #endif /* __KERNEL__ */
 #endif /* __ASM_BREAKPOINT_H */
index ac6fafb95fe71e48048fe3831f226853f2f4914d..7605e095217f7c2434594327ae4b3453cb758f42 100644 (file)
 #define HSTR_EL2_TTEE  (1 << 16)
 #define HSTR_EL2_T(x)  (1 << x)
 
+/* Hyp Coproccessor Trap Register Shifts */
+#define CPTR_EL2_TFP_SHIFT 10
+
 /* Hyp Coprocessor Trap Register */
 #define CPTR_EL2_TCPAC (1 << 31)
 #define CPTR_EL2_TTA   (1 << 20)
-#define CPTR_EL2_TFP   (1 << 10)
+#define CPTR_EL2_TFP   (1 << CPTR_EL2_TFP_SHIFT)
 
 /* Hyp Debug Configuration Register bits */
 #define MDCR_EL2_TDRA          (1 << 11)
index 3c5fe685a2d69a55daf68fe40fa782a0843bb2f5..67fa0de3d48324cc19a06871904f619189ed27da 100644 (file)
 #define        CNTKCTL_EL1     20      /* Timer Control Register (EL1) */
 #define        PAR_EL1         21      /* Physical Address Register */
 #define MDSCR_EL1      22      /* Monitor Debug System Control Register */
-#define DBGBCR0_EL1    23      /* Debug Breakpoint Control Registers (0-15) */
-#define DBGBCR15_EL1   38
-#define DBGBVR0_EL1    39      /* Debug Breakpoint Value Registers (0-15) */
-#define DBGBVR15_EL1   54
-#define DBGWCR0_EL1    55      /* Debug Watchpoint Control Registers (0-15) */
-#define DBGWCR15_EL1   70
-#define DBGWVR0_EL1    71      /* Debug Watchpoint Value Registers (0-15) */
-#define DBGWVR15_EL1   86
-#define MDCCINT_EL1    87      /* Monitor Debug Comms Channel Interrupt Enable Reg */
+#define MDCCINT_EL1    23      /* Monitor Debug Comms Channel Interrupt Enable Reg */
 
 /* 32bit specific registers. Keep them at the end of the range */
-#define        DACR32_EL2      88      /* Domain Access Control Register */
-#define        IFSR32_EL2      89      /* Instruction Fault Status Register */
-#define        FPEXC32_EL2     90      /* Floating-Point Exception Control Register */
-#define        DBGVCR32_EL2    91      /* Debug Vector Catch Register */
-#define        TEECR32_EL1     92      /* ThumbEE Configuration Register */
-#define        TEEHBR32_EL1    93      /* ThumbEE Handler Base Register */
-#define        NR_SYS_REGS     94
+#define        DACR32_EL2      24      /* Domain Access Control Register */
+#define        IFSR32_EL2      25      /* Instruction Fault Status Register */
+#define        FPEXC32_EL2     26      /* Floating-Point Exception Control Register */
+#define        DBGVCR32_EL2    27      /* Debug Vector Catch Register */
+#define        TEECR32_EL1     28      /* ThumbEE Configuration Register */
+#define        TEEHBR32_EL1    29      /* ThumbEE Handler Base Register */
+#define        NR_SYS_REGS     30
 
 /* 32bit mapping */
 #define c0_MPIDR       (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
@@ -132,6 +124,8 @@ extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
 extern u64 __vgic_v3_get_ich_vtr_el2(void);
 
+extern u32 __kvm_get_mdcr_el2(void);
+
 #endif
 
 #endif /* __ARM_KVM_ASM_H__ */
index 2709db2a7eac78a49fc75c072810b9ac41cc9e0a..415938dc45cff94600625963b71011882f3a3ae4 100644 (file)
@@ -103,15 +103,34 @@ struct kvm_vcpu_arch {
 
        /* HYP configuration */
        u64 hcr_el2;
+       u32 mdcr_el2;
 
        /* Exception Information */
        struct kvm_vcpu_fault_info fault;
 
-       /* Debug state */
+       /* Guest debug state */
        u64 debug_flags;
 
+       /*
+        * We maintain more than a single set of debug registers to support
+        * debugging the guest from the host and to maintain separate host and
+        * guest state during world switches. vcpu_debug_state are the debug
+        * registers of the vcpu as the guest sees them.  host_debug_state are
+        * the host registers which are saved and restored during
+        * world switches. external_debug_state contains the debug
+        * values we want to debug the guest. This is set via the
+        * KVM_SET_GUEST_DEBUG ioctl.
+        *
+        * debug_ptr points to the set of debug registers that should be loaded
+        * onto the hardware when running the guest.
+        */
+       struct kvm_guest_debug_arch *debug_ptr;
+       struct kvm_guest_debug_arch vcpu_debug_state;
+       struct kvm_guest_debug_arch external_debug_state;
+
        /* Pointer to host CPU context */
        kvm_cpu_context_t *host_cpu_context;
+       struct kvm_guest_debug_arch host_debug_state;
 
        /* VGIC state */
        struct vgic_cpu vgic_cpu;
@@ -122,6 +141,17 @@ struct kvm_vcpu_arch {
         * here.
         */
 
+       /*
+        * Guest registers we preserve during guest debugging.
+        *
+        * These shadow registers are updated by the kvm_handle_sys_reg
+        * trap handler if the guest accesses or updates them while we
+        * are using guest debug.
+        */
+       struct {
+               u32     mdscr_el1;
+       } guest_debug_preserved;
+
        /* Don't run the guest */
        bool pause;
 
@@ -216,15 +246,15 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
                     hyp_stack_ptr, vector_ptr);
 }
 
-struct vgic_sr_vectors {
-       void    *save_vgic;
-       void    *restore_vgic;
-};
-
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
+void kvm_arm_init_debug(void);
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HOST_H__ */
index d26832022127e822d9912f19ca73fe6ea794a309..0cd7b5947dfcfc635c8f6c7de4480f8b015d8fd6 100644 (file)
@@ -53,14 +53,20 @@ struct kvm_regs {
        struct user_fpsimd_state fp_regs;
 };
 
-/* Supported Processor Types */
+/*
+ * Supported CPU Targets - Adding a new target type is not recommended,
+ * unless there are some special registers not supported by the
+ * genericv8 syreg table.
+ */
 #define KVM_ARM_TARGET_AEM_V8          0
 #define KVM_ARM_TARGET_FOUNDATION_V8   1
 #define KVM_ARM_TARGET_CORTEX_A57      2
 #define KVM_ARM_TARGET_XGENE_POTENZA   3
 #define KVM_ARM_TARGET_CORTEX_A53      4
+/* Generic ARM v8 target */
+#define KVM_ARM_TARGET_GENERIC_V8      5
 
-#define KVM_ARM_NUM_TARGETS            5
+#define KVM_ARM_NUM_TARGETS            6
 
 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT      0
@@ -100,12 +106,39 @@ struct kvm_sregs {
 struct kvm_fpu {
 };
 
+/*
+ * See v8 ARM ARM D7.3: Debug Registers
+ *
+ * The architectural limit is 16 debug registers of each type although
+ * in practice there are usually less (see ID_AA64DFR0_EL1).
+ *
+ * Although the control registers are architecturally defined as 32
+ * bits wide we use a 64 bit structure here to keep parity with
+ * KVM_GET/SET_ONE_REG behaviour which treats all system registers as
+ * 64 bit values. It also allows for the possibility of the
+ * architecture expanding the control registers without having to
+ * change the userspace ABI.
+ */
+#define KVM_ARM_MAX_DBG_REGS 16
 struct kvm_guest_debug_arch {
+       __u64 dbg_bcr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_bvr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_wcr[KVM_ARM_MAX_DBG_REGS];
+       __u64 dbg_wvr[KVM_ARM_MAX_DBG_REGS];
 };
 
 struct kvm_debug_exit_arch {
+       __u32 hsr;
+       __u64 far;      /* used for watchpoints */
 };
 
+/*
+ * Architecture specific defines for kvm_guest_debug->control
+ */
+
+#define KVM_GUESTDBG_USE_SW_BP         (1 << 16)
+#define KVM_GUESTDBG_USE_HW            (1 << 17)
+
 struct kvm_sync_regs {
 };
 
index c99701a34d7b3c9238c80e1e4ebbaeba6e3488c3..8d89cf8dae5556851365e2cee335599b4d8eb359 100644 (file)
@@ -116,17 +116,22 @@ int main(void)
   DEFINE(VCPU_FAR_EL2,         offsetof(struct kvm_vcpu, arch.fault.far_el2));
   DEFINE(VCPU_HPFAR_EL2,       offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
   DEFINE(VCPU_DEBUG_FLAGS,     offsetof(struct kvm_vcpu, arch.debug_flags));
+  DEFINE(VCPU_DEBUG_PTR,       offsetof(struct kvm_vcpu, arch.debug_ptr));
+  DEFINE(DEBUG_BCR,            offsetof(struct kvm_guest_debug_arch, dbg_bcr));
+  DEFINE(DEBUG_BVR,            offsetof(struct kvm_guest_debug_arch, dbg_bvr));
+  DEFINE(DEBUG_WCR,            offsetof(struct kvm_guest_debug_arch, dbg_wcr));
+  DEFINE(DEBUG_WVR,            offsetof(struct kvm_guest_debug_arch, dbg_wvr));
   DEFINE(VCPU_HCR_EL2,         offsetof(struct kvm_vcpu, arch.hcr_el2));
+  DEFINE(VCPU_MDCR_EL2,        offsetof(struct kvm_vcpu, arch.mdcr_el2));
   DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HOST_CONTEXT,    offsetof(struct kvm_vcpu, arch.host_cpu_context));
+  DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
   DEFINE(VCPU_TIMER_CNTV_CTL,  offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
   DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
   DEFINE(KVM_TIMER_CNTVOFF,    offsetof(struct kvm, arch.timer.cntvoff));
   DEFINE(KVM_TIMER_ENABLED,    offsetof(struct kvm, arch.timer.enabled));
   DEFINE(VCPU_KVM,             offsetof(struct kvm_vcpu, kvm));
   DEFINE(VCPU_VGIC_CPU,                offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_SAVE_FN,         offsetof(struct vgic_sr_vectors, save_vgic));
-  DEFINE(VGIC_RESTORE_FN,      offsetof(struct vgic_sr_vectors, restore_vgic));
   DEFINE(VGIC_V2_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
   DEFINE(VGIC_V2_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
   DEFINE(VGIC_V2_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
index 003bc3d50636f585e3777661ad270baedaf1fe15..c97040ecf838096069e1ebd0fdd50f2d0050b5ad 100644 (file)
@@ -48,18 +48,6 @@ static DEFINE_PER_CPU(int, stepping_kernel_bp);
 static int core_num_brps;
 static int core_num_wrps;
 
-/* Determine number of BRP registers available. */
-static int get_num_brps(void)
-{
-       return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
-}
-
-/* Determine number of WRP registers available. */
-static int get_num_wrps(void)
-{
-       return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
-}
-
 int hw_breakpoint_slots(int type)
 {
        /*
index f90f4aa7f88d9d60ba4e8b53645c7d81d15f3e07..1949fe5f54246a3f14983753570f4e4d0f978e76 100644 (file)
@@ -17,7 +17,7 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/psci.o $(ARM)/perf.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
-kvm-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
+kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
diff --git a/arch/arm64/kvm/debug.c b/arch/arm64/kvm/debug.c
new file mode 100644 (file)
index 0000000..47e5f0f
--- /dev/null
@@ -0,0 +1,217 @@
+/*
+ * Debug and Guest Debug support
+ *
+ * Copyright (C) 2015 - Linaro Ltd
+ * Author: Alex Bennée <alex.bennee@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/hw_breakpoint.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_emulate.h>
+
+#include "trace.h"
+
+/* These are the bits of MDSCR_EL1 we may manipulate */
+#define MDSCR_EL1_DEBUG_MASK   (DBG_MDSCR_SS | \
+                               DBG_MDSCR_KDE | \
+                               DBG_MDSCR_MDE)
+
+static DEFINE_PER_CPU(u32, mdcr_el2);
+
+/**
+ * save/restore_guest_debug_regs
+ *
+ * For some debug operations we need to tweak some guest registers. As
+ * a result we need to save the state of those registers before we
+ * make those modifications.
+ *
+ * Guest access to MDSCR_EL1 is trapped by the hypervisor and handled
+ * after we have restored the preserved value to the main context.
+ */
+static void save_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.guest_debug_preserved.mdscr_el1 = vcpu_sys_reg(vcpu, MDSCR_EL1);
+
+       trace_kvm_arm_set_dreg32("Saved MDSCR_EL1",
+                               vcpu->arch.guest_debug_preserved.mdscr_el1);
+}
+
+static void restore_guest_debug_regs(struct kvm_vcpu *vcpu)
+{
+       vcpu_sys_reg(vcpu, MDSCR_EL1) = vcpu->arch.guest_debug_preserved.mdscr_el1;
+
+       trace_kvm_arm_set_dreg32("Restored MDSCR_EL1",
+                               vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+/**
+ * kvm_arm_init_debug - grab what we need for debug
+ *
+ * Currently the sole task of this function is to retrieve the initial
+ * value of mdcr_el2 so we can preserve MDCR_EL2.HPMN which has
+ * presumably been set-up by some knowledgeable bootcode.
+ *
+ * It is called once per-cpu during CPU hyp initialisation.
+ */
+
+void kvm_arm_init_debug(void)
+{
+       __this_cpu_write(mdcr_el2, kvm_call_hyp(__kvm_get_mdcr_el2));
+}
+
+/**
+ * kvm_arm_reset_debug_ptr - reset the debug ptr to point to the vcpu state
+ */
+
+void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu)
+{
+       vcpu->arch.debug_ptr = &vcpu->arch.vcpu_debug_state;
+}
+
+/**
+ * kvm_arm_setup_debug - set up debug related stuff
+ *
+ * @vcpu:      the vcpu pointer
+ *
+ * This is called before each entry into the hypervisor to setup any
+ * debug related registers. Currently this just ensures we will trap
+ * access to:
+ *  - Performance monitors (MDCR_EL2_TPM/MDCR_EL2_TPMCR)
+ *  - Debug ROM Address (MDCR_EL2_TDRA)
+ *  - OS related registers (MDCR_EL2_TDOSA)
+ *
+ * Additionally, KVM only traps guest accesses to the debug registers if
+ * the guest is not actively using them (see the KVM_ARM64_DEBUG_DIRTY
+ * flag on vcpu->arch.debug_flags).  Since the guest must not interfere
+ * with the hardware state when debugging the guest, we must ensure that
+ * trapping is enabled whenever we are debugging the guest using the
+ * debug registers.
+ */
+
+void kvm_arm_setup_debug(struct kvm_vcpu *vcpu)
+{
+       bool trap_debug = !(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY);
+
+       trace_kvm_arm_setup_debug(vcpu, vcpu->guest_debug);
+
+       vcpu->arch.mdcr_el2 = __this_cpu_read(mdcr_el2) & MDCR_EL2_HPMN_MASK;
+       vcpu->arch.mdcr_el2 |= (MDCR_EL2_TPM |
+                               MDCR_EL2_TPMCR |
+                               MDCR_EL2_TDRA |
+                               MDCR_EL2_TDOSA);
+
+       /* Is Guest debugging in effect? */
+       if (vcpu->guest_debug) {
+               /* Route all software debug exceptions to EL2 */
+               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDE;
+
+               /* Save guest debug state */
+               save_guest_debug_regs(vcpu);
+
+               /*
+                * Single Step (ARM ARM D2.12.3 The software step state
+                * machine)
+                *
+                * If we are doing Single Step we need to manipulate
+                * the guest's MDSCR_EL1.SS and PSTATE.SS. Once the
+                * step has occurred the hypervisor will trap the
+                * debug exception and we return to userspace.
+                *
+                * If the guest attempts to single step its userspace
+                * we would have to deal with a trapped exception
+                * while in the guest kernel. Because this would be
+                * hard to unwind we suppress the guest's ability to
+                * do so by masking MDSCR_EL1.SS.
+                *
+                * This confuses guest debuggers which use
+                * single-step behind the scenes but everything
+                * returns to normal once the host is no longer
+                * debugging the system.
+                */
+               if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
+                       *vcpu_cpsr(vcpu) |=  DBG_SPSR_SS;
+                       vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_SS;
+               } else {
+                       vcpu_sys_reg(vcpu, MDSCR_EL1) &= ~DBG_MDSCR_SS;
+               }
+
+               trace_kvm_arm_set_dreg32("SPSR_EL2", *vcpu_cpsr(vcpu));
+
+               /*
+                * HW Breakpoints and watchpoints
+                *
+                * We simply switch the debug_ptr to point to our new
+                * external_debug_state which has been populated by the
+                * debug ioctl. The existing KVM_ARM64_DEBUG_DIRTY
+                * mechanism ensures the registers are updated on the
+                * world switch.
+                */
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+                       /* Enable breakpoints/watchpoints */
+                       vcpu_sys_reg(vcpu, MDSCR_EL1) |= DBG_MDSCR_MDE;
+
+                       vcpu->arch.debug_ptr = &vcpu->arch.external_debug_state;
+                       vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+                       trap_debug = true;
+
+                       trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+                                               &vcpu->arch.debug_ptr->dbg_bcr[0],
+                                               &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+                       trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+                                               &vcpu->arch.debug_ptr->dbg_wcr[0],
+                                               &vcpu->arch.debug_ptr->dbg_wvr[0]);
+               }
+       }
+
+       BUG_ON(!vcpu->guest_debug &&
+               vcpu->arch.debug_ptr != &vcpu->arch.vcpu_debug_state);
+
+       /* Trap debug register access */
+       if (trap_debug)
+               vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
+
+       trace_kvm_arm_set_dreg32("MDCR_EL2", vcpu->arch.mdcr_el2);
+       trace_kvm_arm_set_dreg32("MDSCR_EL1", vcpu_sys_reg(vcpu, MDSCR_EL1));
+}
+
+void kvm_arm_clear_debug(struct kvm_vcpu *vcpu)
+{
+       trace_kvm_arm_clear_debug(vcpu->guest_debug);
+
+       if (vcpu->guest_debug) {
+               restore_guest_debug_regs(vcpu);
+
+               /*
+                * If we were using HW debug we need to restore the
+                * debug_ptr to the guest debug state.
+                */
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+                       kvm_arm_reset_debug_ptr(vcpu);
+
+                       trace_kvm_arm_set_regset("BKPTS", get_num_brps(),
+                                               &vcpu->arch.debug_ptr->dbg_bcr[0],
+                                               &vcpu->arch.debug_ptr->dbg_bvr[0]);
+
+                       trace_kvm_arm_set_regset("WAPTS", get_num_wrps(),
+                                               &vcpu->arch.debug_ptr->dbg_wcr[0],
+                                               &vcpu->arch.debug_ptr->dbg_wvr[0]);
+               }
+       }
+}
index 9535bd555d1d47cf190f78306eb879484d70a261..d250160d32bc68ae636c804b9cdfe2499bdddcb9 100644 (file)
@@ -32,6 +32,8 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
+#include "trace.h"
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
        { NULL }
 };
@@ -293,7 +295,8 @@ int __attribute_const__ kvm_target_cpu(void)
                break;
        };
 
-       return -EINVAL;
+       /* Return a default generic target */
+       return KVM_ARM_TARGET_GENERIC_V8;
 }
 
 int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
@@ -331,3 +334,41 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 {
        return -EINVAL;
 }
+
+#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE |    \
+                           KVM_GUESTDBG_USE_SW_BP | \
+                           KVM_GUESTDBG_USE_HW | \
+                           KVM_GUESTDBG_SINGLESTEP)
+
+/**
+ * kvm_arch_vcpu_ioctl_set_guest_debug - set up guest debugging
+ * @vcpu:      the vcpu pointer
+ * @dbg:       the kvm_guest_debug ioctl data buffer
+ *
+ * This sets up and enables the VM for guest debugging. Userspace
+ * passes in a control flag to enable different debug types and
+ * potentially other architecture specific information in the rest of
+ * the structure.
+ */
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+                                       struct kvm_guest_debug *dbg)
+{
+       trace_kvm_set_guest_debug(vcpu, dbg->control);
+
+       if (dbg->control & ~KVM_GUESTDBG_VALID_MASK)
+               return -EINVAL;
+
+       if (dbg->control & KVM_GUESTDBG_ENABLE) {
+               vcpu->guest_debug = dbg->control;
+
+               /* Hardware assisted Break and Watch points */
+               if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW) {
+                       vcpu->arch.external_debug_state = dbg->arch;
+               }
+
+       } else {
+               /* If not enabled clear all flags */
+               vcpu->guest_debug = 0;
+       }
+       return 0;
+}
index 524fa25671fc85da0a0eafac65b95698b6de3729..68a0759b1375e3d6824b55b0c0548ba21c434ca0 100644 (file)
@@ -82,6 +82,45 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
        return 1;
 }
 
+/**
+ * kvm_handle_guest_debug - handle a debug exception instruction
+ *
+ * @vcpu:      the vcpu pointer
+ * @run:       access to the kvm_run structure for results
+ *
+ * We route all debug exceptions through the same handler. If both the
+ * guest and host are using the same debug facilities it will be up to
+ * userspace to re-inject the correct exception for guest delivery.
+ *
+ * @return: 0 (while setting run->exit_reason), -1 for error
+ */
+static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+       u32 hsr = kvm_vcpu_get_hsr(vcpu);
+       int ret = 0;
+
+       run->exit_reason = KVM_EXIT_DEBUG;
+       run->debug.arch.hsr = hsr;
+
+       switch (hsr >> ESR_ELx_EC_SHIFT) {
+       case ESR_ELx_EC_WATCHPT_LOW:
+               run->debug.arch.far = vcpu->arch.fault.far_el2;
+               /* fall through */
+       case ESR_ELx_EC_SOFTSTP_LOW:
+       case ESR_ELx_EC_BREAKPT_LOW:
+       case ESR_ELx_EC_BKPT32:
+       case ESR_ELx_EC_BRK64:
+               break;
+       default:
+               kvm_err("%s: un-handled case hsr: %#08x\n",
+                       __func__, (unsigned int) hsr);
+               ret = -1;
+               break;
+       }
+
+       return ret;
+}
+
 static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
        [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
@@ -96,6 +135,11 @@ static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_SYS64]      = kvm_handle_sys_reg,
        [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
        [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
+       [ESR_ELx_EC_SOFTSTP_LOW]= kvm_handle_guest_debug,
+       [ESR_ELx_EC_WATCHPT_LOW]= kvm_handle_guest_debug,
+       [ESR_ELx_EC_BREAKPT_LOW]= kvm_handle_guest_debug,
+       [ESR_ELx_EC_BKPT32]     = kvm_handle_guest_debug,
+       [ESR_ELx_EC_BRK64]      = kvm_handle_guest_debug,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
index 10915aaf0b015b7281efa832e3ea50a90e42394c..37c89ea2c572ed858c0425344b45e0897a89b74f 100644 (file)
        stp     x24, x25, [x3, #160]
 .endm
 
-.macro save_debug
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       mrs     x26, id_aa64dfr0_el1
-       ubfx    x24, x26, #12, #4       // Extract BRPs
-       ubfx    x25, x26, #20, #4       // Extract WRPs
-       mov     w26, #15
-       sub     w24, w26, w24           // How many BPs to skip
-       sub     w25, w26, w25           // How many WPs to skip
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
-1:
-       mrs     x20, dbgbcr15_el1
-       mrs     x19, dbgbcr14_el1
-       mrs     x18, dbgbcr13_el1
-       mrs     x17, dbgbcr12_el1
-       mrs     x16, dbgbcr11_el1
-       mrs     x15, dbgbcr10_el1
-       mrs     x14, dbgbcr9_el1
-       mrs     x13, dbgbcr8_el1
-       mrs     x12, dbgbcr7_el1
-       mrs     x11, dbgbcr6_el1
-       mrs     x10, dbgbcr5_el1
-       mrs     x9, dbgbcr4_el1
-       mrs     x8, dbgbcr3_el1
-       mrs     x7, dbgbcr2_el1
-       mrs     x6, dbgbcr1_el1
-       mrs     x5, dbgbcr0_el1
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
-
-1:
-       str     x20, [x3, #(15 * 8)]
-       str     x19, [x3, #(14 * 8)]
-       str     x18, [x3, #(13 * 8)]
-       str     x17, [x3, #(12 * 8)]
-       str     x16, [x3, #(11 * 8)]
-       str     x15, [x3, #(10 * 8)]
-       str     x14, [x3, #(9 * 8)]
-       str     x13, [x3, #(8 * 8)]
-       str     x12, [x3, #(7 * 8)]
-       str     x11, [x3, #(6 * 8)]
-       str     x10, [x3, #(5 * 8)]
-       str     x9, [x3, #(4 * 8)]
-       str     x8, [x3, #(3 * 8)]
-       str     x7, [x3, #(2 * 8)]
-       str     x6, [x3, #(1 * 8)]
-       str     x5, [x3, #(0 * 8)]
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
+.macro save_debug type
+       // x4: pointer to register set
+       // x5: number of registers to skip
+       // x6..x22 trashed
+
+       adr     x22, 1f
+       add     x22, x22, x5, lsl #2
+       br      x22
 1:
-       mrs     x20, dbgbvr15_el1
-       mrs     x19, dbgbvr14_el1
-       mrs     x18, dbgbvr13_el1
-       mrs     x17, dbgbvr12_el1
-       mrs     x16, dbgbvr11_el1
-       mrs     x15, dbgbvr10_el1
-       mrs     x14, dbgbvr9_el1
-       mrs     x13, dbgbvr8_el1
-       mrs     x12, dbgbvr7_el1
-       mrs     x11, dbgbvr6_el1
-       mrs     x10, dbgbvr5_el1
-       mrs     x9, dbgbvr4_el1
-       mrs     x8, dbgbvr3_el1
-       mrs     x7, dbgbvr2_el1
-       mrs     x6, dbgbvr1_el1
-       mrs     x5, dbgbvr0_el1
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
-
-1:
-       str     x20, [x3, #(15 * 8)]
-       str     x19, [x3, #(14 * 8)]
-       str     x18, [x3, #(13 * 8)]
-       str     x17, [x3, #(12 * 8)]
-       str     x16, [x3, #(11 * 8)]
-       str     x15, [x3, #(10 * 8)]
-       str     x14, [x3, #(9 * 8)]
-       str     x13, [x3, #(8 * 8)]
-       str     x12, [x3, #(7 * 8)]
-       str     x11, [x3, #(6 * 8)]
-       str     x10, [x3, #(5 * 8)]
-       str     x9, [x3, #(4 * 8)]
-       str     x8, [x3, #(3 * 8)]
-       str     x7, [x3, #(2 * 8)]
-       str     x6, [x3, #(1 * 8)]
-       str     x5, [x3, #(0 * 8)]
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       mrs     x20, dbgwcr15_el1
-       mrs     x19, dbgwcr14_el1
-       mrs     x18, dbgwcr13_el1
-       mrs     x17, dbgwcr12_el1
-       mrs     x16, dbgwcr11_el1
-       mrs     x15, dbgwcr10_el1
-       mrs     x14, dbgwcr9_el1
-       mrs     x13, dbgwcr8_el1
-       mrs     x12, dbgwcr7_el1
-       mrs     x11, dbgwcr6_el1
-       mrs     x10, dbgwcr5_el1
-       mrs     x9, dbgwcr4_el1
-       mrs     x8, dbgwcr3_el1
-       mrs     x7, dbgwcr2_el1
-       mrs     x6, dbgwcr1_el1
-       mrs     x5, dbgwcr0_el1
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-
-1:
-       str     x20, [x3, #(15 * 8)]
-       str     x19, [x3, #(14 * 8)]
-       str     x18, [x3, #(13 * 8)]
-       str     x17, [x3, #(12 * 8)]
-       str     x16, [x3, #(11 * 8)]
-       str     x15, [x3, #(10 * 8)]
-       str     x14, [x3, #(9 * 8)]
-       str     x13, [x3, #(8 * 8)]
-       str     x12, [x3, #(7 * 8)]
-       str     x11, [x3, #(6 * 8)]
-       str     x10, [x3, #(5 * 8)]
-       str     x9, [x3, #(4 * 8)]
-       str     x8, [x3, #(3 * 8)]
-       str     x7, [x3, #(2 * 8)]
-       str     x6, [x3, #(1 * 8)]
-       str     x5, [x3, #(0 * 8)]
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       mrs     x20, dbgwvr15_el1
-       mrs     x19, dbgwvr14_el1
-       mrs     x18, dbgwvr13_el1
-       mrs     x17, dbgwvr12_el1
-       mrs     x16, dbgwvr11_el1
-       mrs     x15, dbgwvr10_el1
-       mrs     x14, dbgwvr9_el1
-       mrs     x13, dbgwvr8_el1
-       mrs     x12, dbgwvr7_el1
-       mrs     x11, dbgwvr6_el1
-       mrs     x10, dbgwvr5_el1
-       mrs     x9, dbgwvr4_el1
-       mrs     x8, dbgwvr3_el1
-       mrs     x7, dbgwvr2_el1
-       mrs     x6, dbgwvr1_el1
-       mrs     x5, dbgwvr0_el1
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-
+       mrs     x21, \type\()15_el1
+       mrs     x20, \type\()14_el1
+       mrs     x19, \type\()13_el1
+       mrs     x18, \type\()12_el1
+       mrs     x17, \type\()11_el1
+       mrs     x16, \type\()10_el1
+       mrs     x15, \type\()9_el1
+       mrs     x14, \type\()8_el1
+       mrs     x13, \type\()7_el1
+       mrs     x12, \type\()6_el1
+       mrs     x11, \type\()5_el1
+       mrs     x10, \type\()4_el1
+       mrs     x9, \type\()3_el1
+       mrs     x8, \type\()2_el1
+       mrs     x7, \type\()1_el1
+       mrs     x6, \type\()0_el1
+
+       adr     x22, 1f
+       add     x22, x22, x5, lsl #2
+       br      x22
 1:
-       str     x20, [x3, #(15 * 8)]
-       str     x19, [x3, #(14 * 8)]
-       str     x18, [x3, #(13 * 8)]
-       str     x17, [x3, #(12 * 8)]
-       str     x16, [x3, #(11 * 8)]
-       str     x15, [x3, #(10 * 8)]
-       str     x14, [x3, #(9 * 8)]
-       str     x13, [x3, #(8 * 8)]
-       str     x12, [x3, #(7 * 8)]
-       str     x11, [x3, #(6 * 8)]
-       str     x10, [x3, #(5 * 8)]
-       str     x9, [x3, #(4 * 8)]
-       str     x8, [x3, #(3 * 8)]
-       str     x7, [x3, #(2 * 8)]
-       str     x6, [x3, #(1 * 8)]
-       str     x5, [x3, #(0 * 8)]
-
-       mrs     x21, mdccint_el1
-       str     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+       str     x21, [x4, #(15 * 8)]
+       str     x20, [x4, #(14 * 8)]
+       str     x19, [x4, #(13 * 8)]
+       str     x18, [x4, #(12 * 8)]
+       str     x17, [x4, #(11 * 8)]
+       str     x16, [x4, #(10 * 8)]
+       str     x15, [x4, #(9 * 8)]
+       str     x14, [x4, #(8 * 8)]
+       str     x13, [x4, #(7 * 8)]
+       str     x12, [x4, #(6 * 8)]
+       str     x11, [x4, #(5 * 8)]
+       str     x10, [x4, #(4 * 8)]
+       str     x9, [x4, #(3 * 8)]
+       str     x8, [x4, #(2 * 8)]
+       str     x7, [x4, #(1 * 8)]
+       str     x6, [x4, #(0 * 8)]
 .endm
 
 .macro restore_sysregs
        msr     mdscr_el1,      x25
 .endm
 
-.macro restore_debug
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       mrs     x26, id_aa64dfr0_el1
-       ubfx    x24, x26, #12, #4       // Extract BRPs
-       ubfx    x25, x26, #20, #4       // Extract WRPs
-       mov     w26, #15
-       sub     w24, w26, w24           // How many BPs to skip
-       sub     w25, w26, w25           // How many WPs to skip
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBCR0_EL1)
+.macro restore_debug type
+       // x4: pointer to register set
+       // x5: number of registers to skip
+       // x6..x22 trashed
 
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
-1:
-       ldr     x20, [x3, #(15 * 8)]
-       ldr     x19, [x3, #(14 * 8)]
-       ldr     x18, [x3, #(13 * 8)]
-       ldr     x17, [x3, #(12 * 8)]
-       ldr     x16, [x3, #(11 * 8)]
-       ldr     x15, [x3, #(10 * 8)]
-       ldr     x14, [x3, #(9 * 8)]
-       ldr     x13, [x3, #(8 * 8)]
-       ldr     x12, [x3, #(7 * 8)]
-       ldr     x11, [x3, #(6 * 8)]
-       ldr     x10, [x3, #(5 * 8)]
-       ldr     x9, [x3, #(4 * 8)]
-       ldr     x8, [x3, #(3 * 8)]
-       ldr     x7, [x3, #(2 * 8)]
-       ldr     x6, [x3, #(1 * 8)]
-       ldr     x5, [x3, #(0 * 8)]
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
+       adr     x22, 1f
+       add     x22, x22, x5, lsl #2
+       br      x22
 1:
-       msr     dbgbcr15_el1, x20
-       msr     dbgbcr14_el1, x19
-       msr     dbgbcr13_el1, x18
-       msr     dbgbcr12_el1, x17
-       msr     dbgbcr11_el1, x16
-       msr     dbgbcr10_el1, x15
-       msr     dbgbcr9_el1, x14
-       msr     dbgbcr8_el1, x13
-       msr     dbgbcr7_el1, x12
-       msr     dbgbcr6_el1, x11
-       msr     dbgbcr5_el1, x10
-       msr     dbgbcr4_el1, x9
-       msr     dbgbcr3_el1, x8
-       msr     dbgbcr2_el1, x7
-       msr     dbgbcr1_el1, x6
-       msr     dbgbcr0_el1, x5
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGBVR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
+       ldr     x21, [x4, #(15 * 8)]
+       ldr     x20, [x4, #(14 * 8)]
+       ldr     x19, [x4, #(13 * 8)]
+       ldr     x18, [x4, #(12 * 8)]
+       ldr     x17, [x4, #(11 * 8)]
+       ldr     x16, [x4, #(10 * 8)]
+       ldr     x15, [x4, #(9 * 8)]
+       ldr     x14, [x4, #(8 * 8)]
+       ldr     x13, [x4, #(7 * 8)]
+       ldr     x12, [x4, #(6 * 8)]
+       ldr     x11, [x4, #(5 * 8)]
+       ldr     x10, [x4, #(4 * 8)]
+       ldr     x9, [x4, #(3 * 8)]
+       ldr     x8, [x4, #(2 * 8)]
+       ldr     x7, [x4, #(1 * 8)]
+       ldr     x6, [x4, #(0 * 8)]
+
+       adr     x22, 1f
+       add     x22, x22, x5, lsl #2
+       br      x22
 1:
-       ldr     x20, [x3, #(15 * 8)]
-       ldr     x19, [x3, #(14 * 8)]
-       ldr     x18, [x3, #(13 * 8)]
-       ldr     x17, [x3, #(12 * 8)]
-       ldr     x16, [x3, #(11 * 8)]
-       ldr     x15, [x3, #(10 * 8)]
-       ldr     x14, [x3, #(9 * 8)]
-       ldr     x13, [x3, #(8 * 8)]
-       ldr     x12, [x3, #(7 * 8)]
-       ldr     x11, [x3, #(6 * 8)]
-       ldr     x10, [x3, #(5 * 8)]
-       ldr     x9, [x3, #(4 * 8)]
-       ldr     x8, [x3, #(3 * 8)]
-       ldr     x7, [x3, #(2 * 8)]
-       ldr     x6, [x3, #(1 * 8)]
-       ldr     x5, [x3, #(0 * 8)]
-
-       adr     x26, 1f
-       add     x26, x26, x24, lsl #2
-       br      x26
-1:
-       msr     dbgbvr15_el1, x20
-       msr     dbgbvr14_el1, x19
-       msr     dbgbvr13_el1, x18
-       msr     dbgbvr12_el1, x17
-       msr     dbgbvr11_el1, x16
-       msr     dbgbvr10_el1, x15
-       msr     dbgbvr9_el1, x14
-       msr     dbgbvr8_el1, x13
-       msr     dbgbvr7_el1, x12
-       msr     dbgbvr6_el1, x11
-       msr     dbgbvr5_el1, x10
-       msr     dbgbvr4_el1, x9
-       msr     dbgbvr3_el1, x8
-       msr     dbgbvr2_el1, x7
-       msr     dbgbvr1_el1, x6
-       msr     dbgbvr0_el1, x5
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWCR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       ldr     x20, [x3, #(15 * 8)]
-       ldr     x19, [x3, #(14 * 8)]
-       ldr     x18, [x3, #(13 * 8)]
-       ldr     x17, [x3, #(12 * 8)]
-       ldr     x16, [x3, #(11 * 8)]
-       ldr     x15, [x3, #(10 * 8)]
-       ldr     x14, [x3, #(9 * 8)]
-       ldr     x13, [x3, #(8 * 8)]
-       ldr     x12, [x3, #(7 * 8)]
-       ldr     x11, [x3, #(6 * 8)]
-       ldr     x10, [x3, #(5 * 8)]
-       ldr     x9, [x3, #(4 * 8)]
-       ldr     x8, [x3, #(3 * 8)]
-       ldr     x7, [x3, #(2 * 8)]
-       ldr     x6, [x3, #(1 * 8)]
-       ldr     x5, [x3, #(0 * 8)]
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       msr     dbgwcr15_el1, x20
-       msr     dbgwcr14_el1, x19
-       msr     dbgwcr13_el1, x18
-       msr     dbgwcr12_el1, x17
-       msr     dbgwcr11_el1, x16
-       msr     dbgwcr10_el1, x15
-       msr     dbgwcr9_el1, x14
-       msr     dbgwcr8_el1, x13
-       msr     dbgwcr7_el1, x12
-       msr     dbgwcr6_el1, x11
-       msr     dbgwcr5_el1, x10
-       msr     dbgwcr4_el1, x9
-       msr     dbgwcr3_el1, x8
-       msr     dbgwcr2_el1, x7
-       msr     dbgwcr1_el1, x6
-       msr     dbgwcr0_el1, x5
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DBGWVR0_EL1)
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       ldr     x20, [x3, #(15 * 8)]
-       ldr     x19, [x3, #(14 * 8)]
-       ldr     x18, [x3, #(13 * 8)]
-       ldr     x17, [x3, #(12 * 8)]
-       ldr     x16, [x3, #(11 * 8)]
-       ldr     x15, [x3, #(10 * 8)]
-       ldr     x14, [x3, #(9 * 8)]
-       ldr     x13, [x3, #(8 * 8)]
-       ldr     x12, [x3, #(7 * 8)]
-       ldr     x11, [x3, #(6 * 8)]
-       ldr     x10, [x3, #(5 * 8)]
-       ldr     x9, [x3, #(4 * 8)]
-       ldr     x8, [x3, #(3 * 8)]
-       ldr     x7, [x3, #(2 * 8)]
-       ldr     x6, [x3, #(1 * 8)]
-       ldr     x5, [x3, #(0 * 8)]
-
-       adr     x26, 1f
-       add     x26, x26, x25, lsl #2
-       br      x26
-1:
-       msr     dbgwvr15_el1, x20
-       msr     dbgwvr14_el1, x19
-       msr     dbgwvr13_el1, x18
-       msr     dbgwvr12_el1, x17
-       msr     dbgwvr11_el1, x16
-       msr     dbgwvr10_el1, x15
-       msr     dbgwvr9_el1, x14
-       msr     dbgwvr8_el1, x13
-       msr     dbgwvr7_el1, x12
-       msr     dbgwvr6_el1, x11
-       msr     dbgwvr5_el1, x10
-       msr     dbgwvr4_el1, x9
-       msr     dbgwvr3_el1, x8
-       msr     dbgwvr2_el1, x7
-       msr     dbgwvr1_el1, x6
-       msr     dbgwvr0_el1, x5
-
-       ldr     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-       msr     mdccint_el1, x21
+       msr     \type\()15_el1, x21
+       msr     \type\()14_el1, x20
+       msr     \type\()13_el1, x19
+       msr     \type\()12_el1, x18
+       msr     \type\()11_el1, x17
+       msr     \type\()10_el1, x16
+       msr     \type\()9_el1, x15
+       msr     \type\()8_el1, x14
+       msr     \type\()7_el1, x13
+       msr     \type\()6_el1, x12
+       msr     \type\()5_el1, x11
+       msr     \type\()4_el1, x10
+       msr     \type\()3_el1, x9
+       msr     \type\()2_el1, x8
+       msr     \type\()1_el1, x7
+       msr     \type\()0_el1, x6
 .endm
 
 .macro skip_32bit_state tmp, target
        tbz     \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
 .endm
 
+/*
+ * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
+ */
+.macro skip_fpsimd_state tmp, target
+       mrs     \tmp, cptr_el2
+       tbnz    \tmp, #CPTR_EL2_TFP_SHIFT, \target
+.endm
+
 .macro compute_debug_state target
        // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
        // is set, we do a full save/restore cycle and disable trapping.
        add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
        mrs     x4, dacr32_el2
        mrs     x5, ifsr32_el2
-       mrs     x6, fpexc32_el2
        stp     x4, x5, [x3]
-       str     x6, [x3, #16]
 
+       skip_fpsimd_state x8, 3f
+       mrs     x6, fpexc32_el2
+       str     x6, [x3, #16]
+3:
        skip_debug_state x8, 2f
        mrs     x7, dbgvcr32_el2
        str     x7, [x3, #24]
 
        add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
        ldp     x4, x5, [x3]
-       ldr     x6, [x3, #16]
        msr     dacr32_el2, x4
        msr     ifsr32_el2, x5
-       msr     fpexc32_el2, x6
 
        skip_debug_state x8, 2f
        ldr     x7, [x3, #24]
 
 .macro activate_traps
        ldr     x2, [x0, #VCPU_HCR_EL2]
+
+       /*
+        * We are about to set CPTR_EL2.TFP to trap all floating point
+        * register accesses to EL2, however, the ARM ARM clearly states that
+        * traps are only taken to EL2 if the operation would not otherwise
+        * trap to EL1.  Therefore, always make sure that for 32-bit guests,
+        * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+        */
+       tbnz    x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
+       mov     x3, #(1 << 30)
+       msr     fpexc32_el2, x3
+       isb
+99:
        msr     hcr_el2, x2
        mov     x2, #CPTR_EL2_TTA
+       orr     x2, x2, #CPTR_EL2_TFP
        msr     cptr_el2, x2
 
        mov     x2, #(1 << 15)  // Trap CP15 Cr=15
        msr     hstr_el2, x2
 
-       mrs     x2, mdcr_el2
-       and     x2, x2, #MDCR_EL2_HPMN_MASK
-       orr     x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
-       orr     x2, x2, #(MDCR_EL2_TDRA | MDCR_EL2_TDOSA)
-
-       // Check for KVM_ARM64_DEBUG_DIRTY, and set debug to trap
-       // if not dirty.
-       ldr     x3, [x0, #VCPU_DEBUG_FLAGS]
-       tbnz    x3, #KVM_ARM64_DEBUG_DIRTY_SHIFT, 1f
-       orr     x2, x2,  #MDCR_EL2_TDA
-1:
+       // Monitor Debug Config - see kvm_arm_setup_debug()
+       ldr     x2, [x0, #VCPU_MDCR_EL2]
        msr     mdcr_el2, x2
 .endm
 
 .macro deactivate_traps
        mov     x2, #HCR_RW
        msr     hcr_el2, x2
-       msr     cptr_el2, xzr
        msr     hstr_el2, xzr
 
        mrs     x2, mdcr_el2
@@ -900,21 +622,101 @@ __restore_sysregs:
        restore_sysregs
        ret
 
+/* Save debug state */
 __save_debug:
-       save_debug
+       // x2: ptr to CPU context
+       // x3: ptr to debug reg struct
+       // x4/x5/x6-22/x24-26: trashed
+
+       mrs     x26, id_aa64dfr0_el1
+       ubfx    x24, x26, #12, #4       // Extract BRPs
+       ubfx    x25, x26, #20, #4       // Extract WRPs
+       mov     w26, #15
+       sub     w24, w26, w24           // How many BPs to skip
+       sub     w25, w26, w25           // How many WPs to skip
+
+       mov     x5, x24
+       add     x4, x3, #DEBUG_BCR
+       save_debug dbgbcr
+       add     x4, x3, #DEBUG_BVR
+       save_debug dbgbvr
+
+       mov     x5, x25
+       add     x4, x3, #DEBUG_WCR
+       save_debug dbgwcr
+       add     x4, x3, #DEBUG_WVR
+       save_debug dbgwvr
+
+       mrs     x21, mdccint_el1
+       str     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
        ret
 
+/* Restore debug state */
 __restore_debug:
-       restore_debug
+       // x2: ptr to CPU context
+       // x3: ptr to debug reg struct
+       // x4/x5/x6-22/x24-26: trashed
+
+       mrs     x26, id_aa64dfr0_el1
+       ubfx    x24, x26, #12, #4       // Extract BRPs
+       ubfx    x25, x26, #20, #4       // Extract WRPs
+       mov     w26, #15
+       sub     w24, w26, w24           // How many BPs to skip
+       sub     w25, w26, w25           // How many WPs to skip
+
+       mov     x5, x24
+       add     x4, x3, #DEBUG_BCR
+       restore_debug dbgbcr
+       add     x4, x3, #DEBUG_BVR
+       restore_debug dbgbvr
+
+       mov     x5, x25
+       add     x4, x3, #DEBUG_WCR
+       restore_debug dbgwcr
+       add     x4, x3, #DEBUG_WVR
+       restore_debug dbgwvr
+
+       ldr     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
+       msr     mdccint_el1, x21
+
        ret
 
 __save_fpsimd:
+       skip_fpsimd_state x3, 1f
        save_fpsimd
-       ret
+1:     ret
 
 __restore_fpsimd:
+       skip_fpsimd_state x3, 1f
        restore_fpsimd
-       ret
+1:     ret
+
+switch_to_guest_fpsimd:
+       push    x4, lr
+
+       mrs     x2, cptr_el2
+       bic     x2, x2, #CPTR_EL2_TFP
+       msr     cptr_el2, x2
+       isb
+
+       mrs     x0, tpidr_el2
+
+       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x2
+       bl __save_fpsimd
+
+       add     x2, x0, #VCPU_CONTEXT
+       bl __restore_fpsimd
+
+       skip_32bit_state x3, 1f
+       ldr     x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
+       msr     fpexc32_el2, x4
+1:
+       pop     x4, lr
+       pop     x2, x3
+       pop     x0, x1
+
+       eret
 
 /*
  * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
@@ -936,10 +738,10 @@ ENTRY(__kvm_vcpu_run)
        kern_hyp_va x2
 
        save_host_regs
-       bl __save_fpsimd
        bl __save_sysregs
 
        compute_debug_state 1f
+       add     x3, x0, #VCPU_HOST_DEBUG_STATE
        bl      __save_debug
 1:
        activate_traps
@@ -952,9 +754,10 @@ ENTRY(__kvm_vcpu_run)
        add     x2, x0, #VCPU_CONTEXT
 
        bl __restore_sysregs
-       bl __restore_fpsimd
 
        skip_debug_state x3, 1f
+       ldr     x3, [x0, #VCPU_DEBUG_PTR]
+       kern_hyp_va x3
        bl      __restore_debug
 1:
        restore_guest_32bit_state
@@ -975,6 +778,8 @@ __kvm_vcpu_return:
        bl __save_sysregs
 
        skip_debug_state x3, 1f
+       ldr     x3, [x0, #VCPU_DEBUG_PTR]
+       kern_hyp_va x3
        bl      __save_debug
 1:
        save_guest_32bit_state
@@ -991,12 +796,15 @@ __kvm_vcpu_return:
 
        bl __restore_sysregs
        bl __restore_fpsimd
+       /* Clear FPSIMD and Trace trapping */
+       msr     cptr_el2, xzr
 
        skip_debug_state x3, 1f
        // Clear the dirty flag for the next run, as all the state has
        // already been saved. Note that we nuke the whole 64bit word.
        // If we ever add more flags, we'll have to be more careful...
        str     xzr, [x0, #VCPU_DEBUG_FLAGS]
+       add     x3, x0, #VCPU_HOST_DEBUG_STATE
        bl      __restore_debug
 1:
        restore_host_regs
@@ -1199,6 +1007,11 @@ el1_trap:
         * x1: ESR
         * x2: ESR_EC
         */
+
+       /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+       cmp     x2, #ESR_ELx_EC_FP_ASIMD
+       b.eq    switch_to_guest_fpsimd
+
        cmp     x2, #ESR_ELx_EC_DABT_LOW
        mov     x0, #ESR_ELx_EC_IABT_LOW
        ccmp    x2, x0, #4, ne
@@ -1293,4 +1106,10 @@ ENTRY(__kvm_hyp_vector)
        ventry  el1_error_invalid               // Error 32-bit EL1
 ENDPROC(__kvm_hyp_vector)
 
+
+ENTRY(__kvm_get_mdcr_el2)
+       mrs     x0, mdcr_el2
+       ret
+ENDPROC(__kvm_get_mdcr_el2)
+
        .popsection
index 0b43265789858cbe71f761eebbc48927834b7fe8..91cf5350b3283232cd6d88aca9af669ce972c697 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/errno.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
+#include <linux/hw_breakpoint.h>
 
 #include <kvm/arm_arch_timer.h>
 
@@ -56,6 +57,12 @@ static bool cpu_has_32bit_el1(void)
        return !!(pfr0 & 0x20);
 }
 
+/**
+ * kvm_arch_dev_ioctl_check_extension
+ *
+ * We currently assume that the number of HW registers is uniform
+ * across all CPUs (see cpuinfo_sanity_check).
+ */
 int kvm_arch_dev_ioctl_check_extension(long ext)
 {
        int r;
@@ -64,6 +71,15 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
        case KVM_CAP_ARM_EL1_32BIT:
                r = cpu_has_32bit_el1();
                break;
+       case KVM_CAP_GUEST_DEBUG_HW_BPS:
+               r = get_num_brps();
+               break;
+       case KVM_CAP_GUEST_DEBUG_HW_WPS:
+               r = get_num_wrps();
+               break;
+       case KVM_CAP_SET_GUEST_DEBUG:
+               r = 1;
+               break;
        default:
                r = 0;
        }
@@ -105,7 +121,5 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
        kvm_reset_sys_regs(vcpu);
 
        /* Reset timer */
-       kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
-
-       return 0;
+       return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
 }
index c370b4014799697c292a99cf24721a425fcb3790..b41607d270ac83ebd1413a185753bf0b9e2af7f0 100644 (file)
@@ -38,6 +38,8 @@
 
 #include "sys_regs.h"
 
+#include "trace.h"
+
 /*
  * All of this file is extremly similar to the ARM coproc.c, but the
  * types are different. My gut feeling is that it should be pretty
@@ -208,9 +210,217 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                *vcpu_reg(vcpu, p->Rt) = vcpu_sys_reg(vcpu, r->reg);
        }
 
+       trace_trap_reg(__func__, r->reg, p->is_write, *vcpu_reg(vcpu, p->Rt));
+
+       return true;
+}
+
+/*
+ * reg_to_dbg/dbg_to_reg
+ *
+ * A 32 bit write to a debug register leaves the top bits alone
+ * A 32 bit read from a debug register only returns the bottom bits
+ *
+ * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
+ * hyp.S code switches between host and guest values in future.
+ */
+static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
+                             const struct sys_reg_params *p,
+                             u64 *dbg_reg)
+{
+       u64 val = *vcpu_reg(vcpu, p->Rt);
+
+       if (p->is_32bit) {
+               val &= 0xffffffffUL;
+               val |= ((*dbg_reg >> 32) << 32);
+       }
+
+       *dbg_reg = val;
+       vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+}
+
+static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
+                             const struct sys_reg_params *p,
+                             u64 *dbg_reg)
+{
+       u64 val = *dbg_reg;
+
+       if (p->is_32bit)
+               val &= 0xffffffffUL;
+
+       *vcpu_reg(vcpu, p->Rt) = val;
+}
+
+static inline bool trap_bvr(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *rd)
+{
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+
+       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+       return true;
+}
+
+static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static inline void reset_bvr(struct kvm_vcpu *vcpu,
+                            const struct sys_reg_desc *rd)
+{
+       vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_bcr(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *rd)
+{
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+
+       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+       return true;
+}
+
+static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+
+       return 0;
+}
+
+static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
+
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static inline void reset_bcr(struct kvm_vcpu *vcpu,
+                            const struct sys_reg_desc *rd)
+{
+       vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wvr(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *rd)
+{
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+
+       trace_trap_reg(__func__, rd->reg, p->is_write,
+               vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);
+
        return true;
 }
 
+static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
+
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static inline void reset_wvr(struct kvm_vcpu *vcpu,
+                            const struct sys_reg_desc *rd)
+{
+       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
+}
+
+static inline bool trap_wcr(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *rd)
+{
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+       if (p->is_write)
+               reg_to_dbg(vcpu, p, dbg_reg);
+       else
+               dbg_to_reg(vcpu, p, dbg_reg);
+
+       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+       return true;
+}
+
+static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+               const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+       if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+       const struct kvm_one_reg *reg, void __user *uaddr)
+{
+       __u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
+
+       if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
+               return -EFAULT;
+       return 0;
+}
+
+static inline void reset_wcr(struct kvm_vcpu *vcpu,
+                            const struct sys_reg_desc *rd)
+{
+       vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
+}
+
 static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 amair;
@@ -240,16 +450,16 @@ static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 #define DBG_BCR_BVR_WCR_WVR_EL1(n)                                     \
        /* DBGBVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b100),     \
-         trap_debug_regs, reset_val, (DBGBVR0_EL1 + (n)), 0 },         \
+         trap_bvr, reset_bvr, n, 0, get_bvr, set_bvr },                \
        /* DBGBCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b101),     \
-         trap_debug_regs, reset_val, (DBGBCR0_EL1 + (n)), 0 },         \
+         trap_bcr, reset_bcr, n, 0, get_bcr, set_bcr },                \
        /* DBGWVRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b110),     \
-         trap_debug_regs, reset_val, (DBGWVR0_EL1 + (n)), 0 },         \
+         trap_wvr, reset_wvr, n, 0,  get_wvr, set_wvr },               \
        /* DBGWCRn_EL1 */                                               \
        { Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111),     \
-         trap_debug_regs, reset_val, (DBGWCR0_EL1 + (n)), 0 }
+         trap_wcr, reset_wcr, n, 0,  get_wcr, set_wcr }
 
 /*
  * Architected system registers.
@@ -516,28 +726,57 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
        return true;
 }
 
-#define DBG_BCR_BVR_WCR_WVR(n)                                 \
-       /* DBGBVRn */                                           \
-       { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_debug32,    \
-         NULL, (cp14_DBGBVR0 + (n) * 2) },                     \
-       /* DBGBCRn */                                           \
-       { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_debug32,    \
-         NULL, (cp14_DBGBCR0 + (n) * 2) },                     \
-       /* DBGWVRn */                                           \
-       { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_debug32,    \
-         NULL, (cp14_DBGWVR0 + (n) * 2) },                     \
-       /* DBGWCRn */                                           \
-       { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_debug32,    \
-         NULL, (cp14_DBGWCR0 + (n) * 2) }
-
-#define DBGBXVR(n)                                             \
-       { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_debug32,    \
-         NULL, cp14_DBGBXVR0 + n * 2 }
+/* AArch32 debug register mappings
+ *
+ * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
+ * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
+ *
+ * All control registers and watchpoint value registers are mapped to
+ * the lower 32 bits of their AArch64 equivalents. We share the trap
+ * handlers with the above AArch64 code which checks what mode the
+ * system is in.
+ */
+
+static inline bool trap_xvr(struct kvm_vcpu *vcpu,
+                           const struct sys_reg_params *p,
+                           const struct sys_reg_desc *rd)
+{
+       u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
+
+       if (p->is_write) {
+               u64 val = *dbg_reg;
+
+               val &= 0xffffffffUL;
+               val |= *vcpu_reg(vcpu, p->Rt) << 32;
+               *dbg_reg = val;
+
+               vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+       } else {
+               *vcpu_reg(vcpu, p->Rt) = *dbg_reg >> 32;
+       }
+
+       trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);
+
+       return true;
+}
+
+#define DBG_BCR_BVR_WCR_WVR(n)                                         \
+       /* DBGBVRn */                                                   \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },     \
+       /* DBGBCRn */                                                   \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },     \
+       /* DBGWVRn */                                                   \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },     \
+       /* DBGWCRn */                                                   \
+       { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }
+
+#define DBGBXVR(n)                                                     \
+       { Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }
 
 /*
  * Trapped cp14 registers. We generally ignore most of the external
  * debug, on the principle that they don't really make sense to a
- * guest. Revisit this one day, whould this principle change.
+ * guest. Revisit this one day, would this principle change.
  */
 static const struct sys_reg_desc cp14_regs[] = {
        /* DBGIDR */
@@ -999,6 +1238,8 @@ int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
        struct sys_reg_params params;
        unsigned long esr = kvm_vcpu_get_hsr(vcpu);
 
+       trace_kvm_handle_sys_reg(esr);
+
        params.is_aarch32 = false;
        params.is_32bit = false;
        params.Op0 = (esr >> 20) & 3;
@@ -1303,6 +1544,9 @@ int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
        if (!r)
                return get_invariant_sys_reg(reg->id, uaddr);
 
+       if (r->get_user)
+               return (r->get_user)(vcpu, r, reg, uaddr);
+
        return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
 }
 
@@ -1321,6 +1565,9 @@ int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg
        if (!r)
                return set_invariant_sys_reg(reg->id, uaddr);
 
+       if (r->set_user)
+               return (r->set_user)(vcpu, r, reg, uaddr);
+
        return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
 }
 
index d411e251412c316bb28b7d971b4dbc0c6e996ee3..eaa324e4db4da1149adfe7ae012b895353f05e91 100644 (file)
@@ -55,6 +55,12 @@ struct sys_reg_desc {
 
        /* Value (usually reset value) */
        u64 val;
+
+       /* Custom get/set_user functions, fallback to generic if NULL */
+       int (*get_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+                       const struct kvm_one_reg *reg, void __user *uaddr);
+       int (*set_user)(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
+                       const struct kvm_one_reg *reg, void __user *uaddr);
 };
 
 static inline void print_sys_reg_instr(const struct sys_reg_params *p)
index 475fd29293102649ea45ee411c077cca64f37782..1e4576824165502d2a6d5a1caee9e0c1da10c3fa 100644 (file)
@@ -94,6 +94,8 @@ static int __init sys_reg_genericv8_init(void)
                                          &genericv8_target_table);
        kvm_register_target_sys_reg_table(KVM_ARM_TARGET_XGENE_POTENZA,
                                          &genericv8_target_table);
+       kvm_register_target_sys_reg_table(KVM_ARM_TARGET_GENERIC_V8,
+                                         &genericv8_target_table);
 
        return 0;
 }
index 157416e963f2f2e2e2ee83a2656e27fb611f7b76..7fb0008c4fa3d487cc282225b13fbec8fb3fc215 100644 (file)
@@ -44,6 +44,129 @@ TRACE_EVENT(kvm_hvc_arm64,
                  __entry->vcpu_pc, __entry->r0, __entry->imm)
 );
 
+TRACE_EVENT(kvm_arm_setup_debug,
+       TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+       TP_ARGS(vcpu, guest_debug),
+
+       TP_STRUCT__entry(
+               __field(struct kvm_vcpu *, vcpu)
+               __field(__u32, guest_debug)
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu = vcpu;
+               __entry->guest_debug = guest_debug;
+       ),
+
+       TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_clear_debug,
+       TP_PROTO(__u32 guest_debug),
+       TP_ARGS(guest_debug),
+
+       TP_STRUCT__entry(
+               __field(__u32, guest_debug)
+       ),
+
+       TP_fast_assign(
+               __entry->guest_debug = guest_debug;
+       ),
+
+       TP_printk("flags: 0x%08x", __entry->guest_debug)
+);
+
+TRACE_EVENT(kvm_arm_set_dreg32,
+       TP_PROTO(const char *name, __u32 value),
+       TP_ARGS(name, value),
+
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(__u32, value)
+       ),
+
+       TP_fast_assign(
+               __entry->name = name;
+               __entry->value = value;
+       ),
+
+       TP_printk("%s: 0x%08x", __entry->name, __entry->value)
+);
+
+TRACE_EVENT(kvm_arm_set_regset,
+       TP_PROTO(const char *type, int len, __u64 *control, __u64 *value),
+       TP_ARGS(type, len, control, value),
+       TP_STRUCT__entry(
+               __field(const char *, name)
+               __field(int, len)
+               __array(u64, ctrls, 16)
+               __array(u64, values, 16)
+       ),
+       TP_fast_assign(
+               __entry->name = type;
+               __entry->len = len;
+               memcpy(__entry->ctrls, control, len << 3);
+               memcpy(__entry->values, value, len << 3);
+       ),
+       TP_printk("%d %s CTRL:%s VALUE:%s", __entry->len, __entry->name,
+               __print_array(__entry->ctrls, __entry->len, sizeof(__u64)),
+               __print_array(__entry->values, __entry->len, sizeof(__u64)))
+);
+
+TRACE_EVENT(trap_reg,
+       TP_PROTO(const char *fn, int reg, bool is_write, u64 write_value),
+       TP_ARGS(fn, reg, is_write, write_value),
+
+       TP_STRUCT__entry(
+               __field(const char *, fn)
+               __field(int, reg)
+               __field(bool, is_write)
+               __field(u64, write_value)
+       ),
+
+       TP_fast_assign(
+               __entry->fn = fn;
+               __entry->reg = reg;
+               __entry->is_write = is_write;
+               __entry->write_value = write_value;
+       ),
+
+       TP_printk("%s %s reg %d (0x%08llx)", __entry->fn,  __entry->is_write?"write to":"read from", __entry->reg, __entry->write_value)
+);
+
+TRACE_EVENT(kvm_handle_sys_reg,
+       TP_PROTO(unsigned long hsr),
+       TP_ARGS(hsr),
+
+       TP_STRUCT__entry(
+               __field(unsigned long,  hsr)
+       ),
+
+       TP_fast_assign(
+               __entry->hsr = hsr;
+       ),
+
+       TP_printk("HSR 0x%08lx", __entry->hsr)
+);
+
+TRACE_EVENT(kvm_set_guest_debug,
+       TP_PROTO(struct kvm_vcpu *vcpu, __u32 guest_debug),
+       TP_ARGS(vcpu, guest_debug),
+
+       TP_STRUCT__entry(
+               __field(struct kvm_vcpu *, vcpu)
+               __field(__u32, guest_debug)
+       ),
+
+       TP_fast_assign(
+               __entry->vcpu = vcpu;
+               __entry->guest_debug = guest_debug;
+       ),
+
+       TP_printk("vcpu: %p, flags: 0x%08x", __entry->vcpu, __entry->guest_debug)
+);
+
+
 #endif /* _TRACE_ARM64_KVM_H */
 
 #undef TRACE_INCLUDE_PATH
index 98106e55ad4ff73299a156b87e527b29bc6384b1..24b998888916a663659f0f4f8fa0befc88f40cdd 100644 (file)
@@ -19,8 +19,6 @@
 #ifndef _ASM_SIGNAL_H
 #define _ASM_SIGNAL_H
 
-#include <uapi/asm/registers.h>
-
 extern unsigned long __rt_sigtramp_template[2];
 
 void do_signal(struct pt_regs *regs);
index 17fbf45bf1502ad4c7212346e9333b95d2914937..a6a1d1f8309a40918e0e884715a09380d45c6d18 100644 (file)
@@ -97,20 +97,6 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
        return 0;
 }
 
-/*
- * Sets the mode (periodic, shutdown, oneshot, etc) of a timer.
- */
-static void set_mode(enum clock_event_mode mode,
-       struct clock_event_device *evt)
-{
-       switch (mode) {
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               /* XXX implement me */
-       default:
-               break;
-       }
-}
-
 #ifdef CONFIG_SMP
 /*  Broadcast mechanism  */
 static void broadcast(const struct cpumask *mask)
@@ -119,13 +105,13 @@ static void broadcast(const struct cpumask *mask)
 }
 #endif
 
+/* XXX Implement set_state_shutdown() */
 static struct clock_event_device hexagon_clockevent_dev = {
        .name           = "clockevent",
        .features       = CLOCK_EVT_FEAT_ONESHOT,
        .rating         = 400,
        .irq            = RTOS_TIMER_INT,
        .set_next_event = set_next_event,
-       .set_mode       = set_mode,
 #ifdef CONFIG_SMP
        .broadcast      = broadcast,
 #endif
@@ -146,7 +132,6 @@ void setup_percpu_clockdev(void)
 
        dummy_clock_dev->features = CLOCK_EVT_FEAT_DUMMY;
        dummy_clock_dev->cpumask = cpumask_of(cpu);
-       dummy_clock_dev->mode = CLOCK_EVT_MODE_UNUSED;
 
        clockevents_register_device(dummy_clock_dev);
 }
index 4f8f1f87ef1160fb5cd88a2fc7e771682891b2df..a336094a7a6c943b43dda144b44054d83bb4f54e 100644 (file)
@@ -270,23 +270,25 @@ void migrate_irqs(void)
 
        for_each_active_irq(i) {
                struct irq_data *data = irq_get_irq_data(i);
+               struct cpumask *mask;
                unsigned int newcpu;
 
                if (irqd_is_per_cpu(data))
                        continue;
 
-               if (!cpumask_test_cpu(cpu, data->affinity))
+               mask = irq_data_get_affinity_mask(data);
+               if (!cpumask_test_cpu(cpu, mask))
                        continue;
 
-               newcpu = cpumask_any_and(data->affinity, cpu_online_mask);
+               newcpu = cpumask_any_and(mask, cpu_online_mask);
 
                if (newcpu >= nr_cpu_ids) {
                        pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
                                            i, cpu);
 
-                       cpumask_setall(data->affinity);
+                       cpumask_setall(mask);
                }
-               irq_set_affinity(i, data->affinity);
+               irq_set_affinity(i, mask);
        }
 }
 #endif /* CONFIG_HOTPLUG_CPU */
index be1731d5e2fa2b600fb17acabe2622b04a0c65fb..e9bcdb6e0086ba2b911621700b7712bc5b442057 100644 (file)
 #ifndef _UAPI_ASM_MICROBLAZE_ELF_H
 #define _UAPI_ASM_MICROBLAZE_ELF_H
 
+#include <linux/elf-em.h>
+
 /*
  * Note there is no "official" ELF designation for Microblaze.
  * I've snaffled the value from the microblaze binutils source code
  * /binutils/microblaze/include/elf/microblaze.h
  */
-#define EM_MICROBLAZE          189
 #define EM_MICROBLAZE_OLD      0xbaab
 #define ELF_ARCH               EM_MICROBLAZE
 
diff --git a/arch/nios2/boot/dts/10m50_devboard.dts b/arch/nios2/boot/dts/10m50_devboard.dts
new file mode 100755 (executable)
index 0000000..3e411c6
--- /dev/null
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2015 Altera Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/dts-v1/;
+
+/ {
+       model = "Altera NiosII Max10";
+       compatible = "altr,niosii-max10";
+       #address-cells = <1>;
+       #size-cells = <1>;
+
+       cpus {
+               #address-cells = <1>;
+               #size-cells = <0>;
+
+               cpu: cpu@0 {
+                       device_type = "cpu";
+                       compatible = "altr,nios2-1.1";
+                       reg = <0x00000000>;
+                       interrupt-controller;
+                       #interrupt-cells = <1>;
+                       altr,exception-addr = <0xc8000120>;
+                       altr,fast-tlb-miss-addr = <0xc0000100>;
+                       altr,has-div = <1>;
+                       altr,has-initda = <1>;
+                       altr,has-mmu = <1>;
+                       altr,has-mul = <1>;
+                       altr,implementation = "fast";
+                       altr,pid-num-bits = <8>;
+                       altr,reset-addr = <0xd4000000>;
+                       altr,tlb-num-entries = <256>;
+                       altr,tlb-num-ways = <16>;
+                       altr,tlb-ptr-sz = <8>;
+                       clock-frequency = <75000000>;
+                       dcache-line-size = <32>;
+                       dcache-size = <32768>;
+                       icache-line-size = <32>;
+                       icache-size = <32768>;
+               };
+       };
+
+       memory {
+               device_type = "memory";
+               reg = <0x08000000 0x08000000>,
+                       <0x00000000 0x00000400>;
+       };
+
+       sopc0: sopc@0 {
+               device_type = "soc";
+               ranges;
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "altr,avalon", "simple-bus";
+               bus-frequency = <75000000>;
+
+               jtag_uart: serial@18001530 {
+                       compatible = "altr,juart-1.0";
+                       reg = <0x18001530 0x00000008>;
+                       interrupt-parent = <&cpu>;
+                       interrupts = <7>;
+               };
+
+               a_16550_uart_0: serial@18001600 {
+                       compatible = "altr,16550-FIFO32", "ns16550a";
+                       reg = <0x18001600 0x00000200>;
+                       interrupt-parent = <&cpu>;
+                       interrupts = <1>;
+                       auto-flow-control = <1>;
+                       clock-frequency = <50000000>;
+                       fifo-size = <32>;
+                       reg-io-width = <4>;
+                       reg-shift = <2>;
+               };
+
+               sysid: sysid@18001528 {
+                       compatible = "altr,sysid-1.0";
+                       reg = <0x18001528 0x00000008>;
+                       id = <4207856382>;
+                       timestamp = <1431309290>;
+               };
+
+               rgmii_0_eth_tse_0: ethernet@400 {
+                       compatible = "altr,tse-msgdma-1.0", "altr,tse-1.0";
+                       reg = <0x00000400 0x00000400>,
+                               <0x00000820 0x00000020>,
+                               <0x00000800 0x00000020>,
+                               <0x000008c0 0x00000008>,
+                               <0x00000840 0x00000020>,
+                               <0x00000860 0x00000020>;
+                       reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
+                       interrupt-parent = <&cpu>;
+                       interrupts = <2 3>;
+                       interrupt-names = "rx_irq", "tx_irq";
+                       rx-fifo-depth = <8192>;
+                       tx-fifo-depth = <8192>;
+                       address-bits = <48>;
+                       max-frame-size = <1518>;
+                       local-mac-address = [00 00 00 00 00 00];
+                       altr,has-supplementary-unicast;
+                       altr,enable-sup-addr = <1>;
+                       altr,has-hash-multicast-filter;
+                       altr,enable-hash = <1>;
+                       phy-mode = "rgmii-id";
+                       phy-handle = <&phy0>;
+                       rgmii_0_eth_tse_0_mdio: mdio {
+                               compatible = "altr,tse-mdio";
+                               #address-cells = <1>;
+                               #size-cells = <0>;
+                               phy0: ethernet-phy@0 {
+                                       reg = <0>;
+                                       device_type = "ethernet-phy";
+                               };
+                       };
+               };
+
+               enet_pll: clock@0 {
+                       compatible = "altr,pll-1.0";
+                       #clock-cells = <1>;
+
+                       enet_pll_c0: enet_pll_c0 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <125000000>;
+                               clock-output-names = "enet_pll-c0";
+                       };
+
+                       enet_pll_c1: enet_pll_c1 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <25000000>;
+                               clock-output-names = "enet_pll-c1";
+                       };
+
+                       enet_pll_c2: enet_pll_c2 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <2500000>;
+                               clock-output-names = "enet_pll-c2";
+                       };
+               };
+
+               sys_pll: clock@1 {
+                       compatible = "altr,pll-1.0";
+                       #clock-cells = <1>;
+
+                       sys_pll_c0: sys_pll_c0 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <100000000>;
+                               clock-output-names = "sys_pll-c0";
+                       };
+
+                       sys_pll_c1: sys_pll_c1 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <50000000>;
+                               clock-output-names = "sys_pll-c1";
+                       };
+
+                       sys_pll_c2: sys_pll_c2 {
+                               compatible = "fixed-clock";
+                               #clock-cells = <0>;
+                               clock-frequency = <75000000>;
+                               clock-output-names = "sys_pll-c2";
+                       };
+               };
+
+               sys_clk_timer: timer@18001440 {
+                       compatible = "altr,timer-1.0";
+                       reg = <0x18001440 0x00000020>;
+                       interrupt-parent = <&cpu>;
+                       interrupts = <0>;
+                       clock-frequency = <75000000>;
+               };
+
+               led_pio: gpio@180014d0 {
+                       compatible = "altr,pio-1.0";
+                       reg = <0x180014d0 0x00000010>;
+                       altr,gpio-bank-width = <4>;
+                       resetvalue = <15>;
+                       #gpio-cells = <2>;
+                       gpio-controller;
+               };
+
+               button_pio: gpio@180014c0 {
+                       compatible = "altr,pio-1.0";
+                       reg = <0x180014c0 0x00000010>;
+                       interrupt-parent = <&cpu>;
+                       interrupts = <6>;
+                       altr,gpio-bank-width = <3>;
+                       altr,interrupt-type = <2>;
+                       edge_type = <1>;
+                       level_trigger = <0>;
+                       resetvalue = <0>;
+                       #gpio-cells = <2>;
+                       gpio-controller;
+               };
+
+               sys_clk_timer_1: timer@880 {
+                       compatible = "altr,timer-1.0";
+                       reg = <0x00000880 0x00000020>;
+                       interrupt-parent = <&cpu>;
+                       interrupts = <5>;
+                       clock-frequency = <75000000>;
+               };
+
+               fpga_leds: leds {
+                       compatible = "gpio-leds";
+
+                       led_fpga0: fpga0 {
+                               label = "fpga_led0";
+                               gpios = <&led_pio 0 1>;
+                       };
+
+                       led_fpga1: fpga1 {
+                               label = "fpga_led1";
+                               gpios = <&led_pio 1 1>;
+                       };
+
+                       led_fpga2: fpga2 {
+                               label = "fpga_led2";
+                               gpios = <&led_pio 2 1>;
+                       };
+
+                       led_fpga3: fpga3 {
+                               label = "fpga_led3";
+                               gpios = <&led_pio 3 1>;
+                       };
+               };
+       };
+
+       chosen {
+               bootargs = "debug console=ttyS0,115200";
+       };
+};
diff --git a/arch/nios2/configs/10m50_defconfig b/arch/nios2/configs/10m50_defconfig
new file mode 100755 (executable)
index 0000000..8b2a30b
--- /dev/null
@@ -0,0 +1,81 @@
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSCTL_SYSCALL=y
+# CONFIG_ELF_CORE is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_SHMEM is not set
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NIOS2_MEM_BASE=0x8000000
+CONFIG_NIOS2_HW_MUL_SUPPORT=y
+CONFIG_NIOS2_HW_DIV_SUPPORT=y
+CONFIG_CUSTOM_CACHE_SETTINGS=y
+CONFIG_NIOS2_DCACHE_SIZE=0x8000
+CONFIG_NIOS2_ICACHE_SIZE=0x8000
+# CONFIG_NIOS2_CMDLINE_IGNORE_DTB is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_NETDEVICES=y
+CONFIG_ALTERA_TSE=y
+CONFIG_MARVELL_PHY=y
+# CONFIG_WLAN is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_ALTERA_JTAGUART=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_ALTERA=y
+# CONFIG_HWMON is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_ROOT_NFS=y
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_DEBUG_INFO=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
index 4e5907a0cabe88d4a797b9cdbc0f0081392c62bd..23e0544e117cef65f8b771fdd2917ed8995c50e8 100644 (file)
@@ -32,8 +32,6 @@
 #define INST_STW       0x15
 #define INST_LDW       0x17
 
-static unsigned long ma_user, ma_kern, ma_skipped, ma_half, ma_word;
-
 static unsigned int ma_usermode;
 #define UM_WARN                0x01
 #define UM_FIXUP       0x02
@@ -53,7 +51,6 @@ static int reg_offsets[32];
 static inline u32 get_reg_val(struct pt_regs *fp, int reg)
 {
        u8 *p = ((u8 *)fp) + reg_offsets[reg];
-
        return *(u32 *)p;
 }
 
@@ -71,14 +68,13 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
        u32 isn, addr, val;
        int in_kernel;
        u8 a, b, d0, d1, d2, d3;
-       u16 imm16;
+       s16 imm16;
        unsigned int fault;
 
        /* back up one instruction */
        fp->ea -= 4;
 
        if (fixup_exception(fp)) {
-               ma_skipped++;
                return;
        }
 
@@ -103,18 +99,11 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
                        fault |= __get_user(d1, (u8 *)(addr+1));
                        val = (d1 << 8) | d0;
                        put_reg_val(fp, b, val);
-                       ma_half++;
                        break;
                case INST_STH:
                        val = get_reg_val(fp, b);
                        d1 = val >> 8;
                        d0 = val >> 0;
-
-                       pr_debug("sth: ra=%d (%08x) rb=%d (%08x), imm16 %04x addr %08x val %08x\n",
-                               a, get_reg_val(fp, a),
-                               b, get_reg_val(fp, b),
-                               imm16, addr, val);
-
                        if (in_kernel) {
                                *(u8 *)(addr+0) = d0;
                                *(u8 *)(addr+1) = d1;
@@ -122,14 +111,12 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
                                fault |= __put_user(d0, (u8 *)(addr+0));
                                fault |= __put_user(d1, (u8 *)(addr+1));
                        }
-                       ma_half++;
                        break;
                case INST_LDH:
                        fault |= __get_user(d0, (u8 *)(addr+0));
                        fault |= __get_user(d1, (u8 *)(addr+1));
                        val = (short)((d1 << 8) | d0);
                        put_reg_val(fp, b, val);
-                       ma_half++;
                        break;
                case INST_STW:
                        val = get_reg_val(fp, b);
@@ -148,7 +135,6 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
                                fault |= __put_user(d2, (u8 *)(addr+2));
                                fault |= __put_user(d3, (u8 *)(addr+3));
                        }
-                       ma_word++;
                        break;
                case INST_LDW:
                        fault |= __get_user(d0, (u8 *)(addr+0));
@@ -157,7 +143,6 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
                        fault |= __get_user(d3, (u8 *)(addr+3));
                        val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0;
                        put_reg_val(fp, b, val);
-                       ma_word++;
                        break;
                }
        }
@@ -186,7 +171,6 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
         *  note exception and skip bad instruction (return)
         */
        if (in_kernel) {
-               ma_kern++;
                fp->ea += 4;
 
                if (ma_usermode & KM_WARN) {
@@ -200,8 +184,6 @@ asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
                return;
        }
 
-       ma_user++;
-
        /*
         * user mode -
         *  possibly warn,
index 9e3cc8a40ee9feb164842780ef885426681c9466..bbc3f9157f9c4fec3418e8a92c20f204924e0c5b 100644 (file)
@@ -130,7 +130,7 @@ static void nios2_timer_stop(struct nios2_timer *timer)
 }
 
 static void nios2_timer_config(struct nios2_timer *timer, unsigned long period,
-       enum clock_event_mode mode)
+                              bool periodic)
 {
        u16 ctrl;
 
@@ -148,7 +148,7 @@ static void nios2_timer_config(struct nios2_timer *timer, unsigned long period,
        timer_writew(timer, period >> 16, ALTERA_TIMER_PERIODH_REG);
 
        ctrl |= ALTERA_TIMER_CONTROL_START_MSK | ALTERA_TIMER_CONTROL_ITO_MSK;
-       if (mode == CLOCK_EVT_MODE_PERIODIC)
+       if (periodic)
                ctrl |= ALTERA_TIMER_CONTROL_CONT_MSK;
        else
                ctrl &= ~ALTERA_TIMER_CONTROL_CONT_MSK;
@@ -160,32 +160,38 @@ static int nios2_timer_set_next_event(unsigned long delta,
 {
        struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
 
-       nios2_timer_config(&nios2_ced->timer, delta, evt->mode);
+       nios2_timer_config(&nios2_ced->timer, delta, false);
 
        return 0;
 }
 
-static void nios2_timer_set_mode(enum clock_event_mode mode,
-       struct clock_event_device *evt)
+static int nios2_timer_shutdown(struct clock_event_device *evt)
+{
+       struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+       struct nios2_timer *timer = &nios2_ced->timer;
+
+       nios2_timer_stop(timer);
+       return 0;
+}
+
+static int nios2_timer_set_periodic(struct clock_event_device *evt)
 {
        unsigned long period;
        struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
        struct nios2_timer *timer = &nios2_ced->timer;
 
-       switch (mode) {
-       case CLOCK_EVT_MODE_PERIODIC:
-               period = DIV_ROUND_UP(timer->freq, HZ);
-               nios2_timer_config(timer, period, CLOCK_EVT_MODE_PERIODIC);
-               break;
-       case CLOCK_EVT_MODE_ONESHOT:
-       case CLOCK_EVT_MODE_UNUSED:
-       case CLOCK_EVT_MODE_SHUTDOWN:
-               nios2_timer_stop(timer);
-               break;
-       case CLOCK_EVT_MODE_RESUME:
-               nios2_timer_start(timer);
-               break;
-       }
+       period = DIV_ROUND_UP(timer->freq, HZ);
+       nios2_timer_config(timer, period, true);
+       return 0;
+}
+
+static int nios2_timer_resume(struct clock_event_device *evt)
+{
+       struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+       struct nios2_timer *timer = &nios2_ced->timer;
+
+       nios2_timer_start(timer);
+       return 0;
 }
 
 irqreturn_t timer_interrupt(int irq, void *dev_id)
@@ -218,7 +224,10 @@ static struct nios2_clockevent_dev nios2_ce = {
                .rating = 250,
                .shift = 32,
                .set_next_event = nios2_timer_set_next_event,
-               .set_mode = nios2_timer_set_mode,
+               .set_state_shutdown = nios2_timer_shutdown,
+               .set_state_periodic = nios2_timer_set_periodic,
+               .set_state_oneshot = nios2_timer_shutdown,
+               .tick_resume = nios2_timer_resume,
        },
 };
 
index b91e74a817d89742355e547c61b9ce36a243f11b..9fac01cb89c14df3fd4ae8bb51ef51acb56ec54b 100644 (file)
@@ -158,6 +158,7 @@ extern pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
                        bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                        unsigned long *rmap, long pte_index, int realmode);
+extern void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index);
 void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
@@ -225,12 +226,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
        return vcpu->arch.cr;
 }
 
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
        vcpu->arch.xer = val;
 }
 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.xer;
 }
index 5bdfb5dd34002348578b8b9c0a6164a04b3d4820..72b6225aca73d9aa4ec47aa3087cb90b623042be 100644 (file)
 #define XICS_MFRR              0xc
 #define XICS_IPI               2       /* interrupt source # for IPIs */
 
+/* Maximum number of threads per physical core */
+#define MAX_SMT_THREADS                8
+
+/* Maximum number of subcores per physical core */
+#define MAX_SUBCORES           4
+
 #ifdef __ASSEMBLY__
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
@@ -65,6 +71,19 @@ kvmppc_resume_\intno:
 
 #else  /*__ASSEMBLY__ */
 
+struct kvmppc_vcore;
+
+/* Struct used for coordinating micro-threading (split-core) mode changes */
+struct kvm_split_mode {
+       unsigned long   rpr;
+       unsigned long   pmmar;
+       unsigned long   ldbar;
+       u8              subcore_size;
+       u8              do_nap;
+       u8              napped[MAX_SMT_THREADS];
+       struct kvmppc_vcore *master_vcs[MAX_SUBCORES];
+};
+
 /*
  * This struct goes in the PACA on 64-bit processors.  It is used
  * to store host state that needs to be saved when we enter a guest
@@ -100,6 +119,7 @@ struct kvmppc_host_state {
        u64 host_spurr;
        u64 host_dscr;
        u64 dec_expires;
+       struct kvm_split_mode *kvm_split_mode;
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
        u64 cfar;
@@ -112,7 +132,7 @@ struct kvmppc_book3s_shadow_vcpu {
        bool in_use;
        ulong gpr[14];
        u32 cr;
-       u32 xer;
+       ulong xer;
        ulong ctr;
        ulong lr;
        ulong pc;
index 3286f0d6a86c1d7f614989dc676d6ab47735e6d7..bc6e29e4dfd4a125406a5736b995b8fa8500052e 100644 (file)
@@ -54,12 +54,12 @@ static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
        return vcpu->arch.cr;
 }
 
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
+static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
 {
        vcpu->arch.xer = val;
 }
 
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
+static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
 {
        return vcpu->arch.xer;
 }
index d91f65b28e322808d2d05c260ffa13614c01333b..98eebbf663405c59ebcc3174d8619773f9813ac1 100644 (file)
@@ -205,8 +205,10 @@ struct revmap_entry {
  */
 #define KVMPPC_RMAP_LOCK_BIT   63
 #define KVMPPC_RMAP_RC_SHIFT   32
+#define KVMPPC_RMAP_CHG_SHIFT  48
 #define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
 #define KVMPPC_RMAP_CHANGED    (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
+#define KVMPPC_RMAP_CHG_ORDER  (0x3ful << KVMPPC_RMAP_CHG_SHIFT)
 #define KVMPPC_RMAP_PRESENT    0x100000000ul
 #define KVMPPC_RMAP_INDEX      0xfffffffful
 
@@ -278,7 +280,9 @@ struct kvmppc_vcore {
        u16 last_cpu;
        u8 vcore_state;
        u8 in_guest;
+       struct kvmppc_vcore *master_vcore;
        struct list_head runnable_threads;
+       struct list_head preempt_list;
        spinlock_t lock;
        wait_queue_head_t wq;
        spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */
@@ -300,12 +304,21 @@ struct kvmppc_vcore {
 #define VCORE_EXIT_MAP(vc)     ((vc)->entry_exit_map >> 8)
 #define VCORE_IS_EXITING(vc)   (VCORE_EXIT_MAP(vc) != 0)
 
-/* Values for vcore_state */
+/* This bit is used when a vcore exit is triggered from outside the vcore */
+#define VCORE_EXIT_REQ         0x10000
+
+/*
+ * Values for vcore_state.
+ * Note that these are arranged such that lower values
+ * (< VCORE_SLEEPING) don't require stolen time accounting
+ * on load/unload, and higher values do.
+ */
 #define VCORE_INACTIVE 0
-#define VCORE_SLEEPING 1
-#define VCORE_PREEMPT  2
-#define VCORE_RUNNING  3
-#define VCORE_EXITING  4
+#define VCORE_PREEMPT  1
+#define VCORE_PIGGYBACK        2
+#define VCORE_SLEEPING 3
+#define VCORE_RUNNING  4
+#define VCORE_EXITING  5
 
 /*
  * Struct used to manage memory for a virtual processor area
@@ -473,7 +486,7 @@ struct kvm_vcpu_arch {
        ulong ciabr;
        ulong cfar;
        ulong ppr;
-       ulong pspb;
+       u32 pspb;
        ulong fscr;
        ulong shadow_fscr;
        ulong ebbhr;
@@ -619,6 +632,7 @@ struct kvm_vcpu_arch {
        int trap;
        int state;
        int ptid;
+       int thread_cpu;
        bool timer_running;
        wait_queue_head_t cpu_run;
 
index 8452335661a5d5fd896eba78c4e43b40a4532291..790f5d1d9a4624d6f17bdde78706be7240d29ad2 100644 (file)
 
 /* POWER8 Micro Partition Prefetch (MPP) parameters */
 /* Address mask is common for LOGMPP instruction and MPPR SPR */
-#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000
+#define PPC_MPPE_ADDRESS_MASK 0xffffffffc000ULL
 
 /* Bits 60 and 61 of MPP SPR should be set to one of the following */
 /* Aborting the fetch is indeed setting 00 in the table size bits */
index 810f433731dcac24809dea42f1973c5a967bcae9..221d584d089f9418abfa1087cffd9d0b0117c090 100644 (file)
@@ -511,6 +511,8 @@ int main(void)
        DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
        DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
        DEFINE(VCPU_HEIR, offsetof(struct kvm_vcpu, arch.emul_inst));
+       DEFINE(VCPU_CPU, offsetof(struct kvm_vcpu, cpu));
+       DEFINE(VCPU_THREAD_CPU, offsetof(struct kvm_vcpu, arch.thread_cpu));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
        DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
@@ -673,7 +675,14 @@ int main(void)
        HSTATE_FIELD(HSTATE_DSCR, host_dscr);
        HSTATE_FIELD(HSTATE_DABR, dabr);
        HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+       HSTATE_FIELD(HSTATE_SPLIT_MODE, kvm_split_mode);
        DEFINE(IPI_PRIORITY, IPI_PRIORITY);
+       DEFINE(KVM_SPLIT_RPR, offsetof(struct kvm_split_mode, rpr));
+       DEFINE(KVM_SPLIT_PMMAR, offsetof(struct kvm_split_mode, pmmar));
+       DEFINE(KVM_SPLIT_LDBAR, offsetof(struct kvm_split_mode, ldbar));
+       DEFINE(KVM_SPLIT_SIZE, offsetof(struct kvm_split_mode, subcore_size));
+       DEFINE(KVM_SPLIT_DO_NAP, offsetof(struct kvm_split_mode, do_nap));
+       DEFINE(KVM_SPLIT_NAPPED, offsetof(struct kvm_split_mode, napped));
 #endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #ifdef CONFIG_PPC_BOOK3S_64
index 3caec2c421058b0065e9d0cfd382e23e9d7102a6..c2024ac9d4e859963c1c15408824b98baa3e92c3 100644 (file)
@@ -74,14 +74,14 @@ config KVM_BOOK3S_64
          If unsure, say N.
 
 config KVM_BOOK3S_64_HV
-       tristate "KVM support for POWER7 and PPC970 using hypervisor mode in host"
+       tristate "KVM for POWER7 and later using hypervisor mode in host"
        depends on KVM_BOOK3S_64 && PPC_POWERNV
        select KVM_BOOK3S_HV_POSSIBLE
        select MMU_NOTIFIER
        select CMA
        ---help---
          Support running unmodified book3s_64 guest kernels in
-         virtual machines on POWER7 and PPC970 processors that have
+         virtual machines on POWER7 and newer processors that have
          hypervisor mode available to the host.
 
          If you say Y here, KVM will use the hardware virtualization
@@ -89,8 +89,8 @@ config KVM_BOOK3S_64_HV
          guest operating systems will run at full hardware speed
          using supervisor and user modes.  However, this also means
          that KVM is not usable under PowerVM (pHyp), is only usable
-         on POWER7 (or later) processors and PPC970-family processors,
-         and cannot emulate a different processor from the host processor.
+         on POWER7 or later processors, and cannot emulate a
+         different processor from the host processor.
 
          If unsure, say N.
 
index 6d6398f4d632d0c4e5e9d2439ce7e8df4f46767f..d75bf325f54a17ebf4e19dda7a85eed271e4f3ed 100644 (file)
@@ -240,7 +240,8 @@ void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
 }
 
-int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
+static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
+                                        unsigned int priority)
 {
        int deliver = 1;
        int vec = 0;
index 2035d16a9262ac5e547bacc0cbcb4fc862706aa5..d5c9bfeb0c9c7e926aa5a40761959ce5e51f90eb 100644 (file)
@@ -26,6 +26,7 @@
 #include <asm/machdep.h>
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
+#include "book3s.h"
 
 /* #define DEBUG_MMU */
 /* #define DEBUG_SR */
index b982d925c7105f910003ed3e53b5a595558f67ef..79ad35abd1967c0ea68c900f9b5bb83d25a488a6 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/mmu_context.h>
 #include <asm/hw_irq.h>
 #include "trace_pr.h"
+#include "book3s.h"
 
 #define PTE_SIZE 12
 
index dab68b7af3f280225288c68f8d09eee753d85824..1f9c0a17f445f73b858dcfabb761aa098a6fd781 100644 (file)
@@ -761,6 +761,8 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
                        /* Harvest R and C */
                        rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C);
                        *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+                       if (rcbits & HPTE_R_C)
+                               kvmppc_update_rmap_change(rmapp, psize);
                        if (rcbits & ~rev[i].guest_rpte) {
                                rev[i].guest_rpte = ptel | rcbits;
                                note_hpte_modification(kvm, &rev[i]);
@@ -927,8 +929,12 @@ static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
  retry:
        lock_rmap(rmapp);
        if (*rmapp & KVMPPC_RMAP_CHANGED) {
-               *rmapp &= ~KVMPPC_RMAP_CHANGED;
+               long change_order = (*rmapp & KVMPPC_RMAP_CHG_ORDER)
+                       >> KVMPPC_RMAP_CHG_SHIFT;
+               *rmapp &= ~(KVMPPC_RMAP_CHANGED | KVMPPC_RMAP_CHG_ORDER);
                npages_dirty = 1;
+               if (change_order > PAGE_SHIFT)
+                       npages_dirty = 1ul << (change_order - PAGE_SHIFT);
        }
        if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
                unlock_rmap(rmapp);
index 5a2bc4b0dfe5a9b83c4d5be3b820bda90e57a641..2afdb9c0937dbd3b7471b88d75e17ca714bba1ae 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/reg.h>
 #include <asm/switch_to.h>
 #include <asm/time.h>
+#include "book3s.h"
 
 #define OP_19_XOP_RFID         18
 #define OP_19_XOP_RFI          50
index a9f753fb73a816143ec4a013d9424b2acf6dbee4..9754e6815e521c80d6b8cadfe9123ab24b97c014 100644 (file)
@@ -81,6 +81,12 @@ static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);
 #define MPP_BUFFER_ORDER       3
 #endif
 
+static int dynamic_mt_modes = 6;
+module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
+static int target_smt_mode;
+module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
 
 static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
 static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -114,7 +120,7 @@ static bool kvmppc_ipi_thread(int cpu)
 
 static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 {
-       int cpu = vcpu->cpu;
+       int cpu;
        wait_queue_head_t *wqp;
 
        wqp = kvm_arch_vcpu_wq(vcpu);
@@ -123,10 +129,11 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
                ++vcpu->stat.halt_wakeup;
        }
 
-       if (kvmppc_ipi_thread(cpu + vcpu->arch.ptid))
+       if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
                return;
 
        /* CPU points to the first thread of the core */
+       cpu = vcpu->cpu;
        if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
                smp_send_reschedule(cpu);
 }
@@ -164,6 +171,27 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
  * they should never fail.)
  */
 
+static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vc->stoltb_lock, flags);
+       vc->preempt_tb = mftb();
+       spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+}
+
+static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&vc->stoltb_lock, flags);
+       if (vc->preempt_tb != TB_NIL) {
+               vc->stolen_tb += mftb() - vc->preempt_tb;
+               vc->preempt_tb = TB_NIL;
+       }
+       spin_unlock_irqrestore(&vc->stoltb_lock, flags);
+}
+
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -175,14 +203,9 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
         * vcpu, and once it is set to this vcpu, only this task
         * ever sets it to NULL.
         */
-       if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
-               spin_lock_irqsave(&vc->stoltb_lock, flags);
-               if (vc->preempt_tb != TB_NIL) {
-                       vc->stolen_tb += mftb() - vc->preempt_tb;
-                       vc->preempt_tb = TB_NIL;
-               }
-               spin_unlock_irqrestore(&vc->stoltb_lock, flags);
-       }
+       if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
+               kvmppc_core_end_stolen(vc);
+
        spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
            vcpu->arch.busy_preempt != TB_NIL) {
@@ -197,11 +220,9 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
        unsigned long flags;
 
-       if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) {
-               spin_lock_irqsave(&vc->stoltb_lock, flags);
-               vc->preempt_tb = mftb();
-               spin_unlock_irqrestore(&vc->stoltb_lock, flags);
-       }
+       if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
+               kvmppc_core_start_stolen(vc);
+
        spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
        if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
                vcpu->arch.busy_preempt = mftb();
@@ -214,12 +235,12 @@ static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
        kvmppc_end_cede(vcpu);
 }
 
-void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
+static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
 {
        vcpu->arch.pvr = pvr;
 }
 
-int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
+static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
 {
        unsigned long pcr = 0;
        struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -259,7 +280,7 @@ int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
        return 0;
 }
 
-void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
+static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
 {
        int r;
 
@@ -292,7 +313,7 @@ void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
               vcpu->arch.last_inst);
 }
 
-struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
+static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 {
        int r;
        struct kvm_vcpu *v, *ret = NULL;
@@ -641,7 +662,8 @@ static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
 
        spin_lock(&vcore->lock);
        if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
-           vcore->vcore_state != VCORE_INACTIVE)
+           vcore->vcore_state != VCORE_INACTIVE &&
+           vcore->runner)
                target = vcore->runner;
        spin_unlock(&vcore->lock);
 
@@ -1431,6 +1453,7 @@ static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
        vcore->lpcr = kvm->arch.lpcr;
        vcore->first_vcpuid = core * threads_per_subcore;
        vcore->kvm = kvm;
+       INIT_LIST_HEAD(&vcore->preempt_list);
 
        vcore->mpp_buffer_is_valid = false;
 
@@ -1655,6 +1678,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
        spin_unlock(&vcore->lock);
        vcpu->arch.vcore = vcore;
        vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
+       vcpu->arch.thread_cpu = -1;
 
        vcpu->arch.cpu_type = KVM_CPU_3S_64;
        kvmppc_sanity_check(vcpu);
@@ -1749,6 +1773,7 @@ static int kvmppc_grab_hwthread(int cpu)
 
        /* Ensure the thread won't go into the kernel if it wakes */
        tpaca->kvm_hstate.kvm_vcpu = NULL;
+       tpaca->kvm_hstate.kvm_vcore = NULL;
        tpaca->kvm_hstate.napping = 0;
        smp_wmb();
        tpaca->kvm_hstate.hwthread_req = 1;
@@ -1780,26 +1805,32 @@ static void kvmppc_release_hwthread(int cpu)
        tpaca = &paca[cpu];
        tpaca->kvm_hstate.hwthread_req = 0;
        tpaca->kvm_hstate.kvm_vcpu = NULL;
+       tpaca->kvm_hstate.kvm_vcore = NULL;
+       tpaca->kvm_hstate.kvm_split_mode = NULL;
 }
 
-static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
+static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
 {
        int cpu;
        struct paca_struct *tpaca;
-       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       struct kvmppc_vcore *mvc = vc->master_vcore;
 
-       if (vcpu->arch.timer_running) {
-               hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
-               vcpu->arch.timer_running = 0;
+       cpu = vc->pcpu;
+       if (vcpu) {
+               if (vcpu->arch.timer_running) {
+                       hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
+                       vcpu->arch.timer_running = 0;
+               }
+               cpu += vcpu->arch.ptid;
+               vcpu->cpu = mvc->pcpu;
+               vcpu->arch.thread_cpu = cpu;
        }
-       cpu = vc->pcpu + vcpu->arch.ptid;
        tpaca = &paca[cpu];
-       tpaca->kvm_hstate.kvm_vcore = vc;
-       tpaca->kvm_hstate.ptid = vcpu->arch.ptid;
-       vcpu->cpu = vc->pcpu;
-       /* Order stores to hstate.kvm_vcore etc. before store to kvm_vcpu */
-       smp_wmb();
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
+       tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
+       /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
+       smp_wmb();
+       tpaca->kvm_hstate.kvm_vcore = mvc;
        if (cpu != smp_processor_id())
                kvmppc_ipi_thread(cpu);
 }
@@ -1812,12 +1843,12 @@ static void kvmppc_wait_for_nap(void)
        for (loops = 0; loops < 1000000; ++loops) {
                /*
                 * Check if all threads are finished.
-                * We set the vcpu pointer when starting a thread
+                * We set the vcore pointer when starting a thread
                 * and the thread clears it when finished, so we look
-                * for any threads that still have a non-NULL vcpu ptr.
+                * for any threads that still have a non-NULL vcore ptr.
                 */
                for (i = 1; i < threads_per_subcore; ++i)
-                       if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+                       if (paca[cpu + i].kvm_hstate.kvm_vcore)
                                break;
                if (i == threads_per_subcore) {
                        HMT_medium();
@@ -1827,7 +1858,7 @@ static void kvmppc_wait_for_nap(void)
        }
        HMT_medium();
        for (i = 1; i < threads_per_subcore; ++i)
-               if (paca[cpu + i].kvm_hstate.kvm_vcpu)
+               if (paca[cpu + i].kvm_hstate.kvm_vcore)
                        pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
 }
 
@@ -1890,6 +1921,278 @@ static void kvmppc_start_restoring_l2_cache(const struct kvmppc_vcore *vc)
        mtspr(SPRN_MPPR, mpp_addr | PPC_MPPR_FETCH_WHOLE_TABLE);
 }
 
+/*
+ * A list of virtual cores for each physical CPU.
+ * These are vcores that could run but their runner VCPU tasks are
+ * (or may be) preempted.
+ */
+struct preempted_vcore_list {
+       struct list_head        list;
+       spinlock_t              lock;
+};
+
+static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
+
+static void init_vcore_lists(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu) {
+               struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
+               spin_lock_init(&lp->lock);
+               INIT_LIST_HEAD(&lp->list);
+       }
+}
+
+static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
+{
+       struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+
+       vc->vcore_state = VCORE_PREEMPT;
+       vc->pcpu = smp_processor_id();
+       if (vc->num_threads < threads_per_subcore) {
+               spin_lock(&lp->lock);
+               list_add_tail(&vc->preempt_list, &lp->list);
+               spin_unlock(&lp->lock);
+       }
+
+       /* Start accumulating stolen time */
+       kvmppc_core_start_stolen(vc);
+}
+
+static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
+{
+       struct preempted_vcore_list *lp;
+
+       kvmppc_core_end_stolen(vc);
+       if (!list_empty(&vc->preempt_list)) {
+               lp = &per_cpu(preempted_vcores, vc->pcpu);
+               spin_lock(&lp->lock);
+               list_del_init(&vc->preempt_list);
+               spin_unlock(&lp->lock);
+       }
+       vc->vcore_state = VCORE_INACTIVE;
+}
+
+/*
+ * This stores information about the virtual cores currently
+ * assigned to a physical core.
+ */
+struct core_info {
+       int             n_subcores;
+       int             max_subcore_threads;
+       int             total_threads;
+       int             subcore_threads[MAX_SUBCORES];
+       struct kvm      *subcore_vm[MAX_SUBCORES];
+       struct list_head vcs[MAX_SUBCORES];
+};
+
+/*
+ * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
+ * respectively in 2-way micro-threading (split-core) mode.
+ */
+static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
+
+static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
+{
+       int sub;
+
+       memset(cip, 0, sizeof(*cip));
+       cip->n_subcores = 1;
+       cip->max_subcore_threads = vc->num_threads;
+       cip->total_threads = vc->num_threads;
+       cip->subcore_threads[0] = vc->num_threads;
+       cip->subcore_vm[0] = vc->kvm;
+       for (sub = 0; sub < MAX_SUBCORES; ++sub)
+               INIT_LIST_HEAD(&cip->vcs[sub]);
+       list_add_tail(&vc->preempt_list, &cip->vcs[0]);
+}
+
+static bool subcore_config_ok(int n_subcores, int n_threads)
+{
+       /* Can only dynamically split if unsplit to begin with */
+       if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
+               return false;
+       if (n_subcores > MAX_SUBCORES)
+               return false;
+       if (n_subcores > 1) {
+               if (!(dynamic_mt_modes & 2))
+                       n_subcores = 4;
+               if (n_subcores > 2 && !(dynamic_mt_modes & 4))
+                       return false;
+       }
+
+       return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
+}
+
+static void init_master_vcore(struct kvmppc_vcore *vc)
+{
+       vc->master_vcore = vc;
+       vc->entry_exit_map = 0;
+       vc->in_guest = 0;
+       vc->napping_threads = 0;
+       vc->conferring_threads = 0;
+}
+
+/*
+ * See if the existing subcores can be split into 3 (or fewer) subcores
+ * of at most two threads each, so we can fit in another vcore.  This
+ * assumes there are at most two subcores and at most 6 threads in total.
+ */
+static bool can_split_piggybacked_subcores(struct core_info *cip)
+{
+       int sub, new_sub;
+       int large_sub = -1;
+       int thr;
+       int n_subcores = cip->n_subcores;
+       struct kvmppc_vcore *vc, *vcnext;
+       struct kvmppc_vcore *master_vc = NULL;
+
+       for (sub = 0; sub < cip->n_subcores; ++sub) {
+               if (cip->subcore_threads[sub] <= 2)
+                       continue;
+               if (large_sub >= 0)
+                       return false;
+               large_sub = sub;
+               vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
+                                     preempt_list);
+               if (vc->num_threads > 2)
+                       return false;
+               n_subcores += (cip->subcore_threads[sub] - 1) >> 1;
+       }
+       if (n_subcores > 3 || large_sub < 0)
+               return false;
+
+       /*
+        * Seems feasible, so go through and move vcores to new subcores.
+        * Note that when we have two or more vcores in one subcore,
+        * all those vcores must have only one thread each.
+        */
+       new_sub = cip->n_subcores;
+       thr = 0;
+       sub = large_sub;
+       list_for_each_entry_safe(vc, vcnext, &cip->vcs[sub], preempt_list) {
+               if (thr >= 2) {
+                       list_del(&vc->preempt_list);
+                       list_add_tail(&vc->preempt_list, &cip->vcs[new_sub]);
+                       /* vc->num_threads must be 1 */
+                       if (++cip->subcore_threads[new_sub] == 1) {
+                               cip->subcore_vm[new_sub] = vc->kvm;
+                               init_master_vcore(vc);
+                               master_vc = vc;
+                               ++cip->n_subcores;
+                       } else {
+                               vc->master_vcore = master_vc;
+                               ++new_sub;
+                       }
+               }
+               thr += vc->num_threads;
+       }
+       cip->subcore_threads[large_sub] = 2;
+       cip->max_subcore_threads = 2;
+
+       return true;
+}
+
+static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
+{
+       int n_threads = vc->num_threads;
+       int sub;
+
+       if (!cpu_has_feature(CPU_FTR_ARCH_207S))
+               return false;
+
+       if (n_threads < cip->max_subcore_threads)
+               n_threads = cip->max_subcore_threads;
+       if (subcore_config_ok(cip->n_subcores + 1, n_threads)) {
+               cip->max_subcore_threads = n_threads;
+       } else if (cip->n_subcores <= 2 && cip->total_threads <= 6 &&
+                  vc->num_threads <= 2) {
+               /*
+                * We may be able to fit another subcore in by
+                * splitting an existing subcore with 3 or 4
+                * threads into two 2-thread subcores, or one
+                * with 5 or 6 threads into three subcores.
+                * We can only do this if those subcores have
+                * piggybacked virtual cores.
+                */
+               if (!can_split_piggybacked_subcores(cip))
+                       return false;
+       } else {
+               return false;
+       }
+
+       sub = cip->n_subcores;
+       ++cip->n_subcores;
+       cip->total_threads += vc->num_threads;
+       cip->subcore_threads[sub] = vc->num_threads;
+       cip->subcore_vm[sub] = vc->kvm;
+       init_master_vcore(vc);
+       list_del(&vc->preempt_list);
+       list_add_tail(&vc->preempt_list, &cip->vcs[sub]);
+
+       return true;
+}
+
+static bool can_piggyback_subcore(struct kvmppc_vcore *pvc,
+                                 struct core_info *cip, int sub)
+{
+       struct kvmppc_vcore *vc;
+       int n_thr;
+
+       vc = list_first_entry(&cip->vcs[sub], struct kvmppc_vcore,
+                             preempt_list);
+
+       /* require same VM and same per-core reg values */
+       if (pvc->kvm != vc->kvm ||
+           pvc->tb_offset != vc->tb_offset ||
+           pvc->pcr != vc->pcr ||
+           pvc->lpcr != vc->lpcr)
+               return false;
+
+       /* P8 guest with > 1 thread per core would see wrong TIR value */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
+           (vc->num_threads > 1 || pvc->num_threads > 1))
+               return false;
+
+       n_thr = cip->subcore_threads[sub] + pvc->num_threads;
+       if (n_thr > cip->max_subcore_threads) {
+               if (!subcore_config_ok(cip->n_subcores, n_thr))
+                       return false;
+               cip->max_subcore_threads = n_thr;
+       }
+
+       cip->total_threads += pvc->num_threads;
+       cip->subcore_threads[sub] = n_thr;
+       pvc->master_vcore = vc;
+       list_del(&pvc->preempt_list);
+       list_add_tail(&pvc->preempt_list, &cip->vcs[sub]);
+
+       return true;
+}
+
+/*
+ * Work out whether it is possible to piggyback the execution of
+ * vcore *pvc onto the execution of the other vcores described in *cip.
+ */
+static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
+                         int target_threads)
+{
+       int sub;
+
+       if (cip->total_threads + pvc->num_threads > target_threads)
+               return false;
+       for (sub = 0; sub < cip->n_subcores; ++sub)
+               if (cip->subcore_threads[sub] &&
+                   can_piggyback_subcore(pvc, cip, sub))
+                       return true;
+
+       if (can_dynamic_split(pvc, cip))
+               return true;
+
+       return false;
+}
+
 static void prepare_threads(struct kvmppc_vcore *vc)
 {
        struct kvm_vcpu *vcpu, *vnext;
@@ -1909,12 +2212,45 @@ static void prepare_threads(struct kvmppc_vcore *vc)
        }
 }
 
-static void post_guest_process(struct kvmppc_vcore *vc)
+static void collect_piggybacks(struct core_info *cip, int target_threads)
+{
+       struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
+       struct kvmppc_vcore *pvc, *vcnext;
+
+       spin_lock(&lp->lock);
+       list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
+               if (!spin_trylock(&pvc->lock))
+                       continue;
+               prepare_threads(pvc);
+               if (!pvc->n_runnable) {
+                       list_del_init(&pvc->preempt_list);
+                       if (pvc->runner == NULL) {
+                               pvc->vcore_state = VCORE_INACTIVE;
+                               kvmppc_core_end_stolen(pvc);
+                       }
+                       spin_unlock(&pvc->lock);
+                       continue;
+               }
+               if (!can_piggyback(pvc, cip, target_threads)) {
+                       spin_unlock(&pvc->lock);
+                       continue;
+               }
+               kvmppc_core_end_stolen(pvc);
+               pvc->vcore_state = VCORE_PIGGYBACK;
+               if (cip->total_threads >= target_threads)
+                       break;
+       }
+       spin_unlock(&lp->lock);
+}
+
+static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
 {
+       int still_running = 0;
        u64 now;
        long ret;
        struct kvm_vcpu *vcpu, *vnext;
 
+       spin_lock(&vc->lock);
        now = get_tb();
        list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
                                 arch.run_list) {
@@ -1933,17 +2269,36 @@ static void post_guest_process(struct kvmppc_vcore *vc)
                vcpu->arch.ret = ret;
                vcpu->arch.trap = 0;
 
-               if (vcpu->arch.ceded) {
-                       if (!is_kvmppc_resume_guest(ret))
-                               kvmppc_end_cede(vcpu);
-                       else
+               if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
+                       if (vcpu->arch.pending_exceptions)
+                               kvmppc_core_prepare_to_enter(vcpu);
+                       if (vcpu->arch.ceded)
                                kvmppc_set_timer(vcpu);
-               }
-               if (!is_kvmppc_resume_guest(vcpu->arch.ret)) {
+                       else
+                               ++still_running;
+               } else {
                        kvmppc_remove_runnable(vc, vcpu);
                        wake_up(&vcpu->arch.cpu_run);
                }
        }
+       list_del_init(&vc->preempt_list);
+       if (!is_master) {
+               if (still_running > 0) {
+                       kvmppc_vcore_preempt(vc);
+               } else if (vc->runner) {
+                       vc->vcore_state = VCORE_PREEMPT;
+                       kvmppc_core_start_stolen(vc);
+               } else {
+                       vc->vcore_state = VCORE_INACTIVE;
+               }
+               if (vc->n_runnable > 0 && vc->runner == NULL) {
+                       /* make sure there's a candidate runner awake */
+                       vcpu = list_first_entry(&vc->runnable_threads,
+                                               struct kvm_vcpu, arch.run_list);
+                       wake_up(&vcpu->arch.cpu_run);
+               }
+       }
+       spin_unlock(&vc->lock);
 }
 
 /*
@@ -1955,6 +2310,15 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        struct kvm_vcpu *vcpu, *vnext;
        int i;
        int srcu_idx;
+       struct core_info core_info;
+       struct kvmppc_vcore *pvc, *vcnext;
+       struct kvm_split_mode split_info, *sip;
+       int split, subcore_size, active;
+       int sub;
+       bool thr0_done;
+       unsigned long cmd_bit, stat_bit;
+       int pcpu, thr;
+       int target_threads;
 
        /*
         * Remove from the list any threads that have a signal pending
@@ -1969,11 +2333,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
        /*
         * Initialize *vc.
         */
-       vc->entry_exit_map = 0;
+       init_master_vcore(vc);
        vc->preempt_tb = TB_NIL;
-       vc->in_guest = 0;
-       vc->napping_threads = 0;
-       vc->conferring_threads = 0;
 
        /*
         * Make sure we are running on primary threads, and that secondary
@@ -1991,24 +2352,120 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
                goto out;
        }
 
+       /*
+        * See if we could run any other vcores on the physical core
+        * along with this one.
+        */
+       init_core_info(&core_info, vc);
+       pcpu = smp_processor_id();
+       target_threads = threads_per_subcore;
+       if (target_smt_mode && target_smt_mode < target_threads)
+               target_threads = target_smt_mode;
+       if (vc->num_threads < target_threads)
+               collect_piggybacks(&core_info, target_threads);
+
+       /* Decide on micro-threading (split-core) mode */
+       subcore_size = threads_per_subcore;
+       cmd_bit = stat_bit = 0;
+       split = core_info.n_subcores;
+       sip = NULL;
+       if (split > 1) {
+               /* threads_per_subcore must be MAX_SMT_THREADS (8) here */
+               if (split == 2 && (dynamic_mt_modes & 2)) {
+                       cmd_bit = HID0_POWER8_1TO2LPAR;
+                       stat_bit = HID0_POWER8_2LPARMODE;
+               } else {
+                       split = 4;
+                       cmd_bit = HID0_POWER8_1TO4LPAR;
+                       stat_bit = HID0_POWER8_4LPARMODE;
+               }
+               subcore_size = MAX_SMT_THREADS / split;
+               sip = &split_info;
+               memset(&split_info, 0, sizeof(split_info));
+               split_info.rpr = mfspr(SPRN_RPR);
+               split_info.pmmar = mfspr(SPRN_PMMAR);
+               split_info.ldbar = mfspr(SPRN_LDBAR);
+               split_info.subcore_size = subcore_size;
+               for (sub = 0; sub < core_info.n_subcores; ++sub)
+                       split_info.master_vcs[sub] =
+                               list_first_entry(&core_info.vcs[sub],
+                                       struct kvmppc_vcore, preempt_list);
+               /* order writes to split_info before kvm_split_mode pointer */
+               smp_wmb();
+       }
+       pcpu = smp_processor_id();
+       for (thr = 0; thr < threads_per_subcore; ++thr)
+               paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
+
+       /* Initiate micro-threading (split-core) if required */
+       if (cmd_bit) {
+               unsigned long hid0 = mfspr(SPRN_HID0);
+
+               hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
+               mb();
+               mtspr(SPRN_HID0, hid0);
+               isync();
+               for (;;) {
+                       hid0 = mfspr(SPRN_HID0);
+                       if (hid0 & stat_bit)
+                               break;
+                       cpu_relax();
+               }
+       }
 
-       vc->pcpu = smp_processor_id();
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
-               kvmppc_start_thread(vcpu);
-               kvmppc_create_dtl_entry(vcpu, vc);
-               trace_kvm_guest_enter(vcpu);
+       /* Start all the threads */
+       active = 0;
+       for (sub = 0; sub < core_info.n_subcores; ++sub) {
+               thr = subcore_thread_map[sub];
+               thr0_done = false;
+               active |= 1 << thr;
+               list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
+                       pvc->pcpu = pcpu + thr;
+                       list_for_each_entry(vcpu, &pvc->runnable_threads,
+                                           arch.run_list) {
+                               kvmppc_start_thread(vcpu, pvc);
+                               kvmppc_create_dtl_entry(vcpu, pvc);
+                               trace_kvm_guest_enter(vcpu);
+                               if (!vcpu->arch.ptid)
+                                       thr0_done = true;
+                               active |= 1 << (thr + vcpu->arch.ptid);
+                       }
+                       /*
+                        * We need to start the first thread of each subcore
+                        * even if it doesn't have a vcpu.
+                        */
+                       if (pvc->master_vcore == pvc && !thr0_done)
+                               kvmppc_start_thread(NULL, pvc);
+                       thr += pvc->num_threads;
+               }
        }
 
-       /* Set this explicitly in case thread 0 doesn't have a vcpu */
-       get_paca()->kvm_hstate.kvm_vcore = vc;
-       get_paca()->kvm_hstate.ptid = 0;
+       /*
+        * Ensure that split_info.do_nap is set after setting
+        * the vcore pointer in the PACA of the secondaries.
+        */
+       smp_mb();
+       if (cmd_bit)
+               split_info.do_nap = 1;  /* ask secondaries to nap when done */
+
+       /*
+        * When doing micro-threading, poke the inactive threads as well.
+        * This gets them to the nap instruction after kvm_do_nap,
+        * which reduces the time taken to unsplit later.
+        */
+       if (split > 1)
+               for (thr = 1; thr < threads_per_subcore; ++thr)
+                       if (!(active & (1 << thr)))
+                               kvmppc_ipi_thread(pcpu + thr);
 
        vc->vcore_state = VCORE_RUNNING;
        preempt_disable();
 
        trace_kvmppc_run_core(vc, 0);
 
-       spin_unlock(&vc->lock);
+       for (sub = 0; sub < core_info.n_subcores; ++sub)
+               list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
+                       spin_unlock(&pvc->lock);
 
        kvm_guest_enter();
 
@@ -2019,32 +2476,58 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 
        __kvmppc_vcore_entry();
 
-       spin_lock(&vc->lock);
-
        if (vc->mpp_buffer)
                kvmppc_start_saving_l2_cache(vc);
 
-       /* disable sending of IPIs on virtual external irqs */
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
-               vcpu->cpu = -1;
-       /* wait for secondary threads to finish writing their state to memory */
-       kvmppc_wait_for_nap();
-       for (i = 0; i < threads_per_subcore; ++i)
-               kvmppc_release_hwthread(vc->pcpu + i);
+       srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+
+       spin_lock(&vc->lock);
        /* prevent other vcpu threads from doing kvmppc_start_thread() now */
        vc->vcore_state = VCORE_EXITING;
-       spin_unlock(&vc->lock);
 
-       srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
+       /* wait for secondary threads to finish writing their state to memory */
+       kvmppc_wait_for_nap();
+
+       /* Return to whole-core mode if we split the core earlier */
+       if (split > 1) {
+               unsigned long hid0 = mfspr(SPRN_HID0);
+               unsigned long loops = 0;
+
+               hid0 &= ~HID0_POWER8_DYNLPARDIS;
+               stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
+               mb();
+               mtspr(SPRN_HID0, hid0);
+               isync();
+               for (;;) {
+                       hid0 = mfspr(SPRN_HID0);
+                       if (!(hid0 & stat_bit))
+                               break;
+                       cpu_relax();
+                       ++loops;
+               }
+               split_info.do_nap = 0;
+       }
+
+       /* Let secondaries go back to the offline loop */
+       for (i = 0; i < threads_per_subcore; ++i) {
+               kvmppc_release_hwthread(pcpu + i);
+               if (sip && sip->napped[i])
+                       kvmppc_ipi_thread(pcpu + i);
+       }
+
+       spin_unlock(&vc->lock);
 
        /* make sure updates to secondary vcpu structs are visible now */
        smp_mb();
        kvm_guest_exit();
 
-       preempt_enable();
+       for (sub = 0; sub < core_info.n_subcores; ++sub)
+               list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
+                                        preempt_list)
+                       post_guest_process(pvc, pvc == vc);
 
        spin_lock(&vc->lock);
-       post_guest_process(vc);
+       preempt_enable();
 
  out:
        vc->vcore_state = VCORE_INACTIVE;
@@ -2055,13 +2538,17 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
  * Wait for some other vcpu thread to execute us, and
  * wake us up when we need to handle something in the host.
  */
-static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
+static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
+                                struct kvm_vcpu *vcpu, int wait_state)
 {
        DEFINE_WAIT(wait);
 
        prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
-       if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
+       if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
+               spin_unlock(&vc->lock);
                schedule();
+               spin_lock(&vc->lock);
+       }
        finish_wait(&vcpu->arch.cpu_run, &wait);
 }
 
@@ -2137,9 +2624,21 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         * this thread straight away and have it join in.
         */
        if (!signal_pending(current)) {
-               if (vc->vcore_state == VCORE_RUNNING && !VCORE_IS_EXITING(vc)) {
+               if (vc->vcore_state == VCORE_PIGGYBACK) {
+                       struct kvmppc_vcore *mvc = vc->master_vcore;
+                       if (spin_trylock(&mvc->lock)) {
+                               if (mvc->vcore_state == VCORE_RUNNING &&
+                                   !VCORE_IS_EXITING(mvc)) {
+                                       kvmppc_create_dtl_entry(vcpu, vc);
+                                       kvmppc_start_thread(vcpu, vc);
+                                       trace_kvm_guest_enter(vcpu);
+                               }
+                               spin_unlock(&mvc->lock);
+                       }
+               } else if (vc->vcore_state == VCORE_RUNNING &&
+                          !VCORE_IS_EXITING(vc)) {
                        kvmppc_create_dtl_entry(vcpu, vc);
-                       kvmppc_start_thread(vcpu);
+                       kvmppc_start_thread(vcpu, vc);
                        trace_kvm_guest_enter(vcpu);
                } else if (vc->vcore_state == VCORE_SLEEPING) {
                        wake_up(&vc->wq);
@@ -2149,10 +2648,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
               !signal_pending(current)) {
+               if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
+                       kvmppc_vcore_end_preempt(vc);
+
                if (vc->vcore_state != VCORE_INACTIVE) {
-                       spin_unlock(&vc->lock);
-                       kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
-                       spin_lock(&vc->lock);
+                       kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
                        continue;
                }
                list_for_each_entry_safe(v, vn, &vc->runnable_threads,
@@ -2179,10 +2679,11 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
                if (n_ceded == vc->n_runnable) {
                        kvmppc_vcore_blocked(vc);
                } else if (need_resched()) {
-                       vc->vcore_state = VCORE_PREEMPT;
+                       kvmppc_vcore_preempt(vc);
                        /* Let something else run */
                        cond_resched_lock(&vc->lock);
-                       vc->vcore_state = VCORE_INACTIVE;
+                       if (vc->vcore_state == VCORE_PREEMPT)
+                               kvmppc_vcore_end_preempt(vc);
                } else {
                        kvmppc_run_core(vc);
                }
@@ -2191,11 +2692,8 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 
        while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
               (vc->vcore_state == VCORE_RUNNING ||
-               vc->vcore_state == VCORE_EXITING)) {
-               spin_unlock(&vc->lock);
-               kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
-               spin_lock(&vc->lock);
-       }
+               vc->vcore_state == VCORE_EXITING))
+               kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
 
        if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
                kvmppc_remove_runnable(vc, vcpu);
@@ -2755,6 +3253,8 @@ static int kvmppc_book3s_init_hv(void)
 
        init_default_hcalls();
 
+       init_vcore_lists();
+
        r = kvmppc_mmu_hv_init();
        return r;
 }
index ed2589d4593fb710085014bcc81a3d2a0e9e54ff..fd7006bf6b1a1a59a86fe42efcbbb5c9b485e6a1 100644 (file)
@@ -110,14 +110,15 @@ void __init kvm_cma_reserve(void)
 long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
 {
-       struct kvmppc_vcore *vc = vcpu->arch.vcore;
+       struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+       int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */
 
-       set_bit(vcpu->arch.ptid, &vc->conferring_threads);
+       set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
@@ -127,7 +128,7 @@ long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                        break;
                }
        }
-       clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
+       clear_bit(ptid, &vc->conferring_threads);
        return rv;
 }
 
@@ -238,7 +239,8 @@ void kvmhv_commence_exit(int trap)
 {
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
-       int me, ee;
+       struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
+       int me, ee, i;
 
        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
@@ -258,4 +260,26 @@ void kvmhv_commence_exit(int trap)
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+
+       /*
+        * If we are doing dynamic micro-threading, interrupt the other
+        * subcores to pull them out of their guests too.
+        */
+       if (!sip)
+               return;
+
+       for (i = 0; i < MAX_SUBCORES; ++i) {
+               vc = sip->master_vcs[i];
+               if (!vc)
+                       break;
+               do {
+                       ee = vc->entry_exit_map;
+                       /* Already asked to exit? */
+                       if ((ee >> 8) != 0)
+                               break;
+               } while (cmpxchg(&vc->entry_exit_map, ee,
+                                ee | VCORE_EXIT_REQ) != ee);
+               if ((ee >> 8) == 0)
+                       kvmhv_interrupt_vcore(vc, ee);
+       }
 }
index b027a89737b62cee4399e63eb9d61232f156687d..c1df9bb1e413a1ec76222a58cf0bc13c9bfdb280 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/kvm_host.h>
 #include <linux/hugetlb.h>
 #include <linux/module.h>
+#include <linux/log2.h>
 
 #include <asm/tlbflush.h>
 #include <asm/kvm_ppc.h>
@@ -97,25 +98,52 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
 }
 EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
 
+/* Update the changed page order field of an rmap entry */
+void kvmppc_update_rmap_change(unsigned long *rmap, unsigned long psize)
+{
+       unsigned long order;
+
+       if (!psize)
+               return;
+       order = ilog2(psize);
+       order <<= KVMPPC_RMAP_CHG_SHIFT;
+       if (order > (*rmap & KVMPPC_RMAP_CHG_ORDER))
+               *rmap = (*rmap & ~KVMPPC_RMAP_CHG_ORDER) | order;
+}
+EXPORT_SYMBOL_GPL(kvmppc_update_rmap_change);
+
+/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
+static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
+                                     unsigned long hpte_gr)
+{
+       struct kvm_memory_slot *memslot;
+       unsigned long *rmap;
+       unsigned long gfn;
+
+       gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
+       memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+       if (!memslot)
+               return NULL;
+
+       rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
+       return rmap;
+}
+
 /* Remove this HPTE from the chain for a real page */
 static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                struct revmap_entry *rev,
                                unsigned long hpte_v, unsigned long hpte_r)
 {
        struct revmap_entry *next, *prev;
-       unsigned long gfn, ptel, head;
-       struct kvm_memory_slot *memslot;
+       unsigned long ptel, head;
        unsigned long *rmap;
        unsigned long rcbits;
 
        rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
        ptel = rev->guest_rpte |= rcbits;
-       gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
-       memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
-       if (!memslot)
+       rmap = revmap_for_hpte(kvm, hpte_v, ptel);
+       if (!rmap)
                return;
-
-       rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);
 
        head = *rmap & KVMPPC_RMAP_INDEX;
@@ -131,6 +159,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
+       if (rcbits & HPTE_R_C)
+               kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
        unlock_rmap(rmap);
 }
 
@@ -421,14 +451,20 @@ long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = pte & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
-               u64 pte1;
-
-               pte1 = be64_to_cpu(hpte[1]);
                hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
-               rb = compute_tlbie_rb(v, pte1, pte_index);
+               rb = compute_tlbie_rb(v, be64_to_cpu(hpte[1]), pte_index);
                do_tlbies(kvm, &rb, 1, global_invalidates(kvm, flags), true);
-               /* Read PTE low word after tlbie to get final R/C values */
-               remove_revmap_chain(kvm, pte_index, rev, v, pte1);
+               /*
+                * The reference (R) and change (C) bits in a HPT
+                * entry can be set by hardware at any time up until
+                * the HPTE is invalidated and the TLB invalidation
+                * sequence has completed.  This means that when
+                * removing a HPTE, we need to re-read the HPTE after
+                * the invalidation sequence has completed in order to
+                * obtain reliable values of R and C.
+                */
+               remove_revmap_chain(kvm, pte_index, rev, v,
+                                   be64_to_cpu(hpte[1]));
        }
        r = rev->guest_rpte & ~HPTE_GR_RESERVED;
        note_hpte_modification(kvm, rev);
@@ -655,6 +691,105 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
        return H_SUCCESS;
 }
 
+long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
+                       unsigned long pte_index)
+{
+       struct kvm *kvm = vcpu->kvm;
+       __be64 *hpte;
+       unsigned long v, r, gr;
+       struct revmap_entry *rev;
+       unsigned long *rmap;
+       long ret = H_NOT_FOUND;
+
+       if (pte_index >= kvm->arch.hpt_npte)
+               return H_PARAMETER;
+
+       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+               cpu_relax();
+       v = be64_to_cpu(hpte[0]);
+       r = be64_to_cpu(hpte[1]);
+       if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+               goto out;
+
+       gr = rev->guest_rpte;
+       if (rev->guest_rpte & HPTE_R_R) {
+               rev->guest_rpte &= ~HPTE_R_R;
+               note_hpte_modification(kvm, rev);
+       }
+       if (v & HPTE_V_VALID) {
+               gr |= r & (HPTE_R_R | HPTE_R_C);
+               if (r & HPTE_R_R) {
+                       kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
+                       rmap = revmap_for_hpte(kvm, v, gr);
+                       if (rmap) {
+                               lock_rmap(rmap);
+                               *rmap |= KVMPPC_RMAP_REFERENCED;
+                               unlock_rmap(rmap);
+                       }
+               }
+       }
+       vcpu->arch.gpr[4] = gr;
+       ret = H_SUCCESS;
+ out:
+       unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+       return ret;
+}
+
+long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
+                       unsigned long pte_index)
+{
+       struct kvm *kvm = vcpu->kvm;
+       __be64 *hpte;
+       unsigned long v, r, gr;
+       struct revmap_entry *rev;
+       unsigned long *rmap;
+       long ret = H_NOT_FOUND;
+
+       if (pte_index >= kvm->arch.hpt_npte)
+               return H_PARAMETER;
+
+       rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+       hpte = (__be64 *)(kvm->arch.hpt_virt + (pte_index << 4));
+       while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
+               cpu_relax();
+       v = be64_to_cpu(hpte[0]);
+       r = be64_to_cpu(hpte[1]);
+       if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
+               goto out;
+
+       gr = rev->guest_rpte;
+       if (gr & HPTE_R_C) {
+               rev->guest_rpte &= ~HPTE_R_C;
+               note_hpte_modification(kvm, rev);
+       }
+       if (v & HPTE_V_VALID) {
+               /* need to make it temporarily absent so C is stable */
+               hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
+               kvmppc_invalidate_hpte(kvm, hpte, pte_index);
+               r = be64_to_cpu(hpte[1]);
+               gr |= r & (HPTE_R_R | HPTE_R_C);
+               if (r & HPTE_R_C) {
+                       unsigned long psize = hpte_page_size(v, r);
+                       hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
+                       eieio();
+                       rmap = revmap_for_hpte(kvm, v, gr);
+                       if (rmap) {
+                               lock_rmap(rmap);
+                               *rmap |= KVMPPC_RMAP_CHANGED;
+                               kvmppc_update_rmap_change(rmap, psize);
+                               unlock_rmap(rmap);
+                       }
+               }
+       }
+       vcpu->arch.gpr[4] = gr;
+       ret = H_SUCCESS;
+ out:
+       unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
+       return ret;
+}
+
 void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
                        unsigned long pte_index)
 {
index 00e45b6d4f2464a0bde10762f5f995ea43b2296a..24f58076d49e1eada68920615c8ce2c3b1166744 100644 (file)
@@ -67,14 +67,12 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
        }
 
        /* Check if the core is loaded, if not, too hard */
-       cpu = vcpu->cpu;
+       cpu = vcpu->arch.thread_cpu;
        if (cpu < 0 || cpu >= nr_cpu_ids) {
                this_icp->rm_action |= XICS_RM_KICK_VCPU;
                this_icp->rm_kick_target = vcpu;
                return;
        }
-       /* In SMT cpu will always point to thread 0, we adjust it */
-       cpu += vcpu->arch.ptid;
 
        smp_mb();
        kvmhv_rm_send_ipi(cpu);
index faa86e9c05510973001b7d3c64557e8a9cbd212d..2273dcacef39fe9e2257303a18121116044694c6 100644 (file)
@@ -128,6 +128,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
        subf    r4, r4, r3
        mtspr   SPRN_DEC, r4
 
+       /* hwthread_req may have got set by cede or no vcpu, so clear it */
+       li      r0, 0
+       stb     r0, HSTATE_HWTHREAD_REQ(r13)
+
        /*
         * For external and machine check interrupts, we need
         * to call the Linux handler to process the interrupt.
@@ -215,7 +219,6 @@ kvm_novcpu_wakeup:
        ld      r5, HSTATE_KVM_VCORE(r13)
        li      r0, 0
        stb     r0, HSTATE_NAPPING(r13)
-       stb     r0, HSTATE_HWTHREAD_REQ(r13)
 
        /* check the wake reason */
        bl      kvmppc_check_wake_reason
@@ -315,10 +318,10 @@ kvm_start_guest:
        cmpdi   r3, 0
        bge     kvm_no_guest
 
-       /* get vcpu pointer, NULL if we have no vcpu to run */
-       ld      r4,HSTATE_KVM_VCPU(r13)
-       cmpdi   r4,0
-       /* if we have no vcpu to run, go back to sleep */
+       /* get vcore pointer, NULL if we have nothing to run */
+       ld      r5,HSTATE_KVM_VCORE(r13)
+       cmpdi   r5,0
+       /* if we have no vcore to run, go back to sleep */
        beq     kvm_no_guest
 
 kvm_secondary_got_guest:
@@ -327,21 +330,42 @@ kvm_secondary_got_guest:
        ld      r6, PACA_DSCR_DEFAULT(r13)
        std     r6, HSTATE_DSCR(r13)
 
-       /* Order load of vcore, ptid etc. after load of vcpu */
+       /* On thread 0 of a subcore, set HDEC to max */
+       lbz     r4, HSTATE_PTID(r13)
+       cmpwi   r4, 0
+       bne     63f
+       lis     r6, 0x7fff
+       ori     r6, r6, 0xffff
+       mtspr   SPRN_HDEC, r6
+       /* and set per-LPAR registers, if doing dynamic micro-threading */
+       ld      r6, HSTATE_SPLIT_MODE(r13)
+       cmpdi   r6, 0
+       beq     63f
+       ld      r0, KVM_SPLIT_RPR(r6)
+       mtspr   SPRN_RPR, r0
+       ld      r0, KVM_SPLIT_PMMAR(r6)
+       mtspr   SPRN_PMMAR, r0
+       ld      r0, KVM_SPLIT_LDBAR(r6)
+       mtspr   SPRN_LDBAR, r0
+       isync
+63:
+       /* Order load of vcpu after load of vcore */
        lwsync
+       ld      r4, HSTATE_KVM_VCPU(r13)
        bl      kvmppc_hv_entry
 
        /* Back from the guest, go back to nap */
-       /* Clear our vcpu pointer so we don't come back in early */
+       /* Clear our vcpu and vcore pointers so we don't come back in early */
        li      r0, 0
+       std     r0, HSTATE_KVM_VCPU(r13)
        /*
-        * Once we clear HSTATE_KVM_VCPU(r13), the code in
+        * Once we clear HSTATE_KVM_VCORE(r13), the code in
         * kvmppc_run_core() is going to assume that all our vcpu
         * state is visible in memory.  This lwsync makes sure
         * that that is true.
         */
        lwsync
-       std     r0, HSTATE_KVM_VCPU(r13)
+       std     r0, HSTATE_KVM_VCORE(r13)
 
 /*
  * At this point we have finished executing in the guest.
@@ -374,16 +398,71 @@ kvm_no_guest:
        b       power7_wakeup_loss
 
 53:    HMT_LOW
-       ld      r4, HSTATE_KVM_VCPU(r13)
-       cmpdi   r4, 0
+       ld      r5, HSTATE_KVM_VCORE(r13)
+       cmpdi   r5, 0
+       bne     60f
+       ld      r3, HSTATE_SPLIT_MODE(r13)
+       cmpdi   r3, 0
+       beq     kvm_no_guest
+       lbz     r0, KVM_SPLIT_DO_NAP(r3)
+       cmpwi   r0, 0
        beq     kvm_no_guest
        HMT_MEDIUM
+       b       kvm_unsplit_nap
+60:    HMT_MEDIUM
        b       kvm_secondary_got_guest
 
 54:    li      r0, KVM_HWTHREAD_IN_KVM
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
        b       kvm_no_guest
 
+/*
+ * Here the primary thread is trying to return the core to
+ * whole-core mode, so we need to nap.
+ */
+kvm_unsplit_nap:
+       /*
+        * Ensure that secondary doesn't nap when it has
+        * its vcore pointer set.
+        */
+       sync            /* matches smp_mb() before setting split_info.do_nap */
+       ld      r0, HSTATE_KVM_VCORE(r13)
+       cmpdi   r0, 0
+       bne     kvm_no_guest
+       /* clear any pending message */
+BEGIN_FTR_SECTION
+       lis     r6, (PPC_DBELL_SERVER << (63-36))@h
+       PPC_MSGCLR(6)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+       /* Set kvm_split_mode.napped[tid] = 1 */
+       ld      r3, HSTATE_SPLIT_MODE(r13)
+       li      r0, 1
+       lhz     r4, PACAPACAINDEX(r13)
+       clrldi  r4, r4, 61      /* micro-threading => P8 => 8 threads/core */
+       addi    r4, r4, KVM_SPLIT_NAPPED
+       stbx    r0, r3, r4
+       /* Check the do_nap flag again after setting napped[] */
+       sync
+       lbz     r0, KVM_SPLIT_DO_NAP(r3)
+       cmpwi   r0, 0
+       beq     57f
+       li      r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
+       mfspr   r4, SPRN_LPCR
+       rlwimi  r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
+       mtspr   SPRN_LPCR, r4
+       isync
+       std     r0, HSTATE_SCRATCH0(r13)
+       ptesync
+       ld      r0, HSTATE_SCRATCH0(r13)
+1:     cmpd    r0, r0
+       bne     1b
+       nap
+       b       .
+
+57:    li      r0, 0
+       stbx    r0, r3, r4
+       b       kvm_no_guest
+
 /******************************************************************************
  *                                                                            *
  *                               Entry code                                   *
@@ -854,7 +933,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        cmpwi   r0, 0
        bne     21f
        HMT_LOW
-20:    lbz     r0, VCORE_IN_GUEST(r5)
+20:    lwz     r3, VCORE_ENTRY_EXIT(r5)
+       cmpwi   r3, 0x100
+       bge     no_switch_exit
+       lbz     r0, VCORE_IN_GUEST(r5)
        cmpwi   r0, 0
        beq     20b
        HMT_MEDIUM
@@ -870,7 +952,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        blt     hdec_soon
 
        ld      r6, VCPU_CTR(r4)
-       lwz     r7, VCPU_XER(r4)
+       ld      r7, VCPU_XER(r4)
 
        mtctr   r6
        mtxer   r7
@@ -985,9 +1067,13 @@ secondary_too_late:
 #endif
 11:    b       kvmhv_switch_to_host
 
+no_switch_exit:
+       HMT_MEDIUM
+       li      r12, 0
+       b       12f
 hdec_soon:
        li      r12, BOOK3S_INTERRUPT_HV_DECREMENTER
-       stw     r12, VCPU_TRAP(r4)
+12:    stw     r12, VCPU_TRAP(r4)
        mr      r9, r4
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
        addi    r3, r4, VCPU_TB_RMEXIT
@@ -1103,7 +1189,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        mfctr   r3
        mfxer   r4
        std     r3, VCPU_CTR(r9)
-       stw     r4, VCPU_XER(r9)
+       std     r4, VCPU_XER(r9)
 
        /* If this is a page table miss then see if it's theirs or ours */
        cmpwi   r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
@@ -1127,6 +1213,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
        cmpwi   r12, BOOK3S_INTERRUPT_H_DOORBELL
        bne     3f
        lbz     r0, HSTATE_HOST_IPI(r13)
+       cmpwi   r0, 0
        beq     4f
        b       guest_exit_cont
 3:
@@ -1176,6 +1263,11 @@ mc_cont:
        ld      r9, HSTATE_KVM_VCPU(r13)
        lwz     r12, VCPU_TRAP(r9)
 
+       /* Stop others sending VCPU interrupts to this physical CPU */
+       li      r0, -1
+       stw     r0, VCPU_CPU(r9)
+       stw     r0, VCPU_THREAD_CPU(r9)
+
        /* Save guest CTRL register, set runlatch to 1 */
        mfspr   r6,SPRN_CTRLF
        stw     r6,VCPU_CTRL(r9)
@@ -1540,12 +1632,17 @@ kvmhv_switch_to_host:
 
        /* Primary thread waits for all the secondaries to exit guest */
 15:    lwz     r3,VCORE_ENTRY_EXIT(r5)
-       srwi    r0,r3,8
+       rlwinm  r0,r3,32-8,0xff
        clrldi  r3,r3,56
        cmpw    r3,r0
        bne     15b
        isync
 
+       /* Did we actually switch to the guest at all? */
+       lbz     r6, VCORE_IN_GUEST(r5)
+       cmpwi   r6, 0
+       beq     19f
+
        /* Primary thread switches back to host partition */
        ld      r6,KVM_HOST_SDR1(r4)
        lwz     r7,KVM_HOST_LPID(r4)
@@ -1589,7 +1686,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 18:
        /* Signal secondary CPUs to continue */
        stb     r0,VCORE_IN_GUEST(r5)
-       lis     r8,0x7fff               /* MAX_INT@h */
+19:    lis     r8,0x7fff               /* MAX_INT@h */
        mtspr   SPRN_HDEC,r8
 
 16:    ld      r8,KVM_HOST_LPCR(r4)
@@ -1675,7 +1772,7 @@ kvmppc_hdsi:
        bl      kvmppc_msr_interrupt
 fast_interrupt_c_return:
 6:     ld      r7, VCPU_CTR(r9)
-       lwz     r8, VCPU_XER(r9)
+       ld      r8, VCPU_XER(r9)
        mtctr   r7
        mtxer   r8
        mr      r4, r9
@@ -1816,8 +1913,8 @@ hcall_real_table:
        .long   DOTSYM(kvmppc_h_remove) - hcall_real_table
        .long   DOTSYM(kvmppc_h_enter) - hcall_real_table
        .long   DOTSYM(kvmppc_h_read) - hcall_real_table
-       .long   0               /* 0x10 - H_CLEAR_MOD */
-       .long   0               /* 0x14 - H_CLEAR_REF */
+       .long   DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
+       .long   DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
        .long   DOTSYM(kvmppc_h_protect) - hcall_real_table
        .long   DOTSYM(kvmppc_h_get_tce) - hcall_real_table
        .long   DOTSYM(kvmppc_h_put_tce) - hcall_real_table
index bd6ab1672ae64a9261282c800a6160b20356a950..a759d9adb0b6f8218c38c52520dfe3cdfa70c884 100644 (file)
@@ -352,7 +352,7 @@ static inline u32 inst_get_field(u32 inst, int msb, int lsb)
        return kvmppc_get_field(inst, msb + 32, lsb + 32);
 }
 
-bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
+static bool kvmppc_inst_is_paired_single(struct kvm_vcpu *vcpu, u32 inst)
 {
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return false;
index acee37cde840a3b18a512a09bfc79634a8e26f07..ca8f174289bb43a148bdd9159f71ca645e0c416a 100644 (file)
@@ -123,7 +123,7 @@ no_dcbz32_on:
        PPC_LL  r8, SVCPU_CTR(r3)
        PPC_LL  r9, SVCPU_LR(r3)
        lwz     r10, SVCPU_CR(r3)
-       lwz     r11, SVCPU_XER(r3)
+       PPC_LL  r11, SVCPU_XER(r3)
 
        mtctr   r8
        mtlr    r9
@@ -237,7 +237,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        mfctr   r8
        mflr    r9
 
-       stw     r5, SVCPU_XER(r13)
+       PPC_STL r5, SVCPU_XER(r13)
        PPC_STL r6, SVCPU_FAULT_DAR(r13)
        stw     r7, SVCPU_FAULT_DSISR(r13)
        PPC_STL r8, SVCPU_CTR(r13)
index c6ca7db646735428fb14bde6af9ed1eb68b98351..905e94a1370f4982060b4d917e6fb5b8202d4fd3 100644 (file)
@@ -41,7 +41,7 @@
  * =======
  *
  * Each ICS has a spin lock protecting the information about the IRQ
- * sources and avoiding simultaneous deliveries if the same interrupt.
+ * sources and avoiding simultaneous deliveries of the same interrupt.
  *
  * ICP operations are done via a single compare & swap transaction
  * (most ICP state fits in the union kvmppc_icp_state)
index cc5842657161580262d10fb49e0bdfde4de497fd..ae458f0fd061efea7cdd569c1bdb32970503659c 100644 (file)
@@ -933,6 +933,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
 #endif
                break;
        case BOOKE_INTERRUPT_CRITICAL:
+               kvmppc_fill_pt_regs(&regs);
                unknown_exception(&regs);
                break;
        case BOOKE_INTERRUPT_DEBUG:
index 50860e919cb81777a0ef4015e48f9bfda2b3e810..29911a07bcdb071d1f94db55ca8e7ce4e2b9e5d3 100644 (file)
@@ -377,7 +377,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
-                       | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
+                       | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
index e5dde32fe71fc1856cb5ae515150aa7130de3c96..2e51289610e432b420310971afbe374f0e0938cf 100644 (file)
@@ -660,7 +660,7 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
        return kvmppc_core_pending_dec(vcpu);
 }
 
-enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
+static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
 {
        struct kvm_vcpu *vcpu;
 
index a3804fbe1f36df9d517279dfd56774e6e03376b4..0679e11d2cf7ddf4f7a72bee6db3f4368aff37d1 100644 (file)
@@ -101,6 +101,11 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn)
 {
        unsigned long mfn;
 
+       /*
+        * Some x86 code are still using pfn_to_mfn instead of
+        * pfn_to_gfn. This will have to be removed when we figure
+        * out which call.
+        */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return pfn;
 
@@ -147,6 +152,11 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
 {
        unsigned long pfn;
 
+       /*
+        * Some x86 code are still using mfn_to_pfn instead of
+        * gfn_to_pfn. This will have to be removed when we figure
+        * out which call.
+        */
        if (xen_feature(XENFEAT_auto_translated_physmap))
                return mfn;
 
@@ -176,6 +186,27 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
        return XPADDR(PFN_PHYS(mfn_to_pfn(PFN_DOWN(machine.maddr))) | offset);
 }
 
+/* Pseudo-physical <-> Guest conversion */
+static inline unsigned long pfn_to_gfn(unsigned long pfn)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return pfn;
+       else
+               return pfn_to_mfn(pfn);
+}
+
+static inline unsigned long gfn_to_pfn(unsigned long gfn)
+{
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return gfn;
+       else
+               return mfn_to_pfn(gfn);
+}
+
+/* Pseudo-physical <-> Bus conversion */
+#define pfn_to_bfn(pfn)                pfn_to_gfn(pfn)
+#define bfn_to_pfn(bfn)                gfn_to_pfn(bfn)
+
 /*
  * We detect special mappings in one of two ways:
  *  1. If the MFN is an I/O page then Xen will set the m2p entry
@@ -196,7 +227,7 @@ static inline xpaddr_t machine_to_phys(xmaddr_t machine)
  *      require. In all the cases we care about, the FOREIGN_FRAME bit is
  *      masked (e.g., pfn_to_mfn()) so behaviour there is correct.
  */
-static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
+static inline unsigned long bfn_to_local_pfn(unsigned long mfn)
 {
        unsigned long pfn;
 
@@ -215,6 +246,10 @@ static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
 #define virt_to_mfn(v)         (pfn_to_mfn(virt_to_pfn(v)))
 #define mfn_to_virt(m)         (__va(mfn_to_pfn(m) << PAGE_SHIFT))
 
+/* VIRT <-> GUEST conversion */
+#define virt_to_gfn(v)         (pfn_to_gfn(virt_to_pfn(v)))
+#define gfn_to_virt(g)         (__va(gfn_to_pfn(g) << PAGE_SHIFT))
+
 static inline unsigned long pte_mfn(pte_t pte)
 {
        return (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT;
@@ -262,7 +297,7 @@ void make_lowmem_page_readwrite(void *vaddr);
 
 static inline bool xen_arch_need_swiotlb(struct device *dev,
                                         unsigned long pfn,
-                                        unsigned long mfn)
+                                        unsigned long bfn)
 {
        return false;
 }
index e7a4fde5d631031908b6f336d9ecc40e7a3a413d..b372a7557c16c7d8391fffafdf0b1c74b49c4822 100644 (file)
@@ -650,6 +650,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
        u16 sel;
 
        la = seg_base(ctxt, addr.seg) + addr.ea;
+       *linear = la;
        *max_size = 0;
        switch (mode) {
        case X86EMUL_MODE_PROT64:
@@ -693,7 +694,6 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
        }
        if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
                return emulate_gp(ctxt, 0);
-       *linear = la;
        return X86EMUL_CONTINUE;
 bad:
        if (addr.seg == VCPU_SREG_SS)
index fb16a8ea3dee026d24a6e47332326226cf3a70e0..69088a1ba5090ffa763a56f5c105560f360de767 100644 (file)
@@ -3309,13 +3309,14 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
 
        walk_shadow_page_lockless_begin(vcpu);
 
-       for (shadow_walk_init(&iterator, vcpu, addr), root = iterator.level;
+       for (shadow_walk_init(&iterator, vcpu, addr),
+                leaf = root = iterator.level;
             shadow_walk_okay(&iterator);
             __shadow_walk_next(&iterator, spte)) {
-               leaf = iterator.level;
                spte = mmu_spte_get_lockless(iterator.sptep);
 
                sptes[leaf - 1] = spte;
+               leaf--;
 
                if (!is_shadow_present_pte(spte))
                        break;
@@ -3329,7 +3330,7 @@ walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr, u64 *sptep)
        if (reserved) {
                pr_err("%s: detect reserved bits on spte, addr 0x%llx, dump hierarchy:\n",
                       __func__, addr);
-               while (root >= leaf) {
+               while (root > leaf) {
                        pr_err("------ spte 0x%llx level %d.\n",
                               sptes[root - 1], root);
                        root--;
index 1e7e76e14e8927ea3c909e4d83e879d63b90a815..a60bdbccff5189b5a98b9a7fcc6a3b9f7ff5eeec 100644 (file)
@@ -5943,6 +5943,7 @@ static void process_smi_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
        put_smstate(u32, buf, offset, process_smi_get_segment_flags(&seg));
 }
 
+#ifdef CONFIG_X86_64
 static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
 {
        struct kvm_segment seg;
@@ -5958,6 +5959,7 @@ static void process_smi_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
        put_smstate(u32, buf, offset + 4, seg.limit);
        put_smstate(u64, buf, offset + 8, seg.base);
 }
+#endif
 
 static void process_smi_save_state_32(struct kvm_vcpu *vcpu, char *buf)
 {
index 2c50b445884e86b23f16f25a354d92869bbfe1e7..9c479fe4045912d9152315bc1fa4af9ed334304b 100644 (file)
@@ -2812,9 +2812,9 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
        return 0;
 }
 
-static int do_remap_mfn(struct vm_area_struct *vma,
+static int do_remap_gfn(struct vm_area_struct *vma,
                        unsigned long addr,
-                       xen_pfn_t *mfn, int nr,
+                       xen_pfn_t *gfn, int nr,
                        int *err_ptr, pgprot_t prot,
                        unsigned domid,
                        struct page **pages)
@@ -2830,14 +2830,14 @@ static int do_remap_mfn(struct vm_area_struct *vma,
        if (xen_feature(XENFEAT_auto_translated_physmap)) {
 #ifdef CONFIG_XEN_PVH
                /* We need to update the local page tables and the xen HAP */
-               return xen_xlate_remap_gfn_array(vma, addr, mfn, nr, err_ptr,
+               return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
                                                 prot, domid, pages);
 #else
                return -EINVAL;
 #endif
         }
 
-       rmd.mfn = mfn;
+       rmd.mfn = gfn;
        rmd.prot = prot;
        /* We use the err_ptr to indicate if there we are doing a contigious
         * mapping or a discontigious mapping. */
@@ -2865,8 +2865,8 @@ static int do_remap_mfn(struct vm_area_struct *vma,
                                                    batch_left, &done, domid);
 
                        /*
-                        * @err_ptr may be the same buffer as @mfn, so
-                        * only clear it after each chunk of @mfn is
+                        * @err_ptr may be the same buffer as @gfn, so
+                        * only clear it after each chunk of @gfn is
                         * used.
                         */
                        if (err_ptr) {
@@ -2896,19 +2896,19 @@ out:
        return err < 0 ? err : mapped;
 }
 
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
-                              xen_pfn_t mfn, int nr,
+                              xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages)
 {
-       return do_remap_mfn(vma, addr, &mfn, nr, NULL, prot, domid, pages);
+       return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);
 
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
-                              xen_pfn_t *mfn, int nr,
+                              xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid, struct page **pages)
 {
@@ -2917,13 +2917,13 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
         * cause of "wrong memory was mapped in".
         */
        BUG_ON(err_ptr == NULL);
-       return do_remap_mfn(vma, addr, mfn, nr, err_ptr, prot, domid, pages);
+       return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
 }
-EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_array);
+EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);
 
 
 /* Returns: 0 success */
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages)
 {
        if (!pages || !xen_feature(XENFEAT_auto_translated_physmap))
@@ -2935,4 +2935,4 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
        return -EINVAL;
 #endif
 }
-EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
+EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
index 2a9ff734279168dc5d9d0555ce341cd2ed8e1176..3f4ebf0261f28620a96cdd1cd6366c311c6761b5 100644 (file)
@@ -453,7 +453,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
        }
 #endif
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
-       ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
+       ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();
 
index ff03f2348f77efd8cea40d071a15735d42de400f..2d75366c61e03c7188e2839ec1e8d727d3365966 100644 (file)
@@ -611,13 +611,15 @@ static void *device_get_mac_addr(struct device *dev,
 */
 void *device_get_mac_address(struct device *dev, char *addr, int alen)
 {
-       addr = device_get_mac_addr(dev, "mac-address", addr, alen);
-       if (addr)
-               return addr;
+       char *res;
 
-       addr = device_get_mac_addr(dev, "local-mac-address", addr, alen);
-       if (addr)
-               return addr;
+       res = device_get_mac_addr(dev, "mac-address", addr, alen);
+       if (res)
+               return res;
+
+       res = device_get_mac_addr(dev, "local-mac-address", addr, alen);
+       if (res)
+               return res;
 
        return device_get_mac_addr(dev, "address", addr, alen);
 }
index d4d05f064d390772a2f99acbf882eaa983788511..e93899cc6f60be0bd13b45dde3b8d697b7a733c8 100644 (file)
@@ -478,8 +478,7 @@ static int virtblk_get_cache_mode(struct virtio_device *vdev)
                                   struct virtio_blk_config, wce,
                                   &writeback);
        if (err)
-               writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE) ||
-                           virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+               writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE);
 
        return writeback;
 }
@@ -657,6 +656,7 @@ static int virtblk_probe(struct virtio_device *vdev)
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->driverfs_dev = &vdev->dev;
+       vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;
 
        /* configure queue flush support */
@@ -840,7 +840,7 @@ static unsigned int features_legacy[] = {
 static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
-       VIRTIO_BLK_F_TOPOLOGY,
+       VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ,
 };
 
index 15083539df1529f19469135981ef211c1776d50a..0823a96902f87fa90d2e35a425183ea0de2e0049 100644 (file)
@@ -249,7 +249,7 @@ static struct grant *get_grant(grant_ref_t *gref_head,
                                struct blkfront_info *info)
 {
        struct grant *gnt_list_entry;
-       unsigned long buffer_mfn;
+       unsigned long buffer_gfn;
 
        BUG_ON(list_empty(&info->grants));
        gnt_list_entry = list_first_entry(&info->grants, struct grant,
@@ -268,10 +268,10 @@ static struct grant *get_grant(grant_ref_t *gref_head,
                BUG_ON(!pfn);
                gnt_list_entry->pfn = pfn;
        }
-       buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
+       buffer_gfn = pfn_to_gfn(gnt_list_entry->pfn);
        gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
                                        info->xbdev->otherend_id,
-                                       buffer_mfn, 0);
+                                       buffer_gfn, 0);
        return gnt_list_entry;
 }
 
index 251f48dcd12d5fa344a80d370bd6177c7f705091..7f370d3e098379e9076210764361ebb5046c739f 100644 (file)
@@ -1398,6 +1398,45 @@ static const struct exynos_cpuclk_cfg_data e4210_armclk_d[] __initconst = {
        {  0 },
 };
 
+static const struct exynos_cpuclk_cfg_data e4212_armclk_d[] __initconst = {
+       { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+       { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4210_CPU_DIV1(2, 6), },
+       { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+       { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4210_CPU_DIV1(2, 5), },
+       { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4210_CPU_DIV1(2, 4), },
+       { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4210_CPU_DIV1(2, 4), },
+       {  900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+       {  800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4210_CPU_DIV1(2, 3), },
+       {  700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+       {  600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+       {  500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+       {  400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+       {  300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4210_CPU_DIV1(2, 3), },
+       {  200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4210_CPU_DIV1(2, 3), },
+       {  0 },
+};
+
+#define E4412_CPU_DIV1(cores, hpm, copy)                               \
+               (((cores) << 8) | ((hpm) << 4) | ((copy) << 0))
+
+static const struct exynos_cpuclk_cfg_data e4412_armclk_d[] __initconst = {
+       { 1500000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(7, 0, 6), },
+       { 1400000, E4210_CPU_DIV0(2, 1, 6, 0, 7, 3), E4412_CPU_DIV1(6, 0, 6), },
+       { 1300000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(6, 0, 5), },
+       { 1200000, E4210_CPU_DIV0(2, 1, 5, 0, 7, 3), E4412_CPU_DIV1(5, 0, 5), },
+       { 1100000, E4210_CPU_DIV0(2, 1, 4, 0, 6, 3), E4412_CPU_DIV1(5, 0, 4), },
+       { 1000000, E4210_CPU_DIV0(1, 1, 4, 0, 5, 2), E4412_CPU_DIV1(4, 0, 4), },
+       {  900000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(4, 0, 3), },
+       {  800000, E4210_CPU_DIV0(1, 1, 3, 0, 5, 2), E4412_CPU_DIV1(3, 0, 3), },
+       {  700000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(3, 0, 3), },
+       {  600000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+       {  500000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(2, 0, 3), },
+       {  400000, E4210_CPU_DIV0(1, 1, 3, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+       {  300000, E4210_CPU_DIV0(1, 1, 2, 0, 4, 2), E4412_CPU_DIV1(1, 0, 3), },
+       {  200000, E4210_CPU_DIV0(1, 1, 1, 0, 3, 1), E4412_CPU_DIV1(0, 0, 3), },
+       {  0 },
+};
+
 /* register exynos4 clocks */
 static void __init exynos4_clk_init(struct device_node *np,
                                    enum exynos4_soc soc)
@@ -1491,6 +1530,17 @@ static void __init exynos4_clk_init(struct device_node *np,
                samsung_clk_register_fixed_factor(ctx,
                        exynos4x12_fixed_factor_clks,
                        ARRAY_SIZE(exynos4x12_fixed_factor_clks));
+               if (of_machine_is_compatible("samsung,exynos4412")) {
+                       exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+                               mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+                               e4412_armclk_d, ARRAY_SIZE(e4412_armclk_d),
+                               CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+               } else {
+                       exynos_register_cpu_clock(ctx, CLK_ARM_CLK, "armclk",
+                               mout_core_p4x12[0], mout_core_p4x12[1], 0x14200,
+                               e4212_armclk_d, ARRAY_SIZE(e4212_armclk_d),
+                               CLK_CPU_NEEDS_DEBUG_ALT_DIV | CLK_CPU_HAS_DIV1);
+               }
        }
 
        samsung_clk_register_alias(ctx, exynos4_aliases,
index 77aa34eae92cbacf67e4027c6068a5551a0b83eb..5f498d9f1825038da8da98758a72701f58a7d08d 100644 (file)
@@ -24,55 +24,6 @@ config ARM_VEXPRESS_SPC_CPUFREQ
           This add the CPUfreq driver support for Versatile Express
          big.LITTLE platforms using SPC for power management.
 
-
-config ARM_EXYNOS_CPUFREQ
-       tristate "SAMSUNG EXYNOS CPUfreq Driver"
-       depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
-       depends on THERMAL
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS platforms.
-         Supported SoC versions are:
-            Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
-
-         If in doubt, say N.
-
-config ARM_EXYNOS4X12_CPUFREQ
-       bool "SAMSUNG EXYNOS4x12"
-       depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
-       depends on ARM_EXYNOS_CPUFREQ
-       default y
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS4X12
-         SoC (EXYNOS4212 or EXYNOS4412).
-
-         If in doubt, say N.
-
-config ARM_EXYNOS5250_CPUFREQ
-       bool "SAMSUNG EXYNOS5250"
-       depends on SOC_EXYNOS5250
-       depends on ARM_EXYNOS_CPUFREQ
-       default y
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS5250
-         SoC.
-
-         If in doubt, say N.
-
-config ARM_EXYNOS_CPU_FREQ_BOOST_SW
-       bool "EXYNOS Frequency Overclocking - Software"
-       depends on ARM_EXYNOS_CPUFREQ && THERMAL
-       select CPU_FREQ_BOOST_SW
-       select EXYNOS_THERMAL
-       help
-         This driver supports software managed overclocking (BOOST).
-         It allows usage of special frequencies for Samsung Exynos
-         processors if thermal conditions are appropriate.
-
-         It requires, for safe operation, thermal framework with properly
-         defined trip points.
-
-         If in doubt, say N.
-
 config ARM_EXYNOS5440_CPUFREQ
        tristate "SAMSUNG EXYNOS5440"
        depends on SOC_EXYNOS5440
index 60a57ca5b22ddd47a74af1cbe99f13626197d9a7..41340384f11f291c4fe93e96e73559926f4f59b1 100644 (file)
@@ -52,10 +52,6 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)              += arm_big_little_dt.o
 
 obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += arm-exynos-cpufreq.o
-arm-exynos-cpufreq-y                                   := exynos-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)    += exynos4x12-cpufreq.o
-arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)    += exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
 obj-$(CONFIG_ARM_HISI_ACPU_CPUFREQ)    += hisi-acpu-cpufreq.o
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
deleted file mode 100644 (file)
index fa3dd84..0000000
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS - CPU frequency scaling support for EXYNOS series
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
-#include <linux/cpufreq.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/cpu_cooling.h>
-#include <linux/cpu.h>
-
-#include "exynos-cpufreq.h"
-
-static struct exynos_dvfs_info *exynos_info;
-static struct thermal_cooling_device *cdev;
-static struct regulator *arm_regulator;
-static unsigned int locking_frequency;
-
-static int exynos_cpufreq_get_index(unsigned int freq)
-{
-       struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
-       struct cpufreq_frequency_table *pos;
-
-       cpufreq_for_each_entry(pos, freq_table)
-               if (pos->frequency == freq)
-                       break;
-
-       if (pos->frequency == CPUFREQ_TABLE_END)
-               return -EINVAL;
-
-       return pos - freq_table;
-}
-
-static int exynos_cpufreq_scale(unsigned int target_freq)
-{
-       struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
-       unsigned int *volt_table = exynos_info->volt_table;
-       struct cpufreq_policy *policy = cpufreq_cpu_get(0);
-       unsigned int arm_volt, safe_arm_volt = 0;
-       unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
-       struct device *dev = exynos_info->dev;
-       unsigned int old_freq;
-       int index, old_index;
-       int ret = 0;
-
-       old_freq = policy->cur;
-
-       /*
-        * The policy max have been changed so that we cannot get proper
-        * old_index with cpufreq_frequency_table_target(). Thus, ignore
-        * policy and get the index from the raw frequency table.
-        */
-       old_index = exynos_cpufreq_get_index(old_freq);
-       if (old_index < 0) {
-               ret = old_index;
-               goto out;
-       }
-
-       index = exynos_cpufreq_get_index(target_freq);
-       if (index < 0) {
-               ret = index;
-               goto out;
-       }
-
-       /*
-        * ARM clock source will be changed APLL to MPLL temporary
-        * To support this level, need to control regulator for
-        * required voltage level
-        */
-       if (exynos_info->need_apll_change != NULL) {
-               if (exynos_info->need_apll_change(old_index, index) &&
-                  (freq_table[index].frequency < mpll_freq_khz) &&
-                  (freq_table[old_index].frequency < mpll_freq_khz))
-                       safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
-       }
-       arm_volt = volt_table[index];
-
-       /* When the new frequency is higher than current frequency */
-       if ((target_freq > old_freq) && !safe_arm_volt) {
-               /* Firstly, voltage up to increase frequency */
-               ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
-               if (ret) {
-                       dev_err(dev, "failed to set cpu voltage to %d\n",
-                               arm_volt);
-                       return ret;
-               }
-       }
-
-       if (safe_arm_volt) {
-               ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
-                                     safe_arm_volt);
-               if (ret) {
-                       dev_err(dev, "failed to set cpu voltage to %d\n",
-                               safe_arm_volt);
-                       return ret;
-               }
-       }
-
-       exynos_info->set_freq(old_index, index);
-
-       /* When the new frequency is lower than current frequency */
-       if ((target_freq < old_freq) ||
-          ((target_freq > old_freq) && safe_arm_volt)) {
-               /* down the voltage after frequency change */
-               ret = regulator_set_voltage(arm_regulator, arm_volt,
-                               arm_volt);
-               if (ret) {
-                       dev_err(dev, "failed to set cpu voltage to %d\n",
-                               arm_volt);
-                       goto out;
-               }
-       }
-
-out:
-       cpufreq_cpu_put(policy);
-
-       return ret;
-}
-
-static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
-{
-       return exynos_cpufreq_scale(exynos_info->freq_table[index].frequency);
-}
-
-static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
-       policy->clk = exynos_info->cpu_clk;
-       policy->suspend_freq = locking_frequency;
-       return cpufreq_generic_init(policy, exynos_info->freq_table, 100000);
-}
-
-static struct cpufreq_driver exynos_driver = {
-       .flags          = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
-       .verify         = cpufreq_generic_frequency_table_verify,
-       .target_index   = exynos_target,
-       .get            = cpufreq_generic_get,
-       .init           = exynos_cpufreq_cpu_init,
-       .name           = "exynos_cpufreq",
-       .attr           = cpufreq_generic_attr,
-#ifdef CONFIG_ARM_EXYNOS_CPU_FREQ_BOOST_SW
-       .boost_supported = true,
-#endif
-#ifdef CONFIG_PM
-       .suspend        = cpufreq_generic_suspend,
-#endif
-};
-
-static int exynos_cpufreq_probe(struct platform_device *pdev)
-{
-       struct device_node *cpu0;
-       int ret = -EINVAL;
-
-       exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
-       if (!exynos_info)
-               return -ENOMEM;
-
-       exynos_info->dev = &pdev->dev;
-
-       if (of_machine_is_compatible("samsung,exynos4212")) {
-               exynos_info->type = EXYNOS_SOC_4212;
-               ret = exynos4x12_cpufreq_init(exynos_info);
-       } else if (of_machine_is_compatible("samsung,exynos4412")) {
-               exynos_info->type = EXYNOS_SOC_4412;
-               ret = exynos4x12_cpufreq_init(exynos_info);
-       } else if (of_machine_is_compatible("samsung,exynos5250")) {
-               exynos_info->type = EXYNOS_SOC_5250;
-               ret = exynos5250_cpufreq_init(exynos_info);
-       } else {
-               pr_err("%s: Unknown SoC type\n", __func__);
-               ret = -ENODEV;
-       }
-
-       if (ret)
-               goto err_vdd_arm;
-
-       if (exynos_info->set_freq == NULL) {
-               dev_err(&pdev->dev, "No set_freq function (ERR)\n");
-               ret = -EINVAL;
-               goto err_vdd_arm;
-       }
-
-       arm_regulator = regulator_get(NULL, "vdd_arm");
-       if (IS_ERR(arm_regulator)) {
-               dev_err(&pdev->dev, "failed to get resource vdd_arm\n");
-               ret = -EINVAL;
-               goto err_vdd_arm;
-       }
-
-       /* Done here as we want to capture boot frequency */
-       locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
-
-       ret = cpufreq_register_driver(&exynos_driver);
-       if (ret)
-               goto err_cpufreq_reg;
-
-       cpu0 = of_get_cpu_node(0, NULL);
-       if (!cpu0) {
-               pr_err("failed to find cpu0 node\n");
-               return 0;
-       }
-
-       if (of_find_property(cpu0, "#cooling-cells", NULL)) {
-               cdev = of_cpufreq_cooling_register(cpu0,
-                                                  cpu_present_mask);
-               if (IS_ERR(cdev))
-                       pr_err("running cpufreq without cooling device: %ld\n",
-                              PTR_ERR(cdev));
-       }
-
-       return 0;
-
-err_cpufreq_reg:
-       dev_err(&pdev->dev, "failed to register cpufreq driver\n");
-       regulator_put(arm_regulator);
-err_vdd_arm:
-       kfree(exynos_info);
-       return ret;
-}
-
-static struct platform_driver exynos_cpufreq_platdrv = {
-       .driver = {
-               .name   = "exynos-cpufreq",
-       },
-       .probe = exynos_cpufreq_probe,
-};
-module_platform_driver(exynos_cpufreq_platdrv);
diff --git a/drivers/cpufreq/exynos-cpufreq.h b/drivers/cpufreq/exynos-cpufreq.h
deleted file mode 100644 (file)
index a3855e4..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS - CPUFreq support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-enum cpufreq_level_index {
-       L0, L1, L2, L3, L4,
-       L5, L6, L7, L8, L9,
-       L10, L11, L12, L13, L14,
-       L15, L16, L17, L18, L19,
-       L20,
-};
-
-enum exynos_soc_type {
-       EXYNOS_SOC_4212,
-       EXYNOS_SOC_4412,
-       EXYNOS_SOC_5250,
-};
-
-#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \
-       { \
-               .freq = (f) * 1000, \
-               .clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \
-                       (a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \
-               .clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \
-               .mps = ((m) << 16 | (p) << 8 | (s)), \
-       }
-
-struct apll_freq {
-       unsigned int freq;
-       u32 clk_div_cpu0;
-       u32 clk_div_cpu1;
-       u32 mps;
-};
-
-struct exynos_dvfs_info {
-       enum exynos_soc_type type;
-       struct device   *dev;
-       unsigned long   mpll_freq_khz;
-       unsigned int    pll_safe_idx;
-       struct clk      *cpu_clk;
-       unsigned int    *volt_table;
-       struct cpufreq_frequency_table  *freq_table;
-       void (*set_freq)(unsigned int, unsigned int);
-       bool (*need_apll_change)(unsigned int, unsigned int);
-       void __iomem    *cmu_regs;
-};
-
-#ifdef CONFIG_ARM_EXYNOS4X12_CPUFREQ
-extern int exynos4x12_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       return -EOPNOTSUPP;
-}
-#endif
-#ifdef CONFIG_ARM_EXYNOS5250_CPUFREQ
-extern int exynos5250_cpufreq_init(struct exynos_dvfs_info *);
-#else
-static inline int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       return -EOPNOTSUPP;
-}
-#endif
-
-#define EXYNOS4_CLKSRC_CPU                     0x14200
-#define EXYNOS4_CLKMUX_STATCPU                 0x14400
-
-#define EXYNOS4_CLKDIV_CPU                     0x14500
-#define EXYNOS4_CLKDIV_CPU1                    0x14504
-#define EXYNOS4_CLKDIV_STATCPU                 0x14600
-#define EXYNOS4_CLKDIV_STATCPU1                        0x14604
-
-#define EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT       (16)
-#define EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK    (0x7 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT)
-
-#define EXYNOS5_APLL_LOCK                      0x00000
-#define EXYNOS5_APLL_CON0                      0x00100
-#define EXYNOS5_CLKMUX_STATCPU                 0x00400
-#define EXYNOS5_CLKDIV_CPU0                    0x00500
-#define EXYNOS5_CLKDIV_CPU1                    0x00504
-#define EXYNOS5_CLKDIV_STATCPU0                        0x00600
-#define EXYNOS5_CLKDIV_STATCPU1                        0x00604
diff --git a/drivers/cpufreq/exynos4x12-cpufreq.c b/drivers/cpufreq/exynos4x12-cpufreq.c
deleted file mode 100644 (file)
index 9e78a85..0000000
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS4X12 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos4x12_volt_table[] = {
-       1350000, 1287500, 1250000, 1187500, 1137500, 1087500, 1037500,
-       1000000,  987500,  975000,  950000,  925000,  900000,  900000
-};
-
-static struct cpufreq_frequency_table exynos4x12_freq_table[] = {
-       {CPUFREQ_BOOST_FREQ, L0, 1500 * 1000},
-       {0, L1, 1400 * 1000},
-       {0, L2, 1300 * 1000},
-       {0, L3, 1200 * 1000},
-       {0, L4, 1100 * 1000},
-       {0, L5, 1000 * 1000},
-       {0, L6,  900 * 1000},
-       {0, L7,  800 * 1000},
-       {0, L8,  700 * 1000},
-       {0, L9,  600 * 1000},
-       {0, L10, 500 * 1000},
-       {0, L11, 400 * 1000},
-       {0, L12, 300 * 1000},
-       {0, L13, 200 * 1000},
-       {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq *apll_freq_4x12;
-
-static struct apll_freq apll_freq_4212[] = {
-       /*
-        * values:
-        * freq
-        * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
-        * clock divider for COPY, HPM, RESERVED
-        * PLL M, P, S
-        */
-       APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 250, 4, 0),
-       APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 2, 0, 175, 3, 0),
-       APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 325, 6, 0),
-       APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 2, 0, 200, 4, 0),
-       APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 2, 0, 275, 6, 0),
-       APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 2, 0, 125, 3, 0),
-       APLL_FREQ(900,  0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 150, 4, 0),
-       APLL_FREQ(800,  0, 2, 5, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 0),
-       APLL_FREQ(700,  0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 175, 3, 1),
-       APLL_FREQ(600,  0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 200, 4, 1),
-       APLL_FREQ(500,  0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 125, 3, 1),
-       APLL_FREQ(400,  0, 2, 4, 0, 3, 1, 1, 0, 3, 2, 0, 100, 3, 1),
-       APLL_FREQ(300,  0, 2, 4, 0, 2, 1, 1, 0, 3, 2, 0, 200, 4, 2),
-       APLL_FREQ(200,  0, 1, 3, 0, 1, 1, 1, 0, 3, 2, 0, 100, 3, 2),
-};
-
-static struct apll_freq apll_freq_4412[] = {
-       /*
-        * values:
-        * freq
-        * clock divider for CORE, COREM0, COREM1, PERIPH, ATB, PCLK_DBG, APLL, CORE2
-        * clock divider for COPY, HPM, CORES
-        * PLL M, P, S
-        */
-       APLL_FREQ(1500, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 7, 250, 4, 0),
-       APLL_FREQ(1400, 0, 3, 7, 0, 6, 1, 2, 0, 6, 0, 6, 175, 3, 0),
-       APLL_FREQ(1300, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 6, 325, 6, 0),
-       APLL_FREQ(1200, 0, 3, 7, 0, 5, 1, 2, 0, 5, 0, 5, 200, 4, 0),
-       APLL_FREQ(1100, 0, 3, 6, 0, 4, 1, 2, 0, 4, 0, 5, 275, 6, 0),
-       APLL_FREQ(1000, 0, 2, 5, 0, 4, 1, 1, 0, 4, 0, 4, 125, 3, 0),
-       APLL_FREQ(900,  0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 4, 150, 4, 0),
-       APLL_FREQ(800,  0, 2, 5, 0, 3, 1, 1, 0, 3, 0, 3, 100, 3, 0),
-       APLL_FREQ(700,  0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 3, 175, 3, 1),
-       APLL_FREQ(600,  0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 200, 4, 1),
-       APLL_FREQ(500,  0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 2, 125, 3, 1),
-       APLL_FREQ(400,  0, 2, 4, 0, 3, 1, 1, 0, 3, 0, 1, 100, 3, 1),
-       APLL_FREQ(300,  0, 2, 4, 0, 2, 1, 1, 0, 3, 0, 1, 200, 4, 2),
-       APLL_FREQ(200,  0, 1, 3, 0, 1, 1, 1, 0, 3, 0, 0, 100, 3, 2),
-};
-
-static void exynos4x12_set_clkdiv(unsigned int div_index)
-{
-       unsigned int tmp;
-
-       /* Change Divider - CPU0 */
-
-       tmp = apll_freq_4x12[div_index].clk_div_cpu0;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU);
-
-       while (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU)
-              & 0x11111111)
-               cpu_relax();
-
-       /* Change Divider - CPU1 */
-       tmp = apll_freq_4x12[div_index].clk_div_cpu1;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS4_CLKDIV_CPU1);
-
-       do {
-               cpu_relax();
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKDIV_STATCPU1);
-       } while (tmp != 0x0);
-}
-
-static void exynos4x12_set_apll(unsigned int index)
-{
-       unsigned int tmp, freq = apll_freq_4x12[index].freq;
-
-       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
-       clk_set_parent(moutcore, mout_mpll);
-
-       do {
-               cpu_relax();
-               tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU)
-                       >> EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT);
-               tmp &= 0x7;
-       } while (tmp != 0x2);
-
-       clk_set_rate(mout_apll, freq * 1000);
-
-       /* MUX_CORE_SEL = APLL */
-       clk_set_parent(moutcore, mout_apll);
-
-       do {
-               cpu_relax();
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS4_CLKMUX_STATCPU);
-               tmp &= EXYNOS4_CLKMUX_STATCPU_MUXCORE_MASK;
-       } while (tmp != (0x1 << EXYNOS4_CLKSRC_CPU_MUXCORE_SHIFT));
-}
-
-static void exynos4x12_set_frequency(unsigned int old_index,
-                                 unsigned int new_index)
-{
-       if (old_index > new_index) {
-               exynos4x12_set_clkdiv(new_index);
-               exynos4x12_set_apll(new_index);
-       } else if (old_index < new_index) {
-               exynos4x12_set_apll(new_index);
-               exynos4x12_set_clkdiv(new_index);
-       }
-}
-
-int exynos4x12_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       struct device_node *np;
-       unsigned long rate;
-
-       /*
-        * HACK: This is a temporary workaround to get access to clock
-        * controller registers directly and remove static mappings and
-        * dependencies on platform headers. It is necessary to enable
-        * Exynos multi-platform support and will be removed together with
-        * this whole driver as soon as Exynos gets migrated to use
-        * cpufreq-dt driver.
-        */
-       np = of_find_compatible_node(NULL, NULL, "samsung,exynos4412-clock");
-       if (!np) {
-               pr_err("%s: failed to find clock controller DT node\n",
-                       __func__);
-               return -ENODEV;
-       }
-
-       info->cmu_regs = of_iomap(np, 0);
-       if (!info->cmu_regs) {
-               pr_err("%s: failed to map CMU registers\n", __func__);
-               return -EFAULT;
-       }
-
-       cpu_clk = clk_get(NULL, "armclk");
-       if (IS_ERR(cpu_clk))
-               return PTR_ERR(cpu_clk);
-
-       moutcore = clk_get(NULL, "moutcore");
-       if (IS_ERR(moutcore))
-               goto err_moutcore;
-
-       mout_mpll = clk_get(NULL, "mout_mpll");
-       if (IS_ERR(mout_mpll))
-               goto err_mout_mpll;
-
-       rate = clk_get_rate(mout_mpll) / 1000;
-
-       mout_apll = clk_get(NULL, "mout_apll");
-       if (IS_ERR(mout_apll))
-               goto err_mout_apll;
-
-       if (info->type == EXYNOS_SOC_4212)
-               apll_freq_4x12 = apll_freq_4212;
-       else
-               apll_freq_4x12 = apll_freq_4412;
-
-       info->mpll_freq_khz = rate;
-       /* 800Mhz */
-       info->pll_safe_idx = L7;
-       info->cpu_clk = cpu_clk;
-       info->volt_table = exynos4x12_volt_table;
-       info->freq_table = exynos4x12_freq_table;
-       info->set_freq = exynos4x12_set_frequency;
-
-       cpufreq = info;
-
-       return 0;
-
-err_mout_apll:
-       clk_put(mout_mpll);
-err_mout_mpll:
-       clk_put(moutcore);
-err_moutcore:
-       clk_put(cpu_clk);
-
-       pr_debug("%s: failed initialization\n", __func__);
-       return -EINVAL;
-}
diff --git a/drivers/cpufreq/exynos5250-cpufreq.c b/drivers/cpufreq/exynos5250-cpufreq.c
deleted file mode 100644 (file)
index 3eafdc7..0000000
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * Copyright (c) 2010-20122Samsung Electronics Co., Ltd.
- *             http://www.samsung.com
- *
- * EXYNOS5250 - CPU frequency scaling support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/err.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/slab.h>
-#include <linux/cpufreq.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
-
-#include "exynos-cpufreq.h"
-
-static struct clk *cpu_clk;
-static struct clk *moutcore;
-static struct clk *mout_mpll;
-static struct clk *mout_apll;
-static struct exynos_dvfs_info *cpufreq;
-
-static unsigned int exynos5250_volt_table[] = {
-       1300000, 1250000, 1225000, 1200000, 1150000,
-       1125000, 1100000, 1075000, 1050000, 1025000,
-       1012500, 1000000,  975000,  950000,  937500,
-       925000
-};
-
-static struct cpufreq_frequency_table exynos5250_freq_table[] = {
-       {0, L0, 1700 * 1000},
-       {0, L1, 1600 * 1000},
-       {0, L2, 1500 * 1000},
-       {0, L3, 1400 * 1000},
-       {0, L4, 1300 * 1000},
-       {0, L5, 1200 * 1000},
-       {0, L6, 1100 * 1000},
-       {0, L7, 1000 * 1000},
-       {0, L8,  900 * 1000},
-       {0, L9,  800 * 1000},
-       {0, L10, 700 * 1000},
-       {0, L11, 600 * 1000},
-       {0, L12, 500 * 1000},
-       {0, L13, 400 * 1000},
-       {0, L14, 300 * 1000},
-       {0, L15, 200 * 1000},
-       {0, 0, CPUFREQ_TABLE_END},
-};
-
-static struct apll_freq apll_freq_5250[] = {
-       /*
-        * values:
-        * freq
-        * clock divider for ARM, CPUD, ACP, PERIPH, ATB, PCLK_DBG, APLL, ARM2
-        * clock divider for COPY, HPM, RESERVED
-        * PLL M, P, S
-        */
-       APLL_FREQ(1700, 0, 3, 7, 7, 7, 3, 5, 0, 0, 2, 0, 425, 6, 0),
-       APLL_FREQ(1600, 0, 3, 7, 7, 7, 1, 4, 0, 0, 2, 0, 200, 3, 0),
-       APLL_FREQ(1500, 0, 2, 7, 7, 7, 1, 4, 0, 0, 2, 0, 250, 4, 0),
-       APLL_FREQ(1400, 0, 2, 7, 7, 6, 1, 4, 0, 0, 2, 0, 175, 3, 0),
-       APLL_FREQ(1300, 0, 2, 7, 7, 6, 1, 3, 0, 0, 2, 0, 325, 6, 0),
-       APLL_FREQ(1200, 0, 2, 7, 7, 5, 1, 3, 0, 0, 2, 0, 200, 4, 0),
-       APLL_FREQ(1100, 0, 3, 7, 7, 5, 1, 3, 0, 0, 2, 0, 275, 6, 0),
-       APLL_FREQ(1000, 0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 125, 3, 0),
-       APLL_FREQ(900,  0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 150, 4, 0),
-       APLL_FREQ(800,  0, 1, 7, 7, 4, 1, 2, 0, 0, 2, 0, 100, 3, 0),
-       APLL_FREQ(700,  0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 175, 3, 1),
-       APLL_FREQ(600,  0, 1, 7, 7, 3, 1, 1, 0, 0, 2, 0, 200, 4, 1),
-       APLL_FREQ(500,  0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 125, 3, 1),
-       APLL_FREQ(400,  0, 1, 7, 7, 2, 1, 1, 0, 0, 2, 0, 100, 3, 1),
-       APLL_FREQ(300,  0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 200, 4, 2),
-       APLL_FREQ(200,  0, 1, 7, 7, 1, 1, 1, 0, 0, 2, 0, 100, 3, 2),
-};
-
-static void set_clkdiv(unsigned int div_index)
-{
-       unsigned int tmp;
-
-       /* Change Divider - CPU0 */
-
-       tmp = apll_freq_5250[div_index].clk_div_cpu0;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU0);
-
-       while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU0)
-              & 0x11111111)
-               cpu_relax();
-
-       /* Change Divider - CPU1 */
-       tmp = apll_freq_5250[div_index].clk_div_cpu1;
-
-       __raw_writel(tmp, cpufreq->cmu_regs + EXYNOS5_CLKDIV_CPU1);
-
-       while (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKDIV_STATCPU1) & 0x11)
-               cpu_relax();
-}
-
-static void set_apll(unsigned int index)
-{
-       unsigned int tmp;
-       unsigned int freq = apll_freq_5250[index].freq;
-
-       /* MUX_CORE_SEL = MPLL, ARMCLK uses MPLL for lock time */
-       clk_set_parent(moutcore, mout_mpll);
-
-       do {
-               cpu_relax();
-               tmp = (__raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU)
-                       >> 16);
-               tmp &= 0x7;
-       } while (tmp != 0x2);
-
-       clk_set_rate(mout_apll, freq * 1000);
-
-       /* MUX_CORE_SEL = APLL */
-       clk_set_parent(moutcore, mout_apll);
-
-       do {
-               cpu_relax();
-               tmp = __raw_readl(cpufreq->cmu_regs + EXYNOS5_CLKMUX_STATCPU);
-               tmp &= (0x7 << 16);
-       } while (tmp != (0x1 << 16));
-}
-
-static void exynos5250_set_frequency(unsigned int old_index,
-                                 unsigned int new_index)
-{
-       if (old_index > new_index) {
-               set_clkdiv(new_index);
-               set_apll(new_index);
-       } else if (old_index < new_index) {
-               set_apll(new_index);
-               set_clkdiv(new_index);
-       }
-}
-
-int exynos5250_cpufreq_init(struct exynos_dvfs_info *info)
-{
-       struct device_node *np;
-       unsigned long rate;
-
-       /*
-        * HACK: This is a temporary workaround to get access to clock
-        * controller registers directly and remove static mappings and
-        * dependencies on platform headers. It is necessary to enable
-        * Exynos multi-platform support and will be removed together with
-        * this whole driver as soon as Exynos gets migrated to use
-        * cpufreq-dt driver.
-        */
-       np = of_find_compatible_node(NULL, NULL, "samsung,exynos5250-clock");
-       if (!np) {
-               pr_err("%s: failed to find clock controller DT node\n",
-                       __func__);
-               return -ENODEV;
-       }
-
-       info->cmu_regs = of_iomap(np, 0);
-       if (!info->cmu_regs) {
-               pr_err("%s: failed to map CMU registers\n", __func__);
-               return -EFAULT;
-       }
-
-       cpu_clk = clk_get(NULL, "armclk");
-       if (IS_ERR(cpu_clk))
-               return PTR_ERR(cpu_clk);
-
-       moutcore = clk_get(NULL, "mout_cpu");
-       if (IS_ERR(moutcore))
-               goto err_moutcore;
-
-       mout_mpll = clk_get(NULL, "mout_mpll");
-       if (IS_ERR(mout_mpll))
-               goto err_mout_mpll;
-
-       rate = clk_get_rate(mout_mpll) / 1000;
-
-       mout_apll = clk_get(NULL, "mout_apll");
-       if (IS_ERR(mout_apll))
-               goto err_mout_apll;
-
-       info->mpll_freq_khz = rate;
-       /* 800Mhz */
-       info->pll_safe_idx = L9;
-       info->cpu_clk = cpu_clk;
-       info->volt_table = exynos5250_volt_table;
-       info->freq_table = exynos5250_freq_table;
-       info->set_freq = exynos5250_set_frequency;
-
-       cpufreq = info;
-
-       return 0;
-
-err_mout_apll:
-       clk_put(mout_mpll);
-err_mout_mpll:
-       clk_put(moutcore);
-err_moutcore:
-       clk_put(cpu_clk);
-
-       pr_err("%s: failed initialization\n", __func__);
-       return -EINVAL;
-}
index 95599e478e19cec0ab26b8a309d3714b6be1d6f8..23d0549539d43904299c51810730f5d34955d952 100644 (file)
@@ -232,7 +232,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
        struct xenbus_transaction xbt;
 
        ret = gnttab_grant_foreign_access(dev->otherend_id,
-                                         virt_to_mfn(info->page), 0);
+                                         virt_to_gfn(info->page), 0);
        if (ret < 0)
                return ret;
        info->gref = ret;
@@ -255,7 +255,7 @@ static int xenkbd_connect_backend(struct xenbus_device *dev,
                goto error_irqh;
        }
        ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-                           virt_to_mfn(info->page));
+                           virt_to_gfn(info->page));
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "page-gref", "%u", info->gref);
index 1f897ec3c242a977a52128e37f56ef9431fbd548..075a027632b5c88ac73d2d1fd7eed49569f713c3 100644 (file)
@@ -26,7 +26,8 @@ obj-$(CONFIG_MTD_NAND_CS553X)         += cs553x_nand.o
 obj-$(CONFIG_MTD_NAND_NDFC)            += ndfc.o
 obj-$(CONFIG_MTD_NAND_ATMEL)           += atmel_nand.o
 obj-$(CONFIG_MTD_NAND_GPIO)            += gpio.o
-obj-$(CONFIG_MTD_NAND_OMAP2)           += omap2.o
+omap2_nand-objs := omap2.o
+obj-$(CONFIG_MTD_NAND_OMAP2)           += omap2_nand.o
 obj-$(CONFIG_MTD_NAND_OMAP_BCH_BUILD)  += omap_elm.o
 obj-$(CONFIG_MTD_NAND_CM_X270)         += cmx270_nand.o
 obj-$(CONFIG_MTD_NAND_PXA3xx)          += pxa3xx_nand.o
index c27d427fead43456d7afeef20ecc8966ebf6d60a..f59aedfe1462a431018de993543c4abc69131eed 100644 (file)
@@ -586,6 +586,7 @@ static const struct flash_info spi_nor_ids[] = {
        /* Micron */
        { "n25q032",     INFO(0x20ba16, 0, 64 * 1024,   64, SPI_NOR_QUAD_READ) },
        { "n25q064",     INFO(0x20ba17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
+       { "n25q064a",    INFO(0x20bb17, 0, 64 * 1024,  128, SECT_4K | SPI_NOR_QUAD_READ) },
        { "n25q128a11",  INFO(0x20bb18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
        { "n25q128a13",  INFO(0x20ba18, 0, 64 * 1024,  256, SPI_NOR_QUAD_READ) },
        { "n25q256a",    INFO(0x20ba19, 0, 64 * 1024,  512, SECT_4K | SPI_NOR_QUAD_READ) },
@@ -602,7 +603,7 @@ static const struct flash_info spi_nor_ids[] = {
         * for the chips listed here (without boot sectors).
         */
        { "s25sl032p",  INFO(0x010215, 0x4d00,  64 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
-       { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, 0) },
+       { "s25sl064p",  INFO(0x010216, 0x4d00,  64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) },
        { "s25fl256s1", INFO(0x010219, 0x4d01,  64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25fl512s",  INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
@@ -610,8 +611,8 @@ static const struct flash_info spi_nor_ids[] = {
        { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024,  64, 0) },
        { "s25sl12801", INFO(0x012018, 0x0301,  64 * 1024, 256, 0) },
        { "s25fl128s",  INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) },
-       { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, 0) },
-       { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, 0) },
+       { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024,  64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
+       { "s25fl129p1", INFO(0x012018, 0x4d01,  64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) },
        { "s25sl004a",  INFO(0x010212,      0,  64 * 1024,   8, 0) },
        { "s25sl008a",  INFO(0x010213,      0,  64 * 1024,  16, 0) },
        { "s25sl016a",  INFO(0x010214,      0,  64 * 1024,  32, 0) },
@@ -633,6 +634,7 @@ static const struct flash_info spi_nor_ids[] = {
        { "sst25wf512",  INFO(0xbf2501, 0, 64 * 1024,  1, SECT_4K | SST_WRITE) },
        { "sst25wf010",  INFO(0xbf2502, 0, 64 * 1024,  2, SECT_4K | SST_WRITE) },
        { "sst25wf020",  INFO(0xbf2503, 0, 64 * 1024,  4, SECT_4K | SST_WRITE) },
+       { "sst25wf020a", INFO(0x621612, 0, 64 * 1024,  4, SECT_4K) },
        { "sst25wf040",  INFO(0xbf2504, 0, 64 * 1024,  8, SECT_4K | SST_WRITE) },
        { "sst25wf080",  INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) },
 
@@ -1216,7 +1218,7 @@ static const struct flash_info *spi_nor_match_id(const char *name)
 {
        const struct flash_info *id = spi_nor_ids;
 
-       while (id->name[0]) {
+       while (id->name) {
                if (!strcmp(name, id->name))
                        return id;
                id++;
index 289e20443d83a3507f2700afc91f395828f45149..9d56515f4c4da8ef307ff68ccb438a5e7742ceea 100644 (file)
@@ -418,7 +418,7 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch  *ds, int port)
        core_writel(priv, port, CORE_FAST_AGE_PORT);
 
        reg = core_readl(priv, CORE_FAST_AGE_CTRL);
-       reg |= EN_AGE_PORT | FAST_AGE_STR_DONE;
+       reg |= EN_AGE_PORT | EN_AGE_DYNAMIC | FAST_AGE_STR_DONE;
        core_writel(priv, reg, CORE_FAST_AGE_CTRL);
 
        do {
@@ -432,6 +432,8 @@ static int bcm_sf2_sw_fast_age_port(struct dsa_switch  *ds, int port)
        if (!timeout)
                return -ETIMEDOUT;
 
+       core_writel(priv, 0, CORE_FAST_AGE_CTRL);
+
        return 0;
 }
 
@@ -507,7 +509,7 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
        u32 reg;
 
        reg = core_readl(priv, CORE_G_PCTL_PORT(port));
-       cur_hw_state = reg >> G_MISTP_STATE_SHIFT;
+       cur_hw_state = reg & (G_MISTP_STATE_MASK << G_MISTP_STATE_SHIFT);
 
        switch (state) {
        case BR_STATE_DISABLED:
@@ -531,10 +533,12 @@ static int bcm_sf2_sw_br_set_stp_state(struct dsa_switch *ds, int port,
        }
 
        /* Fast-age ARL entries if we are moving a port from Learning or
-        * Forwarding state to Disabled, Blocking or Listening state
+        * Forwarding (cur_hw_state) state to Disabled, Blocking or Listening
+        * state (hw_state)
         */
        if (cur_hw_state != hw_state) {
-               if (cur_hw_state & 4 && !(hw_state & 4)) {
+               if (cur_hw_state >= G_MISTP_LEARN_STATE &&
+                   hw_state <= G_MISTP_LISTEN_STATE) {
                        ret = bcm_sf2_sw_fast_age_port(ds, port);
                        if (ret) {
                                pr_err("%s: fast-ageing failed\n", __func__);
index 22e2ebf313332f4dd004162b31faf288d7f7ab25..789d7b7737da4ada78f06850eb835cd437c1040a 100644 (file)
@@ -112,8 +112,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off)  \
        spin_unlock(&priv->indir_lock);                                 \
        return (u64)indir << 32 | dir;                                  \
 }                                                                      \
-static inline void name##_writeq(struct bcm_sf2_priv *priv, u32 off,   \
-                                                       u64 val)        \
+static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val,   \
+                                                       u32 off)        \
 {                                                                      \
        spin_lock(&priv->indir_lock);                                   \
        reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE);       \
index d54b7400e8d820b334a45afab9003e2150732242..c2daaf087761c38c2dcbb6423c742950893a7024 100644 (file)
@@ -117,6 +117,11 @@ struct dsa_switch_driver mv88e6171_switch_driver = {
        .port_join_bridge       = mv88e6xxx_join_bridge,
        .port_leave_bridge      = mv88e6xxx_leave_bridge,
        .port_stp_update        = mv88e6xxx_port_stp_update,
+       .port_pvid_get          = mv88e6xxx_port_pvid_get,
+       .port_pvid_set          = mv88e6xxx_port_pvid_set,
+       .port_vlan_add          = mv88e6xxx_port_vlan_add,
+       .port_vlan_del          = mv88e6xxx_port_vlan_del,
+       .vlan_getnext           = mv88e6xxx_vlan_getnext,
        .port_fdb_add           = mv88e6xxx_port_fdb_add,
        .port_fdb_del           = mv88e6xxx_port_fdb_del,
        .port_fdb_getnext       = mv88e6xxx_port_fdb_getnext,
index da48e66377b5ff42dc497a5ac4685ca1e16a1eb9..fe644823ceaf951139a55fdeeb910058ecad6cd1 100644 (file)
@@ -511,8 +511,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
 
        if (rxcomplete < budget) {
 
-               napi_gro_flush(napi, false);
-               __napi_complete(napi);
+               napi_complete(napi);
 
                netdev_dbg(priv->dev,
                           "NAPI Complete, did %d packets with budget %d\n",
@@ -1518,6 +1517,7 @@ static int altera_tse_probe(struct platform_device *pdev)
        spin_lock_init(&priv->tx_lock);
        spin_lock_init(&priv->rxdma_irq_lock);
 
+       netif_carrier_off(ndev);
        ret = register_netdev(ndev);
        if (ret) {
                dev_err(&pdev->dev, "failed to register TSE net device\n");
index 0660deecc2c9a77f88e4eb9a0dbe02a68d0fec86..f683d97d7614e7414ccf837ead2e2a507b07e4d3 100644 (file)
@@ -818,10 +818,9 @@ static int setup_glist(struct lio *lio)
        INIT_LIST_HEAD(&lio->glist);
 
        for (i = 0; i < lio->tx_qsize; i++) {
-               g = kmalloc(sizeof(*g), GFP_KERNEL);
+               g = kzalloc(sizeof(*g), GFP_KERNEL);
                if (!g)
                        break;
-               memset(g, 0, sizeof(struct octnic_gather));
 
                g->sg_size =
                        ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
index eb22d58743e22a452c3e935694cd45f252a5f26a..f5dcde27e40281d3b7dc8cf05468eec704b98f40 100644 (file)
@@ -4568,28 +4568,23 @@ static void free_some_resources(struct adapter *adapter)
 
 static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
 {
-       int ver, chip;
        u16 device_id;
 
        /* Retrieve adapter's device ID */
        pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
-       ver = device_id >> 12;
-       switch (ver) {
+
+       switch (device_id >> 12) {
        case CHELSIO_T4:
-               chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
-               break;
+               return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
        case CHELSIO_T5:
-               chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
-               break;
+               return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
        case CHELSIO_T6:
-               chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
-               break;
+               return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
        default:
                dev_err(&pdev->dev, "Device %d is not supported\n",
                        device_id);
-               return -EINVAL;
        }
-       return chip;
+       return -EINVAL;
 }
 
 static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -4724,8 +4719,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        err = -ENOMEM;
                        goto out_free_adapter;
                }
-               t4_write_reg(adapter, SGE_STAT_CFG_A,
-                            STATSOURCE_T5_V(7) | STATMODE_V(0));
        }
 
        setup_memwin(adapter);
@@ -4737,6 +4730,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (err)
                goto out_unmap_bar;
 
+       /* configure SGE_STAT_CFG_A to read WC stats */
+       if (!is_t4(adapter->params.chip))
+               t4_write_reg(adapter, SGE_STAT_CFG_A,
+                            STATSOURCE_T5_V(7) | STATMODE_V(0));
+
        for_each_port(adapter, i) {
                struct net_device *netdev;
 
index 78f446c58422ecd0ec0794f1f41ba403e6292290..9162746d7729559fff5c33315da186f9fe0244d5 100644 (file)
@@ -807,7 +807,7 @@ static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
         * message or, if we're doing a Large Send Offload, an LSO CPL message
         * with an embedded TX Packet Write CPL message.
         */
-       flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+       flits = sgl_len(skb_shinfo(skb)->nr_frags + 1);
        if (skb_shinfo(skb)->gso_size)
                flits += (sizeof(struct fw_eth_tx_pkt_wr) +
                          sizeof(struct cpl_tx_pkt_lso_core) +
index ab4674684acc27fb18f7c9b6fb114eb9dfa2b0d0..a32de30ea663b396833305cc6a51339c7f7c05c8 100644 (file)
@@ -762,8 +762,6 @@ enum fw_ldst_func_mod_index {
 
 struct fw_ldst_cmd {
        __be32 op_to_addrspace;
-#define FW_LDST_CMD_ADDRSPACE_S                0
-#define FW_LDST_CMD_ADDRSPACE_V(x)     ((x) << FW_LDST_CMD_ADDRSPACE_S)
        __be32 cycles_to_len16;
        union fw_ldst {
                struct fw_ldst_addrval {
@@ -788,6 +786,13 @@ struct fw_ldst_cmd {
                        __be16 vctl;
                        __be16 rval;
                } mdio;
+               struct fw_ldst_cim_rq {
+                       u8 req_first64[8];
+                       u8 req_second64[8];
+                       u8 resp_first64[8];
+                       u8 resp_second64[8];
+                       __be32 r3[2];
+               } cim_rq;
                union fw_ldst_mps {
                        struct fw_ldst_mps_rplc {
                                __be16 fid_idx;
@@ -828,9 +833,33 @@ struct fw_ldst_cmd {
                        __be16 nset_pkd;
                        __be32 data[12];
                } pcie;
+               struct fw_ldst_i2c_deprecated {
+                       u8 pid_pkd;
+                       u8 base;
+                       u8 boffset;
+                       u8 data;
+                       __be32 r9;
+               } i2c_deprecated;
+               struct fw_ldst_i2c {
+                       u8 pid;
+                       u8 did;
+                       u8 boffset;
+                       u8 blen;
+                       __be32 r9;
+                       __u8   data[48];
+               } i2c;
+               struct fw_ldst_le {
+                       __be32 index;
+                       __be32 r9;
+                       u8 val[33];
+                       u8 r11[7];
+               } le;
        } u;
 };
 
+#define FW_LDST_CMD_ADDRSPACE_S                0
+#define FW_LDST_CMD_ADDRSPACE_V(x)     ((x) << FW_LDST_CMD_ADDRSPACE_S)
+
 #define FW_LDST_CMD_MSG_S       31
 #define FW_LDST_CMD_MSG_V(x)   ((x) << FW_LDST_CMD_MSG_S)
 
index 92bafa793de6ce489f60fba98bd6c069d3a9f957..c4b262ca7d43623fba1cef20dfc6c03ebeadfd7f 100644 (file)
@@ -36,8 +36,8 @@
 #define __T4FW_VERSION_H__
 
 #define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0D
-#define T4FW_VERSION_MICRO 0x20
+#define T4FW_VERSION_MINOR 0x0E
+#define T4FW_VERSION_MICRO 0x04
 #define T4FW_VERSION_BUILD 0x00
 
 #define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
 #define T4FW_MIN_VERSION_MICRO 0x00
 
 #define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0D
-#define T5FW_VERSION_MICRO 0x20
+#define T5FW_VERSION_MINOR 0x0E
+#define T5FW_VERSION_MICRO 0x04
 #define T5FW_VERSION_BUILD 0x00
 
 #define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
 #define T5FW_MIN_VERSION_MICRO 0x00
 
 #define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0D
-#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_MINOR 0x0E
+#define T6FW_VERSION_MICRO 0x04
 #define T6FW_VERSION_BUILD 0x00
 
 #define T6FW_MIN_VERSION_MAJOR 0x00
index c0a7813603c3d1c7f9df5bd783c395a61d83b54b..cf94b72dbacd942b9c56d2bc669854bea7c2ac18 100644 (file)
@@ -1226,7 +1226,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
        if (int_status & ISR_PRS)
                dm9000_rx(dev);
 
-       /* Trnasmit Interrupt check */
+       /* Transmit Interrupt check */
        if (int_status & ISR_PTS)
                dm9000_tx_done(dev, db);
 
index 3be1fbdcdd0215cbd6589001b3a11c2091d309c2..eb323913cd39fb981a8c0cc02140c0c7205ee4f8 100644 (file)
@@ -1968,7 +1968,7 @@ static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
                        memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
        }
 
-       status = be_mcc_notify(adapter);
+       status = be_mcc_notify_wait(adapter);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
index 442410cd2ca4b11baaa40039085bbe937344eec5..a2c96fd883938b6d15aa1335e4169b290f6bf241 100644 (file)
@@ -1132,10 +1132,6 @@ static int ethoc_probe(struct platform_device *pdev)
                memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
                priv->phy_id = pdata->phy_id;
        } else {
-               priv->phy_id = -1;
-
-#ifdef CONFIG_OF
-               {
                const uint8_t *mac;
 
                mac = of_get_property(pdev->dev.of_node,
@@ -1143,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
                                      NULL);
                if (mac)
                        memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
-               }
-#endif
+               priv->phy_id = -1;
        }
 
        /* Check that the given MAC address is valid. If it isn't, read the
index 91925e38705eb2c8c6c03ea41587d9801a416479..dd4ca39d5d8f62cf96190576224799d00d17b2ff 100644 (file)
@@ -1816,11 +1816,13 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
        struct fec_enet_private *fep = bus->priv;
        struct device *dev = &fep->pdev->dev;
        unsigned long time_left;
-       int ret = 0;
+       int ret;
 
        ret = pm_runtime_get_sync(dev);
        if (ret < 0)
                return ret;
+       else
+               ret = 0;
 
        fep->mii_timeout = 0;
        reinit_completion(&fep->mdio_done);
@@ -3029,6 +3031,14 @@ fec_set_mac_address(struct net_device *ndev, void *p)
                memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
        }
 
+       /* Add netif status check here to avoid system hang in below case:
+        * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+        * After ethx down, fec all clocks are gated off and then register
+        * access causes system hang.
+        */
+       if (!netif_running(ndev))
+               return 0;
+
        writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
                (ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
                fep->hwp + FEC_ADDR_LOW);
index 6e9a792097d315891af8c36812e38fce15e71640..060dd39229747c4b1f43e4f9f374777bea4d95ec 100644 (file)
@@ -583,7 +583,7 @@ jme_setup_tx_resources(struct jme_adapter *jme)
        atomic_set(&txring->next_to_clean, 0);
        atomic_set(&txring->nr_free, jme->tx_ring_size);
 
-       txring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
+       txring->bufinf          = kzalloc(sizeof(struct jme_buffer_info) *
                                        jme->tx_ring_size, GFP_ATOMIC);
        if (unlikely(!(txring->bufinf)))
                goto err_free_txring;
@@ -592,8 +592,6 @@ jme_setup_tx_resources(struct jme_adapter *jme)
         * Initialize Transmit Descriptors
         */
        memset(txring->alloc, 0, TX_RING_ALLOC_SIZE(jme->tx_ring_size));
-       memset(txring->bufinf, 0,
-               sizeof(struct jme_buffer_info) * jme->tx_ring_size);
 
        return 0;
 
@@ -845,7 +843,7 @@ jme_setup_rx_resources(struct jme_adapter *jme)
        rxring->next_to_use     = 0;
        atomic_set(&rxring->next_to_clean, 0);
 
-       rxring->bufinf          = kmalloc(sizeof(struct jme_buffer_info) *
+       rxring->bufinf          = kzalloc(sizeof(struct jme_buffer_info) *
                                        jme->rx_ring_size, GFP_ATOMIC);
        if (unlikely(!(rxring->bufinf)))
                goto err_free_rxring;
@@ -853,8 +851,6 @@ jme_setup_rx_resources(struct jme_adapter *jme)
        /*
         * Initiallize Receive Descriptors
         */
-       memset(rxring->bufinf, 0,
-               sizeof(struct jme_buffer_info) * jme->rx_ring_size);
        for (i = 0 ; i < jme->rx_ring_size ; ++i) {
                if (unlikely(jme_make_new_rx_buf(jme, i))) {
                        jme_free_rx_resources(jme);
index d52639bc491f7a1aa76f8bd127df8b9e68be0332..960169efe636a659241f7e8fae10706fd0503939 100644 (file)
@@ -1859,14 +1859,11 @@ oom:
                return;
        }
 
-       mc_spec = kmalloc(0x200, GFP_ATOMIC);
+       mc_spec = kzalloc(0x200, GFP_ATOMIC);
        if (mc_spec == NULL)
                goto oom;
        mc_other = mc_spec + (0x100 >> 2);
 
-       memset(mc_spec, 0, 0x100);
-       memset(mc_other, 0, 0x100);
-
        netdev_for_each_mc_addr(ha, dev) {
                u8 *a = ha->addr;
                u32 *table;
index 5ab3adf88166c5d2ef18c64824e078b756b41060..9f0bdd993955cab628d676d079ceb0d6bd37e805 100644 (file)
@@ -918,8 +918,6 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
                                mbx->req.arg = NULL;
                                return -ENOMEM;
                        }
-                       memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
-                       memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        temp = adapter->ahw->fw_hal_version << 29;
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) | temp);
                        mbx->cmd_op = type;
index 6e6f18fc5d7698b08be53682ebafbd2ff1ce614c..a5f422f26cb4396a8dc25b65557c83793e956d94 100644 (file)
@@ -73,8 +73,6 @@ int qlcnic_82xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
                                mbx->req.arg = NULL;
                                return -ENOMEM;
                        }
-                       memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
-                       memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        mbx->req.arg[0] = type;
                        break;
                }
index 546cd5f1c85aeba1d2d74f01986995858f443206..7327b729ba2eae4119efff54ae2a199a77b11da3 100644 (file)
@@ -729,8 +729,6 @@ static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
                                mbx->req.arg = NULL;
                                return -ENOMEM;
                        }
-                       memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
-                       memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
                        mbx->req.arg[0] = (type | (mbx->req.num << 16) |
                                           (3 << 29));
                        mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
index 24dcbe62412a10a457bc5fa7ea8ce0ed1f016221..2b32e0c5a0b46bcdb4e50d1931c676f2f65503d0 100644 (file)
@@ -833,7 +833,8 @@ struct rtl8169_private {
        unsigned features;
 
        struct mii_if_info mii;
-       struct rtl8169_counters counters;
+       dma_addr_t counters_phys_addr;
+       struct rtl8169_counters *counters;
        struct rtl8169_tc_offsets tc_offset;
        u32 saved_wolopts;
        u32 opts1_mask;
@@ -2190,53 +2191,37 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
        }
 }
 
-static struct rtl8169_counters *rtl8169_map_counters(struct net_device *dev,
-                                                    dma_addr_t *paddr,
-                                                    u32 counter_cmd)
+DECLARE_RTL_COND(rtl_counters_cond)
 {
-       struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
-       struct device *d = &tp->pci_dev->dev;
-       struct rtl8169_counters *counters;
-       u32 cmd;
 
-       counters = dma_alloc_coherent(d, sizeof(*counters), paddr, GFP_KERNEL);
-       if (counters) {
-               RTL_W32(CounterAddrHigh, (u64)*paddr >> 32);
-               cmd = (u64)*paddr & DMA_BIT_MASK(32);
-               RTL_W32(CounterAddrLow, cmd);
-               RTL_W32(CounterAddrLow, cmd | counter_cmd);
-       }
-       return counters;
+       return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
 }
 
-static void rtl8169_unmap_counters (struct net_device *dev,
-                                   dma_addr_t paddr,
-                                   struct rtl8169_counters *counters)
+static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
-       struct device *d = &tp->pci_dev->dev;
+       dma_addr_t paddr = tp->counters_phys_addr;
+       u32 cmd;
+       bool ret;
 
-       RTL_W32(CounterAddrLow, 0);
-       RTL_W32(CounterAddrHigh, 0);
+       RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
+       cmd = (u64)paddr & DMA_BIT_MASK(32);
+       RTL_W32(CounterAddrLow, cmd);
+       RTL_W32(CounterAddrLow, cmd | counter_cmd);
 
-       dma_free_coherent(d, sizeof(*counters), counters, paddr);
-}
+       ret = rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
 
-DECLARE_RTL_COND(rtl_reset_counters_cond)
-{
-       void __iomem *ioaddr = tp->mmio_addr;
+       RTL_W32(CounterAddrLow, 0);
+       RTL_W32(CounterAddrHigh, 0);
 
-       return RTL_R32(CounterAddrLow) & CounterReset;
+       return ret;
 }
 
 static bool rtl8169_reset_counters(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
-       struct rtl8169_counters *counters;
-       dma_addr_t paddr;
-       bool ret = true;
 
        /*
         * Versions prior to RTL_GIGA_MAC_VER_19 don't support resetting the
@@ -2245,32 +2230,13 @@ static bool rtl8169_reset_counters(struct net_device *dev)
        if (tp->mac_version < RTL_GIGA_MAC_VER_19)
                return true;
 
-       counters = rtl8169_map_counters(dev, &paddr, CounterReset);
-       if (!counters)
-               return false;
-
-       if (!rtl_udelay_loop_wait_low(tp, &rtl_reset_counters_cond, 10, 1000))
-               ret = false;
-
-       rtl8169_unmap_counters(dev, paddr, counters);
-
-       return ret;
-}
-
-DECLARE_RTL_COND(rtl_counters_cond)
-{
-       void __iomem *ioaddr = tp->mmio_addr;
-
-       return RTL_R32(CounterAddrLow) & CounterDump;
+       return rtl8169_do_counters(dev, CounterReset);
 }
 
 static bool rtl8169_update_counters(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
-       struct rtl8169_counters *counters;
-       dma_addr_t paddr;
-       bool ret = true;
 
        /*
         * Some chips are unable to dump tally counters when the receiver
@@ -2279,23 +2245,13 @@ static bool rtl8169_update_counters(struct net_device *dev)
        if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
                return true;
 
-       counters = rtl8169_map_counters(dev, &paddr, CounterDump);
-       if (!counters)
-               return false;
-
-       if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
-               memcpy(&tp->counters, counters, sizeof(*counters));
-       else
-               ret = false;
-
-       rtl8169_unmap_counters(dev, paddr, counters);
-
-       return ret;
+       return rtl8169_do_counters(dev, CounterDump);
 }
 
 static bool rtl8169_init_counter_offsets(struct net_device *dev)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
+       struct rtl8169_counters *counters = tp->counters;
        bool ret = false;
 
        /*
@@ -2323,9 +2279,9 @@ static bool rtl8169_init_counter_offsets(struct net_device *dev)
        if (rtl8169_update_counters(dev))
                ret = true;
 
-       tp->tc_offset.tx_errors = tp->counters.tx_errors;
-       tp->tc_offset.tx_multi_collision = tp->counters.tx_multi_collision;
-       tp->tc_offset.tx_aborted = tp->counters.tx_aborted;
+       tp->tc_offset.tx_errors = counters->tx_errors;
+       tp->tc_offset.tx_multi_collision = counters->tx_multi_collision;
+       tp->tc_offset.tx_aborted = counters->tx_aborted;
        tp->tc_offset.inited = true;
 
        return ret;
@@ -2335,24 +2291,25 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
+       struct rtl8169_counters *counters = tp->counters;
 
        ASSERT_RTNL();
 
        rtl8169_update_counters(dev);
 
-       data[0] = le64_to_cpu(tp->counters.tx_packets);
-       data[1] = le64_to_cpu(tp->counters.rx_packets);
-       data[2] = le64_to_cpu(tp->counters.tx_errors);
-       data[3] = le32_to_cpu(tp->counters.rx_errors);
-       data[4] = le16_to_cpu(tp->counters.rx_missed);
-       data[5] = le16_to_cpu(tp->counters.align_errors);
-       data[6] = le32_to_cpu(tp->counters.tx_one_collision);
-       data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
-       data[8] = le64_to_cpu(tp->counters.rx_unicast);
-       data[9] = le64_to_cpu(tp->counters.rx_broadcast);
-       data[10] = le32_to_cpu(tp->counters.rx_multicast);
-       data[11] = le16_to_cpu(tp->counters.tx_aborted);
-       data[12] = le16_to_cpu(tp->counters.tx_underun);
+       data[0] = le64_to_cpu(counters->tx_packets);
+       data[1] = le64_to_cpu(counters->rx_packets);
+       data[2] = le64_to_cpu(counters->tx_errors);
+       data[3] = le32_to_cpu(counters->rx_errors);
+       data[4] = le16_to_cpu(counters->rx_missed);
+       data[5] = le16_to_cpu(counters->align_errors);
+       data[6] = le32_to_cpu(counters->tx_one_collision);
+       data[7] = le32_to_cpu(counters->tx_multi_collision);
+       data[8] = le64_to_cpu(counters->rx_unicast);
+       data[9] = le64_to_cpu(counters->rx_broadcast);
+       data[10] = le32_to_cpu(counters->rx_multicast);
+       data[11] = le16_to_cpu(counters->tx_aborted);
+       data[12] = le16_to_cpu(counters->tx_underun);
 }
 
 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -7780,6 +7737,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
+       struct rtl8169_counters *counters = tp->counters;
        unsigned int start;
 
        if (netif_running(dev))
@@ -7816,11 +7774,11 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
         * Subtract values fetched during initalization.
         * See rtl8169_init_counter_offsets for a description why we do that.
         */
-       stats->tx_errors = le64_to_cpu(tp->counters.tx_errors) -
+       stats->tx_errors = le64_to_cpu(counters->tx_errors) -
                le64_to_cpu(tp->tc_offset.tx_errors);
-       stats->collisions = le32_to_cpu(tp->counters.tx_multi_collision) -
+       stats->collisions = le32_to_cpu(counters->tx_multi_collision) -
                le32_to_cpu(tp->tc_offset.tx_multi_collision);
-       stats->tx_aborted_errors = le16_to_cpu(tp->counters.tx_aborted) -
+       stats->tx_aborted_errors = le16_to_cpu(counters->tx_aborted) -
                le16_to_cpu(tp->tc_offset.tx_aborted);
 
        return stats;
@@ -8022,6 +7980,9 @@ static void rtl_remove_one(struct pci_dev *pdev)
 
        unregister_netdev(dev);
 
+       dma_free_coherent(&tp->pci_dev->dev, sizeof(*tp->counters),
+                         tp->counters, tp->counters_phys_addr);
+
        rtl_release_firmware(tp);
 
        if (pci_dev_run_wake(pdev))
@@ -8447,9 +8408,16 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
 
+       tp->counters = dma_alloc_coherent (&pdev->dev, sizeof(*tp->counters),
+                                          &tp->counters_phys_addr, GFP_KERNEL);
+       if (!tp->counters) {
+               rc = -ENOMEM;
+               goto err_out_msi_4;
+       }
+
        rc = register_netdev(dev);
        if (rc < 0)
-               goto err_out_msi_4;
+               goto err_out_cnt_5;
 
        pci_set_drvdata(pdev, dev);
 
@@ -8483,6 +8451,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 out:
        return rc;
 
+err_out_cnt_5:
+       dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
+                         tp->counters_phys_addr);
 err_out_msi_4:
        netif_napi_del(&tp->napi);
        rtl_disable_msi(pdev, tp);
index 864b476f7fd5a33b81ac2f6ea9b08e0cf99d299d..925f2f8659b8f181fc9a447329180cbc4a12a5e9 100644 (file)
@@ -837,8 +837,11 @@ static int stmmac_init_phy(struct net_device *dev)
                                     interface);
        }
 
-       if (IS_ERR(phydev)) {
+       if (IS_ERR_OR_NULL(phydev)) {
                pr_err("%s: Could not attach to PHY\n", dev->name);
+               if (!phydev)
+                       return -ENODEV;
+
                return PTR_ERR(phydev);
        }
 
index a8f315106742d9b1f5c06fbfaa90520ffa8323ab..8276ee5a7d541b2e27bb1e9a70ee74532e8a155e 100644 (file)
@@ -20,7 +20,7 @@ config SYNOPSYS_DWC_ETH_QOS
        select PHYLIB
        select CRC32
        select MII
-       depends on OF
+       depends on OF && HAS_DMA
        ---help---
          This driver supports the DWC Ethernet QoS from Synopsys
 
index c07030dbe7484b50e1f49c9dff6a35ba6cf0fc95..c5ad98ace5d0abeff5d6ef8f3f028e537b241a8e 100644 (file)
@@ -127,6 +127,11 @@ config DP83867_PHY
        ---help---
          Currently supports the DP83867 PHY.
 
+config MICROCHIP_PHY
+       tristate "Drivers for Microchip PHYs"
+       help
+         Supports the LAN88XX PHYs.
+
 config FIXED_PHY
        tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
        depends on PHYLIB
index 9bb103358c74d2c87054c08f0c35d1d83609a43a..87f079c4b2c7ab16e5577b0b86fdd8509f9b7c8f 100644 (file)
@@ -37,3 +37,4 @@ obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o
 obj-$(CONFIG_MDIO_SUN4I)       += mdio-sun4i.o
 obj-$(CONFIG_MDIO_MOXART)      += mdio-moxart.o
 obj-$(CONFIG_MDIO_BCM_UNIMAC)  += mdio-bcm-unimac.o
+obj-$(CONFIG_MICROCHIP_PHY)    += microchip.o
index 12c7eb2c604e15021b2f5fb78120004708fb22e8..fb1299c6326ec1f388d5c544e4607645274cbb08 100644 (file)
@@ -325,7 +325,7 @@ struct phy_device *fixed_phy_register(unsigned int irq,
        phy_addr = phy_fixed_addr++;
        spin_unlock(&phy_fixed_addr_lock);
 
-       ret = fixed_phy_add(PHY_POLL, phy_addr, status, link_gpio);
+       ret = fixed_phy_add(irq, phy_addr, status, link_gpio);
        if (ret < 0)
                return ERR_PTR(ret);
 
diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c
new file mode 100644 (file)
index 0000000..c0a20eb
--- /dev/null
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/microchipphy.h>
+
+#define DRIVER_AUTHOR  "WOOJUNG HUH <woojung.huh@microchip.com>"
+#define DRIVER_DESC    "Microchip LAN88XX PHY driver"
+
+struct lan88xx_priv {
+       int     chip_id;
+       int     chip_rev;
+       __u32   wolopts;
+};
+
+static int lan88xx_phy_config_intr(struct phy_device *phydev)
+{
+       int rc;
+
+       if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+               /* unmask all source and clear them before enable */
+               rc = phy_write(phydev, LAN88XX_INT_MASK, 0x7FFF);
+               rc = phy_read(phydev, LAN88XX_INT_STS);
+               rc = phy_write(phydev, LAN88XX_INT_MASK,
+                              LAN88XX_INT_MASK_MDINTPIN_EN_ |
+                              LAN88XX_INT_MASK_LINK_CHANGE_);
+       } else {
+               rc = phy_write(phydev, LAN88XX_INT_MASK, 0);
+       }
+
+       return rc < 0 ? rc : 0;
+}
+
+static int lan88xx_phy_ack_interrupt(struct phy_device *phydev)
+{
+       int rc = phy_read(phydev, LAN88XX_INT_STS);
+
+       return rc < 0 ? rc : 0;
+}
+
+int lan88xx_suspend(struct phy_device *phydev)
+{
+       struct lan88xx_priv *priv = phydev->priv;
+
+       /* do not power down PHY when WOL is enabled */
+       if (!priv->wolopts)
+               genphy_suspend(phydev);
+
+       return 0;
+}
+
+static int lan88xx_probe(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct lan88xx_priv *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->wolopts = 0;
+
+       /* these values can be used to identify internal PHY */
+       priv->chip_id = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_ID,
+                                             3, phydev->addr);
+       priv->chip_rev = phy_read_mmd_indirect(phydev, LAN88XX_MMD3_CHIP_REV,
+                                              3, phydev->addr);
+
+       phydev->priv = priv;
+
+       return 0;
+}
+
+static void lan88xx_remove(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct lan88xx_priv *priv = phydev->priv;
+
+       if (priv)
+               devm_kfree(dev, priv);
+}
+
+static int lan88xx_set_wol(struct phy_device *phydev,
+                          struct ethtool_wolinfo *wol)
+{
+       struct lan88xx_priv *priv = phydev->priv;
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
+
+static struct phy_driver microchip_phy_driver[] = {
+{
+       .phy_id         = 0x0007c130,
+       .phy_id_mask    = 0xfffffff0,
+       .name           = "Microchip LAN88xx",
+
+       .features       = (PHY_GBIT_FEATURES |
+                          SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+       .flags          = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+       .probe          = lan88xx_probe,
+       .remove         = lan88xx_remove,
+
+       .config_init    = genphy_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+
+       .ack_interrupt  = lan88xx_phy_ack_interrupt,
+       .config_intr    = lan88xx_phy_config_intr,
+
+       .suspend        = lan88xx_suspend,
+       .resume         = genphy_resume,
+       .set_wol        = lan88xx_set_wol,
+
+       .driver         = { .owner = THIS_MODULE, }
+} };
+
+module_phy_driver(microchip_phy_driver);
+
+static struct mdio_device_id __maybe_unused microchip_tbl[] = {
+       { 0x0007c130, 0xfffffff0 },
+       { }
+};
+
+MODULE_DEVICE_TABLE(mdio, microchip_tbl);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
index 39364a45af4043880fe3d195bb381fcfb8858e74..a39518fc93aadf82918fd944dcf88c6de5321f48 100644 (file)
@@ -1049,8 +1049,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
 {
        struct mii_if_info *mii = &dev->mii;
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
-       u16 ladv, radv;
-       int ret;
+       int ladv, radv, ret;
        u32 buf;
 
        /* clear PHY interrupt status */
@@ -1104,12 +1103,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
                }
 
                ladv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
-               if (unlikely(ladv < 0))
-                       return -EIO;
+               if (ladv < 0)
+                       return ladv;
 
                radv = lan78xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
-               if (unlikely(radv < 0))
-                       return -EIO;
+               if (radv < 0)
+                       return radv;
 
                netif_dbg(dev, link, dev->net,
                          "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
index fe4ec324aebc0284f3a72849dcfc6cc9187196a8..d9427ca3dba79628f402867b83e735bee78fac3b 100644 (file)
 #include <linux/mdio.h>
 #include <linux/usb/cdc.h>
 
-/* Version Information */
-#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
+/* Information for net-next */
+#define NETNEXT_VERSION                "08"
+
+/* Information for net */
+#define NET_VERSION            "2"
+
+#define DRIVER_VERSION         "v1." NETNEXT_VERSION "." NET_VERSION
 #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
 #define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
 #define MODULENAME "r8152"
 #define OCP_EEE_ABLE           0xa5c4
 #define OCP_EEE_ADV            0xa5d0
 #define OCP_EEE_LPABLE         0xa5d2
+#define OCP_PHY_STATE          0xa708          /* nway state for 8153 */
 #define OCP_ADC_CFG            0xbc06
 
 /* SRAM Register */
 /* OCP_DOWN_SPEED */
 #define EN_10M_BGOFF           0x0080
 
+/* OCP_PHY_STATE */
+#define TXDIS_STATE            0x01
+#define ABD_STATE              0x02
+
 /* OCP_ADC_CFG */
 #define CKADSEL_L              0x0100
 #define ADC_EN                 0x0080
@@ -604,6 +614,7 @@ struct r8152 {
                void (*unload)(struct r8152 *);
                int (*eee_get)(struct r8152 *, struct ethtool_eee *);
                int (*eee_set)(struct r8152 *, struct ethtool_eee *);
+               bool (*in_nway)(struct r8152 *);
        } rtl_ops;
 
        int intr_interval;
@@ -2941,6 +2952,32 @@ static void rtl8153_down(struct r8152 *tp)
        r8153_enable_aldps(tp);
 }
 
+static bool rtl8152_in_nway(struct r8152 *tp)
+{
+       u16 nway_state;
+
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_OCP_GPHY_BASE, 0x2000);
+       tp->ocp_base = 0x2000;
+       ocp_write_byte(tp, MCU_TYPE_PLA, 0xb014, 0x4c);         /* phy state */
+       nway_state = ocp_read_word(tp, MCU_TYPE_PLA, 0xb01a);
+
+       /* bit 15: TXDIS_STATE, bit 14: ABD_STATE */
+       if (nway_state & 0xc000)
+               return false;
+       else
+               return true;
+}
+
+static bool rtl8153_in_nway(struct r8152 *tp)
+{
+       u16 phy_state = ocp_reg_read(tp, OCP_PHY_STATE) & 0xff;
+
+       if (phy_state == TXDIS_STATE || phy_state == ABD_STATE)
+               return false;
+       else
+               return true;
+}
+
 static void set_carrier(struct r8152 *tp)
 {
        struct net_device *netdev = tp->netdev;
@@ -3405,6 +3442,27 @@ static int rtl8152_post_reset(struct usb_interface *intf)
        return 0;
 }
 
+static bool delay_autosuspend(struct r8152 *tp)
+{
+       bool sw_linking = !!netif_carrier_ok(tp->netdev);
+       bool hw_linking = !!(rtl8152_get_speed(tp) & LINK_STATUS);
+
+       /* A link change may have occurred that the driver has not
+        * detected yet. If the driver has disabled tx/rx while the
+        * hardware link is up, the device would not be woken up by
+        * receiving any packet.
+        */
+       if (work_busy(&tp->schedule.work) || sw_linking != hw_linking)
+               return true;
+
+       /* If the link went down due to nway (autonegotiation), the
+        * device may miss the link change event and would not wake
+        * up when the link comes back on.
+        */
+       if (!sw_linking && tp->rtl_ops.in_nway(tp))
+               return true;
+       else
+               return false;
+}
+
 static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct r8152 *tp = usb_get_intfdata(intf);
@@ -3414,7 +3472,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
        mutex_lock(&tp->control);
 
        if (PMSG_IS_AUTO(message)) {
-               if (netif_running(netdev) && work_busy(&tp->schedule.work)) {
+               if (netif_running(netdev) && delay_autosuspend(tp)) {
                        ret = -EBUSY;
                        goto out1;
                }
@@ -4044,6 +4102,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->unload             = rtl8152_unload;
                ops->eee_get            = r8152_get_eee;
                ops->eee_set            = r8152_set_eee;
+               ops->in_nway            = rtl8152_in_nway;
                break;
 
        case RTL_VER_03:
@@ -4058,6 +4117,7 @@ static int rtl_ops_init(struct r8152 *tp)
                ops->unload             = rtl8153_unload;
                ops->eee_get            = r8153_get_eee;
                ops->eee_set            = r8153_set_eee;
+               ops->in_nway            = rtl8153_in_nway;
                break;
 
        default:
index e0498571ae267c1d74dc3a65ed6d59c54cb92d1a..b4cf10781348effecf9ca1f6896c0aeab852bba2 100644 (file)
@@ -428,12 +428,18 @@ static enum skb_state defer_bh(struct usbnet *dev, struct sk_buff *skb,
        old_state = entry->state;
        entry->state = state;
        __skb_unlink(skb, list);
-       spin_unlock(&list->lock);
-       spin_lock(&dev->done.lock);
+
+       /* defer_bh() is never called with list == &dev->done.
+        * spin_lock_nested() tells lockdep that it is OK to take
+        * dev->done.lock here with list->lock held.
+        */
+       spin_lock_nested(&dev->done.lock, SINGLE_DEPTH_NESTING);
+
        __skb_queue_tail(&dev->done, skb);
        if (dev->done.qlen == 1)
                tasklet_schedule(&dev->bh);
-       spin_unlock_irqrestore(&dev->done.lock, flags);
+       spin_unlock(&dev->done.lock);
+       spin_unlock_irqrestore(&list->lock, flags);
        return old_state;
 }
 
@@ -749,6 +755,20 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
 
 /*-------------------------------------------------------------------------*/
 
+static void wait_skb_queue_empty(struct sk_buff_head *q)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&q->lock, flags);
+       while (!skb_queue_empty(q)) {
+               spin_unlock_irqrestore(&q->lock, flags);
+               schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+               set_current_state(TASK_UNINTERRUPTIBLE);
+               spin_lock_irqsave(&q->lock, flags);
+       }
+       spin_unlock_irqrestore(&q->lock, flags);
+}
+
 // precondition: never called in_interrupt
 static void usbnet_terminate_urbs(struct usbnet *dev)
 {
@@ -762,14 +782,11 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
                unlink_urbs(dev, &dev->rxq);
 
        /* maybe wait for deletions to finish. */
-       while (!skb_queue_empty(&dev->rxq)
-               && !skb_queue_empty(&dev->txq)
-               && !skb_queue_empty(&dev->done)) {
-                       schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
-                       set_current_state(TASK_UNINTERRUPTIBLE);
-                       netif_dbg(dev, ifdown, dev->net,
-                                 "waited for %d urb completions\n", temp);
-       }
+       wait_skb_queue_empty(&dev->rxq);
+       wait_skb_queue_empty(&dev->txq);
+       wait_skb_queue_empty(&dev->done);
+       netif_dbg(dev, ifdown, dev->net,
+                 "waited for %d urb completions\n", temp);
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&dev->wait, &wait);
 }
index ce988fd01b3486c7f40d06289f1c1d001d7c2bcf..cf8b7f0473b3985af3c6afc68ecf856e704fbc16 100644 (file)
@@ -1223,7 +1223,6 @@ drop:
 static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 {
        struct metadata_dst *tun_dst = NULL;
-       struct ip_tunnel_info *info;
        struct vxlan_sock *vs;
        struct vxlanhdr *vxh;
        u32 flags, vni;
@@ -1270,8 +1269,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                if (!tun_dst)
                        goto drop;
 
-               info = &tun_dst->u.tun_info;
-               md = ip_tunnel_info_opts(info);
+               md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
        } else {
                memset(md, 0, sizeof(*md));
        }
@@ -1286,7 +1284,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
                md->gbp = ntohs(gbp->policy_id);
 
                if (tun_dst)
-                       info->key.tun_flags |= TUNNEL_VXLAN_OPT;
+                       tun_dst->u.tun_info.key.tun_flags |= TUNNEL_VXLAN_OPT;
 
                if (gbp->dont_learn)
                        md->gbp |= VXLAN_GBP_DONT_LEARN;
index 758c4ba1e97c91389ad6e5d1e6cda37f0dae26fa..8fef8d83436ddef9682df26e8ed2af6f2ae13377 100644 (file)
@@ -1358,6 +1358,8 @@ sbni_ioctl( struct net_device  *dev,  struct ifreq  *ifr,  int  cmd )
                if( !slave_dev  ||  !(slave_dev->flags & IFF_UP) ) {
                        netdev_err(dev, "trying to enslave non-active device %s\n",
                                   slave_name);
+                       if (slave_dev)
+                               dev_put(slave_dev);
                        return  -EPERM;
                }
 
index 6dc76c1e807b4d0ad45ae0bbbd1ba1310ed33239..a7bf747271162c450c1da60e12e417bdfe6b366a 100644 (file)
@@ -200,11 +200,6 @@ struct xenvif_queue { /* Per-queue data for xenvif */
        struct xenvif_stats stats;
 };
 
-/* Maximum number of Rx slots a to-guest packet may use, including the
- * slot needed for GSO meta-data.
- */
-#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
-
 enum state_bit_shift {
        /* This bit marks that the vif is connected */
        VIF_STATUS_CONNECTED,
@@ -317,11 +312,6 @@ int xenvif_dealloc_kthread(void *data);
 
 void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
 
-/* Determine whether the needed number of slots (req) are available,
- * and set req_event if not.
- */
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed);
-
 void xenvif_carrier_on(struct xenvif *vif);
 
 /* Callback from stack when TX packet can be released */
index 42569b994ea84ae03a9ff0d9b88109d3029c4f30..ec98d43916a818152263c8128b4854e9d340c3d2 100644 (file)
@@ -149,9 +149,20 @@ static inline pending_ring_idx_t pending_index(unsigned i)
        return i & (MAX_PENDING_REQS-1);
 }
 
-bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
+static int xenvif_rx_ring_slots_needed(struct xenvif *vif)
+{
+       if (vif->gso_mask)
+               return DIV_ROUND_UP(vif->dev->gso_max_size, PAGE_SIZE) + 1;
+       else
+               return DIV_ROUND_UP(vif->dev->mtu, PAGE_SIZE);
+}
+
+static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
 {
        RING_IDX prod, cons;
+       int needed;
+
+       needed = xenvif_rx_ring_slots_needed(queue->vif);
 
        do {
                prod = queue->rx.sring->req_prod;
@@ -314,7 +325,7 @@ static void xenvif_gop_frag_copy(struct xenvif_queue *queue, struct sk_buff *skb
                } else {
                        copy_gop->source.domid = DOMID_SELF;
                        copy_gop->source.u.gmfn =
-                               virt_to_mfn(page_address(page));
+                               virt_to_gfn(page_address(page));
                }
                copy_gop->source.offset = offset;
 
@@ -513,7 +524,7 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
 
        skb_queue_head_init(&rxq);
 
-       while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
+       while (xenvif_rx_ring_slots_available(queue)
               && (skb = xenvif_rx_dequeue(queue)) != NULL) {
                queue->last_rx_time = jiffies;
 
@@ -1395,7 +1406,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                queue->tx_copy_ops[*copy_ops].source.offset = txreq.offset;
 
                queue->tx_copy_ops[*copy_ops].dest.u.gmfn =
-                       virt_to_mfn(skb->data);
+                       virt_to_gfn(skb->data);
                queue->tx_copy_ops[*copy_ops].dest.domid = DOMID_SELF;
                queue->tx_copy_ops[*copy_ops].dest.offset =
                        offset_in_page(skb->data);
@@ -1938,8 +1949,7 @@ static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
 
-       return !queue->stalled
-               && prod - cons < XEN_NETBK_RX_SLOTS_MAX
+       return !queue->stalled && prod - cons < 1
                && time_after(jiffies,
                              queue->last_rx_time + queue->vif->stall_timeout);
 }
@@ -1951,14 +1961,13 @@ static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
        prod = queue->rx.sring->req_prod;
        cons = queue->rx.req_cons;
 
-       return queue->stalled
-               && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
+       return queue->stalled && prod - cons >= 1;
 }
 
 static bool xenvif_have_rx_work(struct xenvif_queue *queue)
 {
        return (!skb_queue_empty(&queue->rx_queue)
-               && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
+               && xenvif_rx_ring_slots_available(queue))
                || (queue->vif->stall_timeout &&
                    (xenvif_rx_queue_stalled(queue)
                     || xenvif_rx_queue_ready(queue)))
@@ -2105,8 +2114,11 @@ static int __init netback_init(void)
        if (!xen_domain())
                return -ENODEV;
 
-       /* Allow as many queues as there are CPUs, by default */
-       xenvif_max_queues = num_online_cpus();
+       /* Allow as many queues as there are CPUs if user has not
+        * specified a value.
+        */
+       if (xenvif_max_queues == 0)
+               xenvif_max_queues = num_online_cpus();
 
        if (fatal_skb_slots < XEN_NETBK_LEGACY_SLOTS_MAX) {
                pr_info("fatal_skb_slots too small (%d), bump it to XEN_NETBK_LEGACY_SLOTS_MAX (%d)\n",
index e27e6d2ea6d283cc3508579b38e8fb27b8ade035..f821a97d78278feed765d08d886a4665d8795bd5 100644 (file)
@@ -291,7 +291,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                struct sk_buff *skb;
                unsigned short id;
                grant_ref_t ref;
-               unsigned long pfn;
+               unsigned long gfn;
                struct xen_netif_rx_request *req;
 
                skb = xennet_alloc_one_rx_buffer(queue);
@@ -307,12 +307,12 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
                BUG_ON((signed short)ref < 0);
                queue->grant_rx_ref[id] = ref;
 
-               pfn = page_to_pfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
+               gfn = xen_page_to_gfn(skb_frag_page(&skb_shinfo(skb)->frags[0]));
 
                req = RING_GET_REQUEST(&queue->rx, req_prod);
                gnttab_grant_foreign_access_ref(ref,
                                                queue->info->xbdev->otherend_id,
-                                               pfn_to_mfn(pfn),
+                                               gfn,
                                                0);
 
                req->id = id;
@@ -430,8 +430,10 @@ static struct xen_netif_tx_request *xennet_make_one_txreq(
        ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
        BUG_ON((signed short)ref < 0);
 
-       gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
-                                       page_to_mfn(page), GNTMAP_readonly);
+       gnttab_grant_foreign_access_ref(ref,
+                                       queue->info->xbdev->otherend_id,
+                                       xen_page_to_gfn(page),
+                                       GNTMAP_readonly);
 
        queue->tx_skbs[id].skb = skb;
        queue->grant_tx_page[id] = page;
@@ -2132,8 +2134,11 @@ static int __init netif_init(void)
 
        pr_info("Initialising Xen virtual ethernet driver\n");
 
-       /* Allow as many queues as there are CPUs, by default */
-       xennet_max_queues = num_online_cpus();
+       /* Allow as many queues as there are CPUs if user has not
+        * specified a value.
+        */
+       if (xennet_max_queues == 0)
+               xennet_max_queues = num_online_cpus();
 
        return xenbus_register_frontend(&netfront_driver);
 }
index 948d9abd27f1159355d60705727683b9e0deb4d1..062630ab742451c8d4d06b61262d0a4753bb4711 100644 (file)
@@ -180,6 +180,18 @@ config PWM_LP3943
          To compile this driver as a module, choose M here: the module
          will be called pwm-lp3943.
 
+config PWM_LPC18XX_SCT
+       tristate "LPC18xx/43xx PWM/SCT support"
+       depends on ARCH_LPC18XX
+       help
+         Generic PWM framework driver for NXP LPC18xx PWM/SCT which
+         supports 16 channels.
+         A maximum of 15 channels can be requested simultaneously and
+         must have the same period.
+
+         To compile this driver as a module, choose M here: the module
+         will be called pwm-lpc18xx-sct.
+
 config PWM_LPC32XX
        tristate "LPC32XX PWM support"
        depends on ARCH_LPC32XX
index d186f35a65388d532709ee1d323f9241f3e97f35..a0e00c09ead3d05e6fb92c68f4744cfcf5177f17 100644 (file)
@@ -15,6 +15,7 @@ obj-$(CONFIG_PWM_IMG)         += pwm-img.o
 obj-$(CONFIG_PWM_IMX)          += pwm-imx.o
 obj-$(CONFIG_PWM_JZ4740)       += pwm-jz4740.o
 obj-$(CONFIG_PWM_LP3943)       += pwm-lp3943.o
+obj-$(CONFIG_PWM_LPC18XX_SCT)  += pwm-lpc18xx-sct.o
 obj-$(CONFIG_PWM_LPC32XX)      += pwm-lpc32xx.o
 obj-$(CONFIG_PWM_LPSS)         += pwm-lpss.o
 obj-$(CONFIG_PWM_LPSS_PCI)     += pwm-lpss-pci.o
index 3a7769fe53dee4f3c4c5f0bdca3e829e0fa8d5b6..3f9df3ea33508da41334178038c121f6a3939f79 100644 (file)
@@ -200,6 +200,8 @@ static void of_pwmchip_remove(struct pwm_chip *chip)
  * pwm_set_chip_data() - set private chip data for a PWM
  * @pwm: PWM device
  * @data: pointer to chip-specific data
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwm_set_chip_data(struct pwm_device *pwm, void *data)
 {
@@ -215,6 +217,8 @@ EXPORT_SYMBOL_GPL(pwm_set_chip_data);
 /**
  * pwm_get_chip_data() - get private chip data for a PWM
  * @pwm: PWM device
+ *
+ * Returns: A pointer to the chip-private data for the PWM device.
  */
 void *pwm_get_chip_data(struct pwm_device *pwm)
 {
@@ -230,6 +234,8 @@ EXPORT_SYMBOL_GPL(pwm_get_chip_data);
  * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
  * will be used. The initial polarity for all channels is specified by the
  * @polarity parameter.
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwmchip_add_with_polarity(struct pwm_chip *chip,
                              enum pwm_polarity polarity)
@@ -291,6 +297,8 @@ EXPORT_SYMBOL_GPL(pwmchip_add_with_polarity);
  *
  * Register a new PWM chip. If chip->base < 0 then a dynamically assigned base
  * will be used. The initial polarity for all channels is normal.
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwmchip_add(struct pwm_chip *chip)
 {
@@ -304,6 +312,8 @@ EXPORT_SYMBOL_GPL(pwmchip_add);
  *
  * Removes a PWM chip. This function may return busy if the PWM chip provides
  * a PWM device that is still requested.
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwmchip_remove(struct pwm_chip *chip)
 {
@@ -338,10 +348,13 @@ EXPORT_SYMBOL_GPL(pwmchip_remove);
 
 /**
  * pwm_request() - request a PWM device
- * @pwm_id: global PWM device index
+ * @pwm: global PWM device index
  * @label: PWM device label
  *
  * This function is deprecated, use pwm_get() instead.
+ *
+ * Returns: A pointer to a PWM device or an ERR_PTR()-encoded error code on
+ * failure.
  */
 struct pwm_device *pwm_request(int pwm, const char *label)
 {
@@ -376,9 +389,9 @@ EXPORT_SYMBOL_GPL(pwm_request);
  * @index: per-chip index of the PWM to request
  * @label: a literal description string of this PWM
  *
- * Returns the PWM at the given index of the given PWM chip. A negative error
- * code is returned if the index is not valid for the specified PWM chip or
- * if the PWM device cannot be requested.
+ * Returns: A pointer to the PWM device at the given index of the given PWM
+ * chip. A negative error code is returned if the index is not valid for the
+ * specified PWM chip or if the PWM device cannot be requested.
  */
 struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
                                         unsigned int index,
@@ -419,6 +432,8 @@ EXPORT_SYMBOL_GPL(pwm_free);
  * @pwm: PWM device
  * @duty_ns: "on" time (in nanoseconds)
  * @period_ns: duration (in nanoseconds) of one cycle
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
 {
@@ -443,7 +458,10 @@ EXPORT_SYMBOL_GPL(pwm_config);
  * @pwm: PWM device
  * @polarity: new polarity of the PWM signal
  *
- * Note that the polarity cannot be configured while the PWM device is enabled
+ * Note that the polarity cannot be configured while the PWM device is
+ * enabled.
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
 {
@@ -455,7 +473,7 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity)
        if (!pwm->chip->ops->set_polarity)
                return -ENOSYS;
 
-       if (test_bit(PWMF_ENABLED, &pwm->flags))
+       if (pwm_is_enabled(pwm))
                return -EBUSY;
 
        err = pwm->chip->ops->set_polarity(pwm->chip, pwm, polarity);
@@ -471,6 +489,8 @@ EXPORT_SYMBOL_GPL(pwm_set_polarity);
 /**
  * pwm_enable() - start a PWM output toggling
  * @pwm: PWM device
+ *
+ * Returns: 0 on success or a negative error code on failure.
  */
 int pwm_enable(struct pwm_device *pwm)
 {
@@ -524,6 +544,9 @@ static struct pwm_chip *of_node_to_pwmchip(struct device_node *np)
  * lookup of the PWM index. This also means that the "pwm-names" property
  * becomes mandatory for devices that look up the PWM device via the con_id
  * parameter.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
  */
 struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id)
 {
@@ -630,6 +653,9 @@ void pwm_remove_table(struct pwm_lookup *table, size_t num)
  *
  * Once a PWM chip has been found the specified PWM device will be requested
  * and is ready to be used.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
  */
 struct pwm_device *pwm_get(struct device *dev, const char *con_id)
 {
@@ -752,6 +778,9 @@ static void devm_pwm_release(struct device *dev, void *res)
  *
  * This function performs like pwm_get() but the acquired PWM device will
  * automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
  */
 struct pwm_device *devm_pwm_get(struct device *dev, const char *con_id)
 {
@@ -781,6 +810,9 @@ EXPORT_SYMBOL_GPL(devm_pwm_get);
  *
  * This function performs like of_pwm_get() but the acquired PWM device will
  * automatically be released on driver detach.
+ *
+ * Returns: A pointer to the requested PWM device or an ERR_PTR()-encoded
+ * error code on failure.
  */
 struct pwm_device *devm_of_pwm_get(struct device *dev, struct device_node *np,
                                   const char *con_id)
@@ -832,7 +864,7 @@ EXPORT_SYMBOL_GPL(devm_pwm_put);
   * pwm_can_sleep() - report whether PWM access will sleep
   * @pwm: PWM device
   *
-  * It returns true if accessing the PWM can sleep, false otherwise.
+  * Returns: True if accessing the PWM can sleep, false otherwise.
   */
 bool pwm_can_sleep(struct pwm_device *pwm)
 {
@@ -853,7 +885,7 @@ static void pwm_dbg_show(struct pwm_chip *chip, struct seq_file *s)
                if (test_bit(PWMF_REQUESTED, &pwm->flags))
                        seq_puts(s, " requested");
 
-               if (test_bit(PWMF_ENABLED, &pwm->flags))
+               if (pwm_is_enabled(pwm))
                        seq_puts(s, " enabled");
 
                seq_puts(s, "\n");
@@ -924,6 +956,5 @@ static int __init pwm_debugfs_init(void)
 
        return 0;
 }
-
 subsys_initcall(pwm_debugfs_init);
 #endif /* CONFIG_DEBUG_FS */
index fa5feaba25a5d768d47aa100f4a9e55c634aabce..5df1db40fc075add53c4084b55f94993800b0b9e 100644 (file)
@@ -217,6 +217,11 @@ static const struct atmel_hlcdc_pwm_errata atmel_hlcdc_pwm_sama5d3_errata = {
 };
 
 static const struct of_device_id atmel_hlcdc_dt_ids[] = {
+       {
+               .compatible = "atmel,at91sam9n12-hlcdc",
+               /* 9n12 has same errata as 9x5 HLCDC PWM */
+               .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
+       },
        {
                .compatible = "atmel,at91sam9x5-hlcdc",
                .data = &atmel_hlcdc_pwm_at91sam9x5_errata,
index d14e0677c92ddacb6838f8cb1d7f269fc2937b7e..6da01b3bf6f463b606cac8e3b5cb2d834243456a 100644 (file)
@@ -347,7 +347,7 @@ static int atmel_tcb_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        tcbpwm->duty = duty;
 
        /* If the PWM is enabled, call enable to apply the new conf */
-       if (test_bit(PWMF_ENABLED, &pwm->flags))
+       if (pwm_is_enabled(pwm))
                atmel_tcb_pwm_enable(chip, pwm);
 
        return 0;
index a947c9095d9d6fc99d2b66be702e915eaa26014c..0e4bd4e8e5823727c03b7701ad893b4cae1f7e7e 100644 (file)
@@ -114,7 +114,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        u32 val;
        int ret;
 
-       if (test_bit(PWMF_ENABLED, &pwm->flags) && (period_ns != pwm->period)) {
+       if (pwm_is_enabled(pwm) && (period_ns != pwm_get_period(pwm))) {
                dev_err(chip->dev, "cannot change PWM period while enabled\n");
                return -EBUSY;
        }
@@ -176,7 +176,7 @@ static void atmel_pwm_config_v1(struct pwm_chip *chip, struct pwm_device *pwm,
         * If the PWM channel is enabled, only update CDTY by using the update
         * register, it needs to set bit 10 of CMR to 0
         */
-       if (test_bit(PWMF_ENABLED, &pwm->flags))
+       if (pwm_is_enabled(pwm))
                return;
        /*
         * If the PWM channel is disabled, write value to duty and period
@@ -191,7 +191,7 @@ static void atmel_pwm_config_v2(struct pwm_chip *chip, struct pwm_device *pwm,
 {
        struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
 
-       if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (pwm_is_enabled(pwm)) {
                /*
                 * If the PWM channel is enabled, using the duty update register
                 * to update the value.
index 7af8fea2dc5b3e7f5f639fd03c6b1246f7fa4d60..c634183220232194c3a0192121681c5610feb809 100644 (file)
@@ -76,19 +76,36 @@ static inline struct kona_pwmc *to_kona_pwmc(struct pwm_chip *_chip)
        return container_of(_chip, struct kona_pwmc, chip);
 }
 
-static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+/*
+ * Clear trigger bit but set smooth bit to maintain old output.
+ */
+static void kona_pwmc_prepare_for_settings(struct kona_pwmc *kp,
+       unsigned int chan)
 {
        unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
 
-       /* Clear trigger bit but set smooth bit to maintain old output */
        value |= 1 << PWM_CONTROL_SMOOTH_SHIFT(chan);
        value &= ~(1 << PWM_CONTROL_TRIGGER_SHIFT(chan));
        writel(value, kp->base + PWM_CONTROL_OFFSET);
 
+       /*
+        * There must be a min 400ns delay between clearing trigger and setting
+        * it. Failing to do this may result in no PWM signal.
+        */
+       ndelay(400);
+}
+
+static void kona_pwmc_apply_settings(struct kona_pwmc *kp, unsigned int chan)
+{
+       unsigned int value = readl(kp->base + PWM_CONTROL_OFFSET);
+
        /* Set trigger bit and clear smooth bit to apply new settings */
        value &= ~(1 << PWM_CONTROL_SMOOTH_SHIFT(chan));
        value |= 1 << PWM_CONTROL_TRIGGER_SHIFT(chan);
        writel(value, kp->base + PWM_CONTROL_OFFSET);
+
+       /* Trigger bit must be held high for at least 400 ns. */
+       ndelay(400);
 }
 
 static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
@@ -133,8 +150,14 @@ static int kona_pwmc_config(struct pwm_chip *chip, struct pwm_device *pwm,
                        return -EINVAL;
        }
 
-       /* If the PWM channel is enabled, write the settings to the HW */
-       if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+       /*
+        * Don't apply settings if disabled. The period and duty cycle are
+        * always calculated above to ensure the new values are
+        * validated immediately instead of on enable.
+        */
+       if (pwm_is_enabled(pwm)) {
+               kona_pwmc_prepare_for_settings(kp, chan);
+
                value = readl(kp->base + PRESCALE_OFFSET);
                value &= ~PRESCALE_MASK(chan);
                value |= prescale << PRESCALE_SHIFT(chan);
@@ -164,6 +187,8 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
                return ret;
        }
 
+       kona_pwmc_prepare_for_settings(kp, chan);
+
        value = readl(kp->base + PWM_CONTROL_OFFSET);
 
        if (polarity == PWM_POLARITY_NORMAL)
@@ -175,9 +200,6 @@ static int kona_pwmc_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm,
 
        kona_pwmc_apply_settings(kp, chan);
 
-       /* Wait for waveform to settle before gating off the clock */
-       ndelay(400);
-
        clk_disable_unprepare(kp->clk);
 
        return 0;
@@ -194,7 +216,8 @@ static int kona_pwmc_enable(struct pwm_chip *chip, struct pwm_device *pwm)
                return ret;
        }
 
-       ret = kona_pwmc_config(chip, pwm, pwm->duty_cycle, pwm->period);
+       ret = kona_pwmc_config(chip, pwm, pwm_get_duty_cycle(pwm),
+                              pwm_get_period(pwm));
        if (ret < 0) {
                clk_disable_unprepare(kp->clk);
                return ret;
@@ -207,13 +230,20 @@ static void kona_pwmc_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct kona_pwmc *kp = to_kona_pwmc(chip);
        unsigned int chan = pwm->hwpwm;
+       unsigned int value;
+
+       kona_pwmc_prepare_for_settings(kp, chan);
 
        /* Simulate a disable by configuring for zero duty */
        writel(0, kp->base + DUTY_CYCLE_HIGH_OFFSET(chan));
-       kona_pwmc_apply_settings(kp, chan);
+       writel(0, kp->base + PERIOD_COUNT_OFFSET(chan));
 
-       /* Wait for waveform to settle before gating off the clock */
-       ndelay(400);
+       /* Set prescale to 0 for this channel */
+       value = readl(kp->base + PRESCALE_OFFSET);
+       value &= ~PRESCALE_MASK(chan);
+       writel(value, kp->base + PRESCALE_OFFSET);
+
+       kona_pwmc_apply_settings(kp, chan);
 
        clk_disable_unprepare(kp->clk);
 }
@@ -287,7 +317,7 @@ static int kona_pwmc_remove(struct platform_device *pdev)
        unsigned int chan;
 
        for (chan = 0; chan < kp->chip.npwm; chan++)
-               if (test_bit(PWMF_ENABLED, &kp->chip.pwms[chan].flags))
+               if (pwm_is_enabled(&kp->chip.pwms[chan]))
                        clk_disable_unprepare(kp->clk);
 
        return pwmchip_remove(&kp->chip);
index e593e9c45c51c1118f88ae12cc4a546498c77b40..bbf10ae02f0ecf9a147b4c6ee81e161113095519 100644 (file)
@@ -82,7 +82,7 @@ static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
         * The clock needs to be enabled to access the PWM registers.
         * Configuration can be changed at any time.
         */
-       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (!pwm_is_enabled(pwm)) {
                ret = clk_enable(ep93xx_pwm->clk);
                if (ret)
                        return ret;
@@ -113,7 +113,7 @@ static int ep93xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                ret = -EINVAL;
        }
 
-       if (!test_bit(PWMF_ENABLED, &pwm->flags))
+       if (!pwm_is_enabled(pwm))
                clk_disable(ep93xx_pwm->clk);
 
        return ret;
index 66d6f0c5c421c210a08d66977d3f7d2a93a1aaa8..d600fd5cd4bac9a434a20798328d513d89b5452f 100644 (file)
@@ -114,7 +114,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
        unsigned long long c;
        unsigned long period_cycles, duty_cycles, prescale;
        unsigned int period_ms;
-       bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
+       bool enable = pwm_is_enabled(pwm);
        int wait_count = 0, fifoav;
        u32 cr, sr;
 
@@ -129,7 +129,8 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
                sr = readl(imx->mmio_base + MX3_PWMSR);
                fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
                if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
-                       period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC);
+                       period_ms = DIV_ROUND_UP(pwm_get_period(pwm),
+                                                NSEC_PER_MSEC);
                        msleep(period_ms);
 
                        sr = readl(imx->mmio_base + MX3_PWMSR);
diff --git a/drivers/pwm/pwm-lpc18xx-sct.c b/drivers/pwm/pwm-lpc18xx-sct.c
new file mode 100644 (file)
index 0000000..9163085
--- /dev/null
@@ -0,0 +1,465 @@
+/*
+ * NXP LPC18xx State Configurable Timer - Pulse Width Modulator driver
+ *
+ * Copyright (c) 2015 Ariel D'Alessandro <ariel@vanguardiasur.com.ar>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * Notes
+ * =====
+ * NXP LPC18xx provides a State Configurable Timer (SCT) which can be configured
+ * as a Pulse Width Modulator.
+ *
+ * SCT supports 16 outputs, 16 events and 16 registers. Each event will be
+ * triggered when its related register matches the SCT counter value, and it
+ * will set or clear a selected output.
+ *
+ * One of the events is preselected to generate the period, thus the maximum
+ * number of simultaneous channels is limited to 15. Note that the period is
+ * global to all the channels, thus the PWM driver will refuse to set
+ * different values for it, unless there's only one channel requested.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pwm.h>
+
+/* LPC18xx SCT registers */
+#define LPC18XX_PWM_CONFIG             0x000
+#define LPC18XX_PWM_CONFIG_UNIFY       BIT(0)
+#define LPC18XX_PWM_CONFIG_NORELOAD    BIT(7)
+
+#define LPC18XX_PWM_CTRL               0x004
+#define LPC18XX_PWM_CTRL_HALT          BIT(2)
+#define LPC18XX_PWM_BIDIR              BIT(4)
+#define LPC18XX_PWM_PRE_SHIFT          5
+#define LPC18XX_PWM_PRE_MASK           (0xff << LPC18XX_PWM_PRE_SHIFT)
+#define LPC18XX_PWM_PRE(x)             (x << LPC18XX_PWM_PRE_SHIFT)
+
+#define LPC18XX_PWM_LIMIT              0x008
+
+#define LPC18XX_PWM_RES_BASE           0x058
+#define LPC18XX_PWM_RES_SHIFT(_ch)     (_ch * 2)
+#define LPC18XX_PWM_RES(_ch, _action)  (_action << LPC18XX_PWM_RES_SHIFT(_ch))
+#define LPC18XX_PWM_RES_MASK(_ch)      (0x3 << LPC18XX_PWM_RES_SHIFT(_ch))
+
+#define LPC18XX_PWM_MATCH_BASE         0x100
+#define LPC18XX_PWM_MATCH(_ch)         (LPC18XX_PWM_MATCH_BASE + _ch * 4)
+
+#define LPC18XX_PWM_MATCHREL_BASE      0x200
+#define LPC18XX_PWM_MATCHREL(_ch)      (LPC18XX_PWM_MATCHREL_BASE + _ch * 4)
+
+#define LPC18XX_PWM_EVSTATEMSK_BASE    0x300
+#define LPC18XX_PWM_EVSTATEMSK(_ch)    (LPC18XX_PWM_EVSTATEMSK_BASE + _ch * 8)
+#define LPC18XX_PWM_EVSTATEMSK_ALL     0xffffffff
+
+#define LPC18XX_PWM_EVCTRL_BASE                0x304
+#define LPC18XX_PWM_EVCTRL(_ev)                (LPC18XX_PWM_EVCTRL_BASE + _ev * 8)
+
+#define LPC18XX_PWM_EVCTRL_MATCH(_ch)  _ch
+
+#define LPC18XX_PWM_EVCTRL_COMB_SHIFT  12
+#define LPC18XX_PWM_EVCTRL_COMB_MATCH  (0x1 << LPC18XX_PWM_EVCTRL_COMB_SHIFT)
+
+#define LPC18XX_PWM_OUTPUTSET_BASE     0x500
+#define LPC18XX_PWM_OUTPUTSET(_ch)     (LPC18XX_PWM_OUTPUTSET_BASE + _ch * 8)
+
+#define LPC18XX_PWM_OUTPUTCL_BASE      0x504
+#define LPC18XX_PWM_OUTPUTCL(_ch)      (LPC18XX_PWM_OUTPUTCL_BASE + _ch * 8)
+
+/* LPC18xx SCT unified counter */
+#define LPC18XX_PWM_TIMER_MAX          0xffffffff
+
+/* LPC18xx SCT events */
+#define LPC18XX_PWM_EVENT_PERIOD       0
+#define LPC18XX_PWM_EVENT_MAX          16
+
+/* SCT conflict resolution */
+enum lpc18xx_pwm_res_action {
+       LPC18XX_PWM_RES_NONE,
+       LPC18XX_PWM_RES_SET,
+       LPC18XX_PWM_RES_CLEAR,
+       LPC18XX_PWM_RES_TOGGLE,
+};
+
+struct lpc18xx_pwm_data {
+       unsigned int duty_event;
+};
+
+struct lpc18xx_pwm_chip {
+       struct device *dev;
+       struct pwm_chip chip;
+       void __iomem *base;
+       struct clk *pwm_clk;
+       unsigned long clk_rate;
+       unsigned int period_ns;
+       unsigned int min_period_ns;
+       unsigned int max_period_ns;
+       unsigned int period_event;
+       unsigned long event_map;
+       struct mutex res_lock;
+       struct mutex period_lock;
+};
+
+static inline struct lpc18xx_pwm_chip *
+to_lpc18xx_pwm_chip(struct pwm_chip *chip)
+{
+       return container_of(chip, struct lpc18xx_pwm_chip, chip);
+}
+
+static inline void lpc18xx_pwm_writel(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+                                     u32 reg, u32 val)
+{
+       writel(val, lpc18xx_pwm->base + reg);
+}
+
+static inline u32 lpc18xx_pwm_readl(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+                                   u32 reg)
+{
+       return readl(lpc18xx_pwm->base + reg);
+}
+
+static void lpc18xx_pwm_set_conflict_res(struct lpc18xx_pwm_chip *lpc18xx_pwm,
+                                        struct pwm_device *pwm,
+                                        enum lpc18xx_pwm_res_action action)
+{
+       u32 val;
+
+       mutex_lock(&lpc18xx_pwm->res_lock);
+
+       /*
+        * Simultaneous set and clear may happen on an output, that is the case
+        * when duty_ns == period_ns. LPC18xx SCT allows to set a conflict
+        * resolution action to be taken in such a case.
+        */
+       val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_RES_BASE);
+       val &= ~LPC18XX_PWM_RES_MASK(pwm->hwpwm);
+       val |= LPC18XX_PWM_RES(pwm->hwpwm, action);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_RES_BASE, val);
+
+       mutex_unlock(&lpc18xx_pwm->res_lock);
+}
+
+static void lpc18xx_pwm_config_period(struct pwm_chip *chip, int period_ns)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       u64 val;
+
+       val = (u64)period_ns * lpc18xx_pwm->clk_rate;
+       do_div(val, NSEC_PER_SEC);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_MATCH(lpc18xx_pwm->period_event),
+                          (u32)val - 1);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_MATCHREL(lpc18xx_pwm->period_event),
+                          (u32)val - 1);
+}
+
+static void lpc18xx_pwm_config_duty(struct pwm_chip *chip,
+                                   struct pwm_device *pwm, int duty_ns)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       u64 val;
+
+       val = (u64)duty_ns * lpc18xx_pwm->clk_rate;
+       do_div(val, NSEC_PER_SEC);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_MATCH(lpc18xx_data->duty_event),
+                          (u32)val);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_MATCHREL(lpc18xx_data->duty_event),
+                          (u32)val);
+}
+
+static int lpc18xx_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
+                             int duty_ns, int period_ns)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       int requested_events, i;
+
+       if (period_ns < lpc18xx_pwm->min_period_ns ||
+           period_ns > lpc18xx_pwm->max_period_ns) {
+               dev_err(chip->dev, "period %d not in range\n", period_ns);
+               return -ERANGE;
+       }
+
+       mutex_lock(&lpc18xx_pwm->period_lock);
+
+       requested_events = bitmap_weight(&lpc18xx_pwm->event_map,
+                                        LPC18XX_PWM_EVENT_MAX);
+
+       /*
+        * The PWM supports only a single period for all PWM channels.
+        * Once the period is set, it can only be changed if no more than one
+        * channel is requested at that moment.
+        */
+       if (requested_events > 2 && lpc18xx_pwm->period_ns != period_ns &&
+           lpc18xx_pwm->period_ns) {
+               dev_err(chip->dev, "conflicting period requested for PWM %u\n",
+                       pwm->hwpwm);
+               mutex_unlock(&lpc18xx_pwm->period_lock);
+               return -EBUSY;
+       }
+
+       if ((requested_events <= 2 && lpc18xx_pwm->period_ns != period_ns) ||
+           !lpc18xx_pwm->period_ns) {
+               lpc18xx_pwm->period_ns = period_ns;
+               for (i = 0; i < chip->npwm; i++)
+                       pwm_set_period(&chip->pwms[i], period_ns);
+               lpc18xx_pwm_config_period(chip, period_ns);
+       }
+
+       mutex_unlock(&lpc18xx_pwm->period_lock);
+
+       lpc18xx_pwm_config_duty(chip, pwm, duty_ns);
+
+       return 0;
+}
+
+static int lpc18xx_pwm_set_polarity(struct pwm_chip *chip,
+                                   struct pwm_device *pwm,
+                                   enum pwm_polarity polarity)
+{
+       return 0;
+}
+
+static int lpc18xx_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       enum lpc18xx_pwm_res_action res_action;
+       unsigned int set_event, clear_event;
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event),
+                          LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_data->duty_event) |
+                          LPC18XX_PWM_EVCTRL_COMB_MATCH);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_EVSTATEMSK(lpc18xx_data->duty_event),
+                          LPC18XX_PWM_EVSTATEMSK_ALL);
+
+       if (pwm->polarity == PWM_POLARITY_NORMAL) {
+               set_event = lpc18xx_pwm->period_event;
+               clear_event = lpc18xx_data->duty_event;
+               res_action = LPC18XX_PWM_RES_SET;
+       } else {
+               set_event = lpc18xx_data->duty_event;
+               clear_event = lpc18xx_pwm->period_event;
+               res_action = LPC18XX_PWM_RES_CLEAR;
+       }
+
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm),
+                          BIT(set_event));
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm),
+                          BIT(clear_event));
+       lpc18xx_pwm_set_conflict_res(lpc18xx_pwm, pwm, res_action);
+
+       return 0;
+}
+
+static void lpc18xx_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_EVCTRL(lpc18xx_data->duty_event), 0);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTSET(pwm->hwpwm), 0);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_OUTPUTCL(pwm->hwpwm), 0);
+}
+
+static int lpc18xx_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+       unsigned long event;
+
+       event = find_first_zero_bit(&lpc18xx_pwm->event_map,
+                                   LPC18XX_PWM_EVENT_MAX);
+
+       if (event >= LPC18XX_PWM_EVENT_MAX) {
+               dev_err(lpc18xx_pwm->dev,
+                       "maximum number of simultaneous channels reached\n");
+               return -EBUSY;
+       }
+
+       set_bit(event, &lpc18xx_pwm->event_map);
+       lpc18xx_data->duty_event = event;
+       lpc18xx_pwm_config_duty(chip, pwm, pwm_get_duty_cycle(pwm));
+
+       return 0;
+}
+
+static void lpc18xx_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = to_lpc18xx_pwm_chip(chip);
+       struct lpc18xx_pwm_data *lpc18xx_data = pwm_get_chip_data(pwm);
+
+       pwm_disable(pwm);
+       pwm_set_duty_cycle(pwm, 0);
+       clear_bit(lpc18xx_data->duty_event, &lpc18xx_pwm->event_map);
+}
+
+static const struct pwm_ops lpc18xx_pwm_ops = {
+       .config = lpc18xx_pwm_config,
+       .set_polarity = lpc18xx_pwm_set_polarity,
+       .enable = lpc18xx_pwm_enable,
+       .disable = lpc18xx_pwm_disable,
+       .request = lpc18xx_pwm_request,
+       .free = lpc18xx_pwm_free,
+       .owner = THIS_MODULE,
+};
+
+static const struct of_device_id lpc18xx_pwm_of_match[] = {
+       { .compatible = "nxp,lpc1850-sct-pwm" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_pwm_of_match);
+
+static int lpc18xx_pwm_probe(struct platform_device *pdev)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm;
+       struct pwm_device *pwm;
+       struct resource *res;
+       int ret, i;
+       u64 val;
+
+       lpc18xx_pwm = devm_kzalloc(&pdev->dev, sizeof(*lpc18xx_pwm),
+                                  GFP_KERNEL);
+       if (!lpc18xx_pwm)
+               return -ENOMEM;
+
+       lpc18xx_pwm->dev = &pdev->dev;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       lpc18xx_pwm->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(lpc18xx_pwm->base))
+               return PTR_ERR(lpc18xx_pwm->base);
+
+       lpc18xx_pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm");
+       if (IS_ERR(lpc18xx_pwm->pwm_clk)) {
+               dev_err(&pdev->dev, "failed to get pwm clock\n");
+               return PTR_ERR(lpc18xx_pwm->pwm_clk);
+       }
+
+       ret = clk_prepare_enable(lpc18xx_pwm->pwm_clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "could not prepare or enable pwm clock\n");
+               return ret;
+       }
+
+       lpc18xx_pwm->clk_rate = clk_get_rate(lpc18xx_pwm->pwm_clk);
+
+       mutex_init(&lpc18xx_pwm->res_lock);
+       mutex_init(&lpc18xx_pwm->period_lock);
+
+       val = (u64)NSEC_PER_SEC * LPC18XX_PWM_TIMER_MAX;
+       do_div(val, lpc18xx_pwm->clk_rate);
+       lpc18xx_pwm->max_period_ns = val;
+
+       lpc18xx_pwm->min_period_ns = DIV_ROUND_UP(NSEC_PER_SEC,
+                                                 lpc18xx_pwm->clk_rate);
+
+       lpc18xx_pwm->chip.dev = &pdev->dev;
+       lpc18xx_pwm->chip.ops = &lpc18xx_pwm_ops;
+       lpc18xx_pwm->chip.base = -1;
+       lpc18xx_pwm->chip.npwm = 16;
+       lpc18xx_pwm->chip.of_xlate = of_pwm_xlate_with_flags;
+       lpc18xx_pwm->chip.of_pwm_n_cells = 3;
+
+       /* SCT counter must be in unify (32 bit) mode */
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CONFIG,
+                          LPC18XX_PWM_CONFIG_UNIFY);
+
+       /*
+        * Every time the timer counter reaches the period value, the related
+        * event will be triggered and the counter reset to 0.
+        */
+       set_bit(LPC18XX_PWM_EVENT_PERIOD, &lpc18xx_pwm->event_map);
+       lpc18xx_pwm->period_event = LPC18XX_PWM_EVENT_PERIOD;
+
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_EVSTATEMSK(lpc18xx_pwm->period_event),
+                          LPC18XX_PWM_EVSTATEMSK_ALL);
+
+       val = LPC18XX_PWM_EVCTRL_MATCH(lpc18xx_pwm->period_event) |
+             LPC18XX_PWM_EVCTRL_COMB_MATCH;
+       lpc18xx_pwm_writel(lpc18xx_pwm,
+                          LPC18XX_PWM_EVCTRL(lpc18xx_pwm->period_event), val);
+
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_LIMIT,
+                          BIT(lpc18xx_pwm->period_event));
+
+       ret = pwmchip_add(&lpc18xx_pwm->chip);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret);
+               goto disable_pwmclk;
+       }
+
+       for (i = 0; i < lpc18xx_pwm->chip.npwm; i++) {
+               pwm = &lpc18xx_pwm->chip.pwms[i];
+               pwm->chip_data = devm_kzalloc(lpc18xx_pwm->dev,
+                                             sizeof(struct lpc18xx_pwm_data),
+                                             GFP_KERNEL);
+               if (!pwm->chip_data) {
+                       ret = -ENOMEM;
+                       goto remove_pwmchip;
+               }
+       }
+
+       platform_set_drvdata(pdev, lpc18xx_pwm);
+
+       val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+       val &= ~LPC18XX_PWM_BIDIR;
+       val &= ~LPC18XX_PWM_CTRL_HALT;
+       val &= ~LPC18XX_PWM_PRE_MASK;
+       val |= LPC18XX_PWM_PRE(0);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL, val);
+
+       return 0;
+
+remove_pwmchip:
+       pwmchip_remove(&lpc18xx_pwm->chip);
+disable_pwmclk:
+       clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+       return ret;
+}
+
+static int lpc18xx_pwm_remove(struct platform_device *pdev)
+{
+       struct lpc18xx_pwm_chip *lpc18xx_pwm = platform_get_drvdata(pdev);
+       u32 val;
+
+       val = lpc18xx_pwm_readl(lpc18xx_pwm, LPC18XX_PWM_CTRL);
+       lpc18xx_pwm_writel(lpc18xx_pwm, LPC18XX_PWM_CTRL,
+                          val | LPC18XX_PWM_CTRL_HALT);
+
+       clk_disable_unprepare(lpc18xx_pwm->pwm_clk);
+
+       return pwmchip_remove(&lpc18xx_pwm->chip);
+}
+
+static struct platform_driver lpc18xx_pwm_driver = {
+       .driver = {
+               .name = "lpc18xx-sct-pwm",
+               .of_match_table = lpc18xx_pwm_of_match,
+       },
+       .probe = lpc18xx_pwm_probe,
+       .remove = lpc18xx_pwm_remove,
+};
+module_platform_driver(lpc18xx_pwm_driver);
+
+MODULE_AUTHOR("Ariel D'Alessandro <ariel@vanguardiasur.com.ar>");
+MODULE_DESCRIPTION("NXP LPC18xx PWM driver");
+MODULE_LICENSE("GPL v2");
index b430811e14f582a9ad66e4cad3f86fbe4569a073..9a596324ebef8a1bede6324083aefc7315520c44 100644 (file)
@@ -77,7 +77,7 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
         * If the PWM channel is disabled, make sure to turn on the clock
         * before writing the register. Otherwise, keep it enabled.
         */
-       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (!pwm_is_enabled(pwm)) {
                ret = clk_prepare_enable(mxs->clk);
                if (ret)
                        return ret;
@@ -92,7 +92,7 @@ static int mxs_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        /*
         * If the PWM is not enabled, turn the clock off again to save power.
         */
-       if (!test_bit(PWMF_ENABLED, &pwm->flags))
+       if (!pwm_is_enabled(pwm))
                clk_disable_unprepare(mxs->clk);
 
        return 0;
index 34b5c275a92a3ca592c5fee09b8ca45634caea49..70448a6079b02ede68edf1b2242a5e7b049fb207 100644 (file)
@@ -2,6 +2,7 @@
  * Driver for PCA9685 16-channel 12-bit PWM LED controller
  *
  * Copyright (C) 2013 Steffen Trumtrar <s.trumtrar@pengutronix.de>
+ * Copyright (C) 2015 Clemens Gruber <clemens.gruber@pqgruber.com>
  *
  * based on the pwm-twl-led.c driver
  *
 #include <linux/pwm.h>
 #include <linux/regmap.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
+
+/*
+ * Because the PCA9685 has only one prescaler per chip, changing the period of
+ * one channel affects the period of all 16 PWM outputs!
+ * However, the ratio between each configured duty cycle and the chip-wide
+ * period remains constant, because the OFF time is set in proportion to the
+ * counter range.
+ */
 
 #define PCA9685_MODE1          0x00
 #define PCA9685_MODE2          0x01
 #define PCA9685_ALL_LED_OFF_H  0xFD
 #define PCA9685_PRESCALE       0xFE
 
+#define PCA9685_PRESCALE_MIN   0x03    /* => max. frequency of 1526 Hz */
+#define PCA9685_PRESCALE_MAX   0xFF    /* => min. frequency of 24 Hz */
+
+#define PCA9685_COUNTER_RANGE  4096
+#define PCA9685_DEFAULT_PERIOD 5000000 /* Default period_ns = 1/200 Hz */
+#define PCA9685_OSC_CLOCK_MHZ  25      /* Internal oscillator with 25 MHz */
+
 #define PCA9685_NUMREGS                0xFF
 #define PCA9685_MAXCHAN                0x10
 
 #define LED_FULL               (1 << 4)
+#define MODE1_RESTART          (1 << 7)
 #define MODE1_SLEEP            (1 << 4)
 #define MODE2_INVRT            (1 << 4)
 #define MODE2_OUTDRV           (1 << 2)
@@ -59,6 +77,8 @@ struct pca9685 {
        struct pwm_chip chip;
        struct regmap *regmap;
        int active_cnt;
+       int duty_ns;
+       int period_ns;
 };
 
 static inline struct pca9685 *to_pca(struct pwm_chip *chip)
@@ -72,6 +92,47 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        struct pca9685 *pca = to_pca(chip);
        unsigned long long duty;
        unsigned int reg;
+       int prescale;
+
+       if (period_ns != pca->period_ns) {
+               prescale = DIV_ROUND_CLOSEST(PCA9685_OSC_CLOCK_MHZ * period_ns,
+                                            PCA9685_COUNTER_RANGE * 1000) - 1;
+
+               if (prescale >= PCA9685_PRESCALE_MIN &&
+                       prescale <= PCA9685_PRESCALE_MAX) {
+                       /* Put chip into sleep mode */
+                       regmap_update_bits(pca->regmap, PCA9685_MODE1,
+                                          MODE1_SLEEP, MODE1_SLEEP);
+
+                       /* Change the chip-wide output frequency */
+                       regmap_write(pca->regmap, PCA9685_PRESCALE, prescale);
+
+                       /* Wake the chip up */
+                       regmap_update_bits(pca->regmap, PCA9685_MODE1,
+                                          MODE1_SLEEP, 0x0);
+
+                       /* Wait 500us for the oscillator to be back up */
+                       udelay(500);
+
+                       pca->period_ns = period_ns;
+
+                       /*
+                        * If the duty cycle did not change, restart PWM with
+                        * the same duty cycle to period ratio and return.
+                        */
+                       if (duty_ns == pca->duty_ns) {
+                               regmap_update_bits(pca->regmap, PCA9685_MODE1,
+                                                  MODE1_RESTART, 0x1);
+                               return 0;
+                       }
+               } else {
+                       dev_err(chip->dev,
+                               "prescaler not set: period out of bounds!\n");
+                       return -EINVAL;
+               }
+       }
+
+       pca->duty_ns = duty_ns;
 
        if (duty_ns < 1) {
                if (pwm->hwpwm >= PCA9685_MAXCHAN)
@@ -85,6 +146,22 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        }
 
        if (duty_ns == period_ns) {
+               /* Clear both OFF registers */
+               if (pwm->hwpwm >= PCA9685_MAXCHAN)
+                       reg = PCA9685_ALL_LED_OFF_L;
+               else
+                       reg = LED_N_OFF_L(pwm->hwpwm);
+
+               regmap_write(pca->regmap, reg, 0x0);
+
+               if (pwm->hwpwm >= PCA9685_MAXCHAN)
+                       reg = PCA9685_ALL_LED_OFF_H;
+               else
+                       reg = LED_N_OFF_H(pwm->hwpwm);
+
+               regmap_write(pca->regmap, reg, 0x0);
+
+               /* Set the full ON bit */
                if (pwm->hwpwm >= PCA9685_MAXCHAN)
                        reg = PCA9685_ALL_LED_ON_H;
                else
@@ -95,7 +172,7 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                return 0;
        }
 
-       duty = 4096 * (unsigned long long)duty_ns;
+       duty = PCA9685_COUNTER_RANGE * (unsigned long long)duty_ns;
        duty = DIV_ROUND_UP_ULL(duty, period_ns);
 
        if (pwm->hwpwm >= PCA9685_MAXCHAN)
@@ -112,6 +189,14 @@ static int pca9685_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
        regmap_write(pca->regmap, reg, ((int)duty >> 8) & 0xf);
 
+       /* Clear the full ON bit, otherwise the set OFF time has no effect */
+       if (pwm->hwpwm >= PCA9685_MAXCHAN)
+               reg = PCA9685_ALL_LED_ON_H;
+       else
+               reg = LED_N_ON_H(pwm->hwpwm);
+
+       regmap_write(pca->regmap, reg, 0);
+
        return 0;
 }
 
@@ -228,6 +313,8 @@ static int pca9685_pwm_probe(struct i2c_client *client,
                        ret);
                return ret;
        }
+       pca->duty_ns = 0;
+       pca->period_ns = PCA9685_DEFAULT_PERIOD;
 
        i2c_set_clientdata(client, pca);
 
@@ -285,7 +372,6 @@ MODULE_DEVICE_TABLE(of, pca9685_dt_ids);
 static struct i2c_driver pca9685_i2c_driver = {
        .driver = {
                .name = "pca9685-pwm",
-               .owner = THIS_MODULE,
                .of_match_table = pca9685_dt_ids,
        },
        .probe = pca9685_pwm_probe,
index ee63f9e9d0fb752833f5dd596a8c3838d4e93cdb..075c1a764ba293dab3d1e39df05bc4b49f67b1b3 100644 (file)
@@ -301,7 +301,7 @@ static int tpu_pwm_config(struct pwm_chip *chip, struct pwm_device *_pwm,
        pwm->duty = duty;
 
        /* If the channel is disabled we're done. */
-       if (!test_bit(PWMF_ENABLED, &_pwm->flags))
+       if (!pwm_is_enabled(_pwm))
                return 0;
 
        if (duty_only && pwm->timer_on) {
index 9442df244101772259a0ddb6824fe6035c4dd568..7d9cc9049522348dd15951f927ed4649b1cdf364 100644 (file)
@@ -83,7 +83,7 @@ static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
                          PWM_CONTINUOUS;
        u32 val;
 
-       if (pwm->polarity == PWM_POLARITY_INVERSED)
+       if (pwm_get_polarity(pwm) == PWM_POLARITY_INVERSED)
                enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
        else
                enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
index cabd7d8e05cc0fdd79e42da12046d43d5e7032af..d4de0607b502a87c03498f7362b731dd44f06d50 100644 (file)
@@ -112,7 +112,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
         * If the PWM channel is disabled, make sure to turn on the clock
         * before writing the register. Otherwise, keep it enabled.
         */
-       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (!pwm_is_enabled(pwm)) {
                err = clk_prepare_enable(pc->clk);
                if (err < 0)
                        return err;
@@ -124,7 +124,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        /*
         * If the PWM is not enabled, turn the clock off again to save power.
         */
-       if (!test_bit(PWMF_ENABLED, &pwm->flags))
+       if (!pwm_is_enabled(pwm))
                clk_disable_unprepare(pc->clk);
 
        return 0;
@@ -214,7 +214,7 @@ static int tegra_pwm_remove(struct platform_device *pdev)
        for (i = 0; i < NUM_PWM; i++) {
                struct pwm_device *pwm = &pc->chip.pwms[i];
 
-               if (!test_bit(PWMF_ENABLED, &pwm->flags))
+               if (!pwm_is_enabled(pwm))
                        if (clk_prepare_enable(pc->clk) < 0)
                                continue;
 
index e557befdf4e65902c0c87a513bdb6dd479be5177..616af764a27682ed0301e897ab731f5ffe8c5a13 100644 (file)
@@ -97,7 +97,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 
        writew(reg_val, pc->mmio_base + ECCTL2);
 
-       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (!pwm_is_enabled(pwm)) {
                /* Update active registers if not running */
                writel(duty_cycles, pc->mmio_base + CAP2);
                writel(period_cycles, pc->mmio_base + CAP1);
@@ -111,7 +111,7 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
                writel(period_cycles, pc->mmio_base + CAP3);
        }
 
-       if (!test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (!pwm_is_enabled(pwm)) {
                reg_val = readw(pc->mmio_base + ECCTL2);
                /* Disable APWM mode to put APWM output Low */
                reg_val &= ~ECCTL2_APWM_MODE;
@@ -179,7 +179,7 @@ static void ecap_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
 
 static void ecap_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
-       if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (pwm_is_enabled(pwm)) {
                dev_warn(chip->dev, "Removing PWM device without disabling\n");
                pm_runtime_put_sync(chip->dev);
        }
@@ -306,7 +306,7 @@ static int ecap_pwm_suspend(struct device *dev)
        ecap_pwm_save_context(pc);
 
        /* Disable explicitly if PWM is running */
-       if (test_bit(PWMF_ENABLED, &pwm->flags))
+       if (pwm_is_enabled(pwm))
                pm_runtime_put_sync(dev);
 
        return 0;
@@ -318,7 +318,7 @@ static int ecap_pwm_resume(struct device *dev)
        struct pwm_device *pwm = pc->chip.pwms;
 
        /* Enable explicitly if PWM was running */
-       if (test_bit(PWMF_ENABLED, &pwm->flags))
+       if (pwm_is_enabled(pwm))
                pm_runtime_get_sync(dev);
 
        ecap_pwm_restore_context(pc);
index 694b3cf7694be1bc14543d1ff1173a0486d2a4e2..6a41e66015b67fa84865891cbc3e97e12de6539f 100644 (file)
@@ -407,7 +407,7 @@ static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm)
 {
        struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip);
 
-       if (test_bit(PWMF_ENABLED, &pwm->flags)) {
+       if (pwm_is_enabled(pwm)) {
                dev_warn(chip->dev, "Removing PWM device without disabling\n");
                pm_runtime_put_sync(chip->dev);
        }
@@ -565,7 +565,7 @@ static int ehrpwm_pwm_suspend(struct device *dev)
        for (i = 0; i < pc->chip.npwm; i++) {
                struct pwm_device *pwm = &pc->chip.pwms[i];
 
-               if (!test_bit(PWMF_ENABLED, &pwm->flags))
+               if (!pwm_is_enabled(pwm))
                        continue;
 
                /* Disable explicitly if PWM is running */
@@ -582,7 +582,7 @@ static int ehrpwm_pwm_resume(struct device *dev)
        for (i = 0; i < pc->chip.npwm; i++) {
                struct pwm_device *pwm = &pc->chip.pwms[i];
 
-               if (!test_bit(PWMF_ENABLED, &pwm->flags))
+               if (!pwm_is_enabled(pwm))
                        continue;
 
                /* Enable explicitly if PWM was running */
index 4bd0c639e16da9d49598f637c5473ae37e419def..c472772f00a7880611e23e62cfbd1731d04dfa27 100644 (file)
@@ -46,7 +46,7 @@ static ssize_t pwm_period_show(struct device *child,
 {
        const struct pwm_device *pwm = child_to_pwm_device(child);
 
-       return sprintf(buf, "%u\n", pwm->period);
+       return sprintf(buf, "%u\n", pwm_get_period(pwm));
 }
 
 static ssize_t pwm_period_store(struct device *child,
@@ -61,7 +61,7 @@ static ssize_t pwm_period_store(struct device *child,
        if (ret)
                return ret;
 
-       ret = pwm_config(pwm, pwm->duty_cycle, val);
+       ret = pwm_config(pwm, pwm_get_duty_cycle(pwm), val);
 
        return ret ? : size;
 }
@@ -72,7 +72,7 @@ static ssize_t pwm_duty_cycle_show(struct device *child,
 {
        const struct pwm_device *pwm = child_to_pwm_device(child);
 
-       return sprintf(buf, "%u\n", pwm->duty_cycle);
+       return sprintf(buf, "%u\n", pwm_get_duty_cycle(pwm));
 }
 
 static ssize_t pwm_duty_cycle_store(struct device *child,
@@ -87,7 +87,7 @@ static ssize_t pwm_duty_cycle_store(struct device *child,
        if (ret)
                return ret;
 
-       ret = pwm_config(pwm, val, pwm->period);
+       ret = pwm_config(pwm, val, pwm_get_period(pwm));
 
        return ret ? : size;
 }
@@ -97,7 +97,7 @@ static ssize_t pwm_enable_show(struct device *child,
                               char *buf)
 {
        const struct pwm_device *pwm = child_to_pwm_device(child);
-       int enabled = test_bit(PWMF_ENABLED, &pwm->flags);
+       int enabled = pwm_is_enabled(pwm);
 
        return sprintf(buf, "%d\n", enabled);
 }
@@ -133,8 +133,19 @@ static ssize_t pwm_polarity_show(struct device *child,
                                 char *buf)
 {
        const struct pwm_device *pwm = child_to_pwm_device(child);
+       const char *polarity = "unknown";
 
-       return sprintf(buf, "%s\n", pwm->polarity ? "inversed" : "normal");
+       switch (pwm_get_polarity(pwm)) {
+       case PWM_POLARITY_NORMAL:
+               polarity = "normal";
+               break;
+
+       case PWM_POLARITY_INVERSED:
+               polarity = "inversed";
+               break;
+       }
+
+       return sprintf(buf, "%s\n", polarity);
 }
 
 static ssize_t pwm_polarity_store(struct device *child,
@@ -301,9 +312,9 @@ static struct attribute *pwm_chip_attrs[] = {
 ATTRIBUTE_GROUPS(pwm_chip);
 
 static struct class pwm_class = {
-       .name           = "pwm",
-       .owner          = THIS_MODULE,
-       .dev_groups     = pwm_chip_groups,
+       .name = "pwm",
+       .owner = THIS_MODULE,
+       .dev_groups = pwm_chip_groups,
 };
 
 static int pwmchip_sysfs_match(struct device *parent, const void *data)
index d2d290413113b753412193c5f6e1c37fda5c3524..9aaf646ece55beddf718fd8ea0ce4b89eac3f478 100644 (file)
@@ -89,6 +89,7 @@ static int ath79_reset_probe(struct platform_device *pdev)
        if (IS_ERR(ath79_reset->base))
                return PTR_ERR(ath79_reset->base);
 
+       spin_lock_init(&ath79_reset->lock);
        ath79_reset->rcdev.ops = &ath79_reset_ops;
        ath79_reset->rcdev.owner = THIS_MODULE;
        ath79_reset->rcdev.of_node = pdev->dev.of_node;
index fad22caf0efffd7551e6203abaeabd99f3ecf3df..9dc8687bf0480e53b2d89431ece47f8504fd5779 100644 (file)
@@ -377,7 +377,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
        unsigned int data_len = scsi_bufflen(sc);
        unsigned int data_grants = 0, seg_grants = 0;
        struct scatterlist *sg;
-       unsigned long mfn;
        struct scsiif_request_segment *seg;
 
        ring_req->nr_segments = 0;
@@ -420,9 +419,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);
 
-                       mfn = pfn_to_mfn(page_to_pfn(page));
                        gnttab_grant_foreign_access_ref(ref,
-                               info->dev->otherend_id, mfn, 1);
+                               info->dev->otherend_id,
+                               xen_page_to_gfn(page), 1);
                        shadow->gref[ref_cnt] = ref;
                        ring_req->seg[ref_cnt].gref   = ref;
                        ring_req->seg[ref_cnt].offset = (uint16_t)off;
@@ -454,9 +453,10 @@ static int map_data_for_request(struct vscsifrnt_info *info,
                        ref = gnttab_claim_grant_reference(&gref_head);
                        BUG_ON(ref == -ENOSPC);
 
-                       mfn = pfn_to_mfn(page_to_pfn(page));
                        gnttab_grant_foreign_access_ref(ref,
-                               info->dev->otherend_id, mfn, grant_ro);
+                               info->dev->otherend_id,
+                               xen_page_to_gfn(page),
+                               grant_ro);
 
                        shadow->gref[ref_cnt] = ref;
                        seg->gref   = ref;
index 327adcf117c18efbd9e454c001a6578d30c2088e..a6155c917d52d03a088a2ccbbd5a25220f392c60 100644 (file)
@@ -96,6 +96,7 @@ static const struct {
  * @smd:               handle to qcom_smd
  * @of_node:           of_node handle for information related to this edge
  * @edge_id:           identifier of this edge
+ * @remote_pid:                identifier of remote processor
  * @irq:               interrupt for signals on this edge
  * @ipc_regmap:                regmap handle holding the outgoing ipc register
  * @ipc_offset:                offset within @ipc_regmap of the register for ipc
@@ -111,6 +112,7 @@ struct qcom_smd_edge {
        struct qcom_smd *smd;
        struct device_node *of_node;
        unsigned edge_id;
+       unsigned remote_pid;
 
        int irq;
 
@@ -310,7 +312,7 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel)
        SET_TX_CHANNEL_INFO(channel, fHEAD, 0);
        SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
        SET_TX_CHANNEL_INFO(channel, fSTATE, 1);
-       SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+       SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
        SET_TX_CHANNEL_INFO(channel, head, 0);
        SET_TX_CHANNEL_INFO(channel, tail, 0);
 
@@ -572,7 +574,7 @@ static irqreturn_t qcom_smd_edge_intr(int irq, void *data)
         * have to scan if the amount of available space in smem have changed
         * since last scan.
         */
-       available = qcom_smem_get_free_space(edge->edge_id);
+       available = qcom_smem_get_free_space(edge->remote_pid);
        if (available != edge->smem_available) {
                edge->smem_available = available;
                edge->need_rescan = true;
@@ -681,7 +683,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
                        goto out;
                }
 
-               SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
+               SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
 
                ret = wait_event_interruptible(channel->fblockread_event,
                                       qcom_smd_get_tx_avail(channel) >= tlen ||
@@ -689,7 +691,7 @@ int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len)
                if (ret)
                        goto out;
 
-               SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 0);
+               SET_TX_CHANNEL_INFO(channel, fBLOCKREADINTR, 1);
        }
 
        SET_TX_CHANNEL_INFO(channel, fTAIL, 0);
@@ -976,7 +978,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
        spin_lock_init(&channel->recv_lock);
        init_waitqueue_head(&channel->fblockread_event);
 
-       ret = qcom_smem_get(edge->edge_id, smem_info_item, (void **)&info, &info_size);
+       ret = qcom_smem_get(edge->remote_pid, smem_info_item, (void **)&info,
+                           &info_size);
        if (ret)
                goto free_name_and_channel;
 
@@ -997,7 +1000,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed
                goto free_name_and_channel;
        }
 
-       ret = qcom_smem_get(edge->edge_id, smem_fifo_item, &fifo_base, &fifo_size);
+       ret = qcom_smem_get(edge->remote_pid, smem_fifo_item, &fifo_base,
+                           &fifo_size);
        if (ret)
                goto free_name_and_channel;
 
@@ -1041,7 +1045,7 @@ static void qcom_discover_channels(struct qcom_smd_edge *edge)
        int i;
 
        for (tbl = 0; tbl < SMD_ALLOC_TBL_COUNT; tbl++) {
-               ret = qcom_smem_get(edge->edge_id,
+               ret = qcom_smem_get(edge->remote_pid,
                                    smem_items[tbl].alloc_tbl_id,
                                    (void **)&alloc_tbl,
                                    NULL);
@@ -1184,6 +1188,10 @@ static int qcom_smd_parse_edge(struct device *dev,
                return -EINVAL;
        }
 
+       edge->remote_pid = QCOM_SMEM_HOST_ANY;
+       key = "qcom,remote-pid";
+       of_property_read_u32(node, key, &edge->remote_pid);
+
        syscon_np = of_parse_phandle(node, "qcom,ipc", 0);
        if (!syscon_np) {
                dev_err(dev, "no qcom,ipc node\n");
index 7c2c324c4b10cee9a1430c7c68b2c079ea7e7ce7..52365188a1c20288a754dc7e1a530e4b25570ac3 100644 (file)
@@ -258,10 +258,6 @@ static int qcom_smem_alloc_private(struct qcom_smem *smem,
        size_t alloc_size;
        void *p;
 
-       /* We're not going to find it if there's no matching partition */
-       if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
-               return -ENOENT;
-
        phdr = smem->partitions[host];
 
        p = (void *)phdr + sizeof(*phdr);
@@ -371,8 +367,9 @@ int qcom_smem_alloc(unsigned host, unsigned item, size_t size)
        if (ret)
                return ret;
 
-       ret = qcom_smem_alloc_private(__smem, host, item, size);
-       if (ret == -ENOENT)
+       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+               ret = qcom_smem_alloc_private(__smem, host, item, size);
+       else
                ret = qcom_smem_alloc_global(__smem, item, size);
 
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
@@ -428,10 +425,6 @@ static int qcom_smem_get_private(struct qcom_smem *smem,
        struct smem_private_entry *hdr;
        void *p;
 
-       /* We're not going to find it if there's no matching partition */
-       if (host >= SMEM_HOST_COUNT || !smem->partitions[host])
-               return -ENOENT;
-
        phdr = smem->partitions[host];
 
        p = (void *)phdr + sizeof(*phdr);
@@ -484,8 +477,9 @@ int qcom_smem_get(unsigned host, unsigned item, void **ptr, size_t *size)
        if (ret)
                return ret;
 
-       ret = qcom_smem_get_private(__smem, host, item, ptr, size);
-       if (ret == -ENOENT)
+       if (host < SMEM_HOST_COUNT && __smem->partitions[host])
+               ret = qcom_smem_get_private(__smem, host, item, ptr, size);
+       else
                ret = qcom_smem_get_global(__smem, item, ptr, size);
 
        hwspin_unlock_irqrestore(__smem->hwlock, &flags);
index a9d837f83ce832539a442643f10ec4221d0fa117..10beb1589d8340fb0d0c5965a53cca5abf0b75f7 100644 (file)
@@ -200,7 +200,7 @@ static int xen_hvm_console_init(void)
 {
        int r;
        uint64_t v = 0;
-       unsigned long mfn;
+       unsigned long gfn;
        struct xencons_info *info;
 
        if (!xen_hvm_domain())
@@ -217,7 +217,7 @@ static int xen_hvm_console_init(void)
        }
        /*
         * If the toolstack (or the hypervisor) hasn't set these values, the
-        * default value is 0. Even though mfn = 0 and evtchn = 0 are
+        * default value is 0. Even though gfn = 0 and evtchn = 0 are
         * theoretically correct values, in practice they never are and they
         * mean that a legacy toolstack hasn't initialized the pv console correctly.
         */
@@ -229,8 +229,8 @@ static int xen_hvm_console_init(void)
        r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
        if (r < 0 || v == 0)
                goto err;
-       mfn = v;
-       info->intf = xen_remap(mfn << PAGE_SHIFT, PAGE_SIZE);
+       gfn = v;
+       info->intf = xen_remap(gfn << PAGE_SHIFT, PAGE_SIZE);
        if (info->intf == NULL)
                goto err;
        info->vtermno = HVC_COOKIE;
@@ -265,7 +265,8 @@ static int xen_pv_console_init(void)
                return 0;
        }
        info->evtchn = xen_start_info->console.domU.evtchn;
-       info->intf = mfn_to_virt(xen_start_info->console.domU.mfn);
+       /* GFN == MFN for PV guest */
+       info->intf = gfn_to_virt(xen_start_info->console.domU.mfn);
        info->vtermno = HVC_COOKIE;
 
        spin_lock(&xencons_lock);
@@ -374,7 +375,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
        int ret, evtchn, devid, ref, irq;
        struct xenbus_transaction xbt;
        grant_ref_t gref_head;
-       unsigned long mfn;
 
        ret = xenbus_alloc_evtchn(dev, &evtchn);
        if (ret)
@@ -389,10 +389,6 @@ static int xencons_connect_backend(struct xenbus_device *dev,
                        irq, &domU_hvc_ops, 256);
        if (IS_ERR(info->hvc))
                return PTR_ERR(info->hvc);
-       if (xen_pv_domain())
-               mfn = virt_to_mfn(info->intf);
-       else
-               mfn = __pa(info->intf) >> PAGE_SHIFT;
        ret = gnttab_alloc_grant_references(1, &gref_head);
        if (ret < 0)
                return ret;
@@ -401,7 +397,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
        if (ref < 0)
                return ref;
        gnttab_grant_foreign_access_ref(ref, info->xbdev->otherend_id,
-                       mfn, 0);
+                                       virt_to_gfn(info->intf), 0);
 
  again:
        ret = xenbus_transaction_start(&xbt);
index ed299b9e63752b4ec9d319a7e989fe3307433a77..687b1ea294b79bbf999c4c86747ee2df7ba30df5 100644 (file)
@@ -47,12 +47,12 @@ config SERIAL_AMBA_PL010_CONSOLE
 
 config SERIAL_AMBA_PL011
        tristate "ARM AMBA PL011 serial port support"
-       depends on ARM_AMBA || SOC_ZX296702
+       depends on ARM_AMBA
        select SERIAL_CORE
        help
          This selects the ARM(R) AMBA(R) PrimeCell PL011 UART.  If you have
          an Integrator/PP2, Integrator/CP or Versatile platform, say Y or M
-         here. Say Y or M if you have SOC_ZX296702.
+         here.
 
          If unsure, say N.
 
index 2af09ab153b6db628c09bf248219a65a0e40387d..fd27e986b1dd3437dfd2560ec11efd8def7bf254 100644 (file)
 /* There is by now at least one vendor with differing details, so handle it */
 struct vendor_data {
        unsigned int            ifls;
-       unsigned int            fr_busy;
-       unsigned int            fr_dsr;
-       unsigned int            fr_cts;
-       unsigned int            fr_ri;
        unsigned int            lcrh_tx;
        unsigned int            lcrh_rx;
-       u16                     *reg_lut;
        bool                    oversampling;
        bool                    dma_threshold;
        bool                    cts_event_workaround;
@@ -90,48 +85,6 @@ struct vendor_data {
        unsigned int (*get_fifosize)(struct amba_device *dev);
 };
 
-/* Max address offset of register in use is 0x48 */
-#define REG_NR         (0x48 >> 2)
-#define IDX(x)         (x >> 2)
-enum reg_idx {
-       REG_DR          = IDX(UART01x_DR),
-       REG_RSR         = IDX(UART01x_RSR),
-       REG_ST_DMAWM    = IDX(ST_UART011_DMAWM),
-       REG_FR          = IDX(UART01x_FR),
-       REG_ST_LCRH_RX  = IDX(ST_UART011_LCRH_RX),
-       REG_ILPR        = IDX(UART01x_ILPR),
-       REG_IBRD        = IDX(UART011_IBRD),
-       REG_FBRD        = IDX(UART011_FBRD),
-       REG_LCRH        = IDX(UART011_LCRH),
-       REG_CR          = IDX(UART011_CR),
-       REG_IFLS        = IDX(UART011_IFLS),
-       REG_IMSC        = IDX(UART011_IMSC),
-       REG_RIS         = IDX(UART011_RIS),
-       REG_MIS         = IDX(UART011_MIS),
-       REG_ICR         = IDX(UART011_ICR),
-       REG_DMACR       = IDX(UART011_DMACR),
-};
-
-static u16 arm_reg[] = {
-       [REG_DR]                = UART01x_DR,
-       [REG_RSR]               = UART01x_RSR,
-       [REG_ST_DMAWM]          = ~0,
-       [REG_FR]                = UART01x_FR,
-       [REG_ST_LCRH_RX]        = ~0,
-       [REG_ILPR]              = UART01x_ILPR,
-       [REG_IBRD]              = UART011_IBRD,
-       [REG_FBRD]              = UART011_FBRD,
-       [REG_LCRH]              = UART011_LCRH,
-       [REG_CR]                = UART011_CR,
-       [REG_IFLS]              = UART011_IFLS,
-       [REG_IMSC]              = UART011_IMSC,
-       [REG_RIS]               = UART011_RIS,
-       [REG_MIS]               = UART011_MIS,
-       [REG_ICR]               = UART011_ICR,
-       [REG_DMACR]             = UART011_DMACR,
-};
-
-#ifdef CONFIG_ARM_AMBA
 static unsigned int get_fifosize_arm(struct amba_device *dev)
 {
        return amba_rev(dev) < 3 ? 16 : 32;
@@ -139,13 +92,8 @@ static unsigned int get_fifosize_arm(struct amba_device *dev)
 
 static struct vendor_data vendor_arm = {
        .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
-       .fr_busy                = UART01x_FR_BUSY,
-       .fr_dsr                 = UART01x_FR_DSR,
-       .fr_cts                 = UART01x_FR_CTS,
-       .fr_ri                  = UART011_FR_RI,
-       .lcrh_tx                = REG_LCRH,
-       .lcrh_rx                = REG_LCRH,
-       .reg_lut                = arm_reg,
+       .lcrh_tx                = UART011_LCRH,
+       .lcrh_rx                = UART011_LCRH,
        .oversampling           = false,
        .dma_threshold          = false,
        .cts_event_workaround   = false,
@@ -153,14 +101,8 @@ static struct vendor_data vendor_arm = {
        .fixed_options          = false,
        .get_fifosize           = get_fifosize_arm,
 };
-#endif
 
 static struct vendor_data vendor_sbsa = {
-       .fr_busy                = UART01x_FR_BUSY,
-       .fr_dsr                 = UART01x_FR_DSR,
-       .fr_cts                 = UART01x_FR_CTS,
-       .fr_ri                  = UART011_FR_RI,
-       .reg_lut                = arm_reg,
        .oversampling           = false,
        .dma_threshold          = false,
        .cts_event_workaround   = false,
@@ -168,26 +110,6 @@ static struct vendor_data vendor_sbsa = {
        .fixed_options          = true,
 };
 
-#ifdef CONFIG_ARM_AMBA
-static u16 st_reg[] = {
-       [REG_DR]                = UART01x_DR,
-       [REG_RSR]               = UART01x_RSR,
-       [REG_ST_DMAWM]          = ST_UART011_DMAWM,
-       [REG_FR]                = UART01x_FR,
-       [REG_ST_LCRH_RX]        = ST_UART011_LCRH_RX,
-       [REG_ILPR]              = UART01x_ILPR,
-       [REG_IBRD]              = UART011_IBRD,
-       [REG_FBRD]              = UART011_FBRD,
-       [REG_LCRH]              = UART011_LCRH,
-       [REG_CR]                = UART011_CR,
-       [REG_IFLS]              = UART011_IFLS,
-       [REG_IMSC]              = UART011_IMSC,
-       [REG_RIS]               = UART011_RIS,
-       [REG_MIS]               = UART011_MIS,
-       [REG_ICR]               = UART011_ICR,
-       [REG_DMACR]             = UART011_DMACR,
-};
-
 static unsigned int get_fifosize_st(struct amba_device *dev)
 {
        return 64;
@@ -195,13 +117,8 @@ static unsigned int get_fifosize_st(struct amba_device *dev)
 
 static struct vendor_data vendor_st = {
        .ifls                   = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
-       .fr_busy                = UART01x_FR_BUSY,
-       .fr_dsr                 = UART01x_FR_DSR,
-       .fr_cts                 = UART01x_FR_CTS,
-       .fr_ri                  = UART011_FR_RI,
-       .lcrh_tx                = REG_LCRH,
-       .lcrh_rx                = REG_ST_LCRH_RX,
-       .reg_lut                = st_reg,
+       .lcrh_tx                = ST_UART011_LCRH_TX,
+       .lcrh_rx                = ST_UART011_LCRH_RX,
        .oversampling           = true,
        .dma_threshold          = true,
        .cts_event_workaround   = true,
@@ -209,43 +126,6 @@ static struct vendor_data vendor_st = {
        .fixed_options          = false,
        .get_fifosize           = get_fifosize_st,
 };
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static u16 zte_reg[] = {
-       [REG_DR]                = ZX_UART01x_DR,
-       [REG_RSR]               = UART01x_RSR,
-       [REG_ST_DMAWM]          = ST_UART011_DMAWM,
-       [REG_FR]                = ZX_UART01x_FR,
-       [REG_ST_LCRH_RX]        = ST_UART011_LCRH_RX,
-       [REG_ILPR]              = UART01x_ILPR,
-       [REG_IBRD]              = UART011_IBRD,
-       [REG_FBRD]              = UART011_FBRD,
-       [REG_LCRH]              = ZX_UART011_LCRH_TX,
-       [REG_CR]                = ZX_UART011_CR,
-       [REG_IFLS]              = ZX_UART011_IFLS,
-       [REG_IMSC]              = ZX_UART011_IMSC,
-       [REG_RIS]               = ZX_UART011_RIS,
-       [REG_MIS]               = ZX_UART011_MIS,
-       [REG_ICR]               = ZX_UART011_ICR,
-       [REG_DMACR]             = ZX_UART011_DMACR,
-};
-
-static struct vendor_data vendor_zte = {
-       .ifls                   = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
-       .fr_busy                = ZX_UART01x_FR_BUSY,
-       .fr_dsr                 = ZX_UART01x_FR_DSR,
-       .fr_cts                 = ZX_UART01x_FR_CTS,
-       .fr_ri                  = ZX_UART011_FR_RI,
-       .lcrh_tx                = REG_LCRH,
-       .lcrh_rx                = REG_ST_LCRH_RX,
-       .reg_lut                = zte_reg,
-       .oversampling           = false,
-       .dma_threshold          = false,
-       .cts_event_workaround   = false,
-       .fixed_options          = false,
-};
-#endif
 
 /* Deals with DMA transactions */
 
@@ -284,15 +164,10 @@ struct uart_amba_port {
        struct uart_port        port;
        struct clk              *clk;
        const struct vendor_data *vendor;
-       u16                     *reg_lut;
        unsigned int            dmacr;          /* dma control reg */
        unsigned int            im;             /* interrupt mask */
        unsigned int            old_status;
        unsigned int            fifosize;       /* vendor-specific */
-       unsigned int            fr_busy;        /* vendor-specific */
-       unsigned int            fr_dsr;         /* vendor-specific */
-       unsigned int            fr_cts;         /* vendor-specific */
-       unsigned int            fr_ri;          /* vendor-specific */
        unsigned int            lcrh_tx;        /* vendor-specific */
        unsigned int            lcrh_rx;        /* vendor-specific */
        unsigned int            old_cr;         /* state during shutdown */
@@ -309,29 +184,6 @@ struct uart_amba_port {
 #endif
 };
 
-static bool is_implemented(struct uart_amba_port *uap, unsigned int reg)
-{
-       return uap->reg_lut[reg] != (u16)~0;
-}
-
-static unsigned int pl011_readw(struct uart_amba_port *uap, int index)
-{
-       WARN_ON(index > REG_NR);
-       return readw_relaxed(uap->port.membase + uap->reg_lut[index]);
-}
-
-static void pl011_writew(struct uart_amba_port *uap, int val, int index)
-{
-       WARN_ON(index > REG_NR);
-       writew_relaxed(val, uap->port.membase + uap->reg_lut[index]);
-}
-
-static void pl011_writeb(struct uart_amba_port *uap, u8 val, int index)
-{
-       WARN_ON(index > REG_NR);
-       writeb_relaxed(val, uap->port.membase + uap->reg_lut[index]);
-}
-
 /*
  * Reads up to 256 characters from the FIFO or until it's empty and
  * inserts them into the TTY layer. Returns the number of characters
@@ -344,12 +196,12 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
        int fifotaken = 0;
 
        while (max_count--) {
-               status = pl011_readw(uap, REG_FR);
+               status = readw(uap->port.membase + UART01x_FR);
                if (status & UART01x_FR_RXFE)
                        break;
 
                /* Take chars from the FIFO and update status */
-               ch = pl011_readw(uap, REG_DR) |
+               ch = readw(uap->port.membase + UART01x_DR) |
                        UART_DUMMY_DR_RX;
                flag = TTY_NORMAL;
                uap->port.icount.rx++;
@@ -432,7 +284,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
        struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
        struct device *dev = uap->port.dev;
        struct dma_slave_config tx_conf = {
-               .dst_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
+               .dst_addr = uap->port.mapbase + UART01x_DR,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                .direction = DMA_MEM_TO_DEV,
                .dst_maxburst = uap->fifosize >> 1,
@@ -487,7 +339,7 @@ static void pl011_dma_probe(struct uart_amba_port *uap)
 
        if (chan) {
                struct dma_slave_config rx_conf = {
-                       .src_addr = uap->port.mapbase + uap->reg_lut[REG_DR],
+                       .src_addr = uap->port.mapbase + UART01x_DR,
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
                        .direction = DMA_DEV_TO_MEM,
                        .src_maxburst = uap->fifosize >> 2,
@@ -586,7 +438,7 @@ static void pl011_dma_tx_callback(void *data)
 
        dmacr = uap->dmacr;
        uap->dmacr = dmacr & ~UART011_TXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 
        /*
         * If TX DMA was disabled, it means that we've stopped the DMA for
@@ -700,7 +552,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
        dma_dev->device_issue_pending(chan);
 
        uap->dmacr |= UART011_TXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmatx.queued = true;
 
        /*
@@ -736,9 +588,9 @@ static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
         */
        if (uap->dmatx.queued) {
                uap->dmacr |= UART011_TXDMAE;
-               pl011_writew(uap, uap->dmacr, REG_DMACR);
+               writew(uap->dmacr, uap->port.membase + UART011_DMACR);
                uap->im &= ~UART011_TXIM;
-               pl011_writew(uap, uap->im, REG_IMSC);
+               writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }
 
@@ -748,7 +600,7 @@ static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
         */
        if (pl011_dma_tx_refill(uap) > 0) {
                uap->im &= ~UART011_TXIM;
-               pl011_writew(uap, uap->im, REG_IMSC);
+               writew(uap->im, uap->port.membase + UART011_IMSC);
                return true;
        }
        return false;
@@ -762,7 +614,7 @@ static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
 {
        if (uap->dmatx.queued) {
                uap->dmacr &= ~UART011_TXDMAE;
-               pl011_writew(uap, uap->dmacr, REG_DMACR);
+               writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
 }
 
@@ -788,12 +640,14 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
                if (!uap->dmatx.queued) {
                        if (pl011_dma_tx_refill(uap) > 0) {
                                uap->im &= ~UART011_TXIM;
-                               pl011_writew(uap, uap->im, REG_IMSC);
+                               writew(uap->im, uap->port.membase +
+                                      UART011_IMSC);
                        } else
                                ret = false;
                } else if (!(uap->dmacr & UART011_TXDMAE)) {
                        uap->dmacr |= UART011_TXDMAE;
-                       pl011_writew(uap, uap->dmacr, REG_DMACR);
+                       writew(uap->dmacr,
+                                      uap->port.membase + UART011_DMACR);
                }
                return ret;
        }
@@ -804,9 +658,9 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
         */
        dmacr = uap->dmacr;
        uap->dmacr &= ~UART011_TXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 
-       if (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF) {
+       if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
                /*
                 * No space in the FIFO, so enable the transmit interrupt
                 * so we know when there is space.  Note that once we've
@@ -815,13 +669,13 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
                return false;
        }
 
-       pl011_writew(uap, uap->port.x_char, REG_DR);
+       writew(uap->port.x_char, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
        uap->port.x_char = 0;
 
        /* Success - restore the DMA state */
        uap->dmacr = dmacr;
-       pl011_writew(uap, dmacr, REG_DMACR);
+       writew(dmacr, uap->port.membase + UART011_DMACR);
 
        return true;
 }
@@ -849,7 +703,7 @@ __acquires(&uap->port.lock)
                             DMA_TO_DEVICE);
                uap->dmatx.queued = false;
                uap->dmacr &= ~UART011_TXDMAE;
-               pl011_writew(uap, uap->dmacr, REG_DMACR);
+               writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        }
 }
 
@@ -889,11 +743,11 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
        dma_async_issue_pending(rxchan);
 
        uap->dmacr |= UART011_RXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = true;
 
        uap->im &= ~UART011_RXIM;
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
 
        return 0;
 }
@@ -951,9 +805,8 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
         */
        if (dma_count == pending && readfifo) {
                /* Clear any error flags */
-               pl011_writew(uap,
-                            UART011_OEIS | UART011_BEIS | UART011_PEIS
-                            | UART011_FEIS, REG_ICR);
+               writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
+                      uap->port.membase + UART011_ICR);
 
                /*
                 * If we read all the DMA'd characters, and we had an
@@ -1001,7 +854,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
 
        /* Disable RX DMA - incoming data will wait in the FIFO */
        uap->dmacr &= ~UART011_RXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        uap->dmarx.running = false;
 
        pending = sgbuf->sg.length - state.residue;
@@ -1021,7 +874,7 @@ static void pl011_dma_rx_irq(struct uart_amba_port *uap)
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
-               pl011_writew(uap, uap->im, REG_IMSC);
+               writew(uap->im, uap->port.membase + UART011_IMSC);
        }
 }
 
@@ -1069,7 +922,7 @@ static void pl011_dma_rx_callback(void *data)
                dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
                        "fall back to interrupt mode\n");
                uap->im |= UART011_RXIM;
-               pl011_writew(uap, uap->im, REG_IMSC);
+               writew(uap->im, uap->port.membase + UART011_IMSC);
        }
 }
 
@@ -1082,7 +935,7 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
 {
        /* FIXME.  Just disable the DMA enable */
        uap->dmacr &= ~UART011_RXDMAE;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 }
 
 /*
@@ -1126,7 +979,7 @@ static void pl011_dma_rx_poll(unsigned long args)
                spin_lock_irqsave(&uap->port.lock, flags);
                pl011_dma_rx_stop(uap);
                uap->im |= UART011_RXIM;
-               pl011_writew(uap, uap->im, REG_IMSC);
+               writew(uap->im, uap->port.membase + UART011_IMSC);
                spin_unlock_irqrestore(&uap->port.lock, flags);
 
                uap->dmarx.running = false;
@@ -1188,7 +1041,7 @@ static void pl011_dma_startup(struct uart_amba_port *uap)
 skip_rx:
        /* Turn on DMA error (RX/TX will be enabled on demand) */
        uap->dmacr |= UART011_DMAONERR;
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
 
        /*
         * ST Micro variants has some specific dma burst threshold
@@ -1196,9 +1049,8 @@ skip_rx:
         * be issued above/below 16 bytes.
         */
        if (uap->vendor->dma_threshold)
-               pl011_writew(uap,
-                            ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
-                            REG_ST_DMAWM);
+               writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
+                              uap->port.membase + ST_UART011_DMAWM);
 
        if (uap->using_rx_dma) {
                if (pl011_dma_rx_trigger_dma(uap))
@@ -1223,12 +1075,12 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
                return;
 
        /* Disable RX and TX DMA */
-       while (pl011_readw(uap, REG_FR) & uap->fr_busy)
+       while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
                barrier();
 
        spin_lock_irq(&uap->port.lock);
        uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
-       pl011_writew(uap, uap->dmacr, REG_DMACR);
+       writew(uap->dmacr, uap->port.membase + UART011_DMACR);
        spin_unlock_irq(&uap->port.lock);
 
        if (uap->using_tx_dma) {
@@ -1329,7 +1181,7 @@ static void pl011_stop_tx(struct uart_port *port)
            container_of(port, struct uart_amba_port, port);
 
        uap->im &= ~UART011_TXIM;
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
        pl011_dma_tx_stop(uap);
 }
 
@@ -1339,7 +1191,7 @@ static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
 static void pl011_start_tx_pio(struct uart_amba_port *uap)
 {
        uap->im |= UART011_TXIM;
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
        pl011_tx_chars(uap, false);
 }
 
@@ -1359,7 +1211,7 @@ static void pl011_stop_rx(struct uart_port *port)
 
        uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
                     UART011_PEIM|UART011_BEIM|UART011_OEIM);
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
 
        pl011_dma_rx_stop(uap);
 }
@@ -1370,7 +1222,7 @@ static void pl011_enable_ms(struct uart_port *port)
            container_of(port, struct uart_amba_port, port);
 
        uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
 }
 
 static void pl011_rx_chars(struct uart_amba_port *uap)
@@ -1390,7 +1242,7 @@ __acquires(&uap->port.lock)
                        dev_dbg(uap->port.dev, "could not trigger RX DMA job "
                                "fall back to interrupt mode again\n");
                        uap->im |= UART011_RXIM;
-                       pl011_writew(uap, uap->im, REG_IMSC);
+                       writew(uap->im, uap->port.membase + UART011_IMSC);
                } else {
 #ifdef CONFIG_DMA_ENGINE
                        /* Start Rx DMA poll */
@@ -1411,10 +1263,10 @@ static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
                          bool from_irq)
 {
        if (unlikely(!from_irq) &&
-           pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+           readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
                return false; /* unable to transmit character */
 
-       pl011_writew(uap, c, REG_DR);
+       writew(c, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
 
        return true;
@@ -1461,7 +1313,7 @@ static void pl011_modem_status(struct uart_amba_port *uap)
 {
        unsigned int status, delta;
 
-       status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
+       status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
 
        delta = status ^ uap->old_status;
        uap->old_status = status;
@@ -1472,11 +1324,11 @@ static void pl011_modem_status(struct uart_amba_port *uap)
        if (delta & UART01x_FR_DCD)
                uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);
 
-       if (delta & uap->fr_dsr)
+       if (delta & UART01x_FR_DSR)
                uap->port.icount.dsr++;
 
-       if (delta & uap->fr_cts)
-               uart_handle_cts_change(&uap->port, status & uap->fr_cts);
+       if (delta & UART01x_FR_CTS)
+               uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);
 
        wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
 }
@@ -1489,15 +1341,15 @@ static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
                return;
 
        /* workaround to make sure that all bits are unlocked.. */
-       pl011_writew(uap, 0x00, REG_ICR);
+       writew(0x00, uap->port.membase + UART011_ICR);
 
        /*
         * WA: introduce 26ns(1 uart clk) delay before W1C;
         * single apb access will incur 2 pclk(133.12Mhz) delay,
         * so add 2 dummy reads
         */
-       dummy_read = pl011_readw(uap, REG_ICR);
-       dummy_read = pl011_readw(uap, REG_ICR);
+       dummy_read = readw(uap->port.membase + UART011_ICR);
+       dummy_read = readw(uap->port.membase + UART011_ICR);
 }
 
 static irqreturn_t pl011_int(int irq, void *dev_id)
@@ -1509,13 +1361,15 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
        int handled = 0;
 
        spin_lock_irqsave(&uap->port.lock, flags);
-       imsc = pl011_readw(uap, REG_IMSC);
-       status = pl011_readw(uap, REG_RIS) & imsc;
+       imsc = readw(uap->port.membase + UART011_IMSC);
+       status = readw(uap->port.membase + UART011_RIS) & imsc;
        if (status) {
                do {
                        check_apply_cts_event_workaround(uap);
-                       pl011_writew(uap, status & ~(UART011_TXIS|UART011_RTIS|
-                                    UART011_RXIS), REG_ICR);
+
+                       writew(status & ~(UART011_TXIS|UART011_RTIS|
+                                         UART011_RXIS),
+                              uap->port.membase + UART011_ICR);
 
                        if (status & (UART011_RTIS|UART011_RXIS)) {
                                if (pl011_dma_rx_running(uap))
@@ -1532,7 +1386,7 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
                        if (pass_counter-- == 0)
                                break;
 
-                       status = pl011_readw(uap, REG_RIS) & imsc;
+                       status = readw(uap->port.membase + UART011_RIS) & imsc;
                } while (status != 0);
                handled = 1;
        }
@@ -1546,8 +1400,8 @@ static unsigned int pl011_tx_empty(struct uart_port *port)
 {
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
-       unsigned int status = pl011_readw(uap, REG_FR);
-       return status & (uap->fr_busy|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
+       unsigned int status = readw(uap->port.membase + UART01x_FR);
+       return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
 }
 
 static unsigned int pl011_get_mctrl(struct uart_port *port)
@@ -1555,16 +1409,16 @@ static unsigned int pl011_get_mctrl(struct uart_port *port)
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
        unsigned int result = 0;
-       unsigned int status = pl011_readw(uap, REG_FR);
+       unsigned int status = readw(uap->port.membase + UART01x_FR);
 
 #define TIOCMBIT(uartbit, tiocmbit)    \
        if (status & uartbit)           \
                result |= tiocmbit
 
        TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
-       TIOCMBIT(uap->fr_dsr, TIOCM_DSR);
-       TIOCMBIT(uap->fr_cts, TIOCM_CTS);
-       TIOCMBIT(uap->fr_ri, TIOCM_RNG);
+       TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
+       TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
+       TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
 #undef TIOCMBIT
        return result;
 }
@@ -1575,7 +1429,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
            container_of(port, struct uart_amba_port, port);
        unsigned int cr;
 
-       cr = pl011_readw(uap, REG_CR);
+       cr = readw(uap->port.membase + UART011_CR);
 
 #define        TIOCMBIT(tiocmbit, uartbit)             \
        if (mctrl & tiocmbit)           \
@@ -1595,7 +1449,7 @@ static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
        }
 #undef TIOCMBIT
 
-       pl011_writew(uap, cr, REG_CR);
+       writew(cr, uap->port.membase + UART011_CR);
 }
 
 static void pl011_break_ctl(struct uart_port *port, int break_state)
@@ -1606,12 +1460,12 @@ static void pl011_break_ctl(struct uart_port *port, int break_state)
        unsigned int lcr_h;
 
        spin_lock_irqsave(&uap->port.lock, flags);
-       lcr_h = pl011_readw(uap, uap->lcrh_tx);
+       lcr_h = readw(uap->port.membase + uap->lcrh_tx);
        if (break_state == -1)
                lcr_h |= UART01x_LCRH_BRK;
        else
                lcr_h &= ~UART01x_LCRH_BRK;
-       pl011_writew(uap, lcr_h, uap->lcrh_tx);
+       writew(lcr_h, uap->port.membase + uap->lcrh_tx);
        spin_unlock_irqrestore(&uap->port.lock, flags);
 }
 
@@ -1621,8 +1475,9 @@ static void pl011_quiesce_irqs(struct uart_port *port)
 {
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
+       unsigned char __iomem *regs = uap->port.membase;
 
-       pl011_writew(uap, pl011_readw(uap, REG_MIS), REG_ICR);
+       writew(readw(regs + UART011_MIS), regs + UART011_ICR);
        /*
         * There is no way to clear TXIM as this is "ready to transmit IRQ", so
         * we simply mask it. start_tx() will unmask it.
@@ -1636,7 +1491,7 @@ static void pl011_quiesce_irqs(struct uart_port *port)
         * (including tx queue), so we're also fine with start_tx()'s caller
         * side.
         */
-       pl011_writew(uap, pl011_readw(uap, REG_IMSC) & ~UART011_TXIM, REG_IMSC);
+       writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
 }
 
 static int pl011_get_poll_char(struct uart_port *port)
@@ -1651,11 +1506,11 @@ static int pl011_get_poll_char(struct uart_port *port)
         */
        pl011_quiesce_irqs(port);
 
-       status = pl011_readw(uap, REG_FR);
+       status = readw(uap->port.membase + UART01x_FR);
        if (status & UART01x_FR_RXFE)
                return NO_POLL_CHAR;
 
-       return pl011_readw(uap, REG_DR);
+       return readw(uap->port.membase + UART01x_DR);
 }
 
 static void pl011_put_poll_char(struct uart_port *port,
@@ -1664,10 +1519,10 @@ static void pl011_put_poll_char(struct uart_port *port,
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
 
-       while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+       while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
                barrier();
 
-       pl011_writew(uap, ch, REG_DR);
+       writew(ch, uap->port.membase + UART01x_DR);
 }
 
 #endif /* CONFIG_CONSOLE_POLL */
@@ -1691,15 +1546,15 @@ static int pl011_hwinit(struct uart_port *port)
        uap->port.uartclk = clk_get_rate(uap->clk);
 
        /* Clear pending error and receive interrupts */
-       pl011_writew(uap, UART011_OEIS | UART011_BEIS | UART011_PEIS |
-                    UART011_FEIS | UART011_RTIS | UART011_RXIS, REG_ICR);
+       writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
+              UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
 
        /*
         * Save interrupts enable mask, and enable RX interrupts in case if
         * the interrupt is used for NMI entry.
         */
-       uap->im = pl011_readw(uap, REG_IMSC);
-       pl011_writew(uap, UART011_RTIM | UART011_RXIM, REG_IMSC);
+       uap->im = readw(uap->port.membase + UART011_IMSC);
+       writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
 
        if (dev_get_platdata(uap->port.dev)) {
                struct amba_pl011_data *plat;
@@ -1713,22 +1568,22 @@ static int pl011_hwinit(struct uart_port *port)
 
 static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
 {
-       pl011_writew(uap, lcr_h, uap->lcrh_rx);
-       if (is_implemented(uap, REG_ST_LCRH_RX)) {
+       writew(lcr_h, uap->port.membase + uap->lcrh_rx);
+       if (uap->lcrh_rx != uap->lcrh_tx) {
                int i;
                /*
                 * Wait 10 PCLKs before writing LCRH_TX register,
                 * to get this delay write read only register 10 times
                 */
                for (i = 0; i < 10; ++i)
-                       pl011_writew(uap, 0xff, REG_MIS);
-               pl011_writew(uap, lcr_h, uap->lcrh_tx);
+                       writew(0xff, uap->port.membase + UART011_MIS);
+               writew(lcr_h, uap->port.membase + uap->lcrh_tx);
        }
 }
 
 static int pl011_allocate_irq(struct uart_amba_port *uap)
 {
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
 
        return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
 }
@@ -1743,11 +1598,12 @@ static void pl011_enable_interrupts(struct uart_amba_port *uap)
        spin_lock_irq(&uap->port.lock);
 
        /* Clear out any spuriously appearing RX interrupts */
-       pl011_writew(uap, UART011_RTIS | UART011_RXIS, REG_ICR);
+       writew(UART011_RTIS | UART011_RXIS,
+              uap->port.membase + UART011_ICR);
        uap->im = UART011_RTIM;
        if (!pl011_dma_rx_running(uap))
                uap->im |= UART011_RXIM;
-       pl011_writew(uap, uap->im, REG_IMSC);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
        spin_unlock_irq(&uap->port.lock);
 }
 
@@ -1766,21 +1622,21 @@ static int pl011_startup(struct uart_port *port)
        if (retval)
                goto clk_dis;
 
-       pl011_writew(uap, uap->vendor->ifls, REG_IFLS);
+       writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
 
        spin_lock_irq(&uap->port.lock);
 
        /* restore RTS and DTR */
        cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
        cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
-       pl011_writew(uap, cr, REG_CR);
+       writew(cr, uap->port.membase + UART011_CR);
 
        spin_unlock_irq(&uap->port.lock);
 
        /*
         * initialise the old status of the modem signals
         */
-       uap->old_status = pl011_readw(uap, REG_FR) & UART01x_FR_MODEM_ANY;
+       uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;
 
        /* Startup DMA */
        pl011_dma_startup(uap);
@@ -1819,11 +1675,11 @@ static int sbsa_uart_startup(struct uart_port *port)
 static void pl011_shutdown_channel(struct uart_amba_port *uap,
                                        unsigned int lcrh)
 {
-       unsigned long val;
+      unsigned long val;
 
-       val = pl011_readw(uap, lcrh);
-       val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
-       pl011_writew(uap, val, lcrh);
+      val = readw(uap->port.membase + lcrh);
+      val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
+      writew(val, uap->port.membase + lcrh);
 }
 
 /*
@@ -1837,18 +1693,18 @@ static void pl011_disable_uart(struct uart_amba_port *uap)
 
        uap->autorts = false;
        spin_lock_irq(&uap->port.lock);
-       cr = pl011_readw(uap, REG_CR);
+       cr = readw(uap->port.membase + UART011_CR);
        uap->old_cr = cr;
        cr &= UART011_CR_RTS | UART011_CR_DTR;
        cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
-       pl011_writew(uap, cr, REG_CR);
+       writew(cr, uap->port.membase + UART011_CR);
        spin_unlock_irq(&uap->port.lock);
 
        /*
         * disable break condition and fifos
         */
        pl011_shutdown_channel(uap, uap->lcrh_rx);
-       if (is_implemented(uap, REG_ST_LCRH_RX))
+       if (uap->lcrh_rx != uap->lcrh_tx)
                pl011_shutdown_channel(uap, uap->lcrh_tx);
 }
 
@@ -1858,8 +1714,8 @@ static void pl011_disable_interrupts(struct uart_amba_port *uap)
 
        /* mask all interrupts and clear all pending ones */
        uap->im = 0;
-       pl011_writew(uap, uap->im, REG_IMSC);
-       pl011_writew(uap, 0xffff, REG_ICR);
+       writew(uap->im, uap->port.membase + UART011_IMSC);
+       writew(0xffff, uap->port.membase + UART011_ICR);
 
        spin_unlock_irq(&uap->port.lock);
 }
@@ -2011,8 +1867,8 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                pl011_enable_ms(port);
 
        /* first, disable everything */
-       old_cr = pl011_readw(uap, REG_CR);
-       pl011_writew(uap, 0, REG_CR);
+       old_cr = readw(port->membase + UART011_CR);
+       writew(0, port->membase + UART011_CR);
 
        if (termios->c_cflag & CRTSCTS) {
                if (old_cr & UART011_CR_RTS)
@@ -2045,17 +1901,17 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
                        quot -= 2;
        }
        /* Set baud rate */
-       pl011_writew(uap, quot & 0x3f, REG_FBRD);
-       pl011_writew(uap, quot >> 6, REG_IBRD);
+       writew(quot & 0x3f, port->membase + UART011_FBRD);
+       writew(quot >> 6, port->membase + UART011_IBRD);
 
        /*
         * ----------v----------v----------v----------v-----
         * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
-        * REG_FBRD & REG_IBRD.
+        * UART011_FBRD & UART011_IBRD.
         * ----------^----------^----------^----------^-----
         */
        pl011_write_lcr_h(uap, lcr_h);
-       pl011_writew(uap, old_cr, REG_CR);
+       writew(old_cr, port->membase + UART011_CR);
 
        spin_unlock_irqrestore(&port->lock, flags);
 }
@@ -2196,9 +2052,9 @@ static void pl011_console_putchar(struct uart_port *port, int ch)
        struct uart_amba_port *uap =
            container_of(port, struct uart_amba_port, port);
 
-       while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+       while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
                barrier();
-       pl011_writew(uap, ch, REG_DR);
+       writew(ch, uap->port.membase + UART01x_DR);
 }
 
 static void
@@ -2223,10 +2079,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
         *      First save the CR then disable the interrupts
         */
        if (!uap->vendor->always_enabled) {
-               old_cr = pl011_readw(uap, REG_CR);
+               old_cr = readw(uap->port.membase + UART011_CR);
                new_cr = old_cr & ~UART011_CR_CTSEN;
                new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
-               pl011_writew(uap, new_cr, REG_CR);
+               writew(new_cr, uap->port.membase + UART011_CR);
        }
 
        uart_console_write(&uap->port, s, count, pl011_console_putchar);
@@ -2236,10 +2092,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
         *      and restore the TCR
         */
        do {
-               status = pl011_readw(uap, REG_FR);
-       } while (status & uap->fr_busy);
+               status = readw(uap->port.membase + UART01x_FR);
+       } while (status & UART01x_FR_BUSY);
        if (!uap->vendor->always_enabled)
-               pl011_writew(uap, old_cr, REG_CR);
+               writew(old_cr, uap->port.membase + UART011_CR);
 
        if (locked)
                spin_unlock(&uap->port.lock);
@@ -2252,10 +2108,10 @@ static void __init
 pl011_console_get_options(struct uart_amba_port *uap, int *baud,
                             int *parity, int *bits)
 {
-       if (pl011_readw(uap, REG_CR) & UART01x_CR_UARTEN) {
+       if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
                unsigned int lcr_h, ibrd, fbrd;
 
-               lcr_h = pl011_readw(uap, uap->lcrh_tx);
+               lcr_h = readw(uap->port.membase + uap->lcrh_tx);
 
                *parity = 'n';
                if (lcr_h & UART01x_LCRH_PEN) {
@@ -2270,13 +2126,13 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
                else
                        *bits = 8;
 
-               ibrd = pl011_readw(uap, REG_IBRD);
-               fbrd = pl011_readw(uap, REG_FBRD);
+               ibrd = readw(uap->port.membase + UART011_IBRD);
+               fbrd = readw(uap->port.membase + UART011_FBRD);
 
                *baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);
 
                if (uap->vendor->oversampling) {
-                       if (pl011_readw(uap, REG_CR)
+                       if (readw(uap->port.membase + UART011_CR)
                                  & ST_UART011_CR_OVSFACT)
                                *baud *= 2;
                }
@@ -2348,13 +2204,10 @@ static struct console amba_console = {
 
 static void pl011_putc(struct uart_port *port, int c)
 {
-       struct uart_amba_port *uap =
-           container_of(port, struct uart_amba_port, port);
-
-       while (pl011_readw(uap, REG_FR) & UART01x_FR_TXFF)
+       while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
                ;
-       pl011_writeb(uap, c, REG_DR);
-       while (pl011_readw(uap, REG_FR) & uap->fr_busy)
+       writeb(c, port->membase + UART01x_DR);
+       while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
                ;
 }
 
@@ -2481,8 +2334,8 @@ static int pl011_register_port(struct uart_amba_port *uap)
        int ret;
 
        /* Ensure interrupts from this UART are masked and cleared */
-       pl011_writew(uap, 0, REG_IMSC);
-       pl011_writew(uap, 0xffff, REG_ICR);
+       writew(0, uap->port.membase + UART011_IMSC);
+       writew(0xffff, uap->port.membase + UART011_ICR);
 
        if (!amba_reg.state) {
                ret = uart_register_driver(&amba_reg);
@@ -2500,7 +2353,6 @@ static int pl011_register_port(struct uart_amba_port *uap)
        return ret;
 }
 
-#ifdef CONFIG_ARM_AMBA
 static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 {
        struct uart_amba_port *uap;
@@ -2521,13 +2373,8 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
                return PTR_ERR(uap->clk);
 
        uap->vendor = vendor;
-       uap->reg_lut = vendor->reg_lut;
        uap->lcrh_rx = vendor->lcrh_rx;
        uap->lcrh_tx = vendor->lcrh_tx;
-       uap->fr_busy = vendor->fr_busy;
-       uap->fr_dsr = vendor->fr_dsr;
-       uap->fr_cts = vendor->fr_cts;
-       uap->fr_ri = vendor->fr_ri;
        uap->fifosize = vendor->get_fifosize(dev);
        uap->port.irq = dev->irq[0];
        uap->port.ops = &amba_pl011_pops;
@@ -2551,67 +2398,6 @@ static int pl011_remove(struct amba_device *dev)
        pl011_unregister_port(uap);
        return 0;
 }
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static int zx_uart_probe(struct platform_device *pdev)
-{
-       struct uart_amba_port *uap;
-       struct vendor_data *vendor = &vendor_zte;
-       struct resource *res;
-       int portnr, ret;
-
-       portnr = pl011_find_free_port();
-       if (portnr < 0)
-               return portnr;
-
-       uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
-                       GFP_KERNEL);
-       if (!uap) {
-               ret = -ENOMEM;
-               goto out;
-       }
-
-       uap->clk = devm_clk_get(&pdev->dev, NULL);
-       if (IS_ERR(uap->clk)) {
-               ret = PTR_ERR(uap->clk);
-               goto out;
-       }
-
-       uap->vendor     = vendor;
-       uap->reg_lut    = vendor->reg_lut;
-       uap->lcrh_rx    = vendor->lcrh_rx;
-       uap->lcrh_tx    = vendor->lcrh_tx;
-       uap->fr_busy    = vendor->fr_busy;
-       uap->fr_dsr     = vendor->fr_dsr;
-       uap->fr_cts     = vendor->fr_cts;
-       uap->fr_ri      = vendor->fr_ri;
-       uap->fifosize   = 16;
-       uap->port.irq   = platform_get_irq(pdev, 0);
-       uap->port.ops   = &amba_pl011_pops;
-
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
-       ret = pl011_setup_port(&pdev->dev, uap, res, portnr);
-       if (ret)
-               return ret;
-
-       platform_set_drvdata(pdev, uap);
-
-       return pl011_register_port(uap);
-out:
-       return ret;
-}
-
-static int zx_uart_remove(struct platform_device *pdev)
-{
-       struct uart_amba_port *uap = platform_get_drvdata(pdev);
-
-       uart_remove_one_port(&amba_reg, &uap->port);
-       pl011_unregister_port(uap);
-       return 0;
-}
-#endif
 
 #ifdef CONFIG_PM_SLEEP
 static int pl011_suspend(struct device *dev)
@@ -2668,11 +2454,6 @@ static int sbsa_uart_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        uap->vendor     = &vendor_sbsa;
-       uap->reg_lut    = vendor_sbsa.reg_lut;
-       uap->fr_busy    = vendor_sbsa.fr_busy;
-       uap->fr_dsr     = vendor_sbsa.fr_dsr;
-       uap->fr_cts     = vendor_sbsa.fr_cts;
-       uap->fr_ri      = vendor_sbsa.fr_ri;
        uap->fifosize   = 32;
        uap->port.irq   = platform_get_irq(pdev, 0);
        uap->port.ops   = &sbsa_uart_pops;
@@ -2722,7 +2503,6 @@ static struct platform_driver arm_sbsa_uart_platform_driver = {
        },
 };
 
-#ifdef CONFIG_ARM_AMBA
 static struct amba_id pl011_ids[] = {
        {
                .id     = 0x00041011,
@@ -2748,57 +2528,20 @@ static struct amba_driver pl011_driver = {
        .probe          = pl011_probe,
        .remove         = pl011_remove,
 };
-#endif
-
-#ifdef CONFIG_SOC_ZX296702
-static const struct of_device_id zx_uart_dt_ids[] = {
-       { .compatible = "zte,zx296702-uart", },
-       { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, zx_uart_dt_ids);
-
-static struct platform_driver zx_uart_driver = {
-       .driver = {
-               .name   = "zx-uart",
-               .owner  = THIS_MODULE,
-               .pm     = &pl011_dev_pm_ops,
-               .of_match_table = zx_uart_dt_ids,
-       },
-       .probe          = zx_uart_probe,
-       .remove         = zx_uart_remove,
-};
-#endif
-
 
 static int __init pl011_init(void)
 {
-       int ret;
        printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
 
        if (platform_driver_register(&arm_sbsa_uart_platform_driver))
                pr_warn("could not register SBSA UART platform driver\n");
-
-#ifdef CONFIG_SOC_ZX296702
-       ret = platform_driver_register(&zx_uart_driver);
-       if (ret)
-               pr_warn("could not register ZX UART platform driver\n");
-#endif
-
-#ifdef CONFIG_ARM_AMBA
-       ret = amba_driver_register(&pl011_driver);
-#endif
-       return ret;
+       return amba_driver_register(&pl011_driver);
 }
 
 static void __exit pl011_exit(void)
 {
        platform_driver_unregister(&arm_sbsa_uart_platform_driver);
-#ifdef CONFIG_SOC_ZX296702
-       platform_driver_unregister(&zx_uart_driver);
-#endif
-#ifdef CONFIG_ARM_AMBA
        amba_driver_unregister(&pl011_driver);
-#endif
 }
 
 /*
index 09dc44736c1ac72160f5d9aa4d13ab1912151be9..0567d517eed34b2993a2bdf6df963ab55564297a 100644 (file)
@@ -46,7 +46,7 @@ struct xenfb_info {
        int                     nr_pages;
        int                     irq;
        struct xenfb_page       *page;
-       unsigned long           *mfns;
+       unsigned long           *gfns;
        int                     update_wanted; /* XENFB_TYPE_UPDATE wanted */
        int                     feature_resize; /* XENFB_TYPE_RESIZE ok */
        struct xenfb_resize     resize;         /* protected by resize_lock */
@@ -402,8 +402,8 @@ static int xenfb_probe(struct xenbus_device *dev,
 
        info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-       info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
-       if (!info->mfns)
+       info->gfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
+       if (!info->gfns)
                goto error_nomem;
 
        /* set up shared page */
@@ -530,29 +530,29 @@ static int xenfb_remove(struct xenbus_device *dev)
                framebuffer_release(info->fb_info);
        }
        free_page((unsigned long)info->page);
-       vfree(info->mfns);
+       vfree(info->gfns);
        vfree(info->fb);
        kfree(info);
 
        return 0;
 }
 
-static unsigned long vmalloc_to_mfn(void *address)
+static unsigned long vmalloc_to_gfn(void *address)
 {
-       return pfn_to_mfn(vmalloc_to_pfn(address));
+       return xen_page_to_gfn(vmalloc_to_page(address));
 }
 
 static void xenfb_init_shared_page(struct xenfb_info *info,
                                   struct fb_info *fb_info)
 {
        int i;
-       int epd = PAGE_SIZE / sizeof(info->mfns[0]);
+       int epd = PAGE_SIZE / sizeof(info->gfns[0]);
 
        for (i = 0; i < info->nr_pages; i++)
-               info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
+               info->gfns[i] = vmalloc_to_gfn(info->fb + i * PAGE_SIZE);
 
        for (i = 0; i * epd < info->nr_pages; i++)
-               info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
+               info->page->pd[i] = vmalloc_to_gfn(&info->gfns[i * epd]);
 
        info->page->width = fb_info->var.xres;
        info->page->height = fb_info->var.yres;
@@ -586,7 +586,7 @@ static int xenfb_connect_backend(struct xenbus_device *dev,
                goto unbind_irq;
        }
        ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
-                           virt_to_mfn(info->page));
+                           virt_to_gfn(info->page));
        if (ret)
                goto error_xenbus;
        ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
index 82e80e034f250b88993af25bb27434c2a78f8204..7efc32945810e8fdcafa76a6328517f35d65ea3c 100644 (file)
@@ -157,7 +157,9 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
                }
                set_page_pfns(vb->pfns + vb->num_pfns, page);
                vb->num_pages += VIRTIO_BALLOON_PAGES_PER_PAGE;
-               adjust_managed_page_count(page, -1);
+               if (!virtio_has_feature(vb->vdev,
+                                       VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+                       adjust_managed_page_count(page, -1);
        }
 
        /* Did we get any? */
@@ -166,14 +168,16 @@ static void fill_balloon(struct virtio_balloon *vb, size_t num)
        mutex_unlock(&vb->balloon_lock);
 }
 
-static void release_pages_by_pfn(const u32 pfns[], unsigned int num)
+static void release_pages_balloon(struct virtio_balloon *vb)
 {
        unsigned int i;
 
        /* Find pfns pointing at start of each page, get pages and free them. */
-       for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
-               struct page *page = balloon_pfn_to_page(pfns[i]);
-               adjust_managed_page_count(page, 1);
+       for (i = 0; i < vb->num_pfns; i += VIRTIO_BALLOON_PAGES_PER_PAGE) {
+               struct page *page = balloon_pfn_to_page(vb->pfns[i]);
+               if (!virtio_has_feature(vb->vdev,
+                                       VIRTIO_BALLOON_F_DEFLATE_ON_OOM))
+                       adjust_managed_page_count(page, 1);
                put_page(page); /* balloon reference */
        }
 }
@@ -206,7 +210,7 @@ static unsigned leak_balloon(struct virtio_balloon *vb, size_t num)
        if (vb->num_pfns != 0)
                tell_host(vb, vb->deflate_vq);
        mutex_unlock(&vb->balloon_lock);
-       release_pages_by_pfn(vb->pfns, vb->num_pfns);
+       release_pages_balloon(vb);
        return num_freed_pages;
 }
 
index 10189b5b627f962cb9a8e9527aae829e27b0aec2..f499d9da72373d04d115caa4b7b4c9e6585ee965 100644 (file)
@@ -58,6 +58,7 @@
 
 #define pr_fmt(fmt) "virtio-mmio: " fmt
 
+#include <linux/acpi.h>
 #include <linux/highmem.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
@@ -732,12 +733,21 @@ static struct of_device_id virtio_mmio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, virtio_mmio_match);
 
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id virtio_mmio_acpi_match[] = {
+       { "LNRO0005", },
+       { }
+};
+MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
+#endif
+
 static struct platform_driver virtio_mmio_driver = {
        .probe          = virtio_mmio_probe,
        .remove         = virtio_mmio_remove,
        .driver         = {
                .name   = "virtio-mmio",
                .of_match_table = virtio_mmio_match,
+               .acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
        },
 };
 
index 1fa633b2d556e6a33a86779fb5d711e19f13839c..c79329fcfa78c722ba00a14f147aab1df2989a1c 100644 (file)
@@ -441,7 +441,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        /* Update direct mapping, invalidate P2M, and add to balloon. */
        for (i = 0; i < nr_pages; i++) {
                pfn = frame_list[i];
-               frame_list[i] = pfn_to_mfn(pfn);
+               frame_list[i] = pfn_to_gfn(pfn);
                page = pfn_to_page(pfn);
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
index 0edb91c0de6bf2b69d6de3ec41d43d38b5d823b5..8ae2fc90e1ea88a27325927e108b777e226c6f13 100644 (file)
@@ -6,10 +6,10 @@
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
                               const struct bio_vec *vec2)
 {
-       unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
-       unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));
+       unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
+       unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
 
        return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-               ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
+               ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
 }
 EXPORT_SYMBOL(xen_biovec_phys_mergeable);
index 68d129019e8f63d5dbf2e5c6992f56e7bd32c7b2..6cd5e65c4aff0824153cf0a2e298e85785cdcef7 100644 (file)
@@ -1688,7 +1688,7 @@ void __init xen_init_IRQ(void)
                struct physdev_pirq_eoi_gmfn eoi_gmfn;
 
                pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
-               eoi_gmfn.gmfn = virt_to_mfn(pirq_eoi_map);
+               eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
                /* TODO: No PVH support for PIRQ EOI */
                if (rc != 0) {
index ed673e1acd6159a3ca34dc10238fef8936e43249..1d4baf56c36bfc5abefb4d0701c0eb92553b7a68 100644 (file)
@@ -111,7 +111,7 @@ static int init_control_block(int cpu,
        for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
                q->head[i] = 0;
 
-       init_control.control_gfn = virt_to_mfn(control_block);
+       init_control.control_gfn = virt_to_gfn(control_block);
        init_control.offset      = 0;
        init_control.vcpu        = cpu;
 
@@ -167,7 +167,7 @@ static int evtchn_fifo_setup(struct irq_info *info)
                /* Mask all events in this page before adding it. */
                init_array_page(array_page);
 
-               expand_array.array_gfn = virt_to_mfn(array_page);
+               expand_array.array_gfn = virt_to_gfn(array_page);
 
                ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array, &expand_array);
                if (ret < 0)
index 696301d9dc91922887eb1b6b54063876488e7e4a..4547a91bca67a1005c95c478aef929d121655d00 100644 (file)
@@ -142,7 +142,8 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
 
                /* Grant foreign access to the page. */
                rc = gnttab_grant_foreign_access(op->domid,
-                       pfn_to_mfn(page_to_pfn(gref->page)), readonly);
+                                                xen_page_to_gfn(gref->page),
+                                                readonly);
                if (rc < 0)
                        goto undo;
                gref_ids[i] = gref->gref_id = rc;
index d10effee9b9eb16d46a0bfea9b108b5f22863be3..e12bd3635f832e7fa5330667fe77f32fe305db4b 100644 (file)
@@ -80,7 +80,7 @@ static int xen_suspend(void *data)
         * is resuming in a new domain.
         */
        si->cancelled = HYPERVISOR_suspend(xen_pv_domain()
-                                           ? virt_to_mfn(xen_start_info)
+                                           ? virt_to_gfn(xen_start_info)
                                            : 0);
 
        xen_arch_post_suspend(si->cancelled);
index 56cb13fcbd0e8c86a5148c8759d0acdbf2be72b3..5e9adac928e694d6701b5c59ef5300144226a0ab 100644 (file)
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
        return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
        unsigned long va;
        struct vm_area_struct *vma;
        domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
        struct privcmd_mmap_entry *msg = data;
-       struct mmap_mfn_state *st = state;
+       struct mmap_gfn_state *st = state;
        struct vm_area_struct *vma = st->vma;
        int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
            ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
                return -EINVAL;
 
-       rc = xen_remap_domain_mfn_range(vma,
+       rc = xen_remap_domain_gfn_range(vma,
                                        msg->va & PAGE_MASK,
                                        msg->mfn, msg->npages,
                                        vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
        struct vm_area_struct *vma;
        int rc;
        LIST_HEAD(pagelist);
-       struct mmap_mfn_state state;
+       struct mmap_gfn_state state;
 
        /* We only support privcmd_ioctl_mmap_batch for auto translated. */
        if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
        rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
                            &pagelist,
-                           mmap_mfn_range, &state);
+                           mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
        int global_error;
        int version;
 
-       /* User-space mfn array to store errors in the second pass for V1. */
-       xen_pfn_t __user *user_mfn;
+       /* User-space gfn array to store errors in the second pass for V1. */
+       xen_pfn_t __user *user_gfn;
        /* User-space int array to store errors in the second pass for V2. */
        int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-       xen_pfn_t *mfnp = data;
+       xen_pfn_t *gfnp = data;
        struct mmap_batch_state *st = state;
        struct vm_area_struct *vma = st->vma;
        struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
                cur_pages = &pages[st->index];
 
        BUG_ON(nr < 0);
-       ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-                                        (int *)mfnp, st->vma->vm_page_prot,
+       ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+                                        (int *)gfnp, st->vma->vm_page_prot,
                                         st->domain, cur_pages);
 
        /* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
        if (st->version == 1) {
                if (err) {
-                       xen_pfn_t mfn;
+                       xen_pfn_t gfn;
 
-                       ret = get_user(mfn, st->user_mfn);
+                       ret = get_user(gfn, st->user_gfn);
                        if (ret < 0)
                                return ret;
                        /*
                         * V1 encodes the error codes in the 32bit top
-                        * nibble of the mfn (with its known
+                        * nibble of the gfn (with its known
                         * limitations vis-a-vis 64 bit callers).
                         */
-                       mfn |= (err == -ENOENT) ?
+                       gfn |= (err == -ENOENT) ?
                                PRIVCMD_MMAPBATCH_PAGED_ERROR :
                                PRIVCMD_MMAPBATCH_MFN_ERROR;
-                       return __put_user(mfn, st->user_mfn++);
+                       return __put_user(gfn, st->user_gfn++);
                } else
-                       st->user_mfn++;
+                       st->user_gfn++;
        } else { /* st->version == 2 */
                if (err)
                        return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
        return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
  * the vma with the page info to use later.
  * Returns: 0 if success, otherwise -errno
  */
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
        if (state.global_error) {
                /* Write back errors in second pass. */
-               state.user_mfn = (xen_pfn_t *)m.arr;
+               state.user_gfn = (xen_pfn_t *)m.arr;
                state.user_err = m.err;
                ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
                                           &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
        if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
                return;
 
-       rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+       rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
        if (rc == 0)
                free_xenballooned_pages(numpgs, pages);
        else
index da1029ef815924c74cbfe6cd7dc8d338840008c8..79bc4933b13e05c220325bb3ad39f7f6c0e243dd 100644 (file)
@@ -82,8 +82,8 @@ static u64 start_dma_addr;
  */
 static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 {
-       unsigned long mfn = pfn_to_mfn(PFN_DOWN(paddr));
-       dma_addr_t dma = (dma_addr_t)mfn << PAGE_SHIFT;
+       unsigned long bfn = pfn_to_bfn(PFN_DOWN(paddr));
+       dma_addr_t dma = (dma_addr_t)bfn << PAGE_SHIFT;
 
        dma |= paddr & ~PAGE_MASK;
 
@@ -92,7 +92,7 @@ static inline dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
 
 static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
 {
-       unsigned long pfn = mfn_to_pfn(PFN_DOWN(baddr));
+       unsigned long pfn = bfn_to_pfn(PFN_DOWN(baddr));
        dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
        phys_addr_t paddr = dma;
 
@@ -110,15 +110,15 @@ static int check_pages_physically_contiguous(unsigned long pfn,
                                             unsigned int offset,
                                             size_t length)
 {
-       unsigned long next_mfn;
+       unsigned long next_bfn;
        int i;
        int nr_pages;
 
-       next_mfn = pfn_to_mfn(pfn);
+       next_bfn = pfn_to_bfn(pfn);
        nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
 
        for (i = 1; i < nr_pages; i++) {
-               if (pfn_to_mfn(++pfn) != ++next_mfn)
+               if (pfn_to_bfn(++pfn) != ++next_bfn)
                        return 0;
        }
        return 1;
@@ -138,8 +138,8 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 
 static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
 {
-       unsigned long mfn = PFN_DOWN(dma_addr);
-       unsigned long pfn = mfn_to_local_pfn(mfn);
+       unsigned long bfn = PFN_DOWN(dma_addr);
+       unsigned long pfn = bfn_to_local_pfn(bfn);
        phys_addr_t paddr;
 
        /* If the address is outside our domain, it CAN
index 239738f944badfa3f12f3d61581ef5cb4d6910d4..945fc43272017cfe05bab171eb8b97e62057244c 100644 (file)
@@ -129,21 +129,17 @@ static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
 /* xen generic tmem ops */
 
 static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
-                            u32 index, unsigned long pfn)
+                            u32 index, struct page *page)
 {
-       unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
        return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
-               gmfn, 0, 0, 0);
+                          xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
-                            u32 index, unsigned long pfn)
+                            u32 index, struct page *page)
 {
-       unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;
-
        return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
-               gmfn, 0, 0, 0);
+                          xen_page_to_gfn(page), 0, 0, 0);
 }
 
 static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
@@ -173,14 +169,13 @@ static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
 {
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
-       unsigned long pfn = page_to_pfn(page);
 
        if (pool < 0)
                return;
        if (ind != index)
                return;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
-       (void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
+       (void)xen_tmem_put_page((u32)pool, oid, ind, page);
 }
 
 static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
@@ -188,7 +183,6 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
 {
        u32 ind = (u32) index;
        struct tmem_oid oid = *(struct tmem_oid *)&key;
-       unsigned long pfn = page_to_pfn(page);
        int ret;
 
        /* translate return values to linux semantics */
@@ -196,7 +190,7 @@ static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
                return -1;
        if (ind != index)
                return -1;
-       ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
+       ret = xen_tmem_get_page((u32)pool, oid, ind, page);
        if (ret == 1)
                return 0;
        else
@@ -287,7 +281,6 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
 {
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
-       unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;
 
@@ -296,7 +289,7 @@ static int tmem_frontswap_store(unsigned type, pgoff_t offset,
        if (ind64 != ind)
                return -1;
        mb(); /* ensure page is quiescent; tmem may address it with an alias */
-       ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+       ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
@@ -313,7 +306,6 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
 {
        u64 ind64 = (u64)offset;
        u32 ind = (u32)offset;
-       unsigned long pfn = page_to_pfn(page);
        int pool = tmem_frontswap_poolid;
        int ret;
 
@@ -321,7 +313,7 @@ static int tmem_frontswap_load(unsigned type, pgoff_t offset,
                return -1;
        if (ind64 != ind)
                return -1;
-       ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
+       ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), page);
        /* translate Xen tmem return values to linux semantics */
        if (ret == 1)
                return 0;
index e30353575d5da11f75e8c927ba53945a23b73d76..2ba09c1195c87c68b47b2b210d6d0eb32c5ae056 100644 (file)
@@ -380,7 +380,7 @@ int xenbus_grant_ring(struct xenbus_device *dev, void *vaddr,
 
        for (i = 0; i < nr_pages; i++) {
                err = gnttab_grant_foreign_access(dev->otherend_id,
-                                                 virt_to_mfn(vaddr), 0);
+                                                 virt_to_gfn(vaddr), 0);
                if (err < 0) {
                        xenbus_dev_fatal(dev, err,
                                         "granting access to ring page");
index b17707ee07d4f3057c1ab529f91dcbfd15203ff9..ee6d9efd7b768c44a40bc81172af685c03cab1ea 100644 (file)
@@ -49,7 +49,7 @@ static long xenbus_alloc(domid_t domid)
                goto out_err;
 
        gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
-                       virt_to_mfn(xen_store_interface), 0 /* writable */);
+                       virt_to_gfn(xen_store_interface), 0 /* writable */);
 
        arg.dom = DOMID_SELF;
        arg.remote_dom = domid;
index 4308fb3cf7c2f717ffd446035f1c30b61f8dacf8..3cbe0556de26625b75f1e62f02d8372555d59b7f 100644 (file)
@@ -75,7 +75,7 @@ EXPORT_SYMBOL_GPL(xen_store_interface);
 enum xenstore_init xen_store_domain_type;
 EXPORT_SYMBOL_GPL(xen_store_domain_type);
 
-static unsigned long xen_store_mfn;
+static unsigned long xen_store_gfn;
 
 static BLOCKING_NOTIFIER_HEAD(xenstore_chain);
 
@@ -711,9 +711,7 @@ static int __init xenstored_local_init(void)
        if (!page)
                goto out_err;
 
-       xen_store_mfn = xen_start_info->store_mfn =
-               pfn_to_mfn(virt_to_phys((void *)page) >>
-                          PAGE_SHIFT);
+       xen_store_gfn = xen_start_info->store_mfn = virt_to_gfn((void *)page);
 
        /* Next allocate a local port which xenstored can bind to */
        alloc_unbound.dom        = DOMID_SELF;
@@ -787,12 +785,12 @@ static int __init xenbus_init(void)
                err = xenstored_local_init();
                if (err)
                        goto out_error;
-               xen_store_interface = mfn_to_virt(xen_store_mfn);
+               xen_store_interface = gfn_to_virt(xen_store_gfn);
                break;
        case XS_PV:
                xen_store_evtchn = xen_start_info->store_evtchn;
-               xen_store_mfn = xen_start_info->store_mfn;
-               xen_store_interface = mfn_to_virt(xen_store_mfn);
+               xen_store_gfn = xen_start_info->store_mfn;
+               xen_store_interface = gfn_to_virt(xen_store_gfn);
                break;
        case XS_HVM:
                err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
@@ -802,9 +800,9 @@ static int __init xenbus_init(void)
                err = hvm_get_parameter(HVM_PARAM_STORE_PFN, &v);
                if (err)
                        goto out_error;
-               xen_store_mfn = (unsigned long)v;
+               xen_store_gfn = (unsigned long)v;
                xen_store_interface =
-                       xen_remap(xen_store_mfn << PAGE_SHIFT, PAGE_SIZE);
+                       xen_remap(xen_store_gfn << PAGE_SHIFT, PAGE_SIZE);
                break;
        default:
                pr_warn("Xenstore state unknown\n");
index 58a5389aec891932f18a81770a7d0a248d6c12c2..cff23872c5a965884b227655d08615ef905b91b6 100644 (file)
@@ -38,8 +38,8 @@
 #include <xen/interface/xen.h>
 #include <xen/interface/memory.h>
 
-/* map fgmfn of domid to lpfn in the current domain */
-static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
+/* map fgfn of domid to lpfn in the current domain */
+static int map_foreign_page(unsigned long lpfn, unsigned long fgfn,
                            unsigned int domid)
 {
        int rc;
@@ -49,7 +49,7 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
                .size = 1,
                .space = XENMAPSPACE_gmfn_foreign,
        };
-       xen_ulong_t idx = fgmfn;
+       xen_ulong_t idx = fgfn;
        xen_pfn_t gpfn = lpfn;
        int err = 0;
 
@@ -62,13 +62,13 @@ static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
 }
 
 struct remap_data {
-       xen_pfn_t *fgmfn; /* foreign domain's gmfn */
+       xen_pfn_t *fgfn; /* foreign domain's gfn */
        pgprot_t prot;
        domid_t  domid;
        struct vm_area_struct *vma;
        int index;
        struct page **pages;
-       struct xen_remap_mfn_info *info;
+       struct xen_remap_gfn_info *info;
        int *err_ptr;
        int mapped;
 };
@@ -82,20 +82,20 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
        pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
        int rc;
 
-       rc = map_foreign_page(pfn, *info->fgmfn, info->domid);
+       rc = map_foreign_page(pfn, *info->fgfn, info->domid);
        *info->err_ptr++ = rc;
        if (!rc) {
                set_pte_at(info->vma->vm_mm, addr, ptep, pte);
                info->mapped++;
        }
-       info->fgmfn++;
+       info->fgfn++;
 
        return 0;
 }
 
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                              unsigned long addr,
-                             xen_pfn_t *mfn, int nr,
+                             xen_pfn_t *gfn, int nr,
                              int *err_ptr, pgprot_t prot,
                              unsigned domid,
                              struct page **pages)
@@ -108,7 +108,7 @@ int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
           x86 PVOPS */
        BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));
 
-       data.fgmfn = mfn;
+       data.fgfn = gfn;
        data.prot  = prot;
        data.domid = domid;
        data.vma   = vma;
diff --git a/fs/cifs/cifs_ioctl.h b/fs/cifs/cifs_ioctl.h
new file mode 100644 (file)
index 0000000..0065256
--- /dev/null
@@ -0,0 +1,42 @@
+/*
+ *   fs/cifs/cifs_ioctl.h
+ *
+ *   Structure definitions for io control for cifs/smb3
+ *
+ *   Copyright (c) 2015 Steve French <steve.french@primarydata.com>
+ *
+ *   This library is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU Lesser General Public License as published
+ *   by the Free Software Foundation; either version 2.1 of the License, or
+ *   (at your option) any later version.
+ *
+ *   This library is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
+ *   the GNU Lesser General Public License for more details.
+ *
+ */
+
+struct smb_mnt_fs_info {
+       __u32   version; /* 0001 */
+       __u16   protocol_id;
+       __u16   tcon_flags;
+       __u32   vol_serial_number;
+       __u32   vol_create_time;
+       __u32   share_caps;
+       __u32   share_flags;
+       __u32   sector_flags;
+       __u32   optimal_sector_size;
+       __u32   max_bytes_chunk;
+       __u32   fs_attributes;
+       __u32   max_path_component;
+       __u32   device_type;
+       __u32   device_characteristics;
+       __u32   maximal_access;
+       __u64   cifs_posix_caps;
+} __packed;
+
+#define CIFS_IOCTL_MAGIC       0xCF
+#define CIFS_IOC_COPYCHUNK_FILE        _IOW(CIFS_IOCTL_MAGIC, 3, int)
+#define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
+#define CIFS_IOC_GET_MNT_INFO _IOR(CIFS_IOCTL_MAGIC, 5, struct smb_mnt_fs_info)
index a782b22904e40b71387d844a6a7879bab8191a88..27aea110e92365e1e91610579369215cc54644ea 100644 (file)
@@ -136,5 +136,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
 extern const struct export_operations cifs_export_ops;
 #endif /* CONFIG_CIFS_NFSD_EXPORT */
 
-#define CIFS_VERSION   "2.06"
+#define CIFS_VERSION   "2.07"
 #endif                         /* _CIFSFS_H */
index 47b030da0781e988c7bcafefd462a5e8b45ff14c..f5b87303ce46d50abab41e6017c33cecb06194bb 100644 (file)
@@ -2245,6 +2245,20 @@ typedef struct {
 #define FILE_DEVICE_VIRTUAL_DISK        0x00000024
 #define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
 
+/* Device Characteristics */
+#define FILE_REMOVABLE_MEDIA                   0x00000001
+#define FILE_READ_ONLY_DEVICE                  0x00000002
+#define FILE_FLOPPY_DISKETTE                   0x00000004
+#define FILE_WRITE_ONCE_MEDIA                  0x00000008
+#define FILE_REMOTE_DEVICE                     0x00000010
+#define FILE_DEVICE_IS_MOUNTED                 0x00000020
+#define FILE_VIRTUAL_VOLUME                    0x00000040
+#define FILE_DEVICE_SECURE_OPEN                        0x00000100
+#define FILE_CHARACTERISTIC_TS_DEVICE          0x00001000
+#define FILE_CHARACTERISTIC_WEBDAV_DEVICE      0x00002000
+#define FILE_PORTABLE_DEVICE                   0x00004000
+#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000
+
 typedef struct {
        __le32 DeviceType;
        __le32 DeviceCharacteristics;
index 672ef35c9f73c59d1f4b566bec2f16b4c74e4a8f..90b4f9f7de660a261b5f93322af59282294953fd 100644 (file)
@@ -696,7 +696,9 @@ cifs_echo_callback(struct mid_q_entry *mid)
 {
        struct TCP_Server_Info *server = mid->callback_data;
 
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(server, 1, CIFS_ECHO_OP);
 }
 
@@ -1572,7 +1574,9 @@ cifs_readv_callback(struct mid_q_entry *mid)
        }
 
        queue_work(cifsiod_wq, &rdata->work);
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(server, 1, 0);
 }
 
@@ -2032,6 +2036,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
 {
        struct cifs_writedata *wdata = mid->callback_data;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+       struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int written;
        WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf;
 
@@ -2068,7 +2073,9 @@ cifs_writev_callback(struct mid_q_entry *mid)
        }
 
        queue_work(cifsiod_wq, &wdata->work);
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(tcon->ses->server, 1, 0);
 }
 
index 49b8b6e41a188b3a832c3e32c482ed7a99091aa1..c63f5227b68181ed772d1a999686a852a23d41e9 100644 (file)
 #include "cifsproto.h"
 #include "cifs_debug.h"
 #include "cifsfs.h"
+#include "cifs_ioctl.h"
 #include <linux/btrfs.h>
 
-#define CIFS_IOCTL_MAGIC       0xCF
-#define CIFS_IOC_COPYCHUNK_FILE        _IOW(CIFS_IOCTL_MAGIC, 3, int)
-#define CIFS_IOC_SET_INTEGRITY  _IO(CIFS_IOCTL_MAGIC, 4)
-
 static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
                        unsigned long srcfd, u64 off, u64 len, u64 destoff,
                        bool dup_extents)
@@ -135,6 +132,43 @@ out_drop_write:
        return rc;
 }
 
+static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
+                               void __user *arg)
+{
+       int rc = 0;
+       struct smb_mnt_fs_info *fsinf;
+
+       fsinf = kzalloc(sizeof(struct smb_mnt_fs_info), GFP_KERNEL);
+       if (fsinf == NULL)
+               return -ENOMEM;
+
+       fsinf->version = 1;
+       fsinf->protocol_id = tcon->ses->server->vals->protocol_id;
+       fsinf->device_characteristics =
+                       le32_to_cpu(tcon->fsDevInfo.DeviceCharacteristics);
+       fsinf->device_type = le32_to_cpu(tcon->fsDevInfo.DeviceType);
+       fsinf->fs_attributes = le32_to_cpu(tcon->fsAttrInfo.Attributes);
+       fsinf->max_path_component =
+               le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
+#ifdef CONFIG_CIFS_SMB2
+       fsinf->vol_serial_number = tcon->vol_serial_number;
+       fsinf->vol_create_time = le64_to_cpu(tcon->vol_create_time);
+       fsinf->share_flags = tcon->share_flags;
+       fsinf->share_caps = le32_to_cpu(tcon->capabilities);
+       fsinf->sector_flags = tcon->ss_flags;
+       fsinf->optimal_sector_size = tcon->perf_sector_size;
+       fsinf->max_bytes_chunk = tcon->max_bytes_chunk;
+       fsinf->maximal_access = tcon->maximal_access;
+#endif /* SMB2 */
+       fsinf->cifs_posix_caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+
+       if (copy_to_user(arg, fsinf, sizeof(struct smb_mnt_fs_info)))
+               rc = -EFAULT;
+
+       kfree(fsinf);
+       return rc;
+}
+
 long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 {
        struct inode *inode = file_inode(filep);
@@ -148,8 +182,6 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
 
        xid = get_xid();
 
-       cifs_dbg(FYI, "ioctl file %p  cmd %u  arg %lu\n", filep, command, arg);
-
        cifs_sb = CIFS_SB(inode->i_sb);
 
        switch (command) {
@@ -228,6 +260,10 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
                        else
                                rc = -EOPNOTSUPP;
                        break;
+               case CIFS_IOC_GET_MNT_INFO:
+                       tcon = tlink_tcon(pSMBFile->tlink);
+                       rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
+                       break;
                default:
                        cifs_dbg(FYI, "unsupported ioctl\n");
                        break;
index b8b4f08ee094e2f8a2b811f076ca0eb4cc3e68b9..070fb2ad85ced4483d28d88c1fa4832e92eef3ba 100644 (file)
@@ -1626,7 +1626,9 @@ smb2_echo_callback(struct mid_q_entry *mid)
        if (mid->mid_state == MID_RESPONSE_RECEIVED)
                credits_received = le16_to_cpu(smb2->hdr.CreditRequest);
 
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(server, credits_received, CIFS_ECHO_OP);
 }
 
@@ -1810,7 +1812,9 @@ smb2_readv_callback(struct mid_q_entry *mid)
                cifs_stats_fail_inc(tcon, SMB2_READ_HE);
 
        queue_work(cifsiod_wq, &rdata->work);
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(server, credits_received, 0);
 }
 
@@ -1938,6 +1942,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
 {
        struct cifs_writedata *wdata = mid->callback_data;
        struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
+       struct TCP_Server_Info *server = tcon->ses->server;
        unsigned int written;
        struct smb2_write_rsp *rsp = (struct smb2_write_rsp *)mid->resp_buf;
        unsigned int credits_received = 1;
@@ -1977,7 +1982,9 @@ smb2_writev_callback(struct mid_q_entry *mid)
                cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
 
        queue_work(cifsiod_wq, &wdata->work);
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        add_credits(tcon->ses->server, credits_received, 0);
 }
 
index 126f46b887cc85b6ba6a0f313b918e2d8b026708..2a24c524fb9a90cedd4187460ecb90c0e5dfcf0c 100644 (file)
@@ -644,7 +644,9 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
        }
        spin_unlock(&GlobalMid_Lock);
 
+       mutex_lock(&server->srv_mutex);
        DeleteMidQEntry(mid);
+       mutex_unlock(&server->srv_mutex);
        return rc;
 }
 
index e43389c74bbcea88956b0c5972f72e94cef58313..93bf2f990ace462b31dba2ee239084e112b1dfd0 100644 (file)
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -17,6 +17,7 @@
 #include <linux/atomic.h>
 #include <linux/blkdev.h>
 #include <linux/buffer_head.h>
+#include <linux/dax.h>
 #include <linux/fs.h>
 #include <linux/genhd.h>
 #include <linux/highmem.h>
@@ -527,7 +528,7 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        unsigned long pmd_addr = address & PMD_MASK;
        bool write = flags & FAULT_FLAG_WRITE;
        long length;
-       void *kaddr;
+       void __pmem *kaddr;
        pgoff_t size, pgoff;
        sector_t block, sector;
        unsigned long pfn;
@@ -570,7 +571,8 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
        if (buffer_unwritten(&bh) || buffer_new(&bh)) {
                int i;
                for (i = 0; i < PTRS_PER_PMD; i++)
-                       clear_page(kaddr + i * PAGE_SIZE);
+                       clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+               wmb_pmem();
                count_vm_event(PGMAJFAULT);
                mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
                result |= VM_FAULT_MAJOR;
index fb8b54eb77c5dcf89ef4a78d0c4dc88bc1d46abb..dc5fae601c24b40e432907dd93c1da6361453431 100644 (file)
@@ -417,14 +417,14 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment,
        if (oldcount == 0) {
                result = ufs_alloc_fragments (inode, cgno, goal, count, err);
                if (result) {
+                       ufs_clear_frags(inode, result + oldcount,
+                                       newcount - oldcount, locked_page != NULL);
                        write_seqlock(&UFS_I(inode)->meta_lock);
                        ufs_cpu_to_data_ptr(sb, p, result);
                        write_sequnlock(&UFS_I(inode)->meta_lock);
                        *err = 0;
                        UFS_I(inode)->i_lastfrag =
                                max(UFS_I(inode)->i_lastfrag, fragment + count);
-                       ufs_clear_frags(inode, result + oldcount,
-                                       newcount - oldcount, locked_page != NULL);
                }
                mutex_unlock(&UFS_SB(sb)->s_lock);
                UFSD("EXIT, result %llu\n", (unsigned long long)result);
index e5966758c093483cd6027b4e14ea678904be5978..e1e4d7c38dda5b1d014c607002838bc6275f9135 100644 (file)
@@ -52,13 +52,16 @@ struct arch_timer_cpu {
 
        /* Timer IRQ */
        const struct kvm_irq_level      *irq;
+
+       /* VGIC mapping */
+       struct irq_phys_map             *map;
 };
 
 int kvm_timer_hyp_init(void);
 void kvm_timer_enable(struct kvm *kvm);
 void kvm_timer_init(struct kvm *kvm);
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
-                         const struct kvm_irq_level *irq);
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                        const struct kvm_irq_level *irq);
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
 void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
index 133ea00aa83bc8926137ca8867f28a4ab9469d27..d901f1a47be6c5e4af71ea7543f7dc7163eca2f9 100644 (file)
@@ -95,11 +95,15 @@ enum vgic_type {
 #define LR_STATE_ACTIVE                (1 << 1)
 #define LR_STATE_MASK          (3 << 0)
 #define LR_EOI_INT             (1 << 2)
+#define LR_HW                  (1 << 3)
 
 struct vgic_lr {
-       u16     irq;
-       u8      source;
-       u8      state;
+       unsigned irq:10;
+       union {
+               unsigned hwirq:10;
+               unsigned source:3;
+       };
+       unsigned state:4;
 };
 
 struct vgic_vmcr {
@@ -155,6 +159,19 @@ struct vgic_io_device {
        struct kvm_io_device dev;
 };
 
+struct irq_phys_map {
+       u32                     virt_irq;
+       u32                     phys_irq;
+       u32                     irq;
+       bool                    active;
+};
+
+struct irq_phys_map_entry {
+       struct list_head        entry;
+       struct rcu_head         rcu;
+       struct irq_phys_map     map;
+};
+
 struct vgic_dist {
        spinlock_t              lock;
        bool                    in_kernel;
@@ -252,6 +269,10 @@ struct vgic_dist {
        struct vgic_vm_ops      vm_ops;
        struct vgic_io_device   dist_iodev;
        struct vgic_io_device   *redist_iodevs;
+
+       /* Virtual irq to hwirq mapping */
+       spinlock_t              irq_phys_map_lock;
+       struct list_head        irq_phys_map_list;
 };
 
 struct vgic_v2_cpu_if {
@@ -303,6 +324,9 @@ struct vgic_cpu {
                struct vgic_v2_cpu_if   vgic_v2;
                struct vgic_v3_cpu_if   vgic_v3;
        };
+
+       /* Protected by the distributor's irq_phys_map_lock */
+       struct list_head        irq_phys_map_list;
 };
 
 #define LR_EMPTY       0xff
@@ -317,16 +341,25 @@ int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
 int kvm_vgic_hyp_init(void);
 int kvm_vgic_map_resources(struct kvm *kvm);
 int kvm_vgic_get_max_vcpus(void);
+void kvm_vgic_early_init(struct kvm *kvm);
 int kvm_vgic_create(struct kvm *kvm, u32 type);
 void kvm_vgic_destroy(struct kvm *kvm);
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu);
 void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level);
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+                              struct irq_phys_map *map, bool level);
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+                                          int virt_irq, int irq);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map);
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map);
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active);
 
 #define irqchip_in_kernel(k)   (!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)    (!!((k)->arch.vgic.nr_cpus))
index 6a0a89ed7f81cac2b9eb4d04414874fc46243cb3..0ddb5c02ad8b6c279047c4c8c9c90e5516327ca7 100644 (file)
 #define UART01x_DR             0x00    /* Data read or written from the interface. */
 #define UART01x_RSR            0x04    /* Receive status register (Read). */
 #define UART01x_ECR            0x04    /* Error clear register (Write). */
-#define ZX_UART01x_DR          0x04    /* Data read or written from the interface. */
 #define UART010_LCRH           0x08    /* Line control register, high byte. */
 #define ST_UART011_DMAWM       0x08    /* DMA watermark configure register. */
 #define UART010_LCRM           0x0C    /* Line control register, middle byte. */
 #define ST_UART011_TIMEOUT     0x0C    /* Timeout period register. */
 #define UART010_LCRL           0x10    /* Line control register, low byte. */
 #define UART010_CR             0x14    /* Control register. */
-#define ZX_UART01x_FR          0x14    /* Flag register (Read only). */
 #define UART01x_FR             0x18    /* Flag register (Read only). */
 #define UART010_IIR            0x1C    /* Interrupt identification register (Read). */
 #define UART010_ICR            0x1C    /* Interrupt clear register (Write). */
 #define UART011_LCRH           0x2c    /* Line control register. */
 #define ST_UART011_LCRH_TX     0x2c    /* Tx Line control register. */
 #define UART011_CR             0x30    /* Control register. */
-#define ZX_UART011_LCRH_TX     0x30    /* Tx Line control register. */
 #define UART011_IFLS           0x34    /* Interrupt fifo level select. */
-#define ZX_UART011_CR          0x34    /* Control register. */
-#define ZX_UART011_IFLS                0x38    /* Interrupt fifo level select. */
 #define UART011_IMSC           0x38    /* Interrupt mask. */
 #define UART011_RIS            0x3c    /* Raw interrupt status. */
 #define UART011_MIS            0x40    /* Masked interrupt status. */
-#define ZX_UART011_IMSC                0x40    /* Interrupt mask. */
 #define UART011_ICR            0x44    /* Interrupt clear register. */
-#define ZX_UART011_RIS         0x44    /* Raw interrupt status. */
 #define UART011_DMACR          0x48    /* DMA control register. */
-#define ZX_UART011_MIS         0x48    /* Masked interrupt status. */
-#define ZX_UART011_ICR         0x4c    /* Interrupt clear register. */
 #define ST_UART011_XFCR                0x50    /* XON/XOFF control register. */
-#define ZX_UART011_DMACR       0x50    /* DMA control register. */
 #define ST_UART011_XON1                0x54    /* XON1 register. */
 #define ST_UART011_XON2                0x58    /* XON2 register. */
 #define ST_UART011_XOFF1       0x5C    /* XON1 register. */
 #define UART01x_RSR_PE                 0x02
 #define UART01x_RSR_FE                 0x01
 
-#define ZX_UART01x_FR_BUSY     0x300
 #define UART011_FR_RI          0x100
 #define UART011_FR_TXFE                0x080
 #define UART011_FR_RXFF                0x040
 #define UART01x_FR_TXFF                0x020
 #define UART01x_FR_RXFE                0x010
 #define UART01x_FR_BUSY                0x008
-#define ZX_UART01x_FR_DSR       0x008
 #define UART01x_FR_DCD                 0x004
 #define UART01x_FR_DSR                 0x002
-#define ZX_UART01x_FR_CTS      0x002
 #define UART01x_FR_CTS                 0x001
-#define ZX_UART011_FR_RI       0x001
 #define UART01x_FR_TMSK                (UART01x_FR_TXFF + UART01x_FR_BUSY)
 
 #define UART011_CR_CTSEN       0x8000  /* CTS hardware flow control */
index 71e4faf33091a114ba666054bc020adaed4a08d5..9eeeb9589acfc35baed79a89d7d3f0c1be134cf3 100644 (file)
 
 #define ICH_LR_EOI                     (1UL << 41)
 #define ICH_LR_GROUP                   (1UL << 60)
+#define ICH_LR_HW                      (1UL << 61)
 #define ICH_LR_STATE                   (3UL << 62)
 #define ICH_LR_PENDING_BIT             (1UL << 62)
 #define ICH_LR_ACTIVE_BIT              (1UL << 63)
+#define ICH_LR_PHYS_ID_SHIFT           32
+#define ICH_LR_PHYS_ID_MASK            (0x3ffUL << ICH_LR_PHYS_ID_SHIFT)
 
 #define ICH_MISR_EOI                   (1 << 0)
 #define ICH_MISR_U                     (1 << 1)
index af3d29f7078117f0b3f260804e419e6e7610d3e5..b8901dfd9e9584ba06de5f585f14c6edc9da8fb5 100644 (file)
 
 #define GICH_LR_VIRTUALID              (0x3ff << 0)
 #define GICH_LR_PHYSID_CPUID_SHIFT     (10)
-#define GICH_LR_PHYSID_CPUID           (7 << GICH_LR_PHYSID_CPUID_SHIFT)
+#define GICH_LR_PHYSID_CPUID           (0x3ff << GICH_LR_PHYSID_CPUID_SHIFT)
 #define GICH_LR_STATE                  (3 << 28)
 #define GICH_LR_PENDING_BIT            (1 << 28)
 #define GICH_LR_ACTIVE_BIT             (1 << 29)
 #define GICH_LR_EOI                    (1 << 19)
+#define GICH_LR_HW                     (1 << 31)
 
 #define GICH_VMCR_CTRL_SHIFT           0
 #define GICH_VMCR_CTRL_MASK            (0x21f << GICH_VMCR_CTRL_SHIFT)
index 81089cf1f0c11c025b6f448d20ddcc1117ab5367..1bef9e21e7259935d2d7e8755bbd4c5710f5a9a6 100644 (file)
@@ -242,6 +242,7 @@ struct kvm_vcpu {
        int sigset_active;
        sigset_t sigset;
        struct kvm_vcpu_stat stat;
+       unsigned int halt_poll_ns;
 
 #ifdef CONFIG_HAS_IOMEM
        int mmio_needed;
diff --git a/include/linux/microchipphy.h b/include/linux/microchipphy.h
new file mode 100644 (file)
index 0000000..eb492d4
--- /dev/null
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Microchip Technology
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MICROCHIPPHY_H
+#define _MICROCHIPPHY_H
+
+#define LAN88XX_INT_MASK                       (0x19)
+#define LAN88XX_INT_MASK_MDINTPIN_EN_          (0x8000)
+#define LAN88XX_INT_MASK_SPEED_CHANGE_         (0x4000)
+#define LAN88XX_INT_MASK_LINK_CHANGE_          (0x2000)
+#define LAN88XX_INT_MASK_FDX_CHANGE_           (0x1000)
+#define LAN88XX_INT_MASK_AUTONEG_ERR_          (0x0800)
+#define LAN88XX_INT_MASK_AUTONEG_DONE_         (0x0400)
+#define LAN88XX_INT_MASK_POE_DETECT_           (0x0200)
+#define LAN88XX_INT_MASK_SYMBOL_ERR_           (0x0100)
+#define LAN88XX_INT_MASK_FAST_LINK_FAIL_       (0x0080)
+#define LAN88XX_INT_MASK_WOL_EVENT_            (0x0040)
+#define LAN88XX_INT_MASK_EXTENDED_INT_         (0x0020)
+#define LAN88XX_INT_MASK_RESERVED_             (0x0010)
+#define LAN88XX_INT_MASK_FALSE_CARRIER_                (0x0008)
+#define LAN88XX_INT_MASK_LINK_SPEED_DS_                (0x0004)
+#define LAN88XX_INT_MASK_MASTER_SLAVE_DONE_    (0x0002)
+#define LAN88XX_INT_MASK_RX__ER_               (0x0001)
+
+#define LAN88XX_INT_STS                                (0x1A)
+#define LAN88XX_INT_STS_INT_ACTIVE_            (0x8000)
+#define LAN88XX_INT_STS_SPEED_CHANGE_          (0x4000)
+#define LAN88XX_INT_STS_LINK_CHANGE_           (0x2000)
+#define LAN88XX_INT_STS_FDX_CHANGE_            (0x1000)
+#define LAN88XX_INT_STS_AUTONEG_ERR_           (0x0800)
+#define LAN88XX_INT_STS_AUTONEG_DONE_          (0x0400)
+#define LAN88XX_INT_STS_POE_DETECT_            (0x0200)
+#define LAN88XX_INT_STS_SYMBOL_ERR_            (0x0100)
+#define LAN88XX_INT_STS_FAST_LINK_FAIL_                (0x0080)
+#define LAN88XX_INT_STS_WOL_EVENT_             (0x0040)
+#define LAN88XX_INT_STS_EXTENDED_INT_          (0x0020)
+#define LAN88XX_INT_STS_RESERVED_              (0x0010)
+#define LAN88XX_INT_STS_FALSE_CARRIER_         (0x0008)
+#define LAN88XX_INT_STS_LINK_SPEED_DS_         (0x0004)
+#define LAN88XX_INT_STS_MASTER_SLAVE_DONE_     (0x0002)
+#define LAN88XX_INT_STS_RX_ER_                 (0x0001)
+
+#define LAN88XX_EXT_PAGE_ACCESS                        (0x1F)
+#define LAN88XX_EXT_PAGE_SPACE_0               (0x0000)
+#define LAN88XX_EXT_PAGE_SPACE_1               (0x0001)
+#define LAN88XX_EXT_PAGE_SPACE_2               (0x0002)
+
+/* Extended Register Page 1 space */
+#define LAN88XX_EXT_MODE_CTRL                  (0x13)
+#define LAN88XX_EXT_MODE_CTRL_MDIX_MASK_       (0x000C)
+#define LAN88XX_EXT_MODE_CTRL_AUTO_MDIX_       (0x0000)
+#define LAN88XX_EXT_MODE_CTRL_MDI_             (0x0008)
+#define LAN88XX_EXT_MODE_CTRL_MDI_X_           (0x000C)
+
+/* MMD 3 Registers */
+#define        LAN88XX_MMD3_CHIP_ID                    (32877)
+#define        LAN88XX_MMD3_CHIP_REV                   (32878)
+
+#endif /* _MICROCHIPPHY_H */
index 9120edb650a068df60b0a2c390ae431ab78671cb..639e9b8b0e4d9ff2c9b10ce5b44a8a328abe2a31 100644 (file)
@@ -68,8 +68,17 @@ extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
 extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
 extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
 extern int netlink_has_listeners(struct sock *sk, unsigned int group);
-extern struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
-                                        u32 dst_portid, gfp_t gfp_mask);
+
+extern struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+                                          unsigned int ldiff, u32 dst_portid,
+                                          gfp_t gfp_mask);
+static inline struct sk_buff *
+netlink_alloc_skb(struct sock *ssk, unsigned int size, u32 dst_portid,
+                 gfp_t gfp_mask)
+{
+       return __netlink_alloc_skb(ssk, size, 0, dst_portid, gfp_mask);
+}
+
 extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
 extern int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid,
                             __u32 group, gfp_t allocation);
index 36262d08a9dad8aca7eaea494e30348c0b26cb5b..d681f6875aef1a7a8b22c9453b8f9019f9fd45eb 100644 (file)
@@ -79,26 +79,43 @@ enum {
        PWMF_EXPORTED = 1 << 2,
 };
 
+/**
+ * struct pwm_device - PWM channel object
+ * @label: name of the PWM device
+ * @flags: flags associated with the PWM device
+ * @hwpwm: per-chip relative index of the PWM device
+ * @pwm: global index of the PWM device
+ * @chip: PWM chip providing this PWM device
+ * @chip_data: chip-private data associated with the PWM device
+ * @period: period of the PWM signal (in nanoseconds)
+ * @duty_cycle: duty cycle of the PWM signal (in nanoseconds)
+ * @polarity: polarity of the PWM signal
+ */
 struct pwm_device {
-       const char              *label;
-       unsigned long           flags;
-       unsigned int            hwpwm;
-       unsigned int            pwm;
-       struct pwm_chip         *chip;
-       void                    *chip_data;
-
-       unsigned int            period;         /* in nanoseconds */
-       unsigned int            duty_cycle;     /* in nanoseconds */
-       enum pwm_polarity       polarity;
+       const char *label;
+       unsigned long flags;
+       unsigned int hwpwm;
+       unsigned int pwm;
+       struct pwm_chip *chip;
+       void *chip_data;
+
+       unsigned int period;
+       unsigned int duty_cycle;
+       enum pwm_polarity polarity;
 };
 
+static inline bool pwm_is_enabled(const struct pwm_device *pwm)
+{
+       return test_bit(PWMF_ENABLED, &pwm->flags);
+}
+
 static inline void pwm_set_period(struct pwm_device *pwm, unsigned int period)
 {
        if (pwm)
                pwm->period = period;
 }
 
-static inline unsigned int pwm_get_period(struct pwm_device *pwm)
+static inline unsigned int pwm_get_period(const struct pwm_device *pwm)
 {
        return pwm ? pwm->period : 0;
 }
@@ -109,7 +126,7 @@ static inline void pwm_set_duty_cycle(struct pwm_device *pwm, unsigned int duty)
                pwm->duty_cycle = duty;
 }
 
-static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
+static inline unsigned int pwm_get_duty_cycle(const struct pwm_device *pwm)
 {
        return pwm ? pwm->duty_cycle : 0;
 }
@@ -119,6 +136,11 @@ static inline unsigned int pwm_get_duty_cycle(struct pwm_device *pwm)
  */
 int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
 
+static inline enum pwm_polarity pwm_get_polarity(const struct pwm_device *pwm)
+{
+       return pwm ? pwm->polarity : PWM_POLARITY_NORMAL;
+}
+
 /**
  * struct pwm_ops - PWM controller operations
  * @request: optional hook for requesting a PWM
@@ -131,25 +153,18 @@ int pwm_set_polarity(struct pwm_device *pwm, enum pwm_polarity polarity);
  * @owner: helps prevent removal of modules exporting active PWMs
  */
 struct pwm_ops {
-       int                     (*request)(struct pwm_chip *chip,
-                                          struct pwm_device *pwm);
-       void                    (*free)(struct pwm_chip *chip,
-                                       struct pwm_device *pwm);
-       int                     (*config)(struct pwm_chip *chip,
-                                         struct pwm_device *pwm,
-                                         int duty_ns, int period_ns);
-       int                     (*set_polarity)(struct pwm_chip *chip,
-                                         struct pwm_device *pwm,
-                                         enum pwm_polarity polarity);
-       int                     (*enable)(struct pwm_chip *chip,
-                                         struct pwm_device *pwm);
-       void                    (*disable)(struct pwm_chip *chip,
-                                          struct pwm_device *pwm);
+       int (*request)(struct pwm_chip *chip, struct pwm_device *pwm);
+       void (*free)(struct pwm_chip *chip, struct pwm_device *pwm);
+       int (*config)(struct pwm_chip *chip, struct pwm_device *pwm,
+                     int duty_ns, int period_ns);
+       int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm,
+                           enum pwm_polarity polarity);
+       int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm);
+       void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm);
 #ifdef CONFIG_DEBUG_FS
-       void                    (*dbg_show)(struct pwm_chip *chip,
-                                           struct seq_file *s);
+       void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s);
 #endif
-       struct module           *owner;
+       struct module *owner;
 };
 
 /**
@@ -160,22 +175,24 @@ struct pwm_ops {
  * @base: number of first PWM controlled by this chip
  * @npwm: number of PWMs controlled by this chip
  * @pwms: array of PWM devices allocated by the framework
+ * @of_xlate: request a PWM device given a device tree PWM specifier
+ * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier
  * @can_sleep: must be true if the .config(), .enable() or .disable()
  *             operations may sleep
  */
 struct pwm_chip {
-       struct device           *dev;
-       struct list_head        list;
-       const struct pwm_ops    *ops;
-       int                     base;
-       unsigned int            npwm;
-
-       struct pwm_device       *pwms;
-
-       struct pwm_device *     (*of_xlate)(struct pwm_chip *pc,
-                                           const struct of_phandle_args *args);
-       unsigned int            of_pwm_n_cells;
-       bool                    can_sleep;
+       struct device *dev;
+       struct list_head list;
+       const struct pwm_ops *ops;
+       int base;
+       unsigned int npwm;
+
+       struct pwm_device *pwms;
+
+       struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+                                       const struct of_phandle_args *args);
+       unsigned int of_pwm_n_cells;
+       bool can_sleep;
 };
 
 #if IS_ENABLED(CONFIG_PWM)
index da5602bd77d75996f78114b7369b409ea52f007d..7f65f9cff951033607b0beba1c1ab0df70ee60e9 100644 (file)
@@ -74,6 +74,20 @@ static inline int device_reset_optional(struct device *dev)
        return -ENOSYS;
 }
 
+static inline struct reset_control *__must_check reset_control_get(
+                                       struct device *dev, const char *id)
+{
+       WARN_ON(1);
+       return ERR_PTR(-EINVAL);
+}
+
+static inline struct reset_control *__must_check devm_reset_control_get(
+                                       struct device *dev, const char *id)
+{
+       WARN_ON(1);
+       return ERR_PTR(-EINVAL);
+}
+
 static inline struct reset_control *reset_control_get_optional(
                                        struct device *dev, const char *id)
 {
index 4e8f804f45898aae228e7b9f60fdda9e3666271d..59160de702b68023c248181ab6c5fcb6e2f42452 100644 (file)
@@ -66,7 +66,6 @@ struct fib_rules_ops {
                                           struct nlattr **);
        int                     (*fill)(struct fib_rule *, struct sk_buff *,
                                        struct fib_rule_hdr *);
-       u32                     (*default_pref)(struct fib_rules_ops *ops);
        size_t                  (*nlmsg_payload)(struct fib_rule *);
 
        /* Called after modifications to the rules set, must flush
@@ -118,5 +117,4 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
                     struct fib_lookup_arg *);
 int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
                         u32 flags);
-u32 fib_default_rule_pref(struct fib_rules_ops *ops);
 #endif
index e3314e516681ed0733ec212d7464c393222428ef..bfc569498bfa793013766e6e16158d29bb84c9ef 100644 (file)
@@ -477,7 +477,9 @@ struct ieee80211_event {
  * @chandef: Channel definition for this BSS -- the hardware might be
  *     configured a higher bandwidth than this BSS uses, for example.
  * @ht_operation_mode: HT operation mode like in &struct ieee80211_ht_operation.
- *     This field is only valid when the channel type is one of the HT types.
+ *     This field is only valid when the channel is a wide HT/VHT channel.
+ *     Note that with TDLS this can be the case (channel is HT, protection must
+ *     be used from this field) even when the BSS association isn't using HT.
  * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
  *     implies disabled
  * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
index bab824bde92cabccb025a7de9030d033be1ffd7e..d4c6b5f30acd936d863b1a5a5e89ef2443ad3943 100644 (file)
@@ -59,7 +59,7 @@ static inline unsigned int
 br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
                       const struct nf_hook_state *state)
 {
-       return NF_DROP;
+       return NF_ACCEPT;
 }
 #endif
 
index f5e23c6dee8bcbcc66705a4d5cefdaef311eb98b..e8ad46834df87453e1335bccb7e80dda2ba9fbe5 100644 (file)
@@ -298,6 +298,7 @@ void init_nf_conntrack_hash_rnd(void);
 struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags);
+void nf_ct_tmpl_free(struct nf_conn *tmpl);
 
 #define NF_CT_STAT_INC(net, count)       __this_cpu_inc((net)->ct.stat->count)
 #define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
index 2a246680a6c38934485fbd7a540df04ad47a0253..aa8bee72c9d34288cd121ebd5ddd1e3ad7a7bf40 100644 (file)
@@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
 
 static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
 {
-       return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+       return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
 }
 
 unsigned int nft_parse_register(const struct nlattr *attr);
index a44062da684b575fe3c411ab24efd5795cfa9c02..d6f83222a6a1671ec819d730801398ebb88f80ab 100644 (file)
@@ -358,6 +358,36 @@ TRACE_EVENT(
 
 #endif
 
+TRACE_EVENT(kvm_halt_poll_ns,
+       TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+       TP_ARGS(grow, vcpu_id, new, old),
+
+       TP_STRUCT__entry(
+               __field(bool, grow)
+               __field(unsigned int, vcpu_id)
+               __field(int, new)
+               __field(int, old)
+       ),
+
+       TP_fast_assign(
+               __entry->grow           = grow;
+               __entry->vcpu_id        = vcpu_id;
+               __entry->new            = new;
+               __entry->old            = old;
+       ),
+
+       TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+                       __entry->vcpu_id,
+                       __entry->new,
+                       __entry->grow ? "grow" : "shrink",
+                       __entry->old)
+);
+
+#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
+       trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
+#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
+       trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
+
 #endif /* _TRACE_KVM_MAIN_H */
 
 /* This part must be outside protection */
index 3429a3ba382b64edcef8f7f299134f81cead2184..b56dfcfe922ae2becbda3aea5116c97379bfe939 100644 (file)
@@ -39,6 +39,7 @@
 #define EM_TI_C6000    140     /* TI C6X DSPs */
 #define EM_AARCH64     183     /* ARM 64 bit */
 #define EM_TILEPRO     188     /* Tilera TILEPro */
+#define EM_MICROBLAZE  189     /* Xilinx MicroBlaze */
 #define EM_TILEGX      191     /* Tilera TILE-Gx */
 #define EM_FRV         0x5441  /* Fujitsu FR-V */
 #define EM_AVR32       0x18ad  /* Atmel AVR32 */
index aa63ed023c2b96b61b42231f9dd9b34b6ae46b66..ea9221b0331adea9b748afdbc761e386d6814b4d 100644 (file)
@@ -42,6 +42,7 @@
 #define ETH_P_LOOP     0x0060          /* Ethernet Loopback packet     */
 #define ETH_P_PUP      0x0200          /* Xerox PUP packet             */
 #define ETH_P_PUPAT    0x0201          /* Xerox PUP Addr Trans packet  */
+#define ETH_P_TSN      0x22F0          /* TSN (IEEE 1722) packet       */
 #define ETH_P_IP       0x0800          /* Internet Protocol packet     */
 #define ETH_P_X25      0x0805          /* CCITT X.25                   */
 #define ETH_P_ARP      0x0806          /* Address Resolution packet    */
index 0d831f94f8a8f773e3ff034554d911bf8de4cc17..a9256f0331aeaf6be9af8f8507f480a54d0ca273 100644 (file)
@@ -237,6 +237,7 @@ struct kvm_run {
                        __u32 count;
                        __u64 data_offset; /* relative to kvm_run start */
                } io;
+               /* KVM_EXIT_DEBUG */
                struct {
                        struct kvm_debug_exit_arch arch;
                } debug;
@@ -285,6 +286,7 @@ struct kvm_run {
                        __u32 data;
                        __u8  is_write;
                } dcr;
+               /* KVM_EXIT_INTERNAL_ERROR */
                struct {
                        __u32 suberror;
                        /* Available with KVM_CAP_INTERNAL_ERROR_DATA: */
@@ -295,6 +297,7 @@ struct kvm_run {
                struct {
                        __u64 gprs[32];
                } osi;
+               /* KVM_EXIT_PAPR_HCALL */
                struct {
                        __u64 nr;
                        __u64 ret;
@@ -819,6 +822,8 @@ struct kvm_ppc_smmu_info {
 #define KVM_CAP_DISABLE_QUIRKS 116
 #define KVM_CAP_X86_SMM 117
 #define KVM_CAP_MULTI_ADDRESS_SPACE 118
+#define KVM_CAP_GUEST_DEBUG_HW_BPS 119
+#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
 
 #ifdef KVM_CAP_IRQ_ROUTING
 
index a85316811d79ad43493d21c1e35d51ed10aa257c..7ddeeda9380971b0327b09fc639eee9ebf82f1f7 100644 (file)
@@ -44,6 +44,10 @@ struct privcmd_hypercall {
 
 struct privcmd_mmap_entry {
        __u64 va;
+       /*
+        * This should be a GFN. It's not possible to change the name because
+        * it's exposed to the user-space.
+        */
        __u64 mfn;
        __u64 npages;
 };
index a5983da2f5cd929e8461eb25814c3a26ebc8e15e..1daae485e3360b3f8b30aea342b85b365230e705 100644 (file)
@@ -3,9 +3,9 @@
 
 #include <asm/xen/page.h>
 
-static inline unsigned long page_to_mfn(struct page *page)
+static inline unsigned long xen_page_to_gfn(struct page *page)
 {
-       return pfn_to_mfn(page_to_pfn(page));
+       return pfn_to_gfn(page_to_pfn(page));
 }
 
 struct xen_memory_region {
index 0ce4f32017ea91e6af37bc860c5633f473da17b3..e4e214a5abd531b0fa1805186ac32a5cb8576253 100644 (file)
@@ -30,7 +30,7 @@ void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
 struct vm_area_struct;
 
 /*
- * xen_remap_domain_mfn_array() - map an array of foreign frames
+ * xen_remap_domain_gfn_array() - map an array of foreign frames
  * @vma:     VMA to map the pages into
  * @addr:    Address at which to map the pages
  * @gfn:     Array of GFNs to map
@@ -46,14 +46,14 @@ struct vm_area_struct;
  * Returns the number of successfully mapped frames, or a -ve error
  * code.
  */
-int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t *gfn, int nr,
                               int *err_ptr, pgprot_t prot,
                               unsigned domid,
                               struct page **pages);
 
-/* xen_remap_domain_mfn_range() - map a range of foreign frames
+/* xen_remap_domain_gfn_range() - map a range of foreign frames
  * @vma:     VMA to map the pages into
  * @addr:    Address at which to map the pages
  * @gfn:     First GFN to map.
@@ -65,12 +65,12 @@ int xen_remap_domain_mfn_array(struct vm_area_struct *vma,
  * Returns the number of successfully mapped frames, or a -ve error
  * code.
  */
-int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
                               unsigned long addr,
                               xen_pfn_t gfn, int nr,
                               pgprot_t prot, unsigned domid,
                               struct page **pages);
-int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
+int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
                               int numpgs, struct page **pages);
 int xen_xlate_remap_gfn_array(struct vm_area_struct *vma,
                              unsigned long addr,
index dc9b464fefa954a50c83e632c4ebea2ec464839e..35bac8e8b071ae5aa57e3837f4363d8dcd741158 100644 (file)
@@ -155,14 +155,15 @@ static int map_lookup_elem(union bpf_attr *attr)
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *uvalue = u64_to_ptr(attr->value);
        int ufd = attr->map_fd;
-       struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *value, *ptr;
+       struct fd f;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
                return -EINVAL;
 
+       f = fdget(ufd);
        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
@@ -213,14 +214,15 @@ static int map_update_elem(union bpf_attr *attr)
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *uvalue = u64_to_ptr(attr->value);
        int ufd = attr->map_fd;
-       struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *value;
+       struct fd f;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
                return -EINVAL;
 
+       f = fdget(ufd);
        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
@@ -265,14 +267,15 @@ static int map_delete_elem(union bpf_attr *attr)
 {
        void __user *ukey = u64_to_ptr(attr->key);
        int ufd = attr->map_fd;
-       struct fd f = fdget(ufd);
        struct bpf_map *map;
+       struct fd f;
        void *key;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
                return -EINVAL;
 
+       f = fdget(ufd);
        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
@@ -305,14 +308,15 @@ static int map_get_next_key(union bpf_attr *attr)
        void __user *ukey = u64_to_ptr(attr->key);
        void __user *unext_key = u64_to_ptr(attr->next_key);
        int ufd = attr->map_fd;
-       struct fd f = fdget(ufd);
        struct bpf_map *map;
        void *key, *next_key;
+       struct fd f;
        int err;
 
        if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
                return -EINVAL;
 
+       f = fdget(ufd);
        map = bpf_map_get(f);
        if (IS_ERR(map))
                return PTR_ERR(map);
index ed12e385fb75997f559e345989b178bc4c709e22..b074b23000d6e95792e7fcefe0cd29e5a649a6ea 100644 (file)
@@ -283,7 +283,7 @@ static const char *const bpf_class_string[] = {
        [BPF_ALU64] = "alu64",
 };
 
-static const char *const bpf_alu_string[] = {
+static const char *const bpf_alu_string[16] = {
        [BPF_ADD >> 4]  = "+=",
        [BPF_SUB >> 4]  = "-=",
        [BPF_MUL >> 4]  = "*=",
@@ -307,7 +307,7 @@ static const char *const bpf_ldst_string[] = {
        [BPF_DW >> 3] = "u64",
 };
 
-static const char *const bpf_jmp_string[] = {
+static const char *const bpf_jmp_string[16] = {
        [BPF_JA >> 4]   = "jmp",
        [BPF_JEQ >> 4]  = "==",
        [BPF_JGT >> 4]  = ">",
index af5e187553fd6a9d3b033d446642040585995b80..ea748c93a07f1dcd988cc18130d629d6a3556d98 100644 (file)
@@ -16,7 +16,6 @@
 #include <net/rtnetlink.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
-#include <net/switchdev.h>
 #include <uapi/linux/if_bridge.h>
 
 #include "br_private.h"
index 3cd8cc9e804b37240047421ba6c5855bb72f79d9..5f5a02b49a99617918c8f76ecf7858920152ba53 100644 (file)
@@ -117,10 +117,11 @@ out_filt:
        return err;
 }
 
-static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
-                          u16 vid)
+static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
+                         u16 vid)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
+       int err = 0;
 
        /* If driver uses VLAN ndo ops, use 8021q to delete vid
         * on device, otherwise try switchdev ops to delete vid.
@@ -137,8 +138,12 @@ static void __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
                        },
                };
 
-               switchdev_port_obj_del(dev, &vlan_obj);
+               err = switchdev_port_obj_del(dev, &vlan_obj);
+               if (err == -EOPNOTSUPP)
+                       err = 0;
        }
+
+       return err;
 }
 
 static int __vlan_del(struct net_port_vlans *v, u16 vid)
@@ -151,7 +156,11 @@ static int __vlan_del(struct net_port_vlans *v, u16 vid)
 
        if (v->port_idx) {
                struct net_bridge_port *p = v->parent.port;
-               __vlan_vid_del(p->dev, p->br, vid);
+               int err;
+
+               err = __vlan_vid_del(p->dev, p->br, vid);
+               if (err)
+                       return err;
        }
 
        clear_bit(vid, v->vlan_bitmap);
index ae8306e7c56f966196b570eb372a5321d146248a..bf77e3639ce0fd318822cca563c56b4376f9e8b7 100644 (file)
@@ -44,7 +44,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
 }
 EXPORT_SYMBOL(fib_default_rule_add);
 
-u32 fib_default_rule_pref(struct fib_rules_ops *ops)
+static u32 fib_default_rule_pref(struct fib_rules_ops *ops)
 {
        struct list_head *pos;
        struct fib_rule *rule;
@@ -60,7 +60,6 @@ u32 fib_default_rule_pref(struct fib_rules_ops *ops)
 
        return 0;
 }
-EXPORT_SYMBOL(fib_default_rule_pref);
 
 static void notify_rule_change(int event, struct fib_rule *rule,
                               struct fib_rules_ops *ops, struct nlmsghdr *nlh,
@@ -299,8 +298,8 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        }
        rule->fr_net = net;
 
-       if (tb[FRA_PRIORITY])
-               rule->pref = nla_get_u32(tb[FRA_PRIORITY]);
+       rule->pref = tb[FRA_PRIORITY] ? nla_get_u32(tb[FRA_PRIORITY])
+                                     : fib_default_rule_pref(ops);
 
        if (tb[FRA_IIFNAME]) {
                struct net_device *dev;
@@ -350,9 +349,6 @@ static int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr* nlh)
        else
                rule->suppress_ifgroup = -1;
 
-       if (!tb[FRA_PRIORITY] && ops->default_pref)
-               rule->pref = ops->default_pref(ops);
-
        err = -EINVAL;
        if (tb[FRA_GOTO]) {
                if (rule->action != FR_ACT_GOTO)
index 9d66a0f72f906733878de68e7f2e6bd80932c1b9..295bbd6a56f2e690435148cd557b39eb4c05fed7 100644 (file)
@@ -229,7 +229,6 @@ static const struct fib_rules_ops __net_initconst dn_fib_rules_ops_template = {
        .configure      = dn_fib_rule_configure,
        .compare        = dn_fib_rule_compare,
        .fill           = dn_fib_rule_fill,
-       .default_pref   = fib_default_rule_pref,
        .flush_cache    = dn_fib_rule_flush_cache,
        .nlgroup        = RTNLGRP_DECnet_RULE,
        .policy         = dn_fib_rule_policy,
index 18123d50f576117358e47c4d3de0f5c3dcebafaa..f2bda9e89c61b251b66ed1201c14e568bd5078b3 100644 (file)
@@ -318,7 +318,6 @@ static const struct fib_rules_ops __net_initconst fib4_rules_ops_template = {
        .delete         = fib4_rule_delete,
        .compare        = fib4_rule_compare,
        .fill           = fib4_rule_fill,
-       .default_pref   = fib_default_rule_pref,
        .nlmsg_payload  = fib4_rule_nlmsg_payload,
        .flush_cache    = fib4_rule_flush_cache,
        .nlgroup        = RTNLGRP_IPV4_RULE,
index 3a2c0162c3badeed716599e538ed06426ddc7199..866ee89f5254a4d6b401c4d9152d15640802b6b8 100644 (file)
@@ -233,7 +233,6 @@ static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = {
        .match          = ipmr_rule_match,
        .configure      = ipmr_rule_configure,
        .compare        = ipmr_rule_compare,
-       .default_pref   = fib_default_rule_pref,
        .fill           = ipmr_rule_fill,
        .nlgroup        = RTNLGRP_IPV4_RULE,
        .policy         = ipmr_rule_policy,
index 28011fb1f4a2104a34f81fc0c9fb4a4382bdadac..c6ded6b2a79fb5d8ece3f3b4b1ed0e01707124c3 100644 (file)
@@ -151,6 +151,21 @@ static void bictcp_init(struct sock *sk)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
 
+static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
+{
+       if (event == CA_EVENT_TX_START) {
+               s32 delta = tcp_time_stamp - tcp_sk(sk)->lsndtime;
+               struct bictcp *ca = inet_csk_ca(sk);
+
+               /* We were application limited (idle) for a while.
+                * Shift epoch_start to keep cwnd growth to cubic curve.
+                */
+               if (ca->epoch_start && delta > 0)
+                       ca->epoch_start += delta;
+               return;
+       }
+}
+
 /* calculate the cubic root of x using a table lookup followed by one
  * Newton-Raphson iteration.
  * Avg err ~= 0.195%
@@ -450,6 +465,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
        .cong_avoid     = bictcp_cong_avoid,
        .set_state      = bictcp_state,
        .undo_cwnd      = bictcp_undo_cwnd,
+       .cwnd_event     = bictcp_cwnd_event,
        .pkts_acked     = bictcp_acked,
        .owner          = THIS_MODULE,
        .name           = "cubic",
index 1188e4fcf23bf87d40fb73f6e1483ee004fd74b1..f9a8a12b62ee64d954ae9a4aab75bcdce687650b 100644 (file)
@@ -164,6 +164,9 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
        struct inet_connection_sock *icsk = inet_csk(sk);
        const u32 now = tcp_time_stamp;
 
+       if (tcp_packets_in_flight(tp) == 0)
+               tcp_ca_event(sk, CA_EVENT_TX_START);
+
        tp->lsndtime = now;
 
        /* If it is a reply for ato after last received
@@ -940,9 +943,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
                                                           &md5);
        tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 
-       if (tcp_packets_in_flight(tp) == 0)
-               tcp_ca_event(sk, CA_EVENT_TX_START);
-
        /* if no packet is in qdisc/device queue, then allow XPS to select
         * another queue. We can be called from tcp_tsq_handler()
         * which holds one reference to sk_wmem_alloc.
index 2367a16eae58a31e01aa0d1d676090b688102593..9f777ec59a59d24566d87643889a8c591dd52637 100644 (file)
@@ -258,11 +258,6 @@ nla_put_failure:
        return -ENOBUFS;
 }
 
-static u32 fib6_rule_default_pref(struct fib_rules_ops *ops)
-{
-       return 0x3FFF;
-}
-
 static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
 {
        return nla_total_size(16) /* dst */
@@ -279,7 +274,6 @@ static const struct fib_rules_ops __net_initconst fib6_rules_ops_template = {
        .configure              = fib6_rule_configure,
        .compare                = fib6_rule_compare,
        .fill                   = fib6_rule_fill,
-       .default_pref           = fib6_rule_default_pref,
        .nlmsg_payload          = fib6_rule_nlmsg_payload,
        .nlgroup                = RTNLGRP_IPV6_RULE,
        .policy                 = fib6_rule_policy,
index 74ceb73c1c9a042b0f8f9f65c264e8426d65f7f3..0e004cc42a22b1593fa308f3adac735568ddf2ee 100644 (file)
@@ -217,7 +217,6 @@ static const struct fib_rules_ops __net_initconst ip6mr_rules_ops_template = {
        .match          = ip6mr_rule_match,
        .configure      = ip6mr_rule_configure,
        .compare        = ip6mr_rule_compare,
-       .default_pref   = fib_default_rule_pref,
        .fill           = ip6mr_rule_fill,
        .nlgroup        = RTNLGRP_IPV6_RULE,
        .policy         = ip6mr_rule_policy,
@@ -550,7 +549,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
 
        if (it->cache == &mrt->mfc6_unres_queue)
                spin_unlock_bh(&mfc_unres_lock);
-       else if (it->cache == mrt->mfc6_cache_array)
+       else if (it->cache == &mrt->mfc6_cache_array[it->ct])
                read_unlock(&mrt_lock);
 }
 
index f45cac6f83563977186732adacedcec55a6b3ce7..53617d71518850dfc26220dd15923b8027c5249a 100644 (file)
@@ -1748,7 +1748,7 @@ static int ip6_convert_metrics(struct mx6_config *mxc,
        return -EINVAL;
 }
 
-int ip6_route_add(struct fib6_config *cfg)
+int ip6_route_info_create(struct fib6_config *cfg, struct rt6_info **rt_ret)
 {
        int err;
        struct net *net = cfg->fc_nlinfo.nl_net;
@@ -1756,7 +1756,6 @@ int ip6_route_add(struct fib6_config *cfg)
        struct net_device *dev = NULL;
        struct inet6_dev *idev = NULL;
        struct fib6_table *table;
-       struct mx6_config mxc = { .mx = NULL, };
        int addr_type;
 
        if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
@@ -1981,6 +1980,32 @@ install_route:
 
        cfg->fc_nlinfo.nl_net = dev_net(dev);
 
+       *rt_ret = rt;
+
+       return 0;
+out:
+       if (dev)
+               dev_put(dev);
+       if (idev)
+               in6_dev_put(idev);
+       if (rt)
+               dst_free(&rt->dst);
+
+       *rt_ret = NULL;
+
+       return err;
+}
+
+int ip6_route_add(struct fib6_config *cfg)
+{
+       struct mx6_config mxc = { .mx = NULL, };
+       struct rt6_info *rt = NULL;
+       int err;
+
+       err = ip6_route_info_create(cfg, &rt);
+       if (err)
+               goto out;
+
        err = ip6_convert_metrics(&mxc, cfg);
        if (err)
                goto out;
@@ -1988,14 +2013,12 @@ install_route:
        err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
 
        kfree(mxc.mx);
+
        return err;
 out:
-       if (dev)
-               dev_put(dev);
-       if (idev)
-               in6_dev_put(idev);
        if (rt)
                dst_free(&rt->dst);
+
        return err;
 }
 
@@ -2776,19 +2799,78 @@ errout:
        return err;
 }
 
-static int ip6_route_multipath(struct fib6_config *cfg, int add)
+struct rt6_nh {
+       struct rt6_info *rt6_info;
+       struct fib6_config r_cfg;
+       struct mx6_config mxc;
+       struct list_head next;
+};
+
+static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
+{
+       struct rt6_nh *nh;
+
+       list_for_each_entry(nh, rt6_nh_list, next) {
+               pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6 nexthop %pI6 ifi %d\n",
+                       &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
+                       nh->r_cfg.fc_ifindex);
+       }
+}
+
+static int ip6_route_info_append(struct list_head *rt6_nh_list,
+                                struct rt6_info *rt, struct fib6_config *r_cfg)
+{
+       struct rt6_nh *nh;
+       struct rt6_info *rtnh;
+       int err = -EEXIST;
+
+       list_for_each_entry(nh, rt6_nh_list, next) {
+               /* check if rt6_info already exists */
+               rtnh = nh->rt6_info;
+
+               if (rtnh->dst.dev == rt->dst.dev &&
+                   rtnh->rt6i_idev == rt->rt6i_idev &&
+                   ipv6_addr_equal(&rtnh->rt6i_gateway,
+                                   &rt->rt6i_gateway))
+                       return err;
+       }
+
+       nh = kzalloc(sizeof(*nh), GFP_KERNEL);
+       if (!nh)
+               return -ENOMEM;
+       nh->rt6_info = rt;
+       err = ip6_convert_metrics(&nh->mxc, r_cfg);
+       if (err) {
+               kfree(nh);
+               return err;
+       }
+       memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
+       list_add_tail(&nh->next, rt6_nh_list);
+
+       return 0;
+}
+
+static int ip6_route_multipath_add(struct fib6_config *cfg)
 {
        struct fib6_config r_cfg;
        struct rtnexthop *rtnh;
+       struct rt6_info *rt;
+       struct rt6_nh *err_nh;
+       struct rt6_nh *nh, *nh_safe;
        int remaining;
        int attrlen;
-       int err = 0, last_err = 0;
+       int err = 1;
+       int nhn = 0;
+       int replace = (cfg->fc_nlinfo.nlh &&
+                      (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
+       LIST_HEAD(rt6_nh_list);
 
        remaining = cfg->fc_mp_len;
-beginning:
        rtnh = (struct rtnexthop *)cfg->fc_mp;
 
-       /* Parse a Multipath Entry */
+       /* Parse a Multipath Entry and build a list (rt6_nh_list) of
+        * rt6_info structs per nexthop
+        */
        while (rtnh_ok(rtnh, remaining)) {
                memcpy(&r_cfg, cfg, sizeof(*cfg));
                if (rtnh->rtnh_ifindex)
@@ -2808,22 +2890,32 @@ beginning:
                        if (nla)
                                r_cfg.fc_encap_type = nla_get_u16(nla);
                }
-               err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
+
+               err = ip6_route_info_create(&r_cfg, &rt);
+               if (err)
+                       goto cleanup;
+
+               err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
                if (err) {
-                       last_err = err;
-                       /* If we are trying to remove a route, do not stop the
-                        * loop when ip6_route_del() fails (because next hop is
-                        * already gone), we should try to remove all next hops.
-                        */
-                       if (add) {
-                               /* If add fails, we should try to delete all
-                                * next hops that have been already added.
-                                */
-                               add = 0;
-                               remaining = cfg->fc_mp_len - remaining;
-                               goto beginning;
-                       }
+                       dst_free(&rt->dst);
+                       goto cleanup;
+               }
+
+               rtnh = rtnh_next(rtnh, &remaining);
+       }
+
+       err_nh = NULL;
+       list_for_each_entry(nh, &rt6_nh_list, next) {
+               err = __ip6_ins_rt(nh->rt6_info, &cfg->fc_nlinfo, &nh->mxc);
+               /* nh->rt6_info is used or freed at this point, reset to NULL*/
+               nh->rt6_info = NULL;
+               if (err) {
+                       if (replace && nhn)
+                               ip6_print_replace_route_err(&rt6_nh_list);
+                       err_nh = nh;
+                       goto add_errout;
                }
+
                /* Because each route is added like a single route we remove
                 * these flags after the first nexthop: if there is a collision,
                 * we have already failed to add the first nexthop:
@@ -2833,6 +2925,62 @@ beginning:
                 */
                cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
                                                     NLM_F_REPLACE);
+               nhn++;
+       }
+
+       goto cleanup;
+
+add_errout:
+       /* Delete routes that were already added */
+       list_for_each_entry(nh, &rt6_nh_list, next) {
+               if (err_nh == nh)
+                       break;
+               ip6_route_del(&nh->r_cfg);
+       }
+
+cleanup:
+       list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
+               if (nh->rt6_info)
+                       dst_free(&nh->rt6_info->dst);
+               kfree(nh->mxc.mx);
+               list_del(&nh->next);
+               kfree(nh);
+       }
+
+       return err;
+}
+
+static int ip6_route_multipath_del(struct fib6_config *cfg)
+{
+       struct fib6_config r_cfg;
+       struct rtnexthop *rtnh;
+       int remaining;
+       int attrlen;
+       int err = 1, last_err = 0;
+
+       remaining = cfg->fc_mp_len;
+       rtnh = (struct rtnexthop *)cfg->fc_mp;
+
+       /* Parse a Multipath Entry */
+       while (rtnh_ok(rtnh, remaining)) {
+               memcpy(&r_cfg, cfg, sizeof(*cfg));
+               if (rtnh->rtnh_ifindex)
+                       r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
+
+               attrlen = rtnh_attrlen(rtnh);
+               if (attrlen > 0) {
+                       struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
+
+                       nla = nla_find(attrs, attrlen, RTA_GATEWAY);
+                       if (nla) {
+                               nla_memcpy(&r_cfg.fc_gateway, nla, 16);
+                               r_cfg.fc_flags |= RTF_GATEWAY;
+                       }
+               }
+               err = ip6_route_del(&r_cfg);
+               if (err)
+                       last_err = err;
+
                rtnh = rtnh_next(rtnh, &remaining);
        }
 
@@ -2849,7 +2997,7 @@ static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                return err;
 
        if (cfg.fc_mp)
-               return ip6_route_multipath(&cfg, 0);
+               return ip6_route_multipath_del(&cfg);
        else
                return ip6_route_del(&cfg);
 }
@@ -2864,7 +3012,7 @@ static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
                return err;
 
        if (cfg.fc_mp)
-               return ip6_route_multipath(&cfg, 1);
+               return ip6_route_multipath_add(&cfg);
        else
                return ip6_route_add(&cfg);
 }
index 685ec13ed7c2b0a2dcdcf82d7388d1c44d041a26..17b1fe961c5d67b958346dee07e265448408cc71 100644 (file)
@@ -2468,6 +2468,10 @@ static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
            rssi_hyst == bss_conf->cqm_rssi_hyst)
                return 0;
 
+       if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER &&
+           !(sdata->vif.driver_flags & IEEE80211_VIF_SUPPORTS_CQM_RSSI))
+               return -EOPNOTSUPP;
+
        bss_conf->cqm_rssi_thold = rssi_thold;
        bss_conf->cqm_rssi_hyst = rssi_hyst;
 
index 705ef1d040edfb70042fdd9cd25f050b19dab4c0..cd7e55e08a238c23f546ce7d6860759345eff371 100644 (file)
@@ -4267,6 +4267,8 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_supported_band *sband;
        struct cfg80211_chan_def chandef;
        int ret;
+       u32 i;
+       bool have_80mhz;
 
        sband = local->hw.wiphy->bands[cbss->channel->band];
 
@@ -4317,6 +4319,20 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata,
                }
        }
 
+       /* Allow VHT if at least one channel on the sband supports 80 MHz */
+       have_80mhz = false;
+       for (i = 0; i < sband->n_channels; i++) {
+               if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+                                               IEEE80211_CHAN_NO_80MHZ))
+                       continue;
+
+               have_80mhz = true;
+               break;
+       }
+
+       if (!have_80mhz)
+               ifmgd->flags |= IEEE80211_STA_DISABLE_VHT;
+
        ifmgd->flags |= ieee80211_determine_chantype(sdata, sband,
                                                     cbss->channel,
                                                     ht_cap, ht_oper, vht_oper,
index 9857693b91ec721ff71e3f3cd1087ccc289912e1..9ce8883d5f449ed438f0a4a4a4807ede153c6725 100644 (file)
@@ -716,7 +716,7 @@ static bool rate_control_cap_mask(struct ieee80211_sub_if_data *sdata,
 
                /* Filter out rates that the STA does not support */
                *mask &= sta->supp_rates[sband->band];
-               for (i = 0; i < sizeof(mcs_mask); i++)
+               for (i = 0; i < IEEE80211_HT_MCS_MASK_LEN; i++)
                        mcs_mask[i] &= sta->ht_cap.mcs.rx_mask[i];
 
                sta_vht_cap = sta->vht_cap.vht_mcs.rx_mcs_map;
index aee701a5649e59ebd03ef300f25e33eadfc280d5..4e202d0679b26a0dfa0b89c5ae189f2ce7b56883 100644 (file)
@@ -1249,6 +1249,58 @@ static void iee80211_tdls_recalc_chanctx(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&local->chanctx_mtx);
 }
 
+static int iee80211_tdls_have_ht_peers(struct ieee80211_sub_if_data *sdata)
+{
+       struct sta_info *sta;
+       bool result = false;
+
+       rcu_read_lock();
+       list_for_each_entry_rcu(sta, &sdata->local->sta_list, list) {
+               if (!sta->sta.tdls || sta->sdata != sdata || !sta->uploaded ||
+                   !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
+                   !test_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH) ||
+                   !sta->sta.ht_cap.ht_supported)
+                       continue;
+               result = true;
+               break;
+       }
+       rcu_read_unlock();
+
+       return result;
+}
+
+static void
+iee80211_tdls_recalc_ht_protection(struct ieee80211_sub_if_data *sdata,
+                                  struct sta_info *sta)
+{
+       struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+       bool tdls_ht;
+       u16 protection = IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED |
+                        IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT |
+                        IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
+       u16 opmode;
+
+       /* Nothing to do if the BSS connection uses HT */
+       if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT))
+               return;
+
+       tdls_ht = (sta && sta->sta.ht_cap.ht_supported) ||
+                 iee80211_tdls_have_ht_peers(sdata);
+
+       opmode = sdata->vif.bss_conf.ht_operation_mode;
+
+       if (tdls_ht)
+               opmode |= protection;
+       else
+               opmode &= ~protection;
+
+       if (opmode == sdata->vif.bss_conf.ht_operation_mode)
+               return;
+
+       sdata->vif.bss_conf.ht_operation_mode = opmode;
+       ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_HT);
+}
+
 int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                        const u8 *peer, enum nl80211_tdls_operation oper)
 {
@@ -1274,6 +1326,10 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                return -ENOTSUPP;
        }
 
+       /* protect possible bss_conf changes and avoid concurrency in
+        * ieee80211_bss_info_change_notify()
+        */
+       sdata_lock(sdata);
        mutex_lock(&local->mtx);
        tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
 
@@ -1287,16 +1343,18 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
 
                iee80211_tdls_recalc_chanctx(sdata);
 
-               rcu_read_lock();
+               mutex_lock(&local->sta_mtx);
                sta = sta_info_get(sdata, peer);
                if (!sta) {
-                       rcu_read_unlock();
+                       mutex_unlock(&local->sta_mtx);
                        ret = -ENOLINK;
                        break;
                }
 
+               iee80211_tdls_recalc_ht_protection(sdata, sta);
+
                set_sta_flag(sta, WLAN_STA_TDLS_PEER_AUTH);
-               rcu_read_unlock();
+               mutex_unlock(&local->sta_mtx);
 
                WARN_ON_ONCE(is_zero_ether_addr(sdata->u.mgd.tdls_peer) ||
                             !ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
@@ -1318,6 +1376,11 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                ieee80211_flush_queues(local, sdata, false);
 
                ret = sta_info_destroy_addr(sdata, peer);
+
+               mutex_lock(&local->sta_mtx);
+               iee80211_tdls_recalc_ht_protection(sdata, NULL);
+               mutex_unlock(&local->sta_mtx);
+
                iee80211_tdls_recalc_chanctx(sdata);
                break;
        default:
@@ -1335,6 +1398,7 @@ int ieee80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
                                     &sdata->u.mgd.request_smps_work);
 
        mutex_unlock(&local->mtx);
+       sdata_unlock(sdata);
        return ret;
 }
 
index 834ccdbc74be1ccd518aaa952ad854810d94f4ca..ff1c798921a6acc90181923456cf92bed0973f61 100644 (file)
@@ -120,6 +120,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        struct ieee80211_sta_vht_cap *vht_cap = &sta->sta.vht_cap;
        struct ieee80211_sta_vht_cap own_cap;
        u32 cap_info, i;
+       bool have_80mhz;
 
        memset(vht_cap, 0, sizeof(*vht_cap));
 
@@ -129,6 +130,20 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
        if (!vht_cap_ie || !sband->vht_cap.vht_supported)
                return;
 
+       /* Allow VHT if at least one channel on the sband supports 80 MHz */
+       have_80mhz = false;
+       for (i = 0; i < sband->n_channels; i++) {
+               if (sband->channels[i].flags & (IEEE80211_CHAN_DISABLED |
+                                               IEEE80211_CHAN_NO_80MHZ))
+                       continue;
+
+               have_80mhz = true;
+               break;
+       }
+
+       if (!have_80mhz)
+               return;
+
        /*
         * A VHT STA must support 40 MHz, but if we verify that here
         * then we break a few things - some APs (e.g. Netgear R6300v2
index afe905c208af879a5bd48f473bea8597d39e84cf..691b54fcaf2a47189665454fdcaff419236b020f 100644 (file)
@@ -152,9 +152,13 @@ htable_bits(u32 hashsize)
 #define SET_HOST_MASK(family)  (family == AF_INET ? 32 : 128)
 
 #ifdef IP_SET_HASH_WITH_NET0
+/* cidr from 0 to SET_HOST_MASK() value and c = cidr + 1 */
 #define NLEN(family)           (SET_HOST_MASK(family) + 1)
+#define CIDR_POS(c)            ((c) - 1)
 #else
+/* cidr from 1 to SET_HOST_MASK() value and c = cidr + 1 */
 #define NLEN(family)           SET_HOST_MASK(family)
+#define CIDR_POS(c)            ((c) - 2)
 #endif
 
 #else
@@ -305,7 +309,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
                } else if (h->nets[i].cidr[n] < cidr) {
                        j = i;
                } else if (h->nets[i].cidr[n] == cidr) {
-                       h->nets[cidr - 1].nets[n]++;
+                       h->nets[CIDR_POS(cidr)].nets[n]++;
                        return;
                }
        }
@@ -314,7 +318,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
                        h->nets[i].cidr[n] = h->nets[i - 1].cidr[n];
        }
        h->nets[i].cidr[n] = cidr;
-       h->nets[cidr - 1].nets[n] = 1;
+       h->nets[CIDR_POS(cidr)].nets[n] = 1;
 }
 
 static void
@@ -325,8 +329,8 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 nets_length, u8 n)
        for (i = 0; i < nets_length; i++) {
                if (h->nets[i].cidr[n] != cidr)
                        continue;
-               h->nets[cidr - 1].nets[n]--;
-               if (h->nets[cidr - 1].nets[n] > 0)
+               h->nets[CIDR_POS(cidr)].nets[n]--;
+               if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
                        return;
                for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
                        h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
index 3c862c0a76d1eac5fcb2d94675e4c3a7fe498d89..a93dfebffa811bcaee4be26c935e583b8ed3fc00 100644 (file)
@@ -131,6 +131,13 @@ hash_netnet4_data_next(struct hash_netnet4_elem *next,
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
+static void
+hash_netnet4_init(struct hash_netnet4_elem *e)
+{
+       e->cidr[0] = HOST_MASK;
+       e->cidr[1] = HOST_MASK;
+}
+
 static int
 hash_netnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
@@ -160,7 +167,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        const struct hash_netnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+       struct hash_netnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, last;
        u32 ip2 = 0, ip2_from = 0, ip2_to = 0, last2;
@@ -169,6 +176,7 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+       hash_netnet4_init(&e);
        if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
                return -IPSET_ERR_PROTOCOL;
@@ -357,6 +365,13 @@ hash_netnet6_data_next(struct hash_netnet4_elem *next,
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
+static void
+hash_netnet6_init(struct hash_netnet6_elem *e)
+{
+       e->cidr[0] = HOST_MASK;
+       e->cidr[1] = HOST_MASK;
+}
+
 static int
 hash_netnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
                  const struct xt_action_param *par,
@@ -385,13 +400,14 @@ hash_netnet6_uadt(struct ip_set *set, struct nlattr *tb[],
                  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
 {
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+       struct hash_netnet6_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        int ret;
 
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+       hash_netnet6_init(&e);
        if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
                return -IPSET_ERR_PROTOCOL;
index 0c68734f5cc4af2f5c23170c6369b1fad6b7ff0e..9a14c237830f4b2ccbbd999662256d88cd95662e 100644 (file)
@@ -142,6 +142,13 @@ hash_netportnet4_data_next(struct hash_netportnet4_elem *next,
 #define HOST_MASK      32
 #include "ip_set_hash_gen.h"
 
+static void
+hash_netportnet4_init(struct hash_netportnet4_elem *e)
+{
+       e->cidr[0] = HOST_MASK;
+       e->cidr[1] = HOST_MASK;
+}
+
 static int
 hash_netportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
                      const struct xt_action_param *par,
@@ -175,7 +182,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        const struct hash_netportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netportnet4_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+       struct hash_netportnet4_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 ip = 0, ip_to = 0, ip_last, p = 0, port, port_to;
        u32 ip2_from = 0, ip2_to = 0, ip2_last, ip2;
@@ -185,6 +192,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+       hash_netportnet4_init(&e);
        if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
                     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -412,6 +420,13 @@ hash_netportnet6_data_next(struct hash_netportnet4_elem *next,
 #define IP_SET_EMIT_CREATE
 #include "ip_set_hash_gen.h"
 
+static void
+hash_netportnet6_init(struct hash_netportnet6_elem *e)
+{
+       e->cidr[0] = HOST_MASK;
+       e->cidr[1] = HOST_MASK;
+}
+
 static int
 hash_netportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
                      const struct xt_action_param *par,
@@ -445,7 +460,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
 {
        const struct hash_netportnet *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
-       struct hash_netportnet6_elem e = { .cidr = { HOST_MASK, HOST_MASK, }, };
+       struct hash_netportnet6_elem e = { };
        struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
        u32 port, port_to;
        bool with_ports = false;
@@ -454,6 +469,7 @@ hash_netportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
        if (tb[IPSET_ATTR_LINENO])
                *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);
 
+       hash_netportnet6_init(&e);
        if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
                     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
                     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
index eedf0495f11f5eb93c4bce1e2ffbb1b04cc273b3..c09d6c7198f60d809b36783ca1de43646025c876 100644 (file)
@@ -313,12 +313,13 @@ out_free:
 }
 EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
 
-static void nf_ct_tmpl_free(struct nf_conn *tmpl)
+void nf_ct_tmpl_free(struct nf_conn *tmpl)
 {
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
 }
+EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
 
 static void
 destroy_conntrack(struct nf_conntrack *nfct)
index 888b9558415eb23bedd78b80f1bb458763331e68..c8a4a48bced988a29cd19df06a00117ea026c6ad 100644 (file)
@@ -380,7 +380,7 @@ static int __net_init synproxy_net_init(struct net *net)
 err3:
        free_percpu(snet->stats);
 err2:
-       nf_conntrack_free(ct);
+       nf_ct_tmpl_free(ct);
 err1:
        return err;
 }
index 0c0e8ecf02abbb4214b18f00ef798d728234866b..70277b11f742e8f0a2756c2ba54e1565419adc95 100644 (file)
@@ -444,6 +444,7 @@ done:
 static void nfnetlink_rcv(struct sk_buff *skb)
 {
        struct nlmsghdr *nlh = nlmsg_hdr(skb);
+       u_int16_t res_id;
        int msglen;
 
        if (nlh->nlmsg_len < NLMSG_HDRLEN ||
@@ -468,7 +469,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
 
                nfgenmsg = nlmsg_data(nlh);
                skb_pull(skb, msglen);
-               nfnetlink_rcv_batch(skb, nlh, nfgenmsg->res_id);
+               /* Work around old nft using host byte order */
+               if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
+                       res_id = NFNL_SUBSYS_NFTABLES;
+               else
+                       res_id = ntohs(nfgenmsg->res_id);
+               nfnetlink_rcv_batch(skb, nlh, res_id);
        } else {
                netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
        }
index 685cc6a17163ffaf1c7649d726f98c5118066fdd..a5cd6d90b78b16ebd0c96c4d1d426ad7aea6e86b 100644 (file)
@@ -301,7 +301,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           __be32 **packet_id_ptr)
 {
        size_t size;
-       size_t data_len = 0, cap_len = 0;
+       size_t data_len = 0, cap_len = 0, rem_len = 0;
        unsigned int hlen = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
@@ -360,6 +360,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                hlen = min_t(unsigned int, hlen, data_len);
                size += sizeof(struct nlattr) + hlen;
                cap_len = entskb->len;
+               rem_len = data_len - hlen;
                break;
        }
 
@@ -377,7 +378,7 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                        size += nla_total_size(seclen);
        }
 
-       skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
+       skb = __netlink_alloc_skb(net->nfnl, size, rem_len, queue->peer_portid,
                                  GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
index 8e524898ccea234a2b5cae3bdfaf2cd72d023238..faf32d888198a72a50c293312c014bcb63747654 100644 (file)
@@ -255,7 +255,7 @@ out:
        return 0;
 
 err3:
-       nf_conntrack_free(ct);
+       nf_ct_tmpl_free(ct);
 err2:
        nf_ct_l3proto_module_put(par->family);
 err1:
index 50889be1517d04aa3f8d118196205d737a7dfa4d..7f86d3b550601839f730d4c9f15951f5410e3fd4 100644 (file)
@@ -674,12 +674,19 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
 
        mask = datagram_poll(file, sock, wait);
 
-       spin_lock_bh(&sk->sk_receive_queue.lock);
-       if (nlk->rx_ring.pg_vec) {
-               if (netlink_has_valid_frame(&nlk->rx_ring))
-                       mask |= POLLIN | POLLRDNORM;
+       /* We could already have received frames in the normal receive
+        * queue, that will show up as NL_MMAP_STATUS_COPY in the ring,
+        * so if mask contains pollin/etc already, there's no point
+        * walking the ring.
+        */
+       if ((mask & (POLLIN | POLLRDNORM)) != (POLLIN | POLLRDNORM)) {
+               spin_lock_bh(&sk->sk_receive_queue.lock);
+               if (nlk->rx_ring.pg_vec) {
+                       if (netlink_has_valid_frame(&nlk->rx_ring))
+                               mask |= POLLIN | POLLRDNORM;
+               }
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
        }
-       spin_unlock_bh(&sk->sk_receive_queue.lock);
 
        spin_lock_bh(&sk->sk_write_queue.lock);
        if (nlk->tx_ring.pg_vec) {
@@ -1837,15 +1844,16 @@ retry:
 }
 EXPORT_SYMBOL(netlink_unicast);
 
-struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
-                                 u32 dst_portid, gfp_t gfp_mask)
+struct sk_buff *__netlink_alloc_skb(struct sock *ssk, unsigned int size,
+                                   unsigned int ldiff, u32 dst_portid,
+                                   gfp_t gfp_mask)
 {
 #ifdef CONFIG_NETLINK_MMAP
+       unsigned int maxlen, linear_size;
        struct sock *sk = NULL;
        struct sk_buff *skb;
        struct netlink_ring *ring;
        struct nl_mmap_hdr *hdr;
-       unsigned int maxlen;
 
        sk = netlink_getsockbyportid(ssk, dst_portid);
        if (IS_ERR(sk))
@@ -1856,7 +1864,11 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
        if (ring->pg_vec == NULL)
                goto out_put;
 
-       if (ring->frame_size - NL_MMAP_HDRLEN < size)
+       /* We need to account the full linear size needed as a ring
+        * slot cannot have non-linear parts.
+        */
+       linear_size = size + ldiff;
+       if (ring->frame_size - NL_MMAP_HDRLEN < linear_size)
                goto out_put;
 
        skb = alloc_skb_head(gfp_mask);
@@ -1870,13 +1882,14 @@ struct sk_buff *netlink_alloc_skb(struct sock *ssk, unsigned int size,
 
        /* check again under lock */
        maxlen = ring->frame_size - NL_MMAP_HDRLEN;
-       if (maxlen < size)
+       if (maxlen < linear_size)
                goto out_free;
 
        netlink_forward_ring(ring);
        hdr = netlink_current_frame(ring, NL_MMAP_STATUS_UNUSED);
        if (hdr == NULL)
                goto err2;
+
        netlink_ring_setup_skb(skb, sk, ring, hdr);
        netlink_set_status(hdr, NL_MMAP_STATUS_RESERVED);
        atomic_inc(&ring->pending);
@@ -1902,7 +1915,7 @@ out:
 #endif
        return alloc_skb(size, gfp_mask);
 }
-EXPORT_SYMBOL_GPL(netlink_alloc_skb);
+EXPORT_SYMBOL_GPL(__netlink_alloc_skb);
 
 int netlink_has_listeners(struct sock *sk, unsigned int group)
 {
index af7cdef42066bdab81286961f1667d631653da6d..2a071f470d578e135e6a04c462199f9a5cdb7b69 100644 (file)
@@ -5,6 +5,7 @@
 config OPENVSWITCH
        tristate "Open vSwitch"
        depends on INET
+       depends on (!NF_CONNTRACK || NF_CONNTRACK)
        select LIBCRC32C
        select MPLS
        select NET_MPLS_GSO
@@ -31,17 +32,6 @@ config OPENVSWITCH
 
          If unsure, say N.
 
-config OPENVSWITCH_CONNTRACK
-       bool "Open vSwitch conntrack action support"
-       depends on OPENVSWITCH
-       depends on NF_CONNTRACK
-       default OPENVSWITCH
-       ---help---
-         If you say Y here, then Open vSwitch module will be able to pass
-         packets through conntrack.
-
-         Say N to exclude this support and reduce the binary size.
-
 config OPENVSWITCH_GRE
        tristate "Open vSwitch GRE tunneling support"
        depends on OPENVSWITCH
index 5b5913b06f540e5513e5a5618a37597893aa183e..60f809085b920bfa95696542c59bf13b4d6a9510 100644 (file)
@@ -15,7 +15,9 @@ openvswitch-y := \
        vport-internal_dev.o \
        vport-netdev.o
 
-openvswitch-$(CONFIG_OPENVSWITCH_CONNTRACK) += conntrack.o
+ifneq ($(CONFIG_NF_CONNTRACK),)
+openvswitch-y += conntrack.o
+endif
 
 obj-$(CONFIG_OPENVSWITCH_VXLAN)+= vport-vxlan.o
 obj-$(CONFIG_OPENVSWITCH_GENEVE)+= vport-geneve.o
index 3cb30667a7dcb83d0009a0f870f31e8dd72fccb7..43f5dd7a55774414aeb7aad8c0560db3e0596035 100644 (file)
@@ -19,7 +19,7 @@
 struct ovs_conntrack_info;
 enum ovs_key_attr;
 
-#if defined(CONFIG_OPENVSWITCH_CONNTRACK)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 void ovs_ct_init(struct net *);
 void ovs_ct_exit(struct net *);
 bool ovs_ct_verify(struct net *, enum ovs_key_attr attr);
@@ -82,5 +82,5 @@ static inline int ovs_ct_put_key(const struct sw_flow_key *key,
 }
 
 static inline void ovs_ct_free_action(const struct nlattr *a) { }
-#endif
+#endif /* CONFIG_NF_CONNTRACK */
 #endif /* ovs_conntrack.h */
index a50e652eb269dce22f52900754839537678ebda8..49adeef8090caea90be3e42702276314bca1d118 100644 (file)
@@ -70,7 +70,8 @@ static struct hlist_head *rds_conn_bucket(__be32 laddr, __be32 faddr)
 } while (0)
 
 /* rcu read lock must be held or the connection spinlock */
-static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
+static struct rds_connection *rds_conn_lookup(struct net *net,
+                                             struct hlist_head *head,
                                              __be32 laddr, __be32 faddr,
                                              struct rds_transport *trans)
 {
@@ -78,7 +79,7 @@ static struct rds_connection *rds_conn_lookup(struct hlist_head *head,
 
        hlist_for_each_entry_rcu(conn, head, c_hash_node) {
                if (conn->c_faddr == faddr && conn->c_laddr == laddr &&
-                               conn->c_trans == trans) {
+                   conn->c_trans == trans && net == rds_conn_net(conn)) {
                        ret = conn;
                        break;
                }
@@ -132,7 +133,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
        if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
                goto new_conn;
        rcu_read_lock();
-       conn = rds_conn_lookup(head, laddr, faddr, trans);
+       conn = rds_conn_lookup(net, head, laddr, faddr, trans);
        if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
            laddr == faddr && !is_outgoing) {
                /* This is a looped back IB connection, and we're
@@ -189,6 +190,12 @@ new_conn:
                }
        }
 
+       if (trans == NULL) {
+               kmem_cache_free(rds_conn_slab, conn);
+               conn = ERR_PTR(-ENODEV);
+               goto out;
+       }
+
        conn->c_trans = trans;
 
        ret = trans->conn_alloc(conn, gfp);
@@ -239,7 +246,7 @@ new_conn:
                if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
                        found = NULL;
                else
-                       found = rds_conn_lookup(head, laddr, faddr, trans);
+                       found = rds_conn_lookup(net, head, laddr, faddr, trans);
                if (found) {
                        trans->conn_free(conn->c_transport_data);
                        kmem_cache_free(rds_conn_slab, conn);
index f12149a29cb19b1b508b30528dc3bfd032799622..b41e9ea2ffff461847465d621a3284480f433cc5 100644 (file)
@@ -341,7 +341,15 @@ static void __rfkill_switch_all(const enum rfkill_type type, bool blocked)
 {
        struct rfkill *rfkill;
 
-       rfkill_global_states[type].cur = blocked;
+       if (type == RFKILL_TYPE_ALL) {
+               int i;
+
+               for (i = 0; i < NUM_RFKILL_TYPES; i++)
+                       rfkill_global_states[i].cur = blocked;
+       } else {
+               rfkill_global_states[type].cur = blocked;
+       }
+
        list_for_each_entry(rfkill, &rfkill_list, node) {
                if (rfkill->type != type && type != RFKILL_TYPE_ALL)
                        continue;
index 4345790ad3266c353eeac5398593c2a9ce4effda..b7143337e4fa025fdb473732fdc064503e731dd4 100644 (file)
@@ -506,14 +506,22 @@ static void sctp_v4_get_dst(struct sctp_transport *t, union sctp_addr *saddr,
                if (IS_ERR(rt))
                        continue;
 
+               if (!dst)
+                       dst = &rt->dst;
+
                /* Ensure the src address belongs to the output
                 * interface.
                 */
                odev = __ip_dev_find(sock_net(sk), laddr->a.v4.sin_addr.s_addr,
                                     false);
-               if (!odev || odev->ifindex != fl4->flowi4_oif)
+               if (!odev || odev->ifindex != fl4->flowi4_oif) {
+                       if (&rt->dst != dst)
+                               dst_release(&rt->dst);
                        continue;
+               }
 
+               if (dst != &rt->dst)
+                       dst_release(dst);
                dst = &rt->dst;
                break;
        }
index 16c1c43980a12dbe11832ad61777333446b69eea..fda38f830a10869713177220f8d2066c6076da0c 100644 (file)
@@ -853,12 +853,8 @@ int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
                .cb = cb,
                .idx = idx,
        };
-       int err;
-
-       err = switchdev_port_obj_dump(dev, &dump.obj);
-       if (err)
-               return err;
 
+       switchdev_port_obj_dump(dev, &dump.obj);
        return dump.idx;
 }
 EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
index 8b010c976b2f7c8eba5f6fe1cadb516b4e0f7269..41042de3ae9bcfad4504e0bcbb29d3bea4512bb5 100644 (file)
@@ -169,6 +169,30 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
        }
 }
 
+/**
+ * bclink_prepare_wakeup - prepare users for wakeup after congestion
+ * @bcl: broadcast link
+ * @resultq: queue for users which can be woken up
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from link wait queue to specified queue for wakeup
+ */
+static void bclink_prepare_wakeup(struct tipc_link *bcl, struct sk_buff_head *resultq)
+{
+       int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+       int imp, lim;
+       struct sk_buff *skb, *tmp;
+
+       skb_queue_walk_safe(&bcl->wakeupq, skb, tmp) {
+               imp = TIPC_SKB_CB(skb)->chain_imp;
+               lim = bcl->window + bcl->backlog[imp].limit;
+               pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+               if ((pnd[imp] + bcl->backlog[imp].len) >= lim)
+                       continue;
+               skb_unlink(skb, &bcl->wakeupq);
+               skb_queue_tail(resultq, skb);
+       }
+}
+
 /**
  * tipc_bclink_wakeup_users - wake up pending users
  *
@@ -177,8 +201,12 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 void tipc_bclink_wakeup_users(struct net *net)
 {
        struct tipc_net *tn = net_generic(net, tipc_net_id);
+       struct tipc_link *bcl = tn->bcl;
+       struct sk_buff_head resultq;
 
-       tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
+       skb_queue_head_init(&resultq);
+       bclink_prepare_wakeup(bcl, &resultq);
+       tipc_sk_rcv(net, &resultq);
 }
 
 /**
index b144485946f2e5ce2ec6411cd52462fa278b185e..2510b231451ec8c7e0f0f33d523739dce3387ca3 100644 (file)
@@ -2625,7 +2625,7 @@ static void restore_regulatory_settings(bool reset_user)
         * settings, user regulatory settings takes precedence.
         */
        if (is_an_alpha2(alpha2))
-               regulatory_hint_user(user_alpha2, NL80211_USER_REG_HINT_USER);
+               regulatory_hint_user(alpha2, NL80211_USER_REG_HINT_USER);
 
        spin_lock(&reg_requests_lock);
        list_splice_tail_init(&tmp_reg_req_list, &reg_requests_list);
index 98c95f2fcba4a63912fb81fbafd3854b08835e00..76e38d231e9959d085673b4eda7e4065f4a7fbb7 100644 (file)
@@ -64,10 +64,10 @@ static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
        int ret;
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
-       timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-       ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-                                 timer->irq->irq,
-                                 timer->irq->level);
+       kvm_vgic_set_phys_irq_active(timer->map, true);
+       ret = kvm_vgic_inject_mapped_irq(vcpu->kvm, vcpu->vcpu_id,
+                                        timer->map,
+                                        timer->irq->level);
        WARN_ON(ret);
 }
 
@@ -117,7 +117,8 @@ bool kvm_timer_should_fire(struct kvm_vcpu *vcpu)
        cycle_t cval, now;
 
        if ((timer->cntv_ctl & ARCH_TIMER_CTRL_IT_MASK) ||
-               !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE))
+           !(timer->cntv_ctl & ARCH_TIMER_CTRL_ENABLE) ||
+           kvm_vgic_get_phys_irq_active(timer->map))
                return false;
 
        cval = timer->cntv_cval;
@@ -184,10 +185,11 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu)
        timer_arm(timer, ns);
 }
 
-void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
-                         const struct kvm_irq_level *irq)
+int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
+                        const struct kvm_irq_level *irq)
 {
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       struct irq_phys_map *map;
 
        /*
         * The vcpu timer irq number cannot be determined in
@@ -196,6 +198,17 @@ void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
         * vcpu timer irq number when the vcpu is reset.
         */
        timer->irq = irq;
+
+       /*
+        * Tell the VGIC that the virtual interrupt is tied to a
+        * physical interrupt. We do that once per VCPU.
+        */
+       map = kvm_vgic_map_phys_irq(vcpu, irq->irq, host_vtimer_irq);
+       if (WARN_ON(IS_ERR(map)))
+               return PTR_ERR(map);
+
+       timer->map = map;
+       return 0;
 }
 
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
@@ -335,6 +348,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
        timer_disarm(timer);
+       if (timer->map)
+               kvm_vgic_unmap_phys_irq(vcpu, timer->map);
 }
 
 void kvm_timer_enable(struct kvm *kvm)
index f9b9c7c5137214cb56bcc0cf7455d2d712dc6848..8d7b04db8471877fc2d28df790c6316ab51f6399 100644 (file)
@@ -48,6 +48,10 @@ static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
                lr_desc.state |= LR_STATE_ACTIVE;
        if (val & GICH_LR_EOI)
                lr_desc.state |= LR_EOI_INT;
+       if (val & GICH_LR_HW) {
+               lr_desc.state |= LR_HW;
+               lr_desc.hwirq = (val & GICH_LR_PHYSID_CPUID) >> GICH_LR_PHYSID_CPUID_SHIFT;
+       }
 
        return lr_desc;
 }
@@ -55,7 +59,9 @@ static struct vgic_lr vgic_v2_get_lr(const struct kvm_vcpu *vcpu, int lr)
 static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
                           struct vgic_lr lr_desc)
 {
-       u32 lr_val = (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT) | lr_desc.irq;
+       u32 lr_val;
+
+       lr_val = lr_desc.irq;
 
        if (lr_desc.state & LR_STATE_PENDING)
                lr_val |= GICH_LR_PENDING_BIT;
@@ -64,6 +70,14 @@ static void vgic_v2_set_lr(struct kvm_vcpu *vcpu, int lr,
        if (lr_desc.state & LR_EOI_INT)
                lr_val |= GICH_LR_EOI;
 
+       if (lr_desc.state & LR_HW) {
+               lr_val |= GICH_LR_HW;
+               lr_val |= (u32)lr_desc.hwirq << GICH_LR_PHYSID_CPUID_SHIFT;
+       }
+
+       if (lr_desc.irq < VGIC_NR_SGIS)
+               lr_val |= (lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT);
+
        vcpu->arch.vgic_cpu.vgic_v2.vgic_lr[lr] = lr_val;
 }
 
index dff06021e74855a2d6cb9b8830fd30818a63c927..afbf925b00f4ff079ea28925174e1998e54c44d1 100644 (file)
@@ -67,6 +67,10 @@ static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
                lr_desc.state |= LR_STATE_ACTIVE;
        if (val & ICH_LR_EOI)
                lr_desc.state |= LR_EOI_INT;
+       if (val & ICH_LR_HW) {
+               lr_desc.state |= LR_HW;
+               lr_desc.hwirq = (val >> ICH_LR_PHYS_ID_SHIFT) & GENMASK(9, 0);
+       }
 
        return lr_desc;
 }
@@ -84,10 +88,17 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
         * Eventually we want to make this configurable, so we may revisit
         * this in the future.
         */
-       if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+       switch (vcpu->kvm->arch.vgic.vgic_model) {
+       case KVM_DEV_TYPE_ARM_VGIC_V3:
                lr_val |= ICH_LR_GROUP;
-       else
-               lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+               break;
+       case  KVM_DEV_TYPE_ARM_VGIC_V2:
+               if (lr_desc.irq < VGIC_NR_SGIS)
+                       lr_val |= (u32)lr_desc.source << GICH_LR_PHYSID_CPUID_SHIFT;
+               break;
+       default:
+               BUG();
+       }
 
        if (lr_desc.state & LR_STATE_PENDING)
                lr_val |= ICH_LR_PENDING_BIT;
@@ -95,6 +106,10 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
                lr_val |= ICH_LR_ACTIVE_BIT;
        if (lr_desc.state & LR_EOI_INT)
                lr_val |= ICH_LR_EOI;
+       if (lr_desc.state & LR_HW) {
+               lr_val |= ICH_LR_HW;
+               lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
+       }
 
        vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
 }
index bc40137a022d51e451913f3011639e196ddd135b..9eb489a2c94c2b5ef07146a01ec9ac943034065c 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/rculist.h>
 #include <linux/uaccess.h>
 
 #include <asm/kvm_emulate.h>
  *   cause the interrupt to become inactive in such a situation.
  *   Conversely, writes to GICD_ICPENDRn do not cause the interrupt to become
  *   inactive as long as the external input line is held high.
+ *
+ *
+ * Initialization rules: there are multiple stages to the vgic
+ * initialization, both for the distributor and the CPU interfaces.
+ *
+ * Distributor:
+ *
+ * - kvm_vgic_early_init(): initialization of static data that doesn't
+ *   depend on any sizing information or emulation type. No allocation
+ *   is allowed there.
+ *
+ * - vgic_init(): allocation and initialization of the generic data
+ *   structures that depend on sizing information (number of CPUs,
+ *   number of interrupts). Also initializes the vcpu specific data
+ *   structures. Can be executed lazily for GICv2.
+ *   [to be renamed to kvm_vgic_init??]
+ *
+ * CPU Interface:
+ *
+ * - kvm_vgic_cpu_early_init(): initialization of static data that
+ *   doesn't depend on any sizing information or emulation type. No
+ *   allocation is allowed there.
  */
 
 #include "vgic.h"
@@ -82,6 +105,8 @@ static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static struct vgic_lr vgic_get_lr(const struct kvm_vcpu *vcpu, int lr);
 static void vgic_set_lr(struct kvm_vcpu *vcpu, int lr, struct vgic_lr lr_desc);
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+                                               int virt_irq);
 
 static const struct vgic_ops *vgic_ops;
 static const struct vgic_params *vgic;
@@ -375,7 +400,7 @@ void vgic_cpu_irq_clear(struct kvm_vcpu *vcpu, int irq)
 
 static bool vgic_can_sample_irq(struct kvm_vcpu *vcpu, int irq)
 {
-       return vgic_irq_is_edge(vcpu, irq) || !vgic_irq_is_queued(vcpu, irq);
+       return !vgic_irq_is_queued(vcpu, irq);
 }
 
 /**
@@ -1115,6 +1140,39 @@ static void vgic_queue_irq_to_lr(struct kvm_vcpu *vcpu, int irq,
        if (!vgic_irq_is_edge(vcpu, irq))
                vlr.state |= LR_EOI_INT;
 
+       if (vlr.irq >= VGIC_NR_SGIS) {
+               struct irq_phys_map *map;
+               map = vgic_irq_map_search(vcpu, irq);
+
+               /*
+                * If we have a mapping, and the virtual interrupt is
+                * being injected, then we must set the state to
+                * active in the physical world. Otherwise the
+                * physical interrupt will fire and the guest will
+                * exit before processing the virtual interrupt.
+                */
+               if (map) {
+                       int ret;
+
+                       BUG_ON(!map->active);
+                       vlr.hwirq = map->phys_irq;
+                       vlr.state |= LR_HW;
+                       vlr.state &= ~LR_EOI_INT;
+
+                       ret = irq_set_irqchip_state(map->irq,
+                                                   IRQCHIP_STATE_ACTIVE,
+                                                   true);
+                       WARN_ON(ret);
+
+                       /*
+                        * Make sure we're not going to sample this
+                        * again, as a HW-backed interrupt cannot be
+                        * in the PENDING_ACTIVE stage.
+                        */
+                       vgic_irq_set_queued(vcpu, irq);
+               }
+       }
+
        vgic_set_lr(vcpu, lr_nr, vlr);
        vgic_sync_lr_elrsr(vcpu, lr_nr, vlr);
 }
@@ -1339,6 +1397,39 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
        return level_pending;
 }
 
+/*
+ * Save the physical active state, and reset it to inactive.
+ *
+ * Return 1 if HW interrupt went from active to inactive, and 0 otherwise.
+ */
+static int vgic_sync_hwirq(struct kvm_vcpu *vcpu, struct vgic_lr vlr)
+{
+       struct irq_phys_map *map;
+       int ret;
+
+       if (!(vlr.state & LR_HW))
+               return 0;
+
+       map = vgic_irq_map_search(vcpu, vlr.irq);
+       BUG_ON(!map || !map->active);
+
+       ret = irq_get_irqchip_state(map->irq,
+                                   IRQCHIP_STATE_ACTIVE,
+                                   &map->active);
+
+       WARN_ON(ret);
+
+       if (map->active) {
+               ret = irq_set_irqchip_state(map->irq,
+                                           IRQCHIP_STATE_ACTIVE,
+                                           false);
+               WARN_ON(ret);
+               return 0;
+       }
+
+       return 1;
+}
+
 /* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
@@ -1353,14 +1444,31 @@ static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
        elrsr = vgic_get_elrsr(vcpu);
        elrsr_ptr = u64_to_bitmask(&elrsr);
 
-       /* Clear mappings for empty LRs */
-       for_each_set_bit(lr, elrsr_ptr, vgic->nr_lr) {
+       /* Deal with HW interrupts, and clear mappings for empty LRs */
+       for (lr = 0; lr < vgic->nr_lr; lr++) {
                struct vgic_lr vlr;
 
-               if (!test_and_clear_bit(lr, vgic_cpu->lr_used))
+               if (!test_bit(lr, vgic_cpu->lr_used))
                        continue;
 
                vlr = vgic_get_lr(vcpu, lr);
+               if (vgic_sync_hwirq(vcpu, vlr)) {
+                       /*
+                        * So this is a HW interrupt that the guest
+                        * EOI-ed. Clean the LR state and allow the
+                        * interrupt to be sampled again.
+                        */
+                       vlr.state = 0;
+                       vlr.hwirq = 0;
+                       vgic_set_lr(vcpu, lr, vlr);
+                       vgic_irq_clear_queued(vcpu, vlr.irq);
+                       set_bit(lr, elrsr_ptr);
+               }
+
+               if (!test_bit(lr, elrsr_ptr))
+                       continue;
+
+               clear_bit(lr, vgic_cpu->lr_used);
 
                BUG_ON(vlr.irq >= dist->nr_irqs);
                vgic_cpu->vgic_irq_lr_map[vlr.irq] = LR_EMPTY;
@@ -1447,7 +1555,8 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 }
 
 static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
-                                 unsigned int irq_num, bool level)
+                                  struct irq_phys_map *map,
+                                  unsigned int irq_num, bool level)
 {
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
@@ -1455,6 +1564,9 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
        int enabled;
        bool ret = true, can_inject = true;
 
+       if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+               return -EINVAL;
+
        spin_lock(&dist->lock);
 
        vcpu = kvm_get_vcpu(kvm, cpuid);
@@ -1517,18 +1629,46 @@ static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 out:
        spin_unlock(&dist->lock);
 
-       return ret ? cpuid : -EINVAL;
+       if (ret) {
+               /* kick the specified vcpu */
+               kvm_vcpu_kick(kvm_get_vcpu(kvm, cpuid));
+       }
+
+       return 0;
+}
+
+static int vgic_lazy_init(struct kvm *kvm)
+{
+       int ret = 0;
+
+       if (unlikely(!vgic_initialized(kvm))) {
+               /*
+                * We only provide the automatic initialization of the VGIC
+                * for the legacy case of a GICv2. Any other type must
+                * be explicitly initialized once setup with the respective
+                * KVM device call.
+                */
+               if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2)
+                       return -EBUSY;
+
+               mutex_lock(&kvm->lock);
+               ret = vgic_init(kvm);
+               mutex_unlock(&kvm->lock);
+       }
+
+       return ret;
 }
 
 /**
  * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
  * @kvm:     The VM structure pointer
  * @cpuid:   The CPU for PPIs
- * @irq_num: The IRQ number that is assigned to the device
+ * @irq_num: The IRQ number that is assigned to the device. This IRQ
+ *           must not be mapped to a HW interrupt.
  * @level:   Edge-triggered:  true:  to trigger the interrupt
  *                           false: to ignore the call
- *          Level-sensitive  true:  activates an interrupt
- *                           false: deactivates an interrupt
+ *          Level-sensitive  true:  raise the input signal
+ *                           false: lower the input signal
  *
  * The GIC is not concerned with devices being active-LOW or active-HIGH for
  * level-sensitive interrupts.  You can think of the level parameter as 1
@@ -1537,39 +1677,44 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
                        bool level)
 {
-       int ret = 0;
-       int vcpu_id;
-
-       if (unlikely(!vgic_initialized(kvm))) {
-               /*
-                * We only provide the automatic initialization of the VGIC
-                * for the legacy case of a GICv2. Any other type must
-                * be explicitly initialized once setup with the respective
-                * KVM device call.
-                */
-               if (kvm->arch.vgic.vgic_model != KVM_DEV_TYPE_ARM_VGIC_V2) {
-                       ret = -EBUSY;
-                       goto out;
-               }
-               mutex_lock(&kvm->lock);
-               ret = vgic_init(kvm);
-               mutex_unlock(&kvm->lock);
+       struct irq_phys_map *map;
+       int ret;
 
-               if (ret)
-                       goto out;
-       }
+       ret = vgic_lazy_init(kvm);
+       if (ret)
+               return ret;
 
-       if (irq_num >= min(kvm->arch.vgic.nr_irqs, 1020))
+       map = vgic_irq_map_search(kvm_get_vcpu(kvm, cpuid), irq_num);
+       if (map)
                return -EINVAL;
 
-       vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
-       if (vcpu_id >= 0) {
-               /* kick the specified vcpu */
-               kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
-       }
+       return vgic_update_irq_pending(kvm, cpuid, NULL, irq_num, level);
+}
 
-out:
-       return ret;
+/**
+ * kvm_vgic_inject_mapped_irq - Inject a physically mapped IRQ to the vgic
+ * @kvm:     The VM structure pointer
+ * @cpuid:   The CPU for PPIs
+ * @map:     Pointer to a irq_phys_map structure describing the mapping
+ * @level:   Edge-triggered:  true:  to trigger the interrupt
+ *                           false: to ignore the call
+ *          Level-sensitive  true:  raise the input signal
+ *                           false: lower the input signal
+ *
+ * The GIC is not concerned with devices being active-LOW or active-HIGH for
+ * level-sensitive interrupts.  You can think of the level parameter as 1
+ * being HIGH and 0 being LOW and all devices being active-HIGH.
+ */
+int kvm_vgic_inject_mapped_irq(struct kvm *kvm, int cpuid,
+                              struct irq_phys_map *map, bool level)
+{
+       int ret;
+
+       ret = vgic_lazy_init(kvm);
+       if (ret)
+               return ret;
+
+       return vgic_update_irq_pending(kvm, cpuid, map, map->virt_irq, level);
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1583,6 +1728,188 @@ static irqreturn_t vgic_maintenance_handler(int irq, void *data)
        return IRQ_HANDLED;
 }
 
+static struct list_head *vgic_get_irq_phys_map_list(struct kvm_vcpu *vcpu,
+                                                   int virt_irq)
+{
+       if (virt_irq < VGIC_NR_PRIVATE_IRQS)
+               return &vcpu->arch.vgic_cpu.irq_phys_map_list;
+       else
+               return &vcpu->kvm->arch.vgic.irq_phys_map_list;
+}
+
+/**
+ * kvm_vgic_map_phys_irq - map a virtual IRQ to a physical IRQ
+ * @vcpu: The VCPU pointer
+ * @virt_irq: The virtual irq number
+ * @irq: The Linux IRQ number
+ *
+ * Establish a mapping between a guest visible irq (@virt_irq) and a
+ * Linux irq (@irq). On injection, @virt_irq will be associated with
+ * the physical interrupt represented by @irq. This mapping can be
+ * established multiple times as long as the parameters are the same.
+ *
+ * Returns a valid pointer on success, and an error pointer otherwise
+ */
+struct irq_phys_map *kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu,
+                                          int virt_irq, int irq)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+       struct irq_phys_map *map;
+       struct irq_phys_map_entry *entry;
+       struct irq_desc *desc;
+       struct irq_data *data;
+       int phys_irq;
+
+       desc = irq_to_desc(irq);
+       if (!desc) {
+               kvm_err("%s: no interrupt descriptor\n", __func__);
+               return ERR_PTR(-EINVAL);
+       }
+
+       data = irq_desc_get_irq_data(desc);
+       while (data->parent_data)
+               data = data->parent_data;
+
+       phys_irq = data->hwirq;
+
+       /* Create a new mapping */
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return ERR_PTR(-ENOMEM);
+
+       spin_lock(&dist->irq_phys_map_lock);
+
+       /* Try to match an existing mapping */
+       map = vgic_irq_map_search(vcpu, virt_irq);
+       if (map) {
+               /* Make sure this mapping matches */
+               if (map->phys_irq != phys_irq   ||
+                   map->irq      != irq)
+                       map = ERR_PTR(-EINVAL);
+
+               /* Found an existing, valid mapping */
+               goto out;
+       }
+
+       map           = &entry->map;
+       map->virt_irq = virt_irq;
+       map->phys_irq = phys_irq;
+       map->irq      = irq;
+
+       list_add_tail_rcu(&entry->entry, root);
+
+out:
+       spin_unlock(&dist->irq_phys_map_lock);
+       /* If we've found a hit in the existing list, free the useless
+        * entry */
+       if (IS_ERR(map) || map != &entry->map)
+               kfree(entry);
+       return map;
+}
+
+static struct irq_phys_map *vgic_irq_map_search(struct kvm_vcpu *vcpu,
+                                               int virt_irq)
+{
+       struct list_head *root = vgic_get_irq_phys_map_list(vcpu, virt_irq);
+       struct irq_phys_map_entry *entry;
+       struct irq_phys_map *map;
+
+       rcu_read_lock();
+
+       list_for_each_entry_rcu(entry, root, entry) {
+               map = &entry->map;
+               if (map->virt_irq == virt_irq) {
+                       rcu_read_unlock();
+                       return map;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return NULL;
+}
+
+static void vgic_free_phys_irq_map_rcu(struct rcu_head *rcu)
+{
+       struct irq_phys_map_entry *entry;
+
+       entry = container_of(rcu, struct irq_phys_map_entry, rcu);
+       kfree(entry);
+}
+
+/**
+ * kvm_vgic_get_phys_irq_active - Return the active state of a mapped IRQ
+ *
+ * Return the logical active state of a mapped interrupt. This doesn't
+ * necessarily reflect the current HW state.
+ */
+bool kvm_vgic_get_phys_irq_active(struct irq_phys_map *map)
+{
+       BUG_ON(!map);
+       return map->active;
+}
+
+/**
+ * kvm_vgic_set_phys_irq_active - Set the active state of a mapped IRQ
+ *
+ * Set the logical active state of a mapped interrupt. This doesn't
+ * immediately affect the HW state.
+ */
+void kvm_vgic_set_phys_irq_active(struct irq_phys_map *map, bool active)
+{
+       BUG_ON(!map);
+       map->active = active;
+}
+
+/**
+ * kvm_vgic_unmap_phys_irq - Remove a virtual to physical IRQ mapping
+ * @vcpu: The VCPU pointer
+ * @map: The pointer to a mapping obtained through kvm_vgic_map_phys_irq
+ *
+ * Remove an existing mapping between virtual and physical interrupts.
+ */
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, struct irq_phys_map *map)
+{
+       struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
+       struct irq_phys_map_entry *entry;
+       struct list_head *root;
+
+       if (!map)
+               return -EINVAL;
+
+       root = vgic_get_irq_phys_map_list(vcpu, map->virt_irq);
+
+       spin_lock(&dist->irq_phys_map_lock);
+
+       list_for_each_entry(entry, root, entry) {
+               if (&entry->map == map) {
+                       list_del_rcu(&entry->entry);
+                       call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+                       break;
+               }
+       }
+
+       spin_unlock(&dist->irq_phys_map_lock);
+
+       return 0;
+}
+
+static void vgic_destroy_irq_phys_map(struct kvm *kvm, struct list_head *root)
+{
+       struct vgic_dist *dist = &kvm->arch.vgic;
+       struct irq_phys_map_entry *entry;
+
+       spin_lock(&dist->irq_phys_map_lock);
+
+       list_for_each_entry(entry, root, entry) {
+               list_del_rcu(&entry->entry);
+               call_rcu(&entry->rcu, vgic_free_phys_irq_map_rcu);
+       }
+
+       spin_unlock(&dist->irq_phys_map_lock);
+}
+
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1591,6 +1918,7 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
        kfree(vgic_cpu->active_shared);
        kfree(vgic_cpu->pend_act_shared);
        kfree(vgic_cpu->vgic_irq_lr_map);
+       vgic_destroy_irq_phys_map(vcpu->kvm, &vgic_cpu->irq_phys_map_list);
        vgic_cpu->pending_shared = NULL;
        vgic_cpu->active_shared = NULL;
        vgic_cpu->pend_act_shared = NULL;
@@ -1627,6 +1955,17 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
        return 0;
 }
 
+/**
+ * kvm_vgic_vcpu_early_init - Earliest possible per-vcpu vgic init stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu)
+{
+       struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+       INIT_LIST_HEAD(&vgic_cpu->irq_phys_map_list);
+}
+
 /**
  * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
  *
@@ -1664,6 +2003,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
        kfree(dist->irq_spi_target);
        kfree(dist->irq_pending_on_cpu);
        kfree(dist->irq_active_on_cpu);
+       vgic_destroy_irq_phys_map(kvm, &dist->irq_phys_map_list);
        dist->irq_sgi_sources = NULL;
        dist->irq_spi_cpu = NULL;
        dist->irq_spi_target = NULL;
@@ -1787,6 +2127,18 @@ static int init_vgic_model(struct kvm *kvm, int type)
        return 0;
 }
 
+/**
+ * kvm_vgic_early_init - Earliest possible vgic initialization stage
+ *
+ * No memory allocation should be performed here, only static init.
+ */
+void kvm_vgic_early_init(struct kvm *kvm)
+{
+       spin_lock_init(&kvm->arch.vgic.lock);
+       spin_lock_init(&kvm->arch.vgic.irq_phys_map_lock);
+       INIT_LIST_HEAD(&kvm->arch.vgic.irq_phys_map_list);
+}
+
 int kvm_vgic_create(struct kvm *kvm, u32 type)
 {
        int i, vcpu_lock_idx = -1, ret;
@@ -1832,7 +2184,6 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
        if (ret)
                goto out_unlock;
 
-       spin_lock_init(&kvm->arch.vgic.lock);
        kvm->arch.vgic.in_kernel = true;
        kvm->arch.vgic.vgic_model = type;
        kvm->arch.vgic.vctrl_base = vgic->vctrl_base;
index 21c14244f4c4fd1c3c8ffade7f3265df91c89efb..d7ea8e20dae4ee9b353441e5e342a6279d1485d6 100644 (file)
@@ -213,11 +213,15 @@ int kvm_set_irq_routing(struct kvm *kvm,
                        goto out;
 
                r = -EINVAL;
-               if (ue->flags)
+               if (ue->flags) {
+                       kfree(e);
                        goto out;
+               }
                r = setup_routing_entry(new, e, ue);
-               if (r)
+               if (r) {
+                       kfree(e);
                        goto out;
+               }
                ++ue;
        }
 
index 268fc0a5a9324f597214d41572952b52aeb76299..a25a73147f714458dd6c55fe7426649f9dd5baa2 100644 (file)
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
-static unsigned int halt_poll_ns;
+/* halt polling only reduces halt latency by 5-7 us, 500us is enough */
+static unsigned int halt_poll_ns = 500000;
 module_param(halt_poll_ns, uint, S_IRUGO | S_IWUSR);
 
+/* Default doubles per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_grow = 2;
+module_param(halt_poll_ns_grow, int, S_IRUGO);
+
+/* Default resets per-vcpu halt_poll_ns. */
+static unsigned int halt_poll_ns_shrink;
+module_param(halt_poll_ns_shrink, int, S_IRUGO);
+
 /*
  * Ordering of locks:
  *
@@ -217,6 +226,7 @@ int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        vcpu->pid = NULL;
+       vcpu->halt_poll_ns = 0;
        init_waitqueue_head(&vcpu->wq);
        kvm_async_pf_vcpu_init(vcpu);
 
@@ -1937,6 +1947,35 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
 
+static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+       int old, val;
+
+       old = val = vcpu->halt_poll_ns;
+       /* 10us base */
+       if (val == 0 && halt_poll_ns_grow)
+               val = 10000;
+       else
+               val *= halt_poll_ns_grow;
+
+       vcpu->halt_poll_ns = val;
+       trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
+}
+
+static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+       int old, val;
+
+       old = val = vcpu->halt_poll_ns;
+       if (halt_poll_ns_shrink == 0)
+               val = 0;
+       else
+               val /= halt_poll_ns_shrink;
+
+       vcpu->halt_poll_ns = val;
+       trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
+}
+
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
        if (kvm_arch_vcpu_runnable(vcpu)) {
@@ -1959,10 +1998,11 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        ktime_t start, cur;
        DEFINE_WAIT(wait);
        bool waited = false;
+       u64 block_ns;
 
        start = cur = ktime_get();
-       if (halt_poll_ns) {
-               ktime_t stop = ktime_add_ns(ktime_get(), halt_poll_ns);
+       if (vcpu->halt_poll_ns) {
+               ktime_t stop = ktime_add_ns(ktime_get(), vcpu->halt_poll_ns);
 
                do {
                        /*
@@ -1991,7 +2031,21 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
        cur = ktime_get();
 
 out:
-       trace_kvm_vcpu_wakeup(ktime_to_ns(cur) - ktime_to_ns(start), waited);
+       block_ns = ktime_to_ns(cur) - ktime_to_ns(start);
+
+       if (halt_poll_ns) {
+               if (block_ns <= vcpu->halt_poll_ns)
+                       ;
+               /* we had a long block, shrink polling */
+               else if (vcpu->halt_poll_ns && block_ns > halt_poll_ns)
+                       shrink_halt_poll_ns(vcpu);
+               /* we had a short halt and our poll time is too small */
+               else if (vcpu->halt_poll_ns < halt_poll_ns &&
+                       block_ns < halt_poll_ns)
+                       grow_halt_poll_ns(vcpu);
+       }
+
+       trace_kvm_vcpu_wakeup(block_ns, waited);
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_block);