Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
Author:     Amit Pundir <amit.pundir@linaro.org>
AuthorDate: Tue, 15 Nov 2016 12:46:35 +0000 (18:16 +0530)
Commit:     Amit Pundir <amit.pundir@linaro.org>
CommitDate: Tue, 15 Nov 2016 13:03:34 +0000 (18:33 +0530)
Conflicts:
* arch/arm64/include/asm/assembler.h
    Pick changes from AOSP Change-Id: I450594dc311b09b6b832b707a9abb357608cc6e4
    ("UPSTREAM: arm64: include alternative handling in dcache_by_line_op").

* drivers/android/binder.c
    Pick changes from LTS commit 14f09e8e7cd8 ("ANDROID: binder: Add strong ref checks"),
    instead of AOSP Change-Id: I66c15b066808f28bd27bfe50fd0e03ff45a09fca
    ("ANDROID: binder: Add strong ref checks").

* drivers/usb/gadget/function/u_ether.c
    Refactor the AOSP high-speed IRQ throttling logic by adding
    a check for the last queued request, as intended by LTS commit
    660c04e8f174 ("usb: gadget: function: u_ether: don't starve tx request queue").
    Fixes AOSP Change-Id: I26515bfd9bbc8f7af38be7835692143f7093118a
    ("USB: gadget: u_ether: Fix data stall issue in RNDIS tethering mode").

Signed-off-by: Amit Pundir <amit.pundir@linaro.org>
182 files changed:
Makefile
arch/arm/Kconfig
arch/arm/include/asm/floppy.h
arch/arm/include/asm/kvm_arm.h
arch/arm/include/asm/kvm_host.h
arch/arm/include/asm/kvm_mmu.h
arch/arm/include/asm/virt.h
arch/arm/kernel/Makefile
arch/arm/kernel/armksyms.c
arch/arm/kernel/psci-call.S [deleted file]
arch/arm/kernel/smccc-call.S [new file with mode: 0644]
arch/arm/kvm/arm.c
arch/arm/kvm/emulate.c
arch/arm/kvm/mmu.c
arch/arm64/Kconfig
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kvm_arm.h
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/kvm_mmio.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/page.h
arch/arm64/include/asm/suspend.h
arch/arm64/include/asm/sysreg.h
arch/arm64/include/asm/virt.h
arch/arm64/kernel/Makefile
arch/arm64/kernel/arm64ksyms.c
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/head.S
arch/arm64/kernel/hibernate-asm.S [new file with mode: 0644]
arch/arm64/kernel/hibernate.c [new file with mode: 0644]
arch/arm64/kernel/psci-call.S [deleted file]
arch/arm64/kernel/psci.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/sleep.S
arch/arm64/kernel/smccc-call.S [new file with mode: 0644]
arch/arm64/kernel/suspend.c
arch/arm64/kernel/vmlinux.lds.S
arch/arm64/kvm/Makefile
arch/arm64/kvm/guest.c
arch/arm64/kvm/handle_exit.c
arch/arm64/kvm/hyp-init.S
arch/arm64/kvm/hyp.S
arch/arm64/kvm/hyp/Makefile [new file with mode: 0644]
arch/arm64/kvm/hyp/debug-sr.c [new file with mode: 0644]
arch/arm64/kvm/hyp/entry.S [new file with mode: 0644]
arch/arm64/kvm/hyp/fpsimd.S [new file with mode: 0644]
arch/arm64/kvm/hyp/hyp-entry.S [new file with mode: 0644]
arch/arm64/kvm/hyp/hyp.h [new file with mode: 0644]
arch/arm64/kvm/hyp/switch.c [new file with mode: 0644]
arch/arm64/kvm/hyp/sysreg-sr.c [new file with mode: 0644]
arch/arm64/kvm/hyp/timer-sr.c [new file with mode: 0644]
arch/arm64/kvm/hyp/tlb.c [new file with mode: 0644]
arch/arm64/kvm/hyp/vgic-v2-sr.c [new file with mode: 0644]
arch/arm64/kvm/hyp/vgic-v3-sr.c [new file with mode: 0644]
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/vgic-v2-switch.S [deleted file]
arch/arm64/kvm/vgic-v3-switch.S [deleted file]
arch/arm64/mm/proc.S
arch/h8300/include/asm/thread_info.h
arch/h8300/kernel/signal.c
arch/mips/kvm/emulate.c
arch/parisc/kernel/syscall.S
arch/powerpc/kernel/ptrace.c
arch/x86/include/asm/uaccess.h
arch/x86/kvm/emulate.c
arch/x86/kvm/x86.c
arch/x86/xen/mmu.c
drivers/android/binder.c
drivers/char/virtio_console.c
drivers/firewire/net.c
drivers/firmware/Kconfig
drivers/firmware/psci.c
drivers/gpu/drm/amd/amdgpu/atombios_dp.c
drivers/gpu/drm/drm_dp_mst_topology.c
drivers/gpu/drm/exynos/exynos_drm_core.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/drm/radeon/si_dpm.c
drivers/hid/hid-ids.h
drivers/hid/usbhid/hid-quirks.c
drivers/hv/hv_util.c
drivers/i2c/busses/i2c-xgene-slimpro.c
drivers/i2c/i2c-core.c
drivers/input/serio/i8042-x86ia64io.h
drivers/md/dm-raid1.c
drivers/md/dm.c
drivers/misc/genwqe/card_utils.c
drivers/misc/mei/hw-txe.c
drivers/mmc/host/dw_mmc-pltfm.c
drivers/mtd/ubi/fastmap.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/geneve.c
drivers/net/vxlan.c
drivers/of/of_reserved_mem.c
drivers/pwm/core.c
drivers/pwm/sysfs.c
drivers/scsi/arcmsr/arcmsr_hba.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/scsi_debug.c
drivers/tty/vt/vt.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/function/u_ether.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-pci.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/usb-serial.c
drivers/virtio/virtio_ring.c
fs/btrfs/tree-log.c
fs/overlayfs/copy_up.c
fs/ubifs/dir.c
fs/xfs/libxfs/xfs_dquot_buf.c
include/clocksource/arm_arch_timer.h
include/kvm/arm_vgic.h
include/linux/arm-smccc.h [new file with mode: 0644]
include/linux/mroute.h
include/linux/mroute6.h
include/linux/netdevice.h
include/linux/psci.h
include/linux/pwm.h
include/net/ip.h
include/net/sch_generic.h
include/net/sock.h
include/uapi/linux/rtnetlink.h
kernel/cgroup.c
kernel/power/main.c
kernel/power/power.h
kernel/power/swap.c
mm/cma.c
mm/list_lru.c
mm/memcontrol.c
mm/vmscan.c
net/8021q/vlan.c
net/bridge/br_multicast.c
net/core/dev.c
net/core/pktgen.c
net/ethernet/eth.c
net/ipv4/af_inet.c
net/ipv4/fou.c
net/ipv4/gre_offload.c
net/ipv4/ip_sockglue.c
net/ipv4/ipmr.c
net/ipv4/route.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp_input.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv6/addrconf.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_offload.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6mr.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/rx.c
net/netlink/af_netlink.c
net/packet/af_packet.c
net/sched/act_vlan.c
net/sched/cls_api.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
security/keys/proc.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_controller.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
sound/usb/quirks-table.h
tools/perf/Makefile.perf
virt/kvm/arm/vgic-v3.c

index 98239d56924cdf2dec04caa12b41140948e48c52..fba9b09a13300c3fc2c0db16fc28125d42980a22 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 4
-SUBLEVEL = 30
+SUBLEVEL = 32
 EXTRAVERSION =
 NAME = Blurry Fish Butt
 
@@ -617,6 +617,7 @@ include arch/$(SRCARCH)/Makefile
 
 KBUILD_CFLAGS  += $(call cc-option,-fno-delete-null-pointer-checks,)
 KBUILD_CFLAGS  += $(call cc-disable-warning,maybe-uninitialized,)
+KBUILD_CFLAGS  += $(call cc-disable-warning,frame-address,)
 
 ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
 KBUILD_CFLAGS  += -Os
index f4a214446b803effdef5447ac0b7497b65353a26..625765fb805adbbad99327017d16e894b8265096 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -39,6 +39,7 @@ config ARM
        select HAVE_ARCH_HARDENED_USERCOPY
        select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
        select HAVE_ARCH_TRACEHOOK
+       select HAVE_ARM_SMCCC if CPU_V7
        select HAVE_BPF_JIT
        select HAVE_CC_STACKPROTECTOR
        select HAVE_CONTEXT_TRACKING
@@ -1432,8 +1433,7 @@ config BIG_LITTLE
 
 config BL_SWITCHER
        bool "big.LITTLE switcher support"
-       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU
-       select ARM_CPU_SUSPEND
+       depends on BIG_LITTLE && MCPM && HOTPLUG_CPU && ARM_GIC
        select CPU_PM
        help
          The big.LITTLE "switcher" provides the core functionality to
@@ -1491,7 +1491,7 @@ config HOTPLUG_CPU
 
 config ARM_PSCI
        bool "Support for the ARM Power State Coordination Interface (PSCI)"
-       depends on CPU_V7
+       depends on HAVE_ARM_SMCCC
        select ARM_PSCI_FW
        help
          Say Y here if you want Linux to communicate with system firmware
@@ -2174,7 +2174,8 @@ config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
 config ARM_CPU_SUSPEND
-       def_bool PM_SLEEP
+       def_bool PM_SLEEP || BL_SWITCHER || ARM_PSCI_FW
+       depends on ARCH_SUSPEND_POSSIBLE
 
 config ARCH_HIBERNATION_POSSIBLE
        bool
index f4882553fbb0125927776c0f3a032625937649c6..85a34cc8316ace1b906347fc1de1e79adbd0cb02 100644
--- a/arch/arm/include/asm/floppy.h
+++ b/arch/arm/include/asm/floppy.h
@@ -17,7 +17,7 @@
 
 #define fd_outb(val,port)                      \
        do {                                    \
-               if ((port) == FD_DOR)           \
+               if ((port) == (u32)FD_DOR)      \
                        fd_setdor((val));       \
                else                            \
                        outb((val),(port));     \
index dc641ddf0784304434635080b5732ee215ab588f..e22089fb44dc86b7ed2fdb175bc6ec7b47ee4001 100644
--- a/arch/arm/include/asm/kvm_arm.h
+++ b/arch/arm/include/asm/kvm_arm.h
@@ -19,6 +19,7 @@
 #ifndef __ARM_KVM_ARM_H__
 #define __ARM_KVM_ARM_H__
 
+#include <linux/const.h>
 #include <linux/types.h>
 
 /* Hyp Configuration Register (HCR) bits */
  * space.
  */
 #define KVM_PHYS_SHIFT (40)
-#define KVM_PHYS_SIZE  (1ULL << KVM_PHYS_SHIFT)
-#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - 1ULL)
-#define PTRS_PER_S2_PGD        (1ULL << (KVM_PHYS_SHIFT - 30))
-#define S2_PGD_ORDER   get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
+#define KVM_PHYS_SIZE  (_AC(1, ULL) << KVM_PHYS_SHIFT)
+#define KVM_PHYS_MASK  (KVM_PHYS_SIZE - _AC(1, ULL))
+#define PTRS_PER_S2_PGD        (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
 
 /* Virtualization Translation Control Register (VTCR) bits */
 #define VTCR_SH0       (3 << 12)
 #define VTTBR_X                (5 - KVM_T0SZ)
 #endif
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
-#define VTTBR_VMID_SHIFT  (48LLU)
-#define VTTBR_VMID_MASK          (0xffLLU << VTTBR_VMID_SHIFT)
+#define VTTBR_BADDR_MASK  (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_VMID_SHIFT  _AC(48, ULL)
+#define VTTBR_VMID_MASK(size)  (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp Syndrome Register (HSR) bits */
 #define HSR_EC_SHIFT   (26)
-#define HSR_EC         (0x3fU << HSR_EC_SHIFT)
-#define HSR_IL         (1U << 25)
+#define HSR_EC         (_AC(0x3f, UL) << HSR_EC_SHIFT)
+#define HSR_IL         (_AC(1, UL) << 25)
 #define HSR_ISS                (HSR_IL - 1)
 #define HSR_ISV_SHIFT  (24)
-#define HSR_ISV                (1U << HSR_ISV_SHIFT)
+#define HSR_ISV                (_AC(1, UL) << HSR_ISV_SHIFT)
 #define HSR_SRT_SHIFT  (16)
 #define HSR_SRT_MASK   (0xf << HSR_SRT_SHIFT)
 #define HSR_FSC                (0x3f)
 #define HSR_SSE                (1 << 21)
 #define HSR_WNR                (1 << 6)
 #define HSR_CV_SHIFT   (24)
-#define HSR_CV         (1U << HSR_CV_SHIFT)
+#define HSR_CV         (_AC(1, UL) << HSR_CV_SHIFT)
 #define HSR_COND_SHIFT (20)
-#define HSR_COND       (0xfU << HSR_COND_SHIFT)
+#define HSR_COND       (_AC(0xf, UL) << HSR_COND_SHIFT)
 
 #define FSC_FAULT      (0x04)
 #define FSC_ACCESS     (0x08)
 #define HSR_EC_DABT    (0x24)
 #define HSR_EC_DABT_HYP        (0x25)
 
-#define HSR_WFI_IS_WFE         (1U << 0)
+#define HSR_WFI_IS_WFE         (_AC(1, UL) << 0)
 
-#define HSR_HVC_IMM_MASK       ((1UL << 16) - 1)
+#define HSR_HVC_IMM_MASK       ((_AC(1, UL) << 16) - 1)
 
-#define HSR_DABT_S1PTW         (1U << 7)
-#define HSR_DABT_CM            (1U << 8)
-#define HSR_DABT_EA            (1U << 9)
+#define HSR_DABT_S1PTW         (_AC(1, UL) << 7)
+#define HSR_DABT_CM            (_AC(1, UL) << 8)
+#define HSR_DABT_EA            (_AC(1, UL) << 9)
 
 #define kvm_arm_exception_type \
        {0, "RESET" },          \
index 6692982c9b575db476bc12a37e2d69b27c00d9fb..945bfa5e7752057a0161461ceda83843e98788fa 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -214,6 +214,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
        kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
 }
 
+static inline void __cpu_init_stage2(void)
+{
+}
+
 static inline int kvm_arch_dev_ioctl_check_extension(long ext)
 {
        return 0;
index 405aa18833073b88b52103363b54d534d9c69263..9203c21b4673fd8a73b5f5797ed5190305b363ae 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -279,6 +279,11 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
                                       pgd_t *merged_hyp_pgd,
                                       unsigned long hyp_idmap_start) { }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+       return 8;
+}
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ARM_KVM_MMU_H__ */
index 4371f45c578401c7f233e565dbb5fd36d9d59d52..d4ceaf5f299b8d03d6e638a3ea5f72fb5b60a93e 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@ static inline bool is_hyp_mode_mismatched(void)
 {
        return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
 }
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+       return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
 #endif
 
 #endif /* __ASSEMBLY__ */
index af9e59bf3831b9fd648d095c2070209b1e97677d..80856def246518e05015591f73d3bd0b4993bf8b 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -88,8 +88,9 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 
 obj-$(CONFIG_ARM_VIRT_EXT)     += hyp-stub.o
 ifeq ($(CONFIG_ARM_PSCI),y)
-obj-y                          += psci-call.o
 obj-$(CONFIG_SMP)              += psci_smp.o
 endif
 
+obj-$(CONFIG_HAVE_ARM_SMCCC)   += smccc-call.o
+
 extra-y := $(head-y) vmlinux.lds
index f89811fb9a55f3a490c3633ef99ef52745c58129..7e45f69a0ddc9d0ef3b04c145195cf1b4ad7d84b 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -16,6 +16,7 @@
 #include <linux/syscalls.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
+#include <linux/arm-smccc.h>
 
 #include <asm/checksum.h>
 #include <asm/ftrace.h>
@@ -175,3 +176,8 @@ EXPORT_SYMBOL(__gnu_mcount_nc);
 EXPORT_SYMBOL(__pv_phys_pfn_offset);
 EXPORT_SYMBOL(__pv_offset);
 #endif
+
+#ifdef CONFIG_HAVE_ARM_SMCCC
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
+#endif
diff --git a/arch/arm/kernel/psci-call.S b/arch/arm/kernel/psci-call.S
deleted file mode 100644
index a78e9e1..0000000
--- a/arch/arm/kernel/psci-call.S
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Mark Rutland <mark.rutland@arm.com>
- */
-
-#include <linux/linkage.h>
-
-#include <asm/opcodes-sec.h>
-#include <asm/opcodes-virt.h>
-
-/* int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
-       __HVC(0)
-       bx      lr
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, u32 arg2) */
-ENTRY(__invoke_psci_fn_smc)
-       __SMC(0)
-       bx      lr
-ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S
new file mode 100644
index 0000000..2e48b67
--- /dev/null
+++ b/arch/arm/kernel/smccc-call.S
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+
+#include <asm/opcodes-sec.h>
+#include <asm/opcodes-virt.h>
+#include <asm/unwind.h>
+
+       /*
+        * Wrap c macros in asm macros to delay expansion until after the
+        * SMCCC asm macro is expanded.
+        */
+       .macro SMCCC_SMC
+       __SMC(0)
+       .endm
+
+       .macro SMCCC_HVC
+       __HVC(0)
+       .endm
+
+       .macro SMCCC instr
+UNWIND(        .fnstart)
+       mov     r12, sp
+       push    {r4-r7}
+UNWIND(        .save   {r4-r7})
+       ldm     r12, {r4-r7}
+       \instr
+       pop     {r4-r7}
+       ldr     r12, [sp, #(4 * 4)]
+       stm     r12, {r0-r3}
+       bx      lr
+UNWIND(        .fnend)
+       .endm
+
+/*
+ * void smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *               unsigned long a3, unsigned long a4, unsigned long a5,
+ *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+       SMCCC SMCCC_SMC
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *               unsigned long a3, unsigned long a4, unsigned long a5,
+ *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+       SMCCC SMCCC_HVC
+ENDPROC(arm_smccc_hvc)
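
Note: from C the new entry points are used as below (hypothetical caller, not
part of this patch; 0x84000000 is the PSCI_VERSION function ID from the PSCI
0.2 specification):

        #include <linux/arm-smccc.h>

        static u32 psci_get_version_via_smc(void)
        {
                struct arm_smccc_res res;

                arm_smccc_smc(0x84000000, 0, 0, 0, 0, 0, 0, 0, &res);
                return res.a0;
        }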
index c17cb14f368b5b93924d683cb21e818a60dabcf8..5aafc90c436b032bfc16b4f4f8980b944151ac97 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -44,6 +44,7 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_psci.h>
+#include <asm/sections.h>
 
 #ifdef REQUIRES_VIRT
 __asm__(".arch_extension       virt");
@@ -58,9 +59,12 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 
 /* The VMID used in the VTTBR */
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
-static u8 kvm_next_vmid;
+static u32 kvm_next_vmid;
+static unsigned int kvm_vmid_bits __read_mostly;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
+static bool vgic_present;
+
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
        BUG_ON(preemptible());
@@ -132,7 +136,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm->arch.vmid_gen = 0;
 
        /* The maximum number of VCPUs is limited by the host's GIC model */
-       kvm->arch.max_vcpus = kvm_vgic_get_max_vcpus();
+       kvm->arch.max_vcpus = vgic_present ?
+                               kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 
        return ret;
 out_free_stage2_pgd:
@@ -170,6 +175,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        int r;
        switch (ext) {
        case KVM_CAP_IRQCHIP:
+               r = vgic_present;
+               break;
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_USER_MEMORY:
@@ -431,11 +438,12 @@ static void update_vttbr(struct kvm *kvm)
        kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
        kvm->arch.vmid = kvm_next_vmid;
        kvm_next_vmid++;
+       kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
        /* update vttbr to be used with the new vmid */
        pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
        BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
-       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
+       vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
        kvm->arch.vttbr = pgd_phys | vmid;
 
        spin_unlock(&kvm_vmid_lock);
@@ -911,6 +919,8 @@ static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm,
 
        switch (dev_id) {
        case KVM_ARM_DEVICE_VGIC_V2:
+               if (!vgic_present)
+                       return -ENXIO;
                return kvm_vgic_addr(kvm, type, &dev_addr->addr, true);
        default:
                return -ENODEV;
@@ -925,6 +935,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 
        switch (ioctl) {
        case KVM_CREATE_IRQCHIP: {
+               if (!vgic_present)
+                       return -ENXIO;
                return kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
        }
        case KVM_ARM_SET_DEVICE_ADDR: {
@@ -970,6 +982,7 @@ static void cpu_init_hyp_mode(void *dummy)
        vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
 
        __cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+       __cpu_init_stage2();
 
        kvm_arm_init_debug();
 }
@@ -1066,6 +1079,12 @@ static int init_hyp_mode(void)
                goto out_free_mappings;
        }
 
+       err = create_hyp_mappings(__start_rodata, __end_rodata);
+       if (err) {
+               kvm_err("Cannot map rodata section\n");
+               goto out_free_mappings;
+       }
+
        /*
         * Map the Hyp stack pages
         */
@@ -1110,8 +1129,17 @@ static int init_hyp_mode(void)
         * Init HYP view of VGIC
         */
        err = kvm_vgic_hyp_init();
-       if (err)
+       switch (err) {
+       case 0:
+               vgic_present = true;
+               break;
+       case -ENODEV:
+       case -ENXIO:
+               vgic_present = false;
+               break;
+       default:
                goto out_free_context;
+       }
 
        /*
         * Init HYP architected timer support
@@ -1126,6 +1154,10 @@ static int init_hyp_mode(void)
 
        kvm_perf_init();
 
+       /* set size of VMID supported by CPU */
+       kvm_vmid_bits = kvm_get_vmid_bits();
+       kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
        kvm_info("Hyp mode initialized successfully\n");
 
        return 0;
index d6c005283678fe5061a50cc8f5efd1febcc0f27b..dc99159857b4ae70d7d3785b75a1c3f0f8639906 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
                return vbar;
 }
 
+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+       unsigned long cpsr = *vcpu_cpsr(vcpu);
+       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+       *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+       switch (mode) {
+       case FIQ_MODE:
+               *vcpu_cpsr(vcpu) |= PSR_F_BIT;
+               /* Fall through */
+       case ABT_MODE:
+       case IRQ_MODE:
+               *vcpu_cpsr(vcpu) |= PSR_A_BIT;
+               /* Fall through */
+       default:
+               *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+       }
+
+       *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+       if (sctlr & SCTLR_TE)
+               *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+       if (sctlr & SCTLR_EE)
+               *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+       /* Note: These now point to the mode banked copies */
+       *vcpu_spsr(vcpu) = cpsr;
+}
+
 /**
  * kvm_inject_undefined - inject an undefined exception into the guest
  * @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-       unsigned long new_lr_value;
-       unsigned long new_spsr_value;
        unsigned long cpsr = *vcpu_cpsr(vcpu);
-       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
        bool is_thumb = (cpsr & PSR_T_BIT);
        u32 vect_offset = 4;
        u32 return_offset = (is_thumb) ? 2 : 4;
 
-       new_spsr_value = cpsr;
-       new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
-       *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
-       *vcpu_cpsr(vcpu) |= PSR_I_BIT;
-       *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-       if (sctlr & SCTLR_TE)
-               *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-       if (sctlr & SCTLR_EE)
-               *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-       /* Note: These now point to UND banked copies */
-       *vcpu_spsr(vcpu) = cpsr;
-       *vcpu_reg(vcpu, 14) = new_lr_value;
+       kvm_update_psr(vcpu, UND_MODE);
+       *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
 
        /* Branch to exception vector */
        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-       unsigned long new_lr_value;
-       unsigned long new_spsr_value;
        unsigned long cpsr = *vcpu_cpsr(vcpu);
-       u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
        bool is_thumb = (cpsr & PSR_T_BIT);
        u32 vect_offset;
        u32 return_offset = (is_thumb) ? 4 : 0;
        bool is_lpae;
 
-       new_spsr_value = cpsr;
-       new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
-       *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
-       *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
-       *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-       if (sctlr & SCTLR_TE)
-               *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-       if (sctlr & SCTLR_EE)
-               *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-       /* Note: These now point to ABT banked copies */
-       *vcpu_spsr(vcpu) = cpsr;
-       *vcpu_reg(vcpu, 14) = new_lr_value;
+       kvm_update_psr(vcpu, ABT_MODE);
+       *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
        if (is_pabt)
                vect_offset = 12;
index 11b6595c26723b511341214e13e1a92fe214a0b2..e2b6801f54e41cf888203ab8cb854aacf4b87da2 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -656,9 +656,9 @@ static void *kvm_alloc_hwpgd(void)
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:       The KVM struct pointer for the VM.
  *
- * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can
- * support either full 40-bit input addresses or limited to 32-bit input
- * addresses). Clears the allocated pages.
+ * Allocates only the stage-2 HW PGD level table(s) (can support either full
+ * 40-bit input addresses or limited to 32-bit input addresses). Clears the
+ * allocated pages.
  *
  * Note we don't need locking here as this is only called when the VM is
  * created, which can only be done once.
index f4637c624db21170c79536d94e035c75b0e4e00d..2543791ce8c2ea95903a850e794effca827358a4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -101,6 +101,7 @@ config ARM64
        select SPARSE_IRQ
        select SYSCTL_EXCEPTION_TRACE
        select HAVE_CONTEXT_TRACKING
+       select HAVE_ARM_SMCCC
        help
          ARM 64-bit (AArch64) Linux support.
 
@@ -975,6 +976,14 @@ menu "Power management options"
 
 source "kernel/power/Kconfig"
 
+config ARCH_HIBERNATION_POSSIBLE
+       def_bool y
+       depends on CPU_PM
+
+config ARCH_HIBERNATION_HEADER
+       def_bool y
+       depends on HIBERNATION
+
 config ARCH_SUSPEND_POSSIBLE
        def_bool y
 
index d4c925ccc7dec8689b2ef891ae2e4ebd501fce94..46ee050ab747c16b1b3ec2c5ba5ee34d0e516f0c 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -34,9 +34,9 @@
 #define ARM64_HAS_UAO                          9
 #define ARM64_ALT_PAN_NOT_UAO                  10
 
-#define ARM64_NCAPS                            11
-#define ARM64_WORKAROUND_CAVIUM_27456          12      
-
+#define ARM64_WORKAROUND_CAVIUM_27456          11
+#define ARM64_HAS_VIRT_HOST_EXTN               12
+#define ARM64_NCAPS                            13
 
 #ifndef __ASSEMBLY__
 
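Note: keeping ARM64_NCAPS last is not cosmetic: capability state is stored in
bitmaps sized by ARM64_NCAPS, and cpus_set_cap() refuses indices at or beyond
it, so with the old numbering ARM64_WORKAROUND_CAVIUM_27456 (12, against
ARM64_NCAPS == 11) could never actually be set. Abridged from the 4.4 header:

        extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

        static inline void cpus_set_cap(unsigned int num)
        {
                if (num >= ARM64_NCAPS)
                        pr_warn("Attempt to set an illegal CPU capability (%d >= %d)\n",
                                num, ARM64_NCAPS);
                else
                        __set_bit(num, cpu_hwcaps);
        }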
index 2d960f8588b0639f80c272f833c3796fd7c286b6..8b709f53f87423ef55f2422204439dc4f63cd86f 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
 #define HCR_INT_OVERRIDE   (HCR_FMO | HCR_IMO)
 
 
-/* Hyp System Control Register (SCTLR_EL2) bits */
-#define SCTLR_EL2_EE   (1 << 25)
-#define SCTLR_EL2_WXN  (1 << 19)
-#define SCTLR_EL2_I    (1 << 12)
-#define SCTLR_EL2_SA   (1 << 3)
-#define SCTLR_EL2_C    (1 << 2)
-#define SCTLR_EL2_A    (1 << 1)
-#define SCTLR_EL2_M    1
-#define SCTLR_EL2_FLAGS        (SCTLR_EL2_M | SCTLR_EL2_A | SCTLR_EL2_C |      \
-                        SCTLR_EL2_SA | SCTLR_EL2_I)
-
 /* TCR_EL2 Registers bits */
 #define TCR_EL2_RES1   ((1 << 31) | (1 << 23))
 #define TCR_EL2_TBI    (1 << 20)
 #define VTCR_EL2_SL0_LVL1      (1 << 6)
 #define VTCR_EL2_T0SZ_MASK     0x3f
 #define VTCR_EL2_T0SZ_40B      24
+#define VTCR_EL2_VS            19
 
 /*
  * We configure the Stage-2 page tables to always restrict the IPA space to be
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
 #define VTTBR_BADDR_MASK  (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (UL(48))
-#define VTTBR_VMID_MASK          (UL(0xFF) << VTTBR_VMID_SHIFT)
+#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
 
 /* Hyp System Trap Register */
 #define HSTR_EL2_T(x)  (1 << x)
index 419bc6661b5c44198f03a7f2b80c59a3f17e8fb3..054ac25e7c2e7c7d9b9f2c3ef32286c35ed9c254 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
 
 #include <asm/virt.h>
 
-/*
- * 0 is reserved as an invalid value.
- * Order *must* be kept in sync with the hyp switch code.
- */
-#define        MPIDR_EL1       1       /* MultiProcessor Affinity Register */
-#define        CSSELR_EL1      2       /* Cache Size Selection Register */
-#define        SCTLR_EL1       3       /* System Control Register */
-#define        ACTLR_EL1       4       /* Auxiliary Control Register */
-#define        CPACR_EL1       5       /* Coprocessor Access Control */
-#define        TTBR0_EL1       6       /* Translation Table Base Register 0 */
-#define        TTBR1_EL1       7       /* Translation Table Base Register 1 */
-#define        TCR_EL1         8       /* Translation Control Register */
-#define        ESR_EL1         9       /* Exception Syndrome Register */
-#define        AFSR0_EL1       10      /* Auxilary Fault Status Register 0 */
-#define        AFSR1_EL1       11      /* Auxilary Fault Status Register 1 */
-#define        FAR_EL1         12      /* Fault Address Register */
-#define        MAIR_EL1        13      /* Memory Attribute Indirection Register */
-#define        VBAR_EL1        14      /* Vector Base Address Register */
-#define        CONTEXTIDR_EL1  15      /* Context ID Register */
-#define        TPIDR_EL0       16      /* Thread ID, User R/W */
-#define        TPIDRRO_EL0     17      /* Thread ID, User R/O */
-#define        TPIDR_EL1       18      /* Thread ID, Privileged */
-#define        AMAIR_EL1       19      /* Aux Memory Attribute Indirection Register */
-#define        CNTKCTL_EL1     20      /* Timer Control Register (EL1) */
-#define        PAR_EL1         21      /* Physical Address Register */
-#define MDSCR_EL1      22      /* Monitor Debug System Control Register */
-#define MDCCINT_EL1    23      /* Monitor Debug Comms Channel Interrupt Enable Reg */
-
-/* 32bit specific registers. Keep them at the end of the range */
-#define        DACR32_EL2      24      /* Domain Access Control Register */
-#define        IFSR32_EL2      25      /* Instruction Fault Status Register */
-#define        FPEXC32_EL2     26      /* Floating-Point Exception Control Register */
-#define        DBGVCR32_EL2    27      /* Debug Vector Catch Register */
-#define        NR_SYS_REGS     28
-
-/* 32bit mapping */
-#define c0_MPIDR       (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
-#define c0_CSSELR      (CSSELR_EL1 * 2)/* Cache Size Selection Register */
-#define c1_SCTLR       (SCTLR_EL1 * 2) /* System Control Register */
-#define c1_ACTLR       (ACTLR_EL1 * 2) /* Auxiliary Control Register */
-#define c1_CPACR       (CPACR_EL1 * 2) /* Coprocessor Access Control */
-#define c2_TTBR0       (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
-#define c2_TTBR0_high  (c2_TTBR0 + 1)  /* TTBR0 top 32 bits */
-#define c2_TTBR1       (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
-#define c2_TTBR1_high  (c2_TTBR1 + 1)  /* TTBR1 top 32 bits */
-#define c2_TTBCR       (TCR_EL1 * 2)   /* Translation Table Base Control R. */
-#define c3_DACR                (DACR32_EL2 * 2)/* Domain Access Control Register */
-#define c5_DFSR                (ESR_EL1 * 2)   /* Data Fault Status Register */
-#define c5_IFSR                (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
-#define c5_ADFSR       (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
-#define c5_AIFSR       (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
-#define c6_DFAR                (FAR_EL1 * 2)   /* Data Fault Address Register */
-#define c6_IFAR                (c6_DFAR + 1)   /* Instruction Fault Address Register */
-#define c7_PAR         (PAR_EL1 * 2)   /* Physical Address Register */
-#define c7_PAR_high    (c7_PAR + 1)    /* PAR top 32 bits */
-#define c10_PRRR       (MAIR_EL1 * 2)  /* Primary Region Remap Register */
-#define c10_NMRR       (c10_PRRR + 1)  /* Normal Memory Remap Register */
-#define c12_VBAR       (VBAR_EL1 * 2)  /* Vector Base Address Register */
-#define c13_CID                (CONTEXTIDR_EL1 * 2)    /* Context ID Register */
-#define c13_TID_URW    (TPIDR_EL0 * 2) /* Thread ID, User R/W */
-#define c13_TID_URO    (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
-#define c13_TID_PRIV   (TPIDR_EL1 * 2) /* Thread ID, Privileged */
-#define c10_AMAIR0     (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
-#define c10_AMAIR1     (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
-#define c14_CNTKCTL    (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
-
-#define cp14_DBGDSCRext        (MDSCR_EL1 * 2)
-#define cp14_DBGBCR0   (DBGBCR0_EL1 * 2)
-#define cp14_DBGBVR0   (DBGBVR0_EL1 * 2)
-#define cp14_DBGBXVR0  (cp14_DBGBVR0 + 1)
-#define cp14_DBGWCR0   (DBGWCR0_EL1 * 2)
-#define cp14_DBGWVR0   (DBGWVR0_EL1 * 2)
-#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
-
-#define NR_COPRO_REGS  (NR_SYS_REGS * 2)
-
 #define ARM_EXCEPTION_IRQ        0
 #define ARM_EXCEPTION_TRAP       1
 
index 25a40213bd9b87cb6802ecfd4204dc66c41e5850..3066328cd86b69a91274e0cb841059b428666140 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -26,7 +26,6 @@
 
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
 #include <asm/cputype.h>
index 90c6368ad7c859bbc101d88f5273d33380c8d35a..bbdaa56c22244cfcc119ea403ab813424c8fed6c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,6 @@
 #include <linux/types.h>
 #include <linux/kvm_types.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -85,6 +84,86 @@ struct kvm_vcpu_fault_info {
        u64 hpfar_el2;          /* Hyp IPA Fault Address Register */
 };
 
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+       __INVALID_SYSREG__,
+       MPIDR_EL1,      /* MultiProcessor Affinity Register */
+       CSSELR_EL1,     /* Cache Size Selection Register */
+       SCTLR_EL1,      /* System Control Register */
+       ACTLR_EL1,      /* Auxiliary Control Register */
+       CPACR_EL1,      /* Coprocessor Access Control */
+       TTBR0_EL1,      /* Translation Table Base Register 0 */
+       TTBR1_EL1,      /* Translation Table Base Register 1 */
+       TCR_EL1,        /* Translation Control Register */
+       ESR_EL1,        /* Exception Syndrome Register */
+       AFSR0_EL1,      /* Auxiliary Fault Status Register 0 */
+       AFSR1_EL1,      /* Auxiliary Fault Status Register 1 */
+       FAR_EL1,        /* Fault Address Register */
+       MAIR_EL1,       /* Memory Attribute Indirection Register */
+       VBAR_EL1,       /* Vector Base Address Register */
+       CONTEXTIDR_EL1, /* Context ID Register */
+       TPIDR_EL0,      /* Thread ID, User R/W */
+       TPIDRRO_EL0,    /* Thread ID, User R/O */
+       TPIDR_EL1,      /* Thread ID, Privileged */
+       AMAIR_EL1,      /* Aux Memory Attribute Indirection Register */
+       CNTKCTL_EL1,    /* Timer Control Register (EL1) */
+       PAR_EL1,        /* Physical Address Register */
+       MDSCR_EL1,      /* Monitor Debug System Control Register */
+       MDCCINT_EL1,    /* Monitor Debug Comms Channel Interrupt Enable Reg */
+
+       /* 32bit specific registers. Keep them at the end of the range */
+       DACR32_EL2,     /* Domain Access Control Register */
+       IFSR32_EL2,     /* Instruction Fault Status Register */
+       FPEXC32_EL2,    /* Floating-Point Exception Control Register */
+       DBGVCR32_EL2,   /* Debug Vector Catch Register */
+
+       NR_SYS_REGS     /* Nothing after this line! */
+};
+
+/* 32bit mapping */
+#define c0_MPIDR       (MPIDR_EL1 * 2) /* MultiProcessor ID Register */
+#define c0_CSSELR      (CSSELR_EL1 * 2)/* Cache Size Selection Register */
+#define c1_SCTLR       (SCTLR_EL1 * 2) /* System Control Register */
+#define c1_ACTLR       (ACTLR_EL1 * 2) /* Auxiliary Control Register */
+#define c1_CPACR       (CPACR_EL1 * 2) /* Coprocessor Access Control */
+#define c2_TTBR0       (TTBR0_EL1 * 2) /* Translation Table Base Register 0 */
+#define c2_TTBR0_high  (c2_TTBR0 + 1)  /* TTBR0 top 32 bits */
+#define c2_TTBR1       (TTBR1_EL1 * 2) /* Translation Table Base Register 1 */
+#define c2_TTBR1_high  (c2_TTBR1 + 1)  /* TTBR1 top 32 bits */
+#define c2_TTBCR       (TCR_EL1 * 2)   /* Translation Table Base Control R. */
+#define c3_DACR                (DACR32_EL2 * 2)/* Domain Access Control Register */
+#define c5_DFSR                (ESR_EL1 * 2)   /* Data Fault Status Register */
+#define c5_IFSR                (IFSR32_EL2 * 2)/* Instruction Fault Status Register */
+#define c5_ADFSR       (AFSR0_EL1 * 2) /* Auxiliary Data Fault Status R */
+#define c5_AIFSR       (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */
+#define c6_DFAR                (FAR_EL1 * 2)   /* Data Fault Address Register */
+#define c6_IFAR                (c6_DFAR + 1)   /* Instruction Fault Address Register */
+#define c7_PAR         (PAR_EL1 * 2)   /* Physical Address Register */
+#define c7_PAR_high    (c7_PAR + 1)    /* PAR top 32 bits */
+#define c10_PRRR       (MAIR_EL1 * 2)  /* Primary Region Remap Register */
+#define c10_NMRR       (c10_PRRR + 1)  /* Normal Memory Remap Register */
+#define c12_VBAR       (VBAR_EL1 * 2)  /* Vector Base Address Register */
+#define c13_CID                (CONTEXTIDR_EL1 * 2)    /* Context ID Register */
+#define c13_TID_URW    (TPIDR_EL0 * 2) /* Thread ID, User R/W */
+#define c13_TID_URO    (TPIDRRO_EL0 * 2)/* Thread ID, User R/O */
+#define c13_TID_PRIV   (TPIDR_EL1 * 2) /* Thread ID, Privileged */
+#define c10_AMAIR0     (AMAIR_EL1 * 2) /* Aux Memory Attr Indirection Reg */
+#define c10_AMAIR1     (c10_AMAIR0 + 1)/* Aux Memory Attr Indirection Reg */
+#define c14_CNTKCTL    (CNTKCTL_EL1 * 2) /* Timer Control Register (PL1) */
+
+#define cp14_DBGDSCRext        (MDSCR_EL1 * 2)
+#define cp14_DBGBCR0   (DBGBCR0_EL1 * 2)
+#define cp14_DBGBVR0   (DBGBVR0_EL1 * 2)
+#define cp14_DBGBXVR0  (cp14_DBGBVR0 + 1)
+#define cp14_DBGWCR0   (DBGWCR0_EL1 * 2)
+#define cp14_DBGWVR0   (DBGWVR0_EL1 * 2)
+#define cp14_DBGDCCINT (MDCCINT_EL1 * 2)
+
+#define NR_COPRO_REGS  (NR_SYS_REGS * 2)
+
 struct kvm_cpu_context {
        struct kvm_regs gp_regs;
        union {
@@ -247,6 +326,10 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
                       hyp_stack_ptr, vector_ptr);
 }
 
+static inline void __cpu_init_stage2(void)
+{
+}
+
 static inline void kvm_arch_hardware_disable(void) {}
 static inline void kvm_arch_hardware_unsetup(void) {}
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
index 889c908ee631b526594b5dfc32ef5dfde15480df..fe612a9625766b5fff3698e2d1014d7376744e4f 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -19,7 +19,6 @@
 #define __ARM64_KVM_MMIO_H__
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
 
 /*
index 61505676d0853bb65710fc9ad7746db8f58e4658..0bf8b4320a9154fda9e6fd3d999ef37a356bee85 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -20,6 +20,7 @@
 
 #include <asm/page.h>
 #include <asm/memory.h>
+#include <asm/cpufeature.h>
 
 /*
  * As we only have the TTBR0_EL2 register, we cannot express
@@ -158,7 +159,6 @@ static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
 #define PTRS_PER_S2_PGD_SHIFT  (KVM_PHYS_SHIFT - PGDIR_SHIFT)
 #endif
 #define PTRS_PER_S2_PGD                (1 << PTRS_PER_S2_PGD_SHIFT)
-#define S2_PGD_ORDER           get_order(PTRS_PER_S2_PGD * sizeof(pgd_t))
 
 #define kvm_pgd_index(addr)    (((addr) >> PGDIR_SHIFT) & (PTRS_PER_S2_PGD - 1))
 
@@ -302,5 +302,12 @@ static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
        merged_hyp_pgd[idmap_idx] = __pgd(__pa(boot_hyp_pgd) | PMD_TYPE_TABLE);
 }
 
+static inline unsigned int kvm_get_vmid_bits(void)
+{
+       int reg = read_system_reg(SYS_ID_AA64MMFR1_EL1);
+
+       return (cpuid_feature_extract_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ARM64_KVM_MMU_H__ */
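
Note: with kvm_get_vmid_bits() in place, the parameterised VTTBR_VMID_MASK(size)
from the kvm_arm.h hunks evaluates as below (standalone userspace illustration,
not kernel code):

        #include <inttypes.h>
        #include <stdio.h>

        #define VTTBR_VMID_SHIFT 48
        #define VTTBR_VMID_MASK(size) \
                ((uint64_t)((1 << (size)) - 1) << VTTBR_VMID_SHIFT)

        int main(void)
        {
                /* 8-bit VMIDs (pre-ARMv8.1) vs 16-bit (ID_AA64MMFR1_EL1.VMIDBits == 2) */
                printf("%016" PRIx64 "\n", VTTBR_VMID_MASK(8));  /* 00ff000000000000 */
                printf("%016" PRIx64 "\n", VTTBR_VMID_MASK(16)); /* ffff000000000000 */
                return 0;
        }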
index ba1b3409d7edd1fc349ef474631d1fd263226e78..ae11e8fdbfd2b29d7e326683cb49cd6a325ef39f 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -71,6 +71,9 @@
 
 #define TASK_UNMAPPED_BASE     (PAGE_ALIGN(TASK_SIZE / 4))
 
+#define KERNEL_START      _text
+#define KERNEL_END        _end
+
 /*
  * The size of the KASAN shadow region. This should be 1/8th of the
  * size of the entire kernel virtual address space.
index 9b2f5a9d019df493fa6021ee3ca6b4779401d8c4..fbafd0ad16df768fa7966a769014db5020294c41 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -19,6 +19,8 @@
 #ifndef __ASM_PAGE_H
 #define __ASM_PAGE_H
 
+#include <linux/const.h>
+
 /* PAGE_SHIFT determines the page size */
 /* CONT_SHIFT determines the number of pages which can be tracked together  */
 #ifdef CONFIG_ARM64_64K_PAGES
index 59a5b0f1e81c3274f3c1c5c0ffb4ce4014ce35c2..024d623f662e588c845f47abd752bfba2470ed2f 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,8 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 11
+#define NR_CTX_REGS 10
+#define NR_CALLEE_SAVED_REGS 12
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
@@ -16,11 +17,34 @@ struct cpu_suspend_ctx {
        u64 sp;
 } __aligned(16);
 
-struct sleep_save_sp {
-       phys_addr_t *save_ptr_stash;
-       phys_addr_t save_ptr_stash_phys;
+/*
+ * Memory to save the cpu state is allocated on the stack by
+ * __cpu_suspend_enter()'s caller, and populated by __cpu_suspend_enter().
+ * This data must survive until cpu_resume() is called.
+ *
+ * This struct describes the size and the layout of the saved cpu state.
+ * The layout of the callee_saved_regs is defined by the implementation
+ * of __cpu_suspend_enter(), and cpu_resume(). This struct must be passed
+ * in by the caller as __cpu_suspend_enter()'s stack-frame is gone once it
+ * returns, and the data would be subsequently corrupted by the call to the
+ * finisher.
+ */
+struct sleep_stack_data {
+       struct cpu_suspend_ctx  system_regs;
+       unsigned long           callee_saved_regs[NR_CALLEE_SAVED_REGS];
 };
 
+extern unsigned long *sleep_save_stash;
+
 extern int cpu_suspend(unsigned long arg, int (*fn)(unsigned long));
 extern void cpu_resume(void);
+int __cpu_suspend_enter(struct sleep_stack_data *state);
+void __cpu_suspend_exit(void);
+void _cpu_resume(void);
+
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+
 #endif
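
Note: the sleep_stack_data contract is easiest to see from the caller's side; a
simplified sketch of how cpu_suspend() is expected to use it (based on the
4.6-era arm64 implementation, details elided):

        int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        {
                int ret = 0;
                struct sleep_stack_data state;  /* lives in this caller's frame */

                if (__cpu_suspend_enter(&state)) {
                        /* state saved: call the finisher to actually power down */
                        ret = fn(arg);
                        /* only reached if the finisher failed to power off */
                        if (!ret)
                                ret = -EOPNOTSUPP;
                } else {
                        /* woke up through cpu_resume(): undo suspend-time state */
                        __cpu_suspend_exit();
                }
                return ret;
        }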
index 1a78d6e2a78b58bf21113de3810785a81a35a29d..0961a24e8d4891bdf976f7cf1d39a39cd31cc229 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
 #define SET_PSTATE_UAO(x) __inst_arm(0xd5000000 | REG_PSTATE_UAO_IMM |\
                                     (!!x)<<8 | 0x1f)
 
-/* SCTLR_EL1 */
-#define SCTLR_EL1_CP15BEN      (0x1 << 5)
-#define SCTLR_EL1_SED          (0x1 << 8)
-#define SCTLR_EL1_SPAN         (0x1 << 23)
+/* Common SCTLR_ELx flags. */
+#define SCTLR_ELx_EE    (1 << 25)
+#define SCTLR_ELx_I    (1 << 12)
+#define SCTLR_ELx_SA   (1 << 3)
+#define SCTLR_ELx_C    (1 << 2)
+#define SCTLR_ELx_A    (1 << 1)
+#define SCTLR_ELx_M    1
+
+#define SCTLR_ELx_FLAGS        (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \
+                        SCTLR_ELx_SA | SCTLR_ELx_I)
+
+/* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_SPAN         (1 << 23)
+#define SCTLR_EL1_SED          (1 << 8)
+#define SCTLR_EL1_CP15BEN      (1 << 5)
 
 
 /* id_aa64isar0 */
index 7a5df5252dd736e4038e04e40510351820056183..9f22dd607958ad7cd673e79e91c36e28768c35f7 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -23,6 +23,8 @@
 
 #ifndef __ASSEMBLY__
 
+#include <asm/ptrace.h>
+
 /*
  * __boot_cpu_mode records what mode CPUs were booted in.
  * A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +52,14 @@ static inline bool is_hyp_mode_mismatched(void)
        return __boot_cpu_mode[0] != __boot_cpu_mode[1];
 }
 
+static inline bool is_kernel_in_hyp_mode(void)
+{
+       u64 el;
+
+       asm("mrs %0, CurrentEL" : "=r" (el));
+       return el == CurrentEL_EL2;
+}
+
 /* The section containing the hypervisor text */
 extern char __hyp_text_start[];
 extern char __hyp_text_end[];
index 69b15d97fb83b1b853b32407a97ad0e8ec37ce37..20bcc2db06bfab77cc0793af96b7ce051d321e10 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -14,10 +14,10 @@ CFLAGS_REMOVE_return_address.o = -pg
 arm64-obj-y            := debug-monitors.o entry.o irq.o fpsimd.o              \
                           entry-fpsimd.o process.o ptrace.o setup.o signal.o   \
                           sys.o stacktrace.o time.o traps.o io.o vdso.o        \
-                          hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o       \
+                          hyp-stub.o psci.o cpu_ops.o insn.o   \
                           return_address.o cpuinfo.o cpu_errata.o              \
                           cpufeature.o alternative.o cacheinfo.o               \
-                          smp.o smp_spin_table.o topology.o
+                          smp.o smp_spin_table.o topology.o smccc-call.o
 
 extra-$(CONFIG_EFI)                    := efi-entry.o
 
@@ -41,8 +41,10 @@ arm64-obj-$(CONFIG_EFI)                      += efi.o efi-entry.stub.o
 arm64-obj-$(CONFIG_PCI)                        += pci.o
 arm64-obj-$(CONFIG_ARMV8_DEPRECATED)   += armv8_deprecated.o
 arm64-obj-$(CONFIG_ACPI)               += acpi.o
-arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)        += acpi_parking_protocol.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)     += kaslr.o
+arm64-obj-$(CONFIG_HIBERNATION)                += hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)        += acpi_parking_protocol.o
+arm64-obj-$(CONFIG_PARAVIRT)           += paravirt.o
 
 obj-y                                  += $(arm64-obj-y) vdso/ probes/
 obj-m                                  += $(arm64-obj-m)
index 803965a792e0ea132d9ad4c7693adf25cc9bafe8..ee97181e44770cdec504f8dc15b41cbe5129b67e 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kprobes.h>
+#include <linux/arm-smccc.h>
 
 #include <asm/checksum.h>
 
@@ -70,3 +71,7 @@ EXPORT_SYMBOL(test_and_change_bit);
 EXPORT_SYMBOL(_mcount);
 NOKPROBE_SYMBOL(_mcount);
 #endif
+
+       /* arm-smccc */
+EXPORT_SYMBOL(arm_smccc_smc);
+EXPORT_SYMBOL(arm_smccc_hvc);
index c54384b7e8b23f4490e6ed901a9472dbf803f5bf..dac70c160289569782878ac82c3525f37afd71b5 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include <linux/kvm_host.h>
+#include <linux/suspend.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/vdso_datapage.h>
 #include <linux/kbuild.h>
+#include <linux/arm-smccc.h>
 
 int main(void)
 {
@@ -123,58 +125,25 @@ int main(void)
   DEFINE(CPU_GP_REGS,          offsetof(struct kvm_cpu_context, gp_regs));
   DEFINE(CPU_USER_PT_REGS,     offsetof(struct kvm_regs, regs));
   DEFINE(CPU_FP_REGS,          offsetof(struct kvm_regs, fp_regs));
-  DEFINE(CPU_SP_EL1,           offsetof(struct kvm_regs, sp_el1));
-  DEFINE(CPU_ELR_EL1,          offsetof(struct kvm_regs, elr_el1));
-  DEFINE(CPU_SPSR,             offsetof(struct kvm_regs, spsr));
-  DEFINE(CPU_SYSREGS,          offsetof(struct kvm_cpu_context, sys_regs));
+  DEFINE(VCPU_FPEXC32_EL2,     offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
   DEFINE(VCPU_ESR_EL2,         offsetof(struct kvm_vcpu, arch.fault.esr_el2));
   DEFINE(VCPU_FAR_EL2,         offsetof(struct kvm_vcpu, arch.fault.far_el2));
   DEFINE(VCPU_HPFAR_EL2,       offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
-  DEFINE(VCPU_DEBUG_FLAGS,     offsetof(struct kvm_vcpu, arch.debug_flags));
-  DEFINE(VCPU_DEBUG_PTR,       offsetof(struct kvm_vcpu, arch.debug_ptr));
-  DEFINE(DEBUG_BCR,            offsetof(struct kvm_guest_debug_arch, dbg_bcr));
-  DEFINE(DEBUG_BVR,            offsetof(struct kvm_guest_debug_arch, dbg_bvr));
-  DEFINE(DEBUG_WCR,            offsetof(struct kvm_guest_debug_arch, dbg_wcr));
-  DEFINE(DEBUG_WVR,            offsetof(struct kvm_guest_debug_arch, dbg_wvr));
-  DEFINE(VCPU_HCR_EL2,         offsetof(struct kvm_vcpu, arch.hcr_el2));
-  DEFINE(VCPU_MDCR_EL2,        offsetof(struct kvm_vcpu, arch.mdcr_el2));
-  DEFINE(VCPU_IRQ_LINES,       offsetof(struct kvm_vcpu, arch.irq_lines));
   DEFINE(VCPU_HOST_CONTEXT,    offsetof(struct kvm_vcpu, arch.host_cpu_context));
-  DEFINE(VCPU_HOST_DEBUG_STATE, offsetof(struct kvm_vcpu, arch.host_debug_state));
-  DEFINE(VCPU_TIMER_CNTV_CTL,  offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
-  DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
-  DEFINE(KVM_TIMER_CNTVOFF,    offsetof(struct kvm, arch.timer.cntvoff));
-  DEFINE(KVM_TIMER_ENABLED,    offsetof(struct kvm, arch.timer.enabled));
-  DEFINE(VCPU_KVM,             offsetof(struct kvm_vcpu, kvm));
-  DEFINE(VCPU_VGIC_CPU,                offsetof(struct kvm_vcpu, arch.vgic_cpu));
-  DEFINE(VGIC_V2_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
-  DEFINE(VGIC_V2_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
-  DEFINE(VGIC_V2_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
-  DEFINE(VGIC_V2_CPU_EISR,     offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
-  DEFINE(VGIC_V2_CPU_ELRSR,    offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
-  DEFINE(VGIC_V2_CPU_APR,      offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
-  DEFINE(VGIC_V2_CPU_LR,       offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
-  DEFINE(VGIC_V3_CPU_SRE,      offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
-  DEFINE(VGIC_V3_CPU_HCR,      offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
-  DEFINE(VGIC_V3_CPU_VMCR,     offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
-  DEFINE(VGIC_V3_CPU_MISR,     offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
-  DEFINE(VGIC_V3_CPU_EISR,     offsetof(struct vgic_cpu, vgic_v3.vgic_eisr));
-  DEFINE(VGIC_V3_CPU_ELRSR,    offsetof(struct vgic_cpu, vgic_v3.vgic_elrsr));
-  DEFINE(VGIC_V3_CPU_AP0R,     offsetof(struct vgic_cpu, vgic_v3.vgic_ap0r));
-  DEFINE(VGIC_V3_CPU_AP1R,     offsetof(struct vgic_cpu, vgic_v3.vgic_ap1r));
-  DEFINE(VGIC_V3_CPU_LR,       offsetof(struct vgic_cpu, vgic_v3.vgic_lr));
-  DEFINE(VGIC_CPU_NR_LR,       offsetof(struct vgic_cpu, nr_lr));
-  DEFINE(KVM_VTTBR,            offsetof(struct kvm, arch.vttbr));
-  DEFINE(KVM_VGIC_VCTRL,       offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
 #ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,       sizeof(struct cpu_suspend_ctx));
   DEFINE(CPU_CTX_SP,           offsetof(struct cpu_suspend_ctx, sp));
   DEFINE(MPIDR_HASH_MASK,      offsetof(struct mpidr_hash, mask));
   DEFINE(MPIDR_HASH_SHIFTS,    offsetof(struct mpidr_hash, shift_aff));
-  DEFINE(SLEEP_SAVE_SP_SZ,     sizeof(struct sleep_save_sp));
-  DEFINE(SLEEP_SAVE_SP_PHYS,   offsetof(struct sleep_save_sp, save_ptr_stash_phys));
-  DEFINE(SLEEP_SAVE_SP_VIRT,   offsetof(struct sleep_save_sp, save_ptr_stash));
+  DEFINE(SLEEP_STACK_DATA_SYSTEM_REGS, offsetof(struct sleep_stack_data, system_regs));
+  DEFINE(SLEEP_STACK_DATA_CALLEE_REGS, offsetof(struct sleep_stack_data, callee_saved_regs));
 #endif
+  DEFINE(ARM_SMCCC_RES_X0_OFFS,        offsetof(struct arm_smccc_res, a0));
+  DEFINE(ARM_SMCCC_RES_X2_OFFS,        offsetof(struct arm_smccc_res, a2));
+  BLANK();
+  DEFINE(HIBERN_PBE_ORIG,      offsetof(struct pbe, orig_address));
+  DEFINE(HIBERN_PBE_ADDR,      offsetof(struct pbe, address));
+  DEFINE(HIBERN_PBE_NEXT,      offsetof(struct pbe, next));
   return 0;
 }
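
Note: each DEFINE() above expands to an asm marker that the build scrapes into
include/generated/asm-offsets.h, which is how assembly such as hibernate-asm.S
can use HIBERN_PBE_ORIG and friends as immediate offsets. The generator macros,
from include/linux/kbuild.h:

        #define DEFINE(sym, val) \
                asm volatile("\n->" #sym " %0 " #val : : "i" (val))

        #define BLANK() asm volatile("\n->" : : )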
index 40ee3f2933e78dffe011e4d2e5615f0d42222e53..a0c41dae0d8118d5b9c888299cad0895f0ac9036 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -26,6 +26,7 @@
 #include <asm/cpu_ops.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
+#include <asm/virt.h>
 
 unsigned long elf_hwcap __read_mostly;
 EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -647,6 +648,11 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry)
        return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, rv_min, rv_max);
 }
 
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+       return is_kernel_in_hyp_mode();
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
        {
                .desc = "GIC system register CPU interface",
@@ -699,6 +705,11 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
                .matches = cpufeature_pan_not_uao,
        },
 #endif /* CONFIG_ARM64_PAN */
+       {
+               .desc = "Virtualization Host Extensions",
+               .capability = ARM64_HAS_VIRT_HOST_EXTN,
+               .matches = runs_at_el2,
+       },
        {},
 };
 
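Note: once registered, other code can key off the new capability; a hypothetical
guard (cpus_have_cap() is the existing 4.4 helper):

        /* With VHE the kernel already runs at EL2; no HVC world switch needed. */
        if (cpus_have_cap(ARM64_HAS_VIRT_HOST_EXTN))
                pr_info("KVM: VHE mode available\n");
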
index c54df6d8d8fe8e52f7b261c0ca0ed6f2cb1fd21e..8cfd5ab377434b2ccafd5b8d7c4701386faf8a95 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -50,9 +50,6 @@
 #error TEXT_OFFSET must be less than 2MB
 #endif
 
-#define KERNEL_START   _text
-#define KERNEL_END     _end
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -666,7 +663,7 @@ ENDPROC(__secondary_switched)
  * If it isn't, park the CPU
  */
        .section        ".idmap.text", "ax"
-__enable_mmu:
+ENTRY(__enable_mmu)
        mrs     x18, sctlr_el1                  // preserve old SCTLR_EL1 value
        mrs     x1, ID_AA64MMFR0_EL1
        ubfx    x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S
new file mode 100644
index 0000000..46f29b6
--- /dev/null
+++ b/arch/arm64/kernel/hibernate-asm.S
@@ -0,0 +1,176 @@
+/*
+ * Hibernate low-level support
+ *
+ * Copyright (C) 2016 ARM Ltd.
+ * Author:     James Morse <james.morse@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/linkage.h>
+#include <linux/errno.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/cputype.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+#include <asm/virt.h>
+
+/*
+ * To prevent the possibility of old and new partial table walks being visible
+ * in the tlb, switch the ttbr to a zero page when we invalidate the old
+ * records. D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i
+ * Even switching to our copied tables will cause a changed output address at
+ * each stage of the walk.
+ */
+.macro break_before_make_ttbr_switch zero_page, page_table
+       msr     ttbr1_el1, \zero_page
+       isb
+       tlbi    vmalle1is
+       dsb     ish
+       msr     ttbr1_el1, \page_table
+       isb
+.endm
+
+
+/*
+ * Resume from hibernate
+ *
+ * Loads temporary page tables then restores the memory image.
+ * Finally branches to cpu_resume() to restore the state saved by
+ * swsusp_arch_suspend().
+ *
+ * Because this code has to be copied to a 'safe' page, it can't call out to
+ * other functions by PC-relative address. Also remember that it may be
+ * mid-way through over-writing other functions. For this reason it contains
+ * code from flush_icache_range() and uses the copy_page() macro.
+ *
+ * This 'safe' page is mapped via ttbr0, and executed from there. This function
+ * switches to a copy of the linear map in ttbr1, performs the restore, then
+ * switches ttbr1 to the original kernel's swapper_pg_dir.
+ *
+ * All of memory gets written to, including code. We need to clean the kernel
+ * text to the Point of Coherence (PoC) before secondary cores can be booted.
+ * Because the kernel modules and executable pages mapped to user space are
+ * also written as data, we clean all pages we touch to the Point of
+ * Unification (PoU).
+ *
+ * x0: physical address of temporary page tables
+ * x1: physical address of swapper page tables
+ * x2: address of cpu_resume
+ * x3: linear map address of restore_pblist in the current kernel
+ * x4: physical address of __hyp_stub_vectors, or 0
+ * x5: physical address of a  zero page that remains zero after resume
+ */
+.pushsection    ".hibernate_exit.text", "ax"
+ENTRY(swsusp_arch_suspend_exit)
+       /*
+        * We execute from ttbr0, change ttbr1 to our copied linear map tables
+        * with a break-before-make via the zero page
+        */
+       break_before_make_ttbr_switch   x5, x0
+
+       mov     x21, x1
+       mov     x30, x2
+       mov     x24, x4
+       mov     x25, x5
+
+       /* walk the restore_pblist and use copy_page() to over-write memory */
+       mov     x19, x3
+
+1:     ldr     x10, [x19, #HIBERN_PBE_ORIG]
+       mov     x0, x10
+       ldr     x1, [x19, #HIBERN_PBE_ADDR]
+
+       copy_page       x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
+
+       add     x1, x10, #PAGE_SIZE
+       /* Clean the copied page to PoU - based on flush_icache_range() */
+       dcache_line_size x2, x3
+       sub     x3, x2, #1
+       bic     x4, x10, x3
+2:     dc      cvau, x4        /* clean D line / unified line */
+       add     x4, x4, x2
+       cmp     x4, x1
+       b.lo    2b
+
+       ldr     x19, [x19, #HIBERN_PBE_NEXT]
+       cbnz    x19, 1b
+       dsb     ish             /* wait for PoU cleaning to finish */
+
+       /* switch to the restored kernel's page tables */
+       break_before_make_ttbr_switch   x25, x21
+
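+       /* we have just overwritten kernel text: discard any stale instructions from the I-caches */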
+       ic      ialluis
+       dsb     ish
+       isb
+
+       cbz     x24, 3f         /* Do we need to re-initialise EL2? */
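+       /* the hvc traps to el1_sync in hibernate_el2_vectors, which reinstalls the hyp-stub */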
+       hvc     #0
+3:     ret
+
+       .ltorg
+ENDPROC(swsusp_arch_suspend_exit)
+
+/*
+ * Restore the hyp stub.
+ * This must be done before the hibernate page is unmapped by _cpu_resume(),
+ * but happens before any of the hyp-stub's code is cleaned to PoC.
+ *
+ * x24: The physical address of __hyp_stub_vectors
+ */
+el1_sync:
+       msr     vbar_el2, x24
+       eret
+ENDPROC(el1_sync)
+
+.macro invalid_vector  label
+\label:
+       b \label
+ENDPROC(\label)
+.endm
+
+       invalid_vector  el2_sync_invalid
+       invalid_vector  el2_irq_invalid
+       invalid_vector  el2_fiq_invalid
+       invalid_vector  el2_error_invalid
+       invalid_vector  el1_sync_invalid
+       invalid_vector  el1_irq_invalid
+       invalid_vector  el1_fiq_invalid
+       invalid_vector  el1_error_invalid
+
+/* el2 vectors - switch el2 here while we restore the memory image. */
+       .align 11
+ENTRY(hibernate_el2_vectors)
+       ventry  el2_sync_invalid                // Synchronous EL2t
+       ventry  el2_irq_invalid                 // IRQ EL2t
+       ventry  el2_fiq_invalid                 // FIQ EL2t
+       ventry  el2_error_invalid               // Error EL2t
+
+       ventry  el2_sync_invalid                // Synchronous EL2h
+       ventry  el2_irq_invalid                 // IRQ EL2h
+       ventry  el2_fiq_invalid                 // FIQ EL2h
+       ventry  el2_error_invalid               // Error EL2h
+
+       ventry  el1_sync                        // Synchronous 64-bit EL1
+       ventry  el1_irq_invalid                 // IRQ 64-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
+       ventry  el1_error_invalid               // Error 64-bit EL1
+
+       ventry  el1_sync_invalid                // Synchronous 32-bit EL1
+       ventry  el1_irq_invalid                 // IRQ 32-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
+       ventry  el1_error_invalid               // Error 32-bit EL1
+END(hibernate_el2_vectors)
+
+.popsection
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
new file mode 100644 (file)
index 0000000..f8df75d
--- /dev/null
+++ b/arch/arm64/kernel/hibernate.c
@@ -0,0 +1,487 @@
+/*
+ * Hibernate support specific for ARM64
+ *
+ * Derived from work on ARM hibernation support by:
+ *
+ * Ubuntu project, hibernation support for mach-dove
+ * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
+ * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
+ *  https://lkml.org/lkml/2010/6/18/4
+ *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
+ *  https://patchwork.kernel.org/patch/96442/
+ *
+ * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(x) "hibernate: " x
+#include <linux/kvm_host.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/pm.h>
+#include <linux/sched.h>
+#include <linux/suspend.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
+
+#include <asm/barrier.h>
+#include <asm/cacheflush.h>
+#include <asm/irqflags.h>
+#include <asm/memory.h>
+#include <asm/mmu_context.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/sections.h>
+#include <asm/suspend.h>
+#include <asm/virt.h>
+
+/*
+ * Hibernate core relies on this value being 0 on resume, and marks it
+ * __nosavedata assuming it will keep the resume kernel's '0' value. This
+ * doesn't happen with KASLR, so the restore path must set it to 0 by hand.
+ *
+ * defined as "__visible int in_suspend __nosavedata" in
+ * kernel/power/hibernate.c
+ */
+extern int in_suspend;
+
+/* Find a symbol's alias in the linear map */
+#define LMADDR(x)      phys_to_virt(virt_to_phys(x))
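+/* e.g. LMADDR(KERNEL_START) is the linear-map alias of the start of the kernel text */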
+
+/* Do we need to reset el2? */
+#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
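+/* i.e. we booted at EL2, but the kernel now runs at EL1, so EL2 must be re-initialised on resume */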
+
+/*
+ * Start/end of the hibernate exit code, this must be copied to a 'safe'
+ * location in memory, and executed from there.
+ */
+extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
+
+/* temporary el2 vectors in the __hibernate_exit_text section. */
+extern char hibernate_el2_vectors[];
+
+/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
+extern char __hyp_stub_vectors[];
+
+/*
+ * Values that may not change over hibernate/resume. We put the build number
+ * and date in here so that we guarantee not to resume with a different
+ * kernel.
+ */
+struct arch_hibernate_hdr_invariants {
+       char            uts_version[__NEW_UTS_LEN + 1];
+};
+
+/* These values need to be known across a hibernate/restore. */
+static struct arch_hibernate_hdr {
+       struct arch_hibernate_hdr_invariants invariants;
+
+       /* These are needed to find the relocated kernel if built with kaslr */
+       phys_addr_t     ttbr1_el1;
+       void            (*reenter_kernel)(void);
+
+       /*
+        * We need to know where the __hyp_stub_vectors are after restore to
+        * re-configure el2.
+        */
+       phys_addr_t     __hyp_stub_vectors;
+} resume_hdr;
+
+static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
+{
+       memset(i, 0, sizeof(*i));
+       memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
+}
+
+int pfn_is_nosave(unsigned long pfn)
+{
+       unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
+       unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
+
+       return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
+}
+
+void notrace save_processor_state(void)
+{
+       WARN_ON(num_online_cpus() != 1);
+}
+
+void notrace restore_processor_state(void)
+{
+}
+
+int arch_hibernation_header_save(void *addr, unsigned int max_size)
+{
+       struct arch_hibernate_hdr *hdr = addr;
+
+       if (max_size < sizeof(*hdr))
+               return -EOVERFLOW;
+
+       arch_hdr_invariants(&hdr->invariants);
+       hdr->ttbr1_el1          = virt_to_phys(swapper_pg_dir);
+       hdr->reenter_kernel     = _cpu_resume;
+
+       /* We can't use __hyp_get_vectors() because kvm may still be loaded */
+       if (el2_reset_needed())
+               hdr->__hyp_stub_vectors = virt_to_phys(__hyp_stub_vectors);
+       else
+               hdr->__hyp_stub_vectors = 0;
+
+       return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_save);
+
+int arch_hibernation_header_restore(void *addr)
+{
+       struct arch_hibernate_hdr_invariants invariants;
+       struct arch_hibernate_hdr *hdr = addr;
+
+       arch_hdr_invariants(&invariants);
+       if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
+               pr_crit("Hibernate image not generated by this kernel!\n");
+               return -EINVAL;
+       }
+
+       resume_hdr = *hdr;
+
+       return 0;
+}
+EXPORT_SYMBOL(arch_hibernation_header_restore);
+
+/*
+ * Copies length bytes, starting at src_start, into a new page, performs
+ * cache maintenance, then maps it at the specified low address as
+ * executable.
+ *
+ * This is used by hibernate to copy the code it needs to execute when
+ * overwriting the kernel text. This function generates a new set of page
+ * tables, which it loads into ttbr0.
+ *
+ * Length is provided as we probably only want 4K of data, even on a 64K
+ * page system.
+ */
+static int create_safe_exec_page(void *src_start, size_t length,
+                                unsigned long dst_addr,
+                                phys_addr_t *phys_dst_addr,
+                                void *(*allocator)(gfp_t mask),
+                                gfp_t mask)
+{
+       int rc = 0;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *pte;
+       unsigned long dst = (unsigned long)allocator(mask);
+
+       if (!dst) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       memcpy((void *)dst, src_start, length);
+       flush_icache_range(dst, dst + length);
+
+       pgd = pgd_offset_raw(allocator(mask), dst_addr);
+       if (pgd_none(*pgd)) {
+               pud = allocator(mask);
+               if (!pud) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               pgd_populate(&init_mm, pgd, pud);
+       }
+
+       pud = pud_offset(pgd, dst_addr);
+       if (pud_none(*pud)) {
+               pmd = allocator(mask);
+               if (!pmd) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               pud_populate(&init_mm, pud, pmd);
+       }
+
+       pmd = pmd_offset(pud, dst_addr);
+       if (pmd_none(*pmd)) {
+               pte = allocator(mask);
+               if (!pte) {
+                       rc = -ENOMEM;
+                       goto out;
+               }
+               pmd_populate_kernel(&init_mm, pmd, pte);
+       }
+
+       pte = pte_offset_kernel(pmd, dst_addr);
+       set_pte(pte, __pte(virt_to_phys((void *)dst) |
+                        pgprot_val(PAGE_KERNEL_EXEC)));
+
+       /* Load our new page tables */
+       asm volatile("msr       ttbr0_el1, %0;"
+                    "isb;"
+                    "tlbi      vmalle1is;"
+                    "dsb       ish;"
+                    "isb" : : "r"(virt_to_phys(pgd)));
+
+       *phys_dst_addr = virt_to_phys((void *)dst);
+
+out:
+       return rc;
+}
+
+
+int swsusp_arch_suspend(void)
+{
+       int ret = 0;
+       unsigned long flags;
+       struct sleep_stack_data state;
+
+       local_dbg_save(flags);
+
+       if (__cpu_suspend_enter(&state)) {
+               ret = swsusp_save();
+       } else {
+               /* Clean kernel to PoC for secondary core startup */
+               __flush_dcache_area(LMADDR(KERNEL_START), KERNEL_END - KERNEL_START);
+
+               /*
+                * Tell the hibernation core that we've just restored
+                * the memory
+                */
+               in_suspend = 0;
+
+               __cpu_suspend_exit();
+       }
+
+       local_dbg_restore(flags);
+
+       return ret;
+}
+
+static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+                   unsigned long end)
+{
+       pte_t *src_pte;
+       pte_t *dst_pte;
+       unsigned long addr = start;
+
+       dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
+       if (!dst_pte)
+               return -ENOMEM;
+       pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
+       dst_pte = pte_offset_kernel(dst_pmd, start);
+
+       src_pte = pte_offset_kernel(src_pmd, start);
+       do {
+               if (!pte_none(*src_pte))
+                       /*
+                        * Resume will overwrite areas that may be marked
+                        * read only (code, rodata). Clear the RDONLY bit from
+                        * the temporary mappings we use during restore.
+                        */
+                       set_pte(dst_pte, __pte(pte_val(*src_pte) & ~PTE_RDONLY));
+       } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+
+       return 0;
+}
+
+static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+                   unsigned long end)
+{
+       pmd_t *src_pmd;
+       pmd_t *dst_pmd;
+       unsigned long next;
+       unsigned long addr = start;
+
+       if (pud_none(*dst_pud)) {
+               dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
+               if (!dst_pmd)
+                       return -ENOMEM;
+               pud_populate(&init_mm, dst_pud, dst_pmd);
+       }
+       dst_pmd = pmd_offset(dst_pud, start);
+
+       src_pmd = pmd_offset(src_pud, start);
+       do {
+               next = pmd_addr_end(addr, end);
+               if (pmd_none(*src_pmd))
+                       continue;
+               if (pmd_table(*src_pmd)) {
+                       if (copy_pte(dst_pmd, src_pmd, addr, next))
+                               return -ENOMEM;
+               } else {
+                       set_pmd(dst_pmd,
+                               __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+               }
+       } while (dst_pmd++, src_pmd++, addr = next, addr != end);
+
+       return 0;
+}
+
+static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+                   unsigned long end)
+{
+       pud_t *dst_pud;
+       pud_t *src_pud;
+       unsigned long next;
+       unsigned long addr = start;
+
+       if (pgd_none(*dst_pgd)) {
+               dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
+               if (!dst_pud)
+                       return -ENOMEM;
+               pgd_populate(&init_mm, dst_pgd, dst_pud);
+       }
+       dst_pud = pud_offset(dst_pgd, start);
+
+       src_pud = pud_offset(src_pgd, start);
+       do {
+               next = pud_addr_end(addr, end);
+               if (pud_none(*src_pud))
+                       continue;
+               if (pud_table(*(src_pud))) {
+                       if (copy_pmd(dst_pud, src_pud, addr, next))
+                               return -ENOMEM;
+               } else {
+                       set_pud(dst_pud,
+                               __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+               }
+       } while (dst_pud++, src_pud++, addr = next, addr != end);
+
+       return 0;
+}
+
+static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+                           unsigned long end)
+{
+       unsigned long next;
+       unsigned long addr = start;
+       pgd_t *src_pgd = pgd_offset_k(start);
+
+       dst_pgd = pgd_offset_raw(dst_pgd, start);
+       do {
+               next = pgd_addr_end(addr, end);
+               if (pgd_none(*src_pgd))
+                       continue;
+               if (copy_pud(dst_pgd, src_pgd, addr, next))
+                       return -ENOMEM;
+       } while (dst_pgd++, src_pgd++, addr = next, addr != end);
+
+       return 0;
+}
+
+/*
+ * Set up, then resume from the hibernate image using swsusp_arch_suspend_exit().
+ *
+ * Memory allocated by get_safe_page() will be dealt with by the hibernate code;
+ * we don't need to free it here.
+ */
+int swsusp_arch_resume(void)
+{
+       int rc = 0;
+       void *zero_page;
+       size_t exit_size;
+       pgd_t *tmp_pg_dir;
+       void *lm_restore_pblist;
+       phys_addr_t phys_hibernate_exit;
+       void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
+                                         void *, phys_addr_t, phys_addr_t);
+
+       /*
+        * Locate the exit code in the bottom-but-one page, so that *NULL
+        * still has disastrous effects.
+        */
+       hibernate_exit = (void *)PAGE_SIZE;
+       exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
+       /*
+        * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
+        * a new set of ttbr0 page tables and load them.
+        */
+       rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
+                                  (unsigned long)hibernate_exit,
+                                  &phys_hibernate_exit,
+                                  (void *)get_safe_page, GFP_ATOMIC);
+       if (rc) {
+               pr_err("Failed to create safe executable page for hibernate_exit code.");
+               goto out;
+       }
+
+       /*
+        * The hibernate exit text contains a set of el2 vectors, that will
+        * be executed at el2 with the mmu off in order to reload hyp-stub.
+        */
+       __flush_dcache_area(hibernate_exit, exit_size);
+
+       /*
+        * Restoring the memory image will overwrite the ttbr1 page tables.
+        * Create a second copy of just the linear map, and use this when
+        * restoring.
+        */
+       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!tmp_pg_dir) {
+               pr_err("Failed to allocate memory for temporary page tables.");
+               rc = -ENOMEM;
+               goto out;
+       }
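+       /*
+        * An end address of zero covers everything from PAGE_OFFSET to the
+        * top of the VA space: pgd_addr_end() keeps returning the next
+        * block boundary until 'addr' wraps to zero.
+        */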
+       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+       if (rc)
+               goto out;
+
+       /*
+        * Since we only copied the linear map, we need to find restore_pblist's
+        * linear map address.
+        */
+       lm_restore_pblist = LMADDR(restore_pblist);
+
+       /*
+        * KASLR will cause the el2 vectors to be in a different location in
+        * the resumed kernel. Load hibernate's temporary copy into el2.
+        *
+        * We can skip this step if we booted at EL1, or are running with VHE.
+        */
+       if (el2_reset_needed()) {
+               phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
+               el2_vectors += hibernate_el2_vectors -
+                              __hibernate_exit_text_start;     /* offset */
+
+               __hyp_set_vectors(el2_vectors);
+       }
+
+       /*
+        * We need a zero page that is zero before & after resume in order
+        * to break-before-make on the ttbr1 page tables.
+        */
+       zero_page = (void *)get_safe_page(GFP_ATOMIC);
+
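+       /*
+        * hibernate_exit() never returns: it restores the image and then
+        * branches to the resumed kernel via resume_hdr.reenter_kernel.
+        */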
+       hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
+                      resume_hdr.reenter_kernel, lm_restore_pblist,
+                      resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
+
+out:
+       return rc;
+}
+
+static int check_boot_cpu_online_pm_callback(struct notifier_block *nb,
+                                            unsigned long action, void *ptr)
+{
+       if (action == PM_HIBERNATION_PREPARE &&
+            cpumask_first(cpu_online_mask) != 0) {
+               pr_warn("CPU0 is offline.\n");
+               return notifier_from_errno(-ENODEV);
+       }
+
+       return NOTIFY_OK;
+}
+
+static int __init check_boot_cpu_online_init(void)
+{
+       /*
+        * Set this pm_notifier callback with a lower priority than
+        * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback will be
+        * called earlier to disable cpu hotplug before the cpu online check.
+        */
+       pm_notifier(check_boot_cpu_online_pm_callback, -INT_MAX);
+
+       return 0;
+}
+core_initcall(check_boot_cpu_online_init);
diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S
deleted file mode 100644 (file)
index cf83e61..0000000
--- a/arch/arm64/kernel/psci-call.S
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * Copyright (C) 2015 ARM Limited
- *
- * Author: Will Deacon <will.deacon@arm.com>
- */
-
-#include <linux/linkage.h>
-
-/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_hvc)
-       hvc     #0
-       ret
-ENDPROC(__invoke_psci_fn_hvc)
-
-/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */
-ENTRY(__invoke_psci_fn_smc)
-       smc     #0
-       ret
-ENDPROC(__invoke_psci_fn_smc)
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index f67f35b6edb12e4d34e1db17750b07a0bec72e39..42816bebb1e0f732d788780fde431028f202c31a 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/smp.h>
 #include <linux/delay.h>
 #include <linux/psci.h>
-#include <linux/slab.h>
 
 #include <uapi/linux/psci.h>
 
 #include <asm/cpu_ops.h>
 #include <asm/errno.h>
 #include <asm/smp_plat.h>
-#include <asm/suspend.h>
-
-static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
-
-static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
-{
-       int i, ret, count = 0;
-       u32 *psci_states;
-       struct device_node *state_node, *cpu_node;
-
-       cpu_node = of_get_cpu_node(cpu, NULL);
-       if (!cpu_node)
-               return -ENODEV;
-
-       /*
-        * If the PSCI cpu_suspend function hook has not been initialized
-        * idle states must not be enabled, so bail out
-        */
-       if (!psci_ops.cpu_suspend)
-               return -EOPNOTSUPP;
-
-       /* Count idle states */
-       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
-                                             count))) {
-               count++;
-               of_node_put(state_node);
-       }
-
-       if (!count)
-               return -ENODEV;
-
-       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
-       if (!psci_states)
-               return -ENOMEM;
-
-       for (i = 0; i < count; i++) {
-               u32 state;
-
-               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
-
-               ret = of_property_read_u32(state_node,
-                                          "arm,psci-suspend-param",
-                                          &state);
-               if (ret) {
-                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
-                               state_node->full_name);
-                       of_node_put(state_node);
-                       goto free_mem;
-               }
-
-               of_node_put(state_node);
-               pr_debug("psci-power-state %#x index %d\n", state, i);
-               if (!psci_power_state_is_valid(state)) {
-                       pr_warn("Invalid PSCI power state %#x\n", state);
-                       ret = -EINVAL;
-                       goto free_mem;
-               }
-               psci_states[i] = state;
-       }
-       /* Idle states parsed correctly, initialize per-cpu pointer */
-       per_cpu(psci_power_state, cpu) = psci_states;
-       return 0;
-
-free_mem:
-       kfree(psci_states);
-       return ret;
-}
 
 static int __init cpu_psci_cpu_init(unsigned int cpu)
 {
@@ -178,38 +110,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
 }
 #endif
 
-static int psci_suspend_finisher(unsigned long index)
-{
-       u32 *state = __this_cpu_read(psci_power_state);
-
-       return psci_ops.cpu_suspend(state[index - 1],
-                                   virt_to_phys(cpu_resume));
-}
-
-static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
-{
-       int ret;
-       u32 *state = __this_cpu_read(psci_power_state);
-       /*
-        * idle state index 0 corresponds to wfi, should never be called
-        * from the cpu_suspend operations
-        */
-       if (WARN_ON_ONCE(!index))
-               return -EINVAL;
-
-       if (!psci_power_state_loses_context(state[index - 1]))
-               ret = psci_ops.cpu_suspend(state[index - 1], 0);
-       else
-               ret = cpu_suspend(index, psci_suspend_finisher);
-
-       return ret;
-}
-
 const struct cpu_operations cpu_psci_ops = {
        .name           = "psci",
 #ifdef CONFIG_CPU_IDLE
-       .cpu_init_idle  = cpu_psci_cpu_init_idle,
-       .cpu_suspend    = cpu_psci_cpu_suspend,
+       .cpu_init_idle  = psci_cpu_init_idle,
+       .cpu_suspend    = psci_cpu_suspend_enter,
 #endif
        .cpu_init       = cpu_psci_cpu_init,
        .cpu_prepare    = cpu_psci_cpu_prepare,
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6591bf23422b3471fa8fb83d04445aac33dd808d..0153c0d8ddb18e9cf94a17db4e230617eaad2b7b 100644 (file)
@@ -175,7 +175,6 @@ static void __init smp_build_mpidr_hash(void)
         */
        if (mpidr_hash_size() > 4 * num_possible_cpus())
                pr_warn("Large number of MPIDR hash buckets detected\n");
-       __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
 }
 
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index e33fe33876ab3804f2c6dcd6c5458e576596ef24..c2bf5a58039f1b7f749912bc18f58bf5007e80fb 100644 (file)
        orr     \dst, \dst, \mask               // dst|=(aff3>>rs3)
        .endm
 /*
- * Save CPU state for a suspend and execute the suspend finisher.
- * On success it will return 0 through cpu_resume - ie through a CPU
- * soft/hard reboot from the reset vector.
- * On failure it returns the suspend finisher return value or force
- * -EOPNOTSUPP if the finisher erroneously returns 0 (the suspend finisher
- * is not allowed to return, if it does this must be considered failure).
- * It saves callee registers, and allocates space on the kernel stack
- * to save the CPU specific registers + some other data for resume.
+ * Save CPU state in the provided sleep_stack_data area, and publish its
+ * location in sleep_save_stash for cpu_resume() to use.
  *
- *  x0 = suspend finisher argument
- *  x1 = suspend finisher function pointer
+ * cpu_resume() will restore this saved state, and return. Because the
+ * link-register is saved and restored, it will appear to return from this
+ * function. So that the caller can tell the suspend/resume paths apart,
+ * __cpu_suspend_enter() will always return a non-zero value, whereas the
+ * path through cpu_resume() will return 0.
+ *
+ *  x0 = struct sleep_stack_data area
  */
 ENTRY(__cpu_suspend_enter)
-       stp     x29, lr, [sp, #-96]!
-       stp     x19, x20, [sp,#16]
-       stp     x21, x22, [sp,#32]
-       stp     x23, x24, [sp,#48]
-       stp     x25, x26, [sp,#64]
-       stp     x27, x28, [sp,#80]
-       /*
-        * Stash suspend finisher and its argument in x20 and x19
-        */
-       mov     x19, x0
-       mov     x20, x1
+       stp     x29, lr, [x0, #SLEEP_STACK_DATA_CALLEE_REGS]
+       stp     x19, x20, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+16]
+       stp     x21, x22, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+32]
+       stp     x23, x24, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+48]
+       stp     x25, x26, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+64]
+       stp     x27, x28, [x0,#SLEEP_STACK_DATA_CALLEE_REGS+80]
+
+       /* save the sp in cpu_suspend_ctx */
        mov     x2, sp
-       sub     sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
-       mov     x0, sp
-       /*
-        * x0 now points to struct cpu_suspend_ctx allocated on the stack
-        */
-       str     x2, [x0, #CPU_CTX_SP]
-       ldr     x1, =sleep_save_sp
-       ldr     x1, [x1, #SLEEP_SAVE_SP_VIRT]
+       str     x2, [x0, #SLEEP_STACK_DATA_SYSTEM_REGS + CPU_CTX_SP]
+
+       /* find the mpidr_hash */
+       ldr     x1, =sleep_save_stash
+       ldr     x1, [x1]
        mrs     x7, mpidr_el1
        ldr     x9, =mpidr_hash
        ldr     x10, [x9, #MPIDR_HASH_MASK]
@@ -93,70 +86,28 @@ ENTRY(__cpu_suspend_enter)
        ldp     w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
        compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
        add     x1, x1, x8, lsl #3
-       bl      __cpu_suspend_save
-       /*
-        * Grab suspend finisher in x20 and its argument in x19
-        */
-       mov     x0, x19
-       mov     x1, x20
-       /*
-        * We are ready for power down, fire off the suspend finisher
-        * in x1, with argument in x0
-        */
-       blr     x1
-        /*
-        * Never gets here, unless suspend finisher fails.
-        * Successful cpu_suspend should return from cpu_resume, returning
-        * through this code path is considered an error
-        * If the return value is set to 0 force x0 = -EOPNOTSUPP
-        * to make sure a proper error condition is propagated
-        */
-       cmp     x0, #0
-       mov     x3, #-EOPNOTSUPP
-       csel    x0, x3, x0, eq
-       add     sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
-       ldp     x19, x20, [sp, #16]
-       ldp     x21, x22, [sp, #32]
-       ldp     x23, x24, [sp, #48]
-       ldp     x25, x26, [sp, #64]
-       ldp     x27, x28, [sp, #80]
-       ldp     x29, lr, [sp], #96
+
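+       /* x1 now points at this cpu's slot: publish the sleep_stack_data address */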
+       str     x0, [x1]
+       add     x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
+       stp     x29, lr, [sp, #-16]!
+       bl      cpu_do_suspend
+       ldp     x29, lr, [sp], #16
+       mov     x0, #1
        ret
 ENDPROC(__cpu_suspend_enter)
        .ltorg
 
-/*
- * x0 must contain the sctlr value retrieved from restored context
- */
-       .pushsection    ".idmap.text", "ax"
-ENTRY(cpu_resume_mmu)
-       ldr     x3, =cpu_resume_after_mmu
-       msr     sctlr_el1, x0           // restore sctlr_el1
-       isb
-       /*
-        * Invalidate the local I-cache so that any instructions fetched
-        * speculatively from the PoC are discarded, since they may have
-        * been dynamically patched at the PoU.
-        */
-       ic      iallu
-       dsb     nsh
-       isb
-       br      x3                      // global jump to virtual address
-ENDPROC(cpu_resume_mmu)
-       .popsection
-cpu_resume_after_mmu:
-       mov     x0, #0                  // return zero on success
-       ldp     x19, x20, [sp, #16]
-       ldp     x21, x22, [sp, #32]
-       ldp     x23, x24, [sp, #48]
-       ldp     x25, x26, [sp, #64]
-       ldp     x27, x28, [sp, #80]
-       ldp     x29, lr, [sp], #96
-       ret
-ENDPROC(cpu_resume_after_mmu)
-
 ENTRY(cpu_resume)
        bl      el2_setup               // if in EL2 drop to EL1 cleanly
+       /* enable the MMU early - so we can access sleep_save_stash by va */
+       adr_l   lr, __enable_mmu        /* __cpu_setup will return here */
+       ldr     x27, =_cpu_resume       /* __enable_mmu will branch here */
+       adrp    x25, idmap_pg_dir
+       adrp    x26, swapper_pg_dir
+       b       __cpu_setup
+ENDPROC(cpu_resume)
+
+ENTRY(_cpu_resume)
        mrs     x1, mpidr_el1
        adrp    x8, mpidr_hash
        add x8, x8, #:lo12:mpidr_hash // x8 = struct mpidr_hash phys address
@@ -166,20 +117,27 @@ ENTRY(cpu_resume)
        ldp     w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
        compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
         /* x7 contains hash index, let's use it to grab context pointer */
-       ldr_l   x0, sleep_save_sp + SLEEP_SAVE_SP_PHYS
+       ldr_l   x0, sleep_save_stash
        ldr     x0, [x0, x7, lsl #3]
+       add     x29, x0, #SLEEP_STACK_DATA_CALLEE_REGS
+       add     x0, x0, #SLEEP_STACK_DATA_SYSTEM_REGS
        /* load sp from context */
        ldr     x2, [x0, #CPU_CTX_SP]
-       /* load physical address of identity map page table in x1 */
-       adrp    x1, idmap_pg_dir
        mov     sp, x2
        /* save thread_info */
        and     x2, x2, #~(THREAD_SIZE - 1)
        msr     sp_el0, x2
        /*
-        * cpu_do_resume expects x0 to contain context physical address
-        * pointer and x1 to contain physical address of 1:1 page tables
+        * cpu_do_resume expects x0 to contain context address pointer
         */
-       bl      cpu_do_resume           // PC relative jump, MMU off
-       b       cpu_resume_mmu          // Resume MMU, never returns
-ENDPROC(cpu_resume)
+       bl      cpu_do_resume
+
+       ldp     x19, x20, [x29, #16]
+       ldp     x21, x22, [x29, #32]
+       ldp     x23, x24, [x29, #48]
+       ldp     x25, x26, [x29, #64]
+       ldp     x27, x28, [x29, #80]
+       ldp     x29, lr, [x29]
+       mov     x0, #0
+       ret
+ENDPROC(_cpu_resume)
diff --git a/arch/arm64/kernel/smccc-call.S b/arch/arm64/kernel/smccc-call.S
new file mode 100644 (file)
index 0000000..ae0496f
--- /dev/null
+++ b/arch/arm64/kernel/smccc-call.S
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License Version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+
+       .macro SMCCC instr
+       .cfi_startproc
+       \instr  #0
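+       /*
+        * 'res' is the ninth argument: AAPCS64 passes only the first eight
+        * arguments in x0-x7, so the result pointer arrives on the stack.
+        */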
+       ldr     x4, [sp]
+       stp     x0, x1, [x4, #ARM_SMCCC_RES_X0_OFFS]
+       stp     x2, x3, [x4, #ARM_SMCCC_RES_X2_OFFS]
+       ret
+       .cfi_endproc
+       .endm
+
+/*
+ * void arm_smccc_smc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *               unsigned long a3, unsigned long a4, unsigned long a5,
+ *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_smc)
+       SMCCC   smc
+ENDPROC(arm_smccc_smc)
+
+/*
+ * void arm_smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2,
+ *               unsigned long a3, unsigned long a4, unsigned long a5,
+ *               unsigned long a6, unsigned long a7, struct arm_smccc_res *res)
+ */
+ENTRY(arm_smccc_hvc)
+       SMCCC   hvc
+ENDPROC(arm_smccc_hvc)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 66055392f445ef47a7fb3749ca6024df1ea185c9..b616e365cee33fdb2e61520943e2d8a9ae372861 100644 (file)
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
 
-extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
 /*
- * This is called by __cpu_suspend_enter() to save the state, and do whatever
- * flushing is required to ensure that when the CPU goes to sleep we have
- * the necessary data available when the caches are not searched.
- *
- * ptr: CPU context virtual address
- * save_ptr: address of the location where the context physical address
- *           must be saved
+ * This is allocated by cpu_suspend_init(), and used to store a pointer to
+ * the 'struct sleep_stack_data' that contains a particular CPU's state.
  */
-void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr,
-                               phys_addr_t *save_ptr)
-{
-       *save_ptr = virt_to_phys(ptr);
-
-       cpu_do_suspend(ptr);
-       /*
-        * Only flush the context that must be retrieved with the MMU
-        * off. VA primitives ensure the flush is applied to all
-        * cache levels so context is pushed to DRAM.
-        */
-       __flush_dcache_area(ptr, sizeof(*ptr));
-       __flush_dcache_area(save_ptr, sizeof(*save_ptr));
-}
+unsigned long *sleep_save_stash;
 
 /*
  * This hook is provided so that cpu_suspend code can restore HW
@@ -51,6 +32,30 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
        hw_breakpoint_restore = hw_bp_restore;
 }
 
+void notrace __cpu_suspend_exit(void)
+{
+       /*
+        * We are resuming from reset with the idmap active in TTBR0_EL1.
+        * We must uninstall the idmap and restore the expected MMU
+        * state before we can possibly return to userspace.
+        */
+       cpu_uninstall_idmap();
+
+       /*
+        * Restore per-cpu offset before any kernel
+        * subsystem relying on it has a chance to run.
+        */
+       set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+
+       /*
+        * Restore HW breakpoint registers to sane values
+        * before debug exceptions are possibly reenabled
+        * through local_dbg_restore.
+        */
+       if (hw_breakpoint_restore)
+               hw_breakpoint_restore(NULL);
+}
+
 /*
  * cpu_suspend
  *
@@ -60,8 +65,9 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
  */
 int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 {
-       int ret;
+       int ret = 0;
        unsigned long flags;
+       struct sleep_stack_data state;
 
        /*
         * From this point debug exceptions are disabled to prevent
@@ -77,34 +83,21 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         */
        pause_graph_tracing();
 
-       /*
-        * mm context saved on the stack, it will be restored when
-        * the cpu comes out of reset through the identity mapped
-        * page tables, so that the thread address space is properly
-        * set-up on function return.
-        */
-       ret = __cpu_suspend_enter(arg, fn);
-       if (ret == 0) {
-               /*
-                * We are resuming from reset with the idmap active in TTBR0_EL1.
-                * We must uninstall the idmap and restore the expected MMU
-                * state before we can possibly return to userspace.
-                */
-               cpu_uninstall_idmap();
+       if (__cpu_suspend_enter(&state)) {
+               /* Call the suspend finisher */
+               ret = fn(arg);
 
                /*
-                * Restore per-cpu offset before any kernel
-                * subsystem relying on it has a chance to run.
+                * Never gets here, unless the suspend finisher fails.
+                * Successful cpu_suspend() should return from cpu_resume();
+                * returning through this code path is considered an error.
+                * If the return value is set to 0, force ret = -EOPNOTSUPP
+                * to make sure a proper error condition is propagated.
                 */
-               set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
-               /*
-                * Restore HW breakpoint registers to sane values
-                * before debug exceptions are possibly reenabled
-                * through local_dbg_restore.
-                */
-               if (hw_breakpoint_restore)
-                       hw_breakpoint_restore(NULL);
+               if (!ret)
+                       ret = -EOPNOTSUPP;
+       } else {
+               __cpu_suspend_exit();
        }
 
        unpause_graph_tracing();
@@ -119,22 +112,15 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
        return ret;
 }
 
-struct sleep_save_sp sleep_save_sp;
-
 static int __init cpu_suspend_init(void)
 {
-       void *ctx_ptr;
-
        /* sleep_save_stash is an array of pointers to each cpu's sleep_stack_data */
-       ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+       sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
+                                  GFP_KERNEL);
 
-       if (WARN_ON(!ctx_ptr))
+       if (WARN_ON(!sleep_save_stash))
                return -ENOMEM;
 
-       sleep_save_sp.save_ptr_stash = ctx_ptr;
-       sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
-       __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
-
        return 0;
 }
 early_initcall(cpu_suspend_init);
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9d25a1058c65d7b22b9ccadf0c2fec1989580beb..7a5228c7abdd7cc9c8226df887a2ec9e7b4d053d 100644 (file)
@@ -46,6 +46,16 @@ jiffies = jiffies_64;
        *(.idmap.text)                                  \
        VMLINUX_SYMBOL(__idmap_text_end) = .;
 
+#ifdef CONFIG_HIBERNATION
+#define HIBERNATE_TEXT                                 \
+       . = ALIGN(SZ_4K);                               \
+       VMLINUX_SYMBOL(__hibernate_exit_text_start) = .;\
+       *(.hibernate_exit.text)                         \
+       VMLINUX_SYMBOL(__hibernate_exit_text_end) = .;
+#else
+#define HIBERNATE_TEXT
+#endif
+
 /*
  * The size of the PE/COFF section that covers the kernel image, which
  * runs from stext to _edata, must be a round multiple of the PE/COFF
@@ -115,6 +125,7 @@ SECTIONS
                        KPROBES_TEXT
                        HYPERVISOR_TEXT
                        IDMAP_TEXT
+                       HIBERNATE_TEXT
                        *(.fixup)
                        *(.gnu.warning)
                . = ALIGN(16);
@@ -203,6 +214,10 @@ ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "HYP init code too big or misaligned")
 ASSERT(__idmap_text_end - (__idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K,
        "ID map text too big or misaligned")
+#ifdef CONFIG_HIBERNATION
+ASSERT(__hibernate_exit_text_end - (__hibernate_exit_text_start & ~(SZ_4K - 1))
+       <= SZ_4K, "Hibernate exit text too big or misaligned")
+#endif
 
 /*
  * If padding is applied before .head.text, virt<->phys conversions will fail.
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index 1949fe5f54246a3f14983753570f4e4d0f978e76..caee9ee8e12af1eef4c13ebd1f750d7a3b5ee7f2 100644 (file)
@@ -10,6 +10,7 @@ KVM=../../../virt/kvm
 ARM=../../../arch/arm/kvm
 
 obj-$(CONFIG_KVM_ARM_HOST) += kvm.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
 
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(ARM)/arm.o $(ARM)/mmu.o $(ARM)/mmio.o
@@ -22,8 +23,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generi
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v2-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
-kvm-$(CONFIG_KVM_ARM_HOST) += vgic-v3-switch.o
 kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 3039f080e2d5820ed4446aac0871dee42ffa5ad2..e5ee8880d5d9b2014ea6859ea455daa33beb2ebd 100644 (file)
@@ -28,7 +28,6 @@
 #include <asm/cputype.h>
 #include <asm/uaccess.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 15f0477b0d2adc53d86573b1733d2fa7f368bbd9..198cf10b262d85d5df12963366eeda8d41a5ef83 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/esr.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 84c338f017b2121b0000484bbca981cbabc49abb..034d152c3fbe7a6afce7f7ee827b1d6825b84cde 100644 (file)
@@ -21,6 +21,7 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_mmu.h>
 #include <asm/pgtable-hwdef.h>
+#include <asm/sysreg.h>
 
        .text
        .pushsection    .hyp.idmap.text, "ax"
@@ -96,6 +97,14 @@ __do_hyp_init:
 
        ldr     x4, =VTCR_EL2_FLAGS
        bfi     x4, x5, #16, #3
+       /*
+        * Read the VMIDBits field from ID_AA64MMFR1_EL1 and set the VS bit in
+        * VTCR_EL2.
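+        * (VMIDBits is ID_AA64MMFR1_EL1 bits [7:4]; bit 5 of the register is
+        * set only for the 16-bit VMID encoding 0b0010.)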
+        */
+       mrs     x5, ID_AA64MMFR1_EL1
+       ubfx    x5, x5, #5, #1
+       lsl     x5, x5, #VTCR_EL2_VS
+       orr     x4, x4, x5
 
        msr     vtcr_el2, x4
 
@@ -108,8 +117,8 @@ __do_hyp_init:
        dsb     sy
 
        mrs     x4, sctlr_el2
-       and     x4, x4, #SCTLR_EL2_EE   // preserve endianness of EL2
-       ldr     x5, =SCTLR_EL2_FLAGS
+       and     x4, x4, #SCTLR_ELx_EE   // preserve endianness of EL2
+       ldr     x5, =SCTLR_ELx_FLAGS
        orr     x4, x4, x5
        msr     sctlr_el2, x4
        isb
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 309e3479dc2c48fb47ca28a44b81e9d06b73e7bf..48f19a37b3df552a08db7b186a65ae4fad4b32e4 100644 (file)
 #include <linux/linkage.h>
 
 #include <asm/alternative.h>
-#include <asm/asm-offsets.h>
 #include <asm/assembler.h>
 #include <asm/cpufeature.h>
-#include <asm/debug-monitors.h>
-#include <asm/esr.h>
-#include <asm/fpsimdmacros.h>
-#include <asm/kvm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-#include <asm/memory.h>
-
-#define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
-#define CPU_XREG_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
-#define CPU_SPSR_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
-#define CPU_SYSREG_OFFSET(x)   (CPU_SYSREGS + 8*x)
-
-       .text
-       .pushsection    .hyp.text, "ax"
-       .align  PAGE_SHIFT
-
-.macro save_common_regs
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       add     x3, x2, #CPU_XREG_OFFSET(19)
-       stp     x19, x20, [x3]
-       stp     x21, x22, [x3, #16]
-       stp     x23, x24, [x3, #32]
-       stp     x25, x26, [x3, #48]
-       stp     x27, x28, [x3, #64]
-       stp     x29, lr, [x3, #80]
-
-       mrs     x19, sp_el0
-       mrs     x20, elr_el2            // pc before entering el2
-       mrs     x21, spsr_el2           // pstate before entering el2
-
-       stp     x19, x20, [x3, #96]
-       str     x21, [x3, #112]
-
-       mrs     x22, sp_el1
-       mrs     x23, elr_el1
-       mrs     x24, spsr_el1
-
-       str     x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-       str     x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-       str     x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-.endm
-
-.macro restore_common_regs
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       ldr     x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-       ldr     x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
-       ldr     x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
-
-       msr     sp_el1, x22
-       msr     elr_el1, x23
-       msr     spsr_el1, x24
-
-       add     x3, x2, #CPU_XREG_OFFSET(31)    // SP_EL0
-       ldp     x19, x20, [x3]
-       ldr     x21, [x3, #16]
-
-       msr     sp_el0, x19
-       msr     elr_el2, x20            // pc on return from el2
-       msr     spsr_el2, x21           // pstate on return from el2
-
-       add     x3, x2, #CPU_XREG_OFFSET(19)
-       ldp     x19, x20, [x3]
-       ldp     x21, x22, [x3, #16]
-       ldp     x23, x24, [x3, #32]
-       ldp     x25, x26, [x3, #48]
-       ldp     x27, x28, [x3, #64]
-       ldp     x29, lr, [x3, #80]
-.endm
-
-.macro save_host_regs
-       save_common_regs
-.endm
-
-.macro restore_host_regs
-       restore_common_regs
-.endm
-
-.macro save_fpsimd
-       // x2: cpu context address
-       // x3, x4: tmp regs
-       add     x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-       fpsimd_save x3, 4
-.endm
-
-.macro restore_fpsimd
-       // x2: cpu context address
-       // x3, x4: tmp regs
-       add     x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
-       fpsimd_restore x3, 4
-.endm
-
-.macro save_guest_regs
-       // x0 is the vcpu address
-       // x1 is the return code, do not corrupt!
-       // x2 is the cpu context
-       // x3 is a tmp register
-       // Guest's x0-x3 are on the stack
-
-       // Compute base to save registers
-       add     x3, x2, #CPU_XREG_OFFSET(4)
-       stp     x4, x5, [x3]
-       stp     x6, x7, [x3, #16]
-       stp     x8, x9, [x3, #32]
-       stp     x10, x11, [x3, #48]
-       stp     x12, x13, [x3, #64]
-       stp     x14, x15, [x3, #80]
-       stp     x16, x17, [x3, #96]
-       str     x18, [x3, #112]
-
-       pop     x6, x7                  // x2, x3
-       pop     x4, x5                  // x0, x1
-
-       add     x3, x2, #CPU_XREG_OFFSET(0)
-       stp     x4, x5, [x3]
-       stp     x6, x7, [x3, #16]
-
-       save_common_regs
-.endm
-
-.macro restore_guest_regs
-       // x0 is the vcpu address.
-       // x2 is the cpu context
-       // x3 is a tmp register
-
-       // Prepare x0-x3 for later restore
-       add     x3, x2, #CPU_XREG_OFFSET(0)
-       ldp     x4, x5, [x3]
-       ldp     x6, x7, [x3, #16]
-       push    x4, x5          // Push x0-x3 on the stack
-       push    x6, x7
-
-       // x4-x18
-       ldp     x4, x5, [x3, #32]
-       ldp     x6, x7, [x3, #48]
-       ldp     x8, x9, [x3, #64]
-       ldp     x10, x11, [x3, #80]
-       ldp     x12, x13, [x3, #96]
-       ldp     x14, x15, [x3, #112]
-       ldp     x16, x17, [x3, #128]
-       ldr     x18, [x3, #144]
-
-       // x19-x29, lr, sp*, elr*, spsr*
-       restore_common_regs
-
-       // Last bits of the 64bit state
-       pop     x2, x3
-       pop     x0, x1
-
-       // Do not touch any register after this!
-.endm
-
-/*
- * Macros to perform system register save/restore.
- *
- * Ordering here is absolutely critical, and must be kept consistent
- * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
- * and in kvm_asm.h.
- *
- * In other words, don't touch any of these unless you know what
- * you are doing.
- */
-.macro save_sysregs
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
-       mrs     x4,     vmpidr_el2
-       mrs     x5,     csselr_el1
-       mrs     x6,     sctlr_el1
-       mrs     x7,     actlr_el1
-       mrs     x8,     cpacr_el1
-       mrs     x9,     ttbr0_el1
-       mrs     x10,    ttbr1_el1
-       mrs     x11,    tcr_el1
-       mrs     x12,    esr_el1
-       mrs     x13,    afsr0_el1
-       mrs     x14,    afsr1_el1
-       mrs     x15,    far_el1
-       mrs     x16,    mair_el1
-       mrs     x17,    vbar_el1
-       mrs     x18,    contextidr_el1
-       mrs     x19,    tpidr_el0
-       mrs     x20,    tpidrro_el0
-       mrs     x21,    tpidr_el1
-       mrs     x22,    amair_el1
-       mrs     x23,    cntkctl_el1
-       mrs     x24,    par_el1
-       mrs     x25,    mdscr_el1
-
-       stp     x4, x5, [x3]
-       stp     x6, x7, [x3, #16]
-       stp     x8, x9, [x3, #32]
-       stp     x10, x11, [x3, #48]
-       stp     x12, x13, [x3, #64]
-       stp     x14, x15, [x3, #80]
-       stp     x16, x17, [x3, #96]
-       stp     x18, x19, [x3, #112]
-       stp     x20, x21, [x3, #128]
-       stp     x22, x23, [x3, #144]
-       stp     x24, x25, [x3, #160]
-.endm
-
-.macro save_debug type
-       // x4: pointer to register set
-       // x5: number of registers to skip
-       // x6..x22 trashed
-
-       adr     x22, 1f
-       add     x22, x22, x5, lsl #2
-       br      x22
-1:
-       mrs     x21, \type\()15_el1
-       mrs     x20, \type\()14_el1
-       mrs     x19, \type\()13_el1
-       mrs     x18, \type\()12_el1
-       mrs     x17, \type\()11_el1
-       mrs     x16, \type\()10_el1
-       mrs     x15, \type\()9_el1
-       mrs     x14, \type\()8_el1
-       mrs     x13, \type\()7_el1
-       mrs     x12, \type\()6_el1
-       mrs     x11, \type\()5_el1
-       mrs     x10, \type\()4_el1
-       mrs     x9, \type\()3_el1
-       mrs     x8, \type\()2_el1
-       mrs     x7, \type\()1_el1
-       mrs     x6, \type\()0_el1
-
-       adr     x22, 1f
-       add     x22, x22, x5, lsl #2
-       br      x22
-1:
-       str     x21, [x4, #(15 * 8)]
-       str     x20, [x4, #(14 * 8)]
-       str     x19, [x4, #(13 * 8)]
-       str     x18, [x4, #(12 * 8)]
-       str     x17, [x4, #(11 * 8)]
-       str     x16, [x4, #(10 * 8)]
-       str     x15, [x4, #(9 * 8)]
-       str     x14, [x4, #(8 * 8)]
-       str     x13, [x4, #(7 * 8)]
-       str     x12, [x4, #(6 * 8)]
-       str     x11, [x4, #(5 * 8)]
-       str     x10, [x4, #(4 * 8)]
-       str     x9, [x4, #(3 * 8)]
-       str     x8, [x4, #(2 * 8)]
-       str     x7, [x4, #(1 * 8)]
-       str     x6, [x4, #(0 * 8)]
-.endm
-
-.macro restore_sysregs
-       // x2: base address for cpu context
-       // x3: tmp register
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)
-
-       ldp     x4, x5, [x3]
-       ldp     x6, x7, [x3, #16]
-       ldp     x8, x9, [x3, #32]
-       ldp     x10, x11, [x3, #48]
-       ldp     x12, x13, [x3, #64]
-       ldp     x14, x15, [x3, #80]
-       ldp     x16, x17, [x3, #96]
-       ldp     x18, x19, [x3, #112]
-       ldp     x20, x21, [x3, #128]
-       ldp     x22, x23, [x3, #144]
-       ldp     x24, x25, [x3, #160]
-
-       msr     vmpidr_el2,     x4
-       msr     csselr_el1,     x5
-       msr     sctlr_el1,      x6
-       msr     actlr_el1,      x7
-       msr     cpacr_el1,      x8
-       msr     ttbr0_el1,      x9
-       msr     ttbr1_el1,      x10
-       msr     tcr_el1,        x11
-       msr     esr_el1,        x12
-       msr     afsr0_el1,      x13
-       msr     afsr1_el1,      x14
-       msr     far_el1,        x15
-       msr     mair_el1,       x16
-       msr     vbar_el1,       x17
-       msr     contextidr_el1, x18
-       msr     tpidr_el0,      x19
-       msr     tpidrro_el0,    x20
-       msr     tpidr_el1,      x21
-       msr     amair_el1,      x22
-       msr     cntkctl_el1,    x23
-       msr     par_el1,        x24
-       msr     mdscr_el1,      x25
-.endm
-
-.macro restore_debug type
-       // x4: pointer to register set
-       // x5: number of registers to skip
-       // x6..x22 trashed
-
-       adr     x22, 1f
-       add     x22, x22, x5, lsl #2
-       br      x22
-1:
-       ldr     x21, [x4, #(15 * 8)]
-       ldr     x20, [x4, #(14 * 8)]
-       ldr     x19, [x4, #(13 * 8)]
-       ldr     x18, [x4, #(12 * 8)]
-       ldr     x17, [x4, #(11 * 8)]
-       ldr     x16, [x4, #(10 * 8)]
-       ldr     x15, [x4, #(9 * 8)]
-       ldr     x14, [x4, #(8 * 8)]
-       ldr     x13, [x4, #(7 * 8)]
-       ldr     x12, [x4, #(6 * 8)]
-       ldr     x11, [x4, #(5 * 8)]
-       ldr     x10, [x4, #(4 * 8)]
-       ldr     x9, [x4, #(3 * 8)]
-       ldr     x8, [x4, #(2 * 8)]
-       ldr     x7, [x4, #(1 * 8)]
-       ldr     x6, [x4, #(0 * 8)]
-
-       adr     x22, 1f
-       add     x22, x22, x5, lsl #2
-       br      x22
-1:
-       msr     \type\()15_el1, x21
-       msr     \type\()14_el1, x20
-       msr     \type\()13_el1, x19
-       msr     \type\()12_el1, x18
-       msr     \type\()11_el1, x17
-       msr     \type\()10_el1, x16
-       msr     \type\()9_el1, x15
-       msr     \type\()8_el1, x14
-       msr     \type\()7_el1, x13
-       msr     \type\()6_el1, x12
-       msr     \type\()5_el1, x11
-       msr     \type\()4_el1, x10
-       msr     \type\()3_el1, x9
-       msr     \type\()2_el1, x8
-       msr     \type\()1_el1, x7
-       msr     \type\()0_el1, x6
-.endm
-
-.macro skip_32bit_state tmp, target
-       // Skip 32bit state if not needed
-       mrs     \tmp, hcr_el2
-       tbnz    \tmp, #HCR_RW_SHIFT, \target
-.endm
-
-.macro skip_tee_state tmp, target
-       // Skip ThumbEE state if not needed
-       mrs     \tmp, id_pfr0_el1
-       tbz     \tmp, #12, \target
-.endm
-
-.macro skip_debug_state tmp, target
-       ldr     \tmp, [x0, #VCPU_DEBUG_FLAGS]
-       tbz     \tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
-.endm
-
-/*
- * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
- */
-.macro skip_fpsimd_state tmp, target
-       mrs     \tmp, cptr_el2
-       tbnz    \tmp, #CPTR_EL2_TFP_SHIFT, \target
-.endm
-
-.macro compute_debug_state target
-       // Compute debug state: If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
-       // is set, we do a full save/restore cycle and disable trapping.
-       add     x25, x0, #VCPU_CONTEXT
-
-       // Check the state of MDSCR_EL1
-       ldr     x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
-       and     x26, x25, #DBG_MDSCR_KDE
-       and     x25, x25, #DBG_MDSCR_MDE
-       adds    xzr, x25, x26
-       b.eq    9998f           // Nothing to see there
-
-       // If any interesting bits was set, we must set the flag
-       mov     x26, #KVM_ARM64_DEBUG_DIRTY
-       str     x26, [x0, #VCPU_DEBUG_FLAGS]
-       b       9999f           // Don't skip restore
-
-9998:
-       // Otherwise load the flags from memory in case we recently
-       // trapped
-       skip_debug_state x25, \target
-9999:
-.endm
-
-.macro save_guest_32bit_state
-       skip_32bit_state x3, 1f
-
-       add     x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
-       mrs     x4, spsr_abt
-       mrs     x5, spsr_und
-       mrs     x6, spsr_irq
-       mrs     x7, spsr_fiq
-       stp     x4, x5, [x3]
-       stp     x6, x7, [x3, #16]
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
-       mrs     x4, dacr32_el2
-       mrs     x5, ifsr32_el2
-       stp     x4, x5, [x3]
-
-       skip_fpsimd_state x8, 2f
-       mrs     x6, fpexc32_el2
-       str     x6, [x3, #16]
-2:
-       skip_debug_state x8, 1f
-       mrs     x7, dbgvcr32_el2
-       str     x7, [x3, #24]
-1:
-.endm
-
-.macro restore_guest_32bit_state
-       skip_32bit_state x3, 1f
-
-       add     x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
-       ldp     x4, x5, [x3]
-       ldp     x6, x7, [x3, #16]
-       msr     spsr_abt, x4
-       msr     spsr_und, x5
-       msr     spsr_irq, x6
-       msr     spsr_fiq, x7
-
-       add     x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
-       ldp     x4, x5, [x3]
-       msr     dacr32_el2, x4
-       msr     ifsr32_el2, x5
-
-       skip_debug_state x8, 1f
-       ldr     x7, [x3, #24]
-       msr     dbgvcr32_el2, x7
-1:
-.endm
-
-.macro activate_traps
-       ldr     x2, [x0, #VCPU_HCR_EL2]
-
-       /*
-        * We are about to set CPTR_EL2.TFP to trap all floating point
-        * register accesses to EL2, however, the ARM ARM clearly states that
-        * traps are only taken to EL2 if the operation would not otherwise
-        * trap to EL1.  Therefore, always make sure that for 32-bit guests,
-        * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
-        */
-       tbnz    x2, #HCR_RW_SHIFT, 99f // open code skip_32bit_state
-       mov     x3, #(1 << 30)
-       msr     fpexc32_el2, x3
-       isb
-99:
-       msr     hcr_el2, x2
-       mov     x2, #CPTR_EL2_TTA
-       orr     x2, x2, #CPTR_EL2_TFP
-       msr     cptr_el2, x2
-
-       mov     x2, #(1 << 15)  // Trap CP15 Cr=15
-       msr     hstr_el2, x2
-
-       // Monitor Debug Config - see kvm_arm_setup_debug()
-       ldr     x2, [x0, #VCPU_MDCR_EL2]
-       msr     mdcr_el2, x2
-.endm
-
-.macro deactivate_traps
-       mov     x2, #HCR_RW
-       msr     hcr_el2, x2
-       msr     hstr_el2, xzr
-
-       mrs     x2, mdcr_el2
-       and     x2, x2, #MDCR_EL2_HPMN_MASK
-       msr     mdcr_el2, x2
-.endm
-
-.macro activate_vm
-       ldr     x1, [x0, #VCPU_KVM]
-       kern_hyp_va     x1
-       ldr     x2, [x1, #KVM_VTTBR]
-       msr     vttbr_el2, x2
-.endm
-
-.macro deactivate_vm
-       msr     vttbr_el2, xzr
-.endm
-
-/*
- * Call into the vgic backend for state saving
- */
-.macro save_vgic_state
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
-       bl      __save_vgic_v2_state
-alternative_else
-       bl      __save_vgic_v3_state
-alternative_endif
-       mrs     x24, hcr_el2
-       mov     x25, #HCR_INT_OVERRIDE
-       neg     x25, x25
-       and     x24, x24, x25
-       msr     hcr_el2, x24
-.endm
-
-/*
- * Call into the vgic backend for state restoring
- */
-.macro restore_vgic_state
-       mrs     x24, hcr_el2
-       ldr     x25, [x0, #VCPU_IRQ_LINES]
-       orr     x24, x24, #HCR_INT_OVERRIDE
-       orr     x24, x24, x25
-       msr     hcr_el2, x24
-alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
-       bl      __restore_vgic_v2_state
-alternative_else
-       bl      __restore_vgic_v3_state
-alternative_endif
-.endm
-
-.macro save_timer_state
-       // x0: vcpu pointer
-       ldr     x2, [x0, #VCPU_KVM]
-       kern_hyp_va x2
-       ldr     w3, [x2, #KVM_TIMER_ENABLED]
-       cbz     w3, 1f
-
-       mrs     x3, cntv_ctl_el0
-       and     x3, x3, #3
-       str     w3, [x0, #VCPU_TIMER_CNTV_CTL]
-
-       isb
-
-       mrs     x3, cntv_cval_el0
-       str     x3, [x0, #VCPU_TIMER_CNTV_CVAL]
-
-1:
-       // Disable the virtual timer
-       msr     cntv_ctl_el0, xzr
-
-       // Allow physical timer/counter access for the host
-       mrs     x2, cnthctl_el2
-       orr     x2, x2, #3
-       msr     cnthctl_el2, x2
-
-       // Clear cntvoff for the host
-       msr     cntvoff_el2, xzr
-.endm
-
-.macro restore_timer_state
-       // x0: vcpu pointer
-       // Disallow physical timer access for the guest
-       // Physical counter access is allowed
-       mrs     x2, cnthctl_el2
-       orr     x2, x2, #1
-       bic     x2, x2, #2
-       msr     cnthctl_el2, x2
-
-       ldr     x2, [x0, #VCPU_KVM]
-       kern_hyp_va x2
-       ldr     w3, [x2, #KVM_TIMER_ENABLED]
-       cbz     w3, 1f
-
-       ldr     x3, [x2, #KVM_TIMER_CNTVOFF]
-       msr     cntvoff_el2, x3
-       ldr     x2, [x0, #VCPU_TIMER_CNTV_CVAL]
-       msr     cntv_cval_el0, x2
-       isb
-
-       ldr     w2, [x0, #VCPU_TIMER_CNTV_CTL]
-       and     x2, x2, #3
-       msr     cntv_ctl_el0, x2
-1:
-.endm
-
-__save_sysregs:
-       save_sysregs
-       ret
-
-__restore_sysregs:
-       restore_sysregs
-       ret
-
-/* Save debug state */
-__save_debug:
-       // x2: ptr to CPU context
-       // x3: ptr to debug reg struct
-       // x4/x5/x6-22/x24-26: trashed
-
-       mrs     x26, id_aa64dfr0_el1
-       ubfx    x24, x26, #12, #4       // Extract BRPs
-       ubfx    x25, x26, #20, #4       // Extract WRPs
-       mov     w26, #15
-       sub     w24, w26, w24           // How many BPs to skip
-       sub     w25, w26, w25           // How many WPs to skip
-
-       mov     x5, x24
-       add     x4, x3, #DEBUG_BCR
-       save_debug dbgbcr
-       add     x4, x3, #DEBUG_BVR
-       save_debug dbgbvr
-
-       mov     x5, x25
-       add     x4, x3, #DEBUG_WCR
-       save_debug dbgwcr
-       add     x4, x3, #DEBUG_WVR
-       save_debug dbgwvr
-
-       mrs     x21, mdccint_el1
-       str     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-       ret
-
-/* Restore debug state */
-__restore_debug:
-       // x2: ptr to CPU context
-       // x3: ptr to debug reg struct
-       // x4/x5/x6-22/x24-26: trashed
-
-       mrs     x26, id_aa64dfr0_el1
-       ubfx    x24, x26, #12, #4       // Extract BRPs
-       ubfx    x25, x26, #20, #4       // Extract WRPs
-       mov     w26, #15
-       sub     w24, w26, w24           // How many BPs to skip
-       sub     w25, w26, w25           // How many WPs to skip
-
-       mov     x5, x24
-       add     x4, x3, #DEBUG_BCR
-       restore_debug dbgbcr
-       add     x4, x3, #DEBUG_BVR
-       restore_debug dbgbvr
-
-       mov     x5, x25
-       add     x4, x3, #DEBUG_WCR
-       restore_debug dbgwcr
-       add     x4, x3, #DEBUG_WVR
-       restore_debug dbgwvr
-
-       ldr     x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
-       msr     mdccint_el1, x21
-
-       ret
-
-__save_fpsimd:
-       skip_fpsimd_state x3, 1f
-       save_fpsimd
-1:     ret
-
-__restore_fpsimd:
-       skip_fpsimd_state x3, 1f
-       restore_fpsimd
-1:     ret
-
-switch_to_guest_fpsimd:
-       push    x4, lr
-
-       mrs     x2, cptr_el2
-       bic     x2, x2, #CPTR_EL2_TFP
-       msr     cptr_el2, x2
-       isb
-
-       mrs     x0, tpidr_el2
-
-       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
-       kern_hyp_va x2
-       bl __save_fpsimd
-
-       add     x2, x0, #VCPU_CONTEXT
-       bl __restore_fpsimd
-
-       skip_32bit_state x3, 1f
-       ldr     x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
-       msr     fpexc32_el2, x4
-1:
-       pop     x4, lr
-       pop     x2, x3
-       pop     x0, x1
-
-       eret
-
-/*
- * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
- *
- * This is the world switch. The first half of the function
- * deals with entering the guest, and anything from __kvm_vcpu_return
- * to the end of the function deals with reentering the host.
- * On the enter path, only x0 (vcpu pointer) must be preserved until
- * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
- * code) must both be preserved until the epilogue.
- * In both cases, x2 points to the CPU context we're saving/restoring from/to.
- */
-ENTRY(__kvm_vcpu_run)
-       kern_hyp_va     x0
-       msr     tpidr_el2, x0   // Save the vcpu register
-
-       // Host context
-       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
-       kern_hyp_va x2
-
-       save_host_regs
-       bl __save_sysregs
-
-       compute_debug_state 1f
-       add     x3, x0, #VCPU_HOST_DEBUG_STATE
-       bl      __save_debug
-1:
-       activate_traps
-       activate_vm
-
-       restore_vgic_state
-       restore_timer_state
-
-       // Guest context
-       add     x2, x0, #VCPU_CONTEXT
-
-       // We must restore the 32-bit state before the sysregs, thanks
-       // to Cortex-A57 erratum #852523.
-       restore_guest_32bit_state
-       bl __restore_sysregs
-
-       skip_debug_state x3, 1f
-       ldr     x3, [x0, #VCPU_DEBUG_PTR]
-       kern_hyp_va x3
-       bl      __restore_debug
-1:
-       restore_guest_regs
-
-       // That's it, no more messing around.
-       eret
-
-__kvm_vcpu_return:
-       // Assume x0 is the vcpu pointer, x1 the return code
-       // Guest's x0-x3 are on the stack
-
-       // Guest context
-       add     x2, x0, #VCPU_CONTEXT
-
-       save_guest_regs
-       bl __save_fpsimd
-       bl __save_sysregs
-
-       skip_debug_state x3, 1f
-       ldr     x3, [x0, #VCPU_DEBUG_PTR]
-       kern_hyp_va x3
-       bl      __save_debug
-1:
-       save_guest_32bit_state
-
-       save_timer_state
-       save_vgic_state
-
-       deactivate_traps
-       deactivate_vm
-
-       // Host context
-       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
-       kern_hyp_va x2
-
-       bl __restore_sysregs
-       bl __restore_fpsimd
-       /* Clear FPSIMD and Trace trapping */
-       msr     cptr_el2, xzr
-
-       skip_debug_state x3, 1f
-       // Clear the dirty flag for the next run, as all the state has
-       // already been saved. Note that we nuke the whole 64bit word.
-       // If we ever add more flags, we'll have to be more careful...
-       str     xzr, [x0, #VCPU_DEBUG_FLAGS]
-       add     x3, x0, #VCPU_HOST_DEBUG_STATE
-       bl      __restore_debug
-1:
-       restore_host_regs
-
-       mov     x0, x1
-       ret
-END(__kvm_vcpu_run)
-
-// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
-ENTRY(__kvm_tlb_flush_vmid_ipa)
-       dsb     ishst
-
-       kern_hyp_va     x0
-       ldr     x2, [x0, #KVM_VTTBR]
-       msr     vttbr_el2, x2
-       isb
-
-       /*
-        * We could do so much better if we had the VA as well.
-        * Instead, we invalidate Stage-2 for this IPA, and the
-        * whole of Stage-1. Weep...
-        */
-       lsr     x1, x1, #12
-       tlbi    ipas2e1is, x1
-       /*
-        * We have to ensure completion of the invalidation at Stage-2,
-        * since a table walk on another CPU could refill a TLB with a
-        * complete (S1 + S2) walk based on the old Stage-2 mapping if
-        * the Stage-1 invalidation happened first.
-        */
-       dsb     ish
-       tlbi    vmalle1is
-       dsb     ish
-       isb
-
-       msr     vttbr_el2, xzr
-       ret
-ENDPROC(__kvm_tlb_flush_vmid_ipa)
-
-/**
- * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
- * @struct kvm *kvm - pointer to kvm structure
- *
- * Invalidates all Stage 1 and 2 TLB entries for current VMID.
- */
-ENTRY(__kvm_tlb_flush_vmid)
-       dsb     ishst
-
-       kern_hyp_va     x0
-       ldr     x2, [x0, #KVM_VTTBR]
-       msr     vttbr_el2, x2
-       isb
-
-       tlbi    vmalls12e1is
-       dsb     ish
-       isb
-
-       msr     vttbr_el2, xzr
-       ret
-ENDPROC(__kvm_tlb_flush_vmid)
-
-ENTRY(__kvm_flush_vm_context)
-       dsb     ishst
-       tlbi    alle1is
-       ic      ialluis
-       dsb     ish
-       ret
-ENDPROC(__kvm_flush_vm_context)
-
-__kvm_hyp_panic:
-       // Stash PAR_EL1 before corrupting it in __restore_sysregs
-       mrs     x0, par_el1
-       push    x0, xzr
-
-       // Guess the context by looking at VTTBR:
-       // If zero, then we're already a host.
-       // Otherwise restore a minimal host context before panicking.
-       mrs     x0, vttbr_el2
-       cbz     x0, 1f
-
-       mrs     x0, tpidr_el2
-
-       deactivate_traps
-       deactivate_vm
-
-       ldr     x2, [x0, #VCPU_HOST_CONTEXT]
-       kern_hyp_va x2
-
-       bl __restore_sysregs
-
-       /*
-        * Make sure we have a valid host stack, and don't leave junk in the
-        * frame pointer that will give us a misleading host stack unwinding.
-        */
-       ldr     x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
-       msr     sp_el1, x22
-       mov     x29, xzr
-
-1:     adr     x0, __hyp_panic_str
-       adr     x1, 2f
-       ldp     x2, x3, [x1]
-       sub     x0, x0, x2
-       add     x0, x0, x3
-       mrs     x1, spsr_el2
-       mrs     x2, elr_el2
-       mrs     x3, esr_el2
-       mrs     x4, far_el2
-       mrs     x5, hpfar_el2
-       pop     x6, xzr         // active context PAR_EL1
-       mrs     x7, tpidr_el2
-
-       mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
-                     PSR_MODE_EL1h)
-       msr     spsr_el2, lr
-       ldr     lr, =panic
-       msr     elr_el2, lr
-       eret
-
-       .align  3
-2:     .quad   HYP_PAGE_OFFSET
-       .quad   PAGE_OFFSET
-ENDPROC(__kvm_hyp_panic)
-
-__hyp_panic_str:
-       .ascii  "HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"
-
-       .align  2
 
 /*
  * u64 __kvm_call_hyp(void *hypfn, ...);
@@ -934,189 +33,18 @@ __hyp_panic_str:
  * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
  * function pointer can be passed).  The function being called must be mapped
  * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in r0 and r1.
+ * passed in x0.
  *
  * A function pointer with a value of 0 has a special meaning, and is
  * used to implement __hyp_get_vectors in the same way as in
  * arch/arm64/kernel/hyp_stub.S.
  */
 ENTRY(__kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        hvc     #0
        ret
-ENDPROC(__kvm_call_hyp)
-
-.macro invalid_vector  label, target
-       .align  2
-\label:
-       b \target
-ENDPROC(\label)
-.endm
-
-       /* None of these should ever happen */
-       invalid_vector  el2t_sync_invalid, __kvm_hyp_panic
-       invalid_vector  el2t_irq_invalid, __kvm_hyp_panic
-       invalid_vector  el2t_fiq_invalid, __kvm_hyp_panic
-       invalid_vector  el2t_error_invalid, __kvm_hyp_panic
-       invalid_vector  el2h_sync_invalid, __kvm_hyp_panic
-       invalid_vector  el2h_irq_invalid, __kvm_hyp_panic
-       invalid_vector  el2h_fiq_invalid, __kvm_hyp_panic
-       invalid_vector  el2h_error_invalid, __kvm_hyp_panic
-       invalid_vector  el1_sync_invalid, __kvm_hyp_panic
-       invalid_vector  el1_irq_invalid, __kvm_hyp_panic
-       invalid_vector  el1_fiq_invalid, __kvm_hyp_panic
-       invalid_vector  el1_error_invalid, __kvm_hyp_panic
-
-el1_sync:                                      // Guest trapped into EL2
-       push    x0, x1
-       push    x2, x3
-
-       mrs     x1, esr_el2
-       lsr     x2, x1, #ESR_ELx_EC_SHIFT
-
-       cmp     x2, #ESR_ELx_EC_HVC64
-       b.ne    el1_trap
-
-       mrs     x3, vttbr_el2                   // If vttbr is valid, the 64bit guest
-       cbnz    x3, el1_trap                    // called HVC
-
-       /* Here, we're pretty sure the host called HVC. */
-       pop     x2, x3
-       pop     x0, x1
-
-       /* Check for __hyp_get_vectors */
-       cbnz    x0, 1f
-       mrs     x0, vbar_el2
-       b       2f
-
-1:     push    lr, xzr
-
-       /*
-        * Compute the function address in EL2, and shuffle the parameters.
-        */
-       kern_hyp_va     x0
-       mov     lr, x0
-       mov     x0, x1
-       mov     x1, x2
-       mov     x2, x3
-       blr     lr
-
-       pop     lr, xzr
-2:     eret
-
-el1_trap:
-       /*
-        * x1: ESR
-        * x2: ESR_EC
-        */
-
-       /* Guest accessed VFP/SIMD registers, save host, restore Guest */
-       cmp     x2, #ESR_ELx_EC_FP_ASIMD
-       b.eq    switch_to_guest_fpsimd
-
-       cmp     x2, #ESR_ELx_EC_DABT_LOW
-       mov     x0, #ESR_ELx_EC_IABT_LOW
-       ccmp    x2, x0, #4, ne
-       b.ne    1f              // Not an abort we care about
-
-       /* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
-       and     x2, x1, #ESR_ELx_FSC_TYPE
-       cmp     x2, #FSC_PERM
-       b.ne    1f              // Not a permission fault
 alternative_else
-       nop                     // Use the permission fault path to
-       nop                     // check for a valid S1 translation,
-       nop                     // regardless of the ESR value.
+       b       __vhe_hyp_call
+       nop
 alternative_endif
-
-       /*
-        * Check for Stage-1 page table walk, which is guaranteed
-        * to give a valid HPFAR_EL2.
-        */
-       tbnz    x1, #7, 1f      // S1PTW is set
-
-       /* Preserve PAR_EL1 */
-       mrs     x3, par_el1
-       push    x3, xzr
-
-       /*
-        * Permission fault, HPFAR_EL2 is invalid.
-        * Resolve the IPA the hard way using the guest VA.
-        * Stage-1 translation already validated the memory access rights.
-        * As such, we can use the EL1 translation regime, and don't have
-        * to distinguish between EL0 and EL1 access.
-        */
-       mrs     x2, far_el2
-       at      s1e1r, x2
-       isb
-
-       /* Read result */
-       mrs     x3, par_el1
-       pop     x0, xzr                 // Restore PAR_EL1 from the stack
-       msr     par_el1, x0
-       tbnz    x3, #0, 3f              // Bail out if we failed the translation
-       ubfx    x3, x3, #12, #36        // Extract IPA
-       lsl     x3, x3, #4              // and present it like HPFAR
-       b       2f
-
-1:     mrs     x3, hpfar_el2
-       mrs     x2, far_el2
-
-2:     mrs     x0, tpidr_el2
-       str     w1, [x0, #VCPU_ESR_EL2]
-       str     x2, [x0, #VCPU_FAR_EL2]
-       str     x3, [x0, #VCPU_HPFAR_EL2]
-
-       mov     x1, #ARM_EXCEPTION_TRAP
-       b       __kvm_vcpu_return
-
-       /*
-        * Translation failed. Just return to the guest and
-        * let it fault again. Another CPU is probably playing
-        * behind our back.
-        */
-3:     pop     x2, x3
-       pop     x0, x1
-
-       eret
-
-el1_irq:
-       push    x0, x1
-       push    x2, x3
-       mrs     x0, tpidr_el2
-       mov     x1, #ARM_EXCEPTION_IRQ
-       b       __kvm_vcpu_return
-
-       .ltorg
-
-       .align 11
-
-ENTRY(__kvm_hyp_vector)
-       ventry  el2t_sync_invalid               // Synchronous EL2t
-       ventry  el2t_irq_invalid                // IRQ EL2t
-       ventry  el2t_fiq_invalid                // FIQ EL2t
-       ventry  el2t_error_invalid              // Error EL2t
-
-       ventry  el2h_sync_invalid               // Synchronous EL2h
-       ventry  el2h_irq_invalid                // IRQ EL2h
-       ventry  el2h_fiq_invalid                // FIQ EL2h
-       ventry  el2h_error_invalid              // Error EL2h
-
-       ventry  el1_sync                        // Synchronous 64-bit EL1
-       ventry  el1_irq                         // IRQ 64-bit EL1
-       ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
-       ventry  el1_error_invalid               // Error 64-bit EL1
-
-       ventry  el1_sync                        // Synchronous 32-bit EL1
-       ventry  el1_irq                         // IRQ 32-bit EL1
-       ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
-       ventry  el1_error_invalid               // Error 32-bit EL1
-ENDPROC(__kvm_hyp_vector)
-
-
-ENTRY(__kvm_get_mdcr_el2)
-       mrs     x0, mdcr_el2
-       ret
-ENDPROC(__kvm_get_mdcr_el2)
-
-       .popsection
+ENDPROC(__kvm_call_hyp)
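
With VHE (ARM64_HAS_VIRT_HOST_EXTN) the kernel itself runs at EL2, so the "hvc #0" trampoline above is patched into a direct branch to __vhe_hyp_call and the exception round-trip disappears. A behavioural sketch of the patched dispatch in plain C — has_vhe and do_hvc are illustrative stand-ins, and the real selection is burnt into the instruction stream at boot by the alternatives framework, not branched on per call:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t hyp_fn(uint64_t, uint64_t, uint64_t);

static bool has_vhe;            /* stands in for ARM64_HAS_VIRT_HOST_EXTN */

static uint64_t do_hvc(hyp_fn *fn, uint64_t a0, uint64_t a1, uint64_t a2)
{
        /* models "hvc #0": trap to EL2, run fn there, result back in x0 */
        return fn(a0, a1, a2);
}

static uint64_t kvm_call_hyp_sketch(hyp_fn *fn, uint64_t a0, uint64_t a1,
                                    uint64_t a2)
{
        if (has_vhe)
                return fn(a0, a1, a2);  /* __vhe_hyp_call: already at EL2 */
        return do_hvc(fn, a0, a1, a2);  /* non-VHE: enter EL2 via HVC */
}
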
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
new file mode 100644 (file)
index 0000000..826032b
--- /dev/null
@@ -0,0 +1,14 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
new file mode 100644 (file)
index 0000000..2f8bca8
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+#define read_debug(r,n)                read_sysreg(r##n##_el1)
+#define write_debug(v,r,n)     write_sysreg(v, r##n##_el1)
+
+#define save_debug(ptr,reg,nr)                                         \
+       switch (nr) {                                                   \
+       case 15:        ptr[15] = read_debug(reg, 15);                  \
+       case 14:        ptr[14] = read_debug(reg, 14);                  \
+       case 13:        ptr[13] = read_debug(reg, 13);                  \
+       case 12:        ptr[12] = read_debug(reg, 12);                  \
+       case 11:        ptr[11] = read_debug(reg, 11);                  \
+       case 10:        ptr[10] = read_debug(reg, 10);                  \
+       case 9:         ptr[9] = read_debug(reg, 9);                    \
+       case 8:         ptr[8] = read_debug(reg, 8);                    \
+       case 7:         ptr[7] = read_debug(reg, 7);                    \
+       case 6:         ptr[6] = read_debug(reg, 6);                    \
+       case 5:         ptr[5] = read_debug(reg, 5);                    \
+       case 4:         ptr[4] = read_debug(reg, 4);                    \
+       case 3:         ptr[3] = read_debug(reg, 3);                    \
+       case 2:         ptr[2] = read_debug(reg, 2);                    \
+       case 1:         ptr[1] = read_debug(reg, 1);                    \
+       default:        ptr[0] = read_debug(reg, 0);                    \
+       }
+
+#define restore_debug(ptr,reg,nr)                                      \
+       switch (nr) {                                                   \
+       case 15:        write_debug(ptr[15], reg, 15);                  \
+       case 14:        write_debug(ptr[14], reg, 14);                  \
+       case 13:        write_debug(ptr[13], reg, 13);                  \
+       case 12:        write_debug(ptr[12], reg, 12);                  \
+       case 11:        write_debug(ptr[11], reg, 11);                  \
+       case 10:        write_debug(ptr[10], reg, 10);                  \
+       case 9:         write_debug(ptr[9], reg, 9);                    \
+       case 8:         write_debug(ptr[8], reg, 8);                    \
+       case 7:         write_debug(ptr[7], reg, 7);                    \
+       case 6:         write_debug(ptr[6], reg, 6);                    \
+       case 5:         write_debug(ptr[5], reg, 5);                    \
+       case 4:         write_debug(ptr[4], reg, 4);                    \
+       case 3:         write_debug(ptr[3], reg, 3);                    \
+       case 2:         write_debug(ptr[2], reg, 2);                    \
+       case 1:         write_debug(ptr[1], reg, 1);                    \
+       default:        write_debug(ptr[0], reg, 0);                    \
+       }
+
+void __hyp_text __debug_save_state(struct kvm_vcpu *vcpu,
+                                  struct kvm_guest_debug_arch *dbg,
+                                  struct kvm_cpu_context *ctxt)
+{
+       u64 aa64dfr0;
+       int brps, wrps;
+
+       if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+               return;
+
+       aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+       brps = (aa64dfr0 >> 12) & 0xf;
+       wrps = (aa64dfr0 >> 20) & 0xf;
+
+       save_debug(dbg->dbg_bcr, dbgbcr, brps);
+       save_debug(dbg->dbg_bvr, dbgbvr, brps);
+       save_debug(dbg->dbg_wcr, dbgwcr, wrps);
+       save_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+       ctxt->sys_regs[MDCCINT_EL1] = read_sysreg(mdccint_el1);
+}
+
+void __hyp_text __debug_restore_state(struct kvm_vcpu *vcpu,
+                                     struct kvm_guest_debug_arch *dbg,
+                                     struct kvm_cpu_context *ctxt)
+{
+       u64 aa64dfr0;
+       int brps, wrps;
+
+       if (!(vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY))
+               return;
+
+       aa64dfr0 = read_sysreg(id_aa64dfr0_el1);
+
+       brps = (aa64dfr0 >> 12) & 0xf;
+       wrps = (aa64dfr0 >> 20) & 0xf;
+
+       restore_debug(dbg->dbg_bcr, dbgbcr, brps);
+       restore_debug(dbg->dbg_bvr, dbgbvr, brps);
+       restore_debug(dbg->dbg_wcr, dbgwcr, wrps);
+       restore_debug(dbg->dbg_wvr, dbgwvr, wrps);
+
+       write_sysreg(ctxt->sys_regs[MDCCINT_EL1], mdccint_el1);
+}
+
+void __hyp_text __debug_cond_save_host_state(struct kvm_vcpu *vcpu)
+{
+       /* If any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY is set, perform
+        * a full save/restore cycle. */
+       if ((vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_KDE) ||
+           (vcpu->arch.ctxt.sys_regs[MDSCR_EL1] & DBG_MDSCR_MDE))
+               vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
+
+       __debug_save_state(vcpu, &vcpu->arch.host_debug_state,
+                          kern_hyp_va(vcpu->arch.host_cpu_context));
+}
+
+void __hyp_text __debug_cond_restore_host_state(struct kvm_vcpu *vcpu)
+{
+       __debug_restore_state(vcpu, &vcpu->arch.host_debug_state,
+                             kern_hyp_va(vcpu->arch.host_cpu_context));
+
+       if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+               vcpu->arch.debug_flags &= ~KVM_ARM64_DEBUG_DIRTY;
+}
+
+static u32 __hyp_text __debug_read_mdcr_el2(void)
+{
+       return read_sysreg(mdcr_el2);
+}
+
+__alias(__debug_read_mdcr_el2) u32 __kvm_get_mdcr_el2(void);
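
Two idioms in this new file are easy to miss. The save_debug/restore_debug switches fall through deliberately: a CPU advertising N breakpoints enters at case N and executes every case below it, touching exactly registers N down to 0. And __alias — the kernel's wrapper around the GCC alias attribute — publishes the __hyp_text implementation under its ABI name without an indirection thunk. A minimal standalone sketch of the alias idiom, assuming __alias expands to __attribute__((alias(...))) as in compiler-gcc.h:

#include <stdio.h>

#define __alias(sym) __attribute__((alias(#sym)))

static int impl(void)                   /* think: the __hyp_text function */
{
        return 42;
}

int public_entry(void) __alias(impl);   /* second name for the same code */

int main(void)
{
        printf("%d\n", public_entry()); /* prints 42 */
        return 0;
}
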
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
new file mode 100644 (file)
index 0000000..fd0fbe9
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+#include <asm/fpsimdmacros.h>
+#include <asm/kvm.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#define CPU_GP_REG_OFFSET(x)   (CPU_GP_REGS + x)
+#define CPU_XREG_OFFSET(x)     CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
+
+       .text
+       .pushsection    .hyp.text, "ax"
+
+.macro save_callee_saved_regs ctxt
+       stp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       stp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       stp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       stp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       stp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       stp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+.macro restore_callee_saved_regs ctxt
+       ldp     x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
+       ldp     x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
+       ldp     x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
+       ldp     x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
+       ldp     x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
+       ldp     x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
+.endm
+
+/*
+ * u64 __guest_enter(struct kvm_vcpu *vcpu,
+ *                  struct kvm_cpu_context *host_ctxt);
+ */
+ENTRY(__guest_enter)
+       // x0: vcpu
+       // x1: host/guest context
+       // x2-x18: clobbered by macros
+
+       // Store the host regs
+       save_callee_saved_regs x1
+
+       // Preserve vcpu & host_ctxt for use at exit time
+       stp     x0, x1, [sp, #-16]!
+
+       add     x1, x0, #VCPU_CONTEXT
+
+       // Prepare x0-x1 for later restore by pushing them onto the stack
+       ldp     x2, x3, [x1, #CPU_XREG_OFFSET(0)]
+       stp     x2, x3, [sp, #-16]!
+
+       // x2-x18
+       ldp     x2, x3,   [x1, #CPU_XREG_OFFSET(2)]
+       ldp     x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
+       ldp     x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
+       ldp     x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
+       ldp     x10, x11, [x1, #CPU_XREG_OFFSET(10)]
+       ldp     x12, x13, [x1, #CPU_XREG_OFFSET(12)]
+       ldp     x14, x15, [x1, #CPU_XREG_OFFSET(14)]
+       ldp     x16, x17, [x1, #CPU_XREG_OFFSET(16)]
+       ldr     x18,      [x1, #CPU_XREG_OFFSET(18)]
+
+       // x19-x29, lr
+       restore_callee_saved_regs x1
+
+       // Last bits of the 64bit state
+       ldp     x0, x1, [sp], #16
+
+       // Do not touch any register after this!
+       eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+       // x0: vcpu
+       // x1: return code
+       // x2-x3: free
+       // x4-x29,lr: vcpu regs
+       // vcpu x0-x3 on the stack
+
+       add     x2, x0, #VCPU_CONTEXT
+
+       stp     x4, x5,   [x2, #CPU_XREG_OFFSET(4)]
+       stp     x6, x7,   [x2, #CPU_XREG_OFFSET(6)]
+       stp     x8, x9,   [x2, #CPU_XREG_OFFSET(8)]
+       stp     x10, x11, [x2, #CPU_XREG_OFFSET(10)]
+       stp     x12, x13, [x2, #CPU_XREG_OFFSET(12)]
+       stp     x14, x15, [x2, #CPU_XREG_OFFSET(14)]
+       stp     x16, x17, [x2, #CPU_XREG_OFFSET(16)]
+       str     x18,      [x2, #CPU_XREG_OFFSET(18)]
+
+       ldp     x6, x7, [sp], #16       // x2, x3
+       ldp     x4, x5, [sp], #16       // x0, x1
+
+       stp     x4, x5, [x2, #CPU_XREG_OFFSET(0)]
+       stp     x6, x7, [x2, #CPU_XREG_OFFSET(2)]
+
+       save_callee_saved_regs x2
+
+       // Restore vcpu & host_ctxt from the stack
+       // (preserving return code in x1)
+       ldp     x0, x2, [sp], #16
+       // Now restore the host regs
+       restore_callee_saved_regs x2
+
+       mov     x0, x1
+       ret
+ENDPROC(__guest_exit)
+
+ENTRY(__fpsimd_guest_restore)
+       stp     x4, lr, [sp, #-16]!
+
+       mrs     x2, cptr_el2
+       bic     x2, x2, #CPTR_EL2_TFP
+       msr     cptr_el2, x2
+       isb
+
+       mrs     x3, tpidr_el2
+
+       ldr     x0, [x3, #VCPU_HOST_CONTEXT]
+       kern_hyp_va x0
+       add     x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+       bl      __fpsimd_save_state
+
+       add     x2, x3, #VCPU_CONTEXT
+       add     x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
+       bl      __fpsimd_restore_state
+
+       // Skip restoring fpexc32 for AArch64 guests
+       mrs     x1, hcr_el2
+       tbnz    x1, #HCR_RW_SHIFT, 1f
+       ldr     x4, [x3, #VCPU_FPEXC32_EL2]
+       msr     fpexc32_el2, x4
+1:
+       ldp     x4, lr, [sp], #16
+       ldp     x2, x3, [sp], #16
+       ldp     x0, x1, [sp], #16
+
+       eret
+ENDPROC(__fpsimd_guest_restore)
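
The pop sequence at the end of __fpsimd_guest_restore only makes sense together with the vector code in hyp-entry.S below, which pushes the guest's x0-x3 before dispatching here. For orientation, a sketch of the hyp stack on entry to this handler (and to __guest_exit), before the local x4/lr spill — offsets are illustrative, relative to sp:

/*
 *   sp +  0: guest x2, x3     pushed by save_x0_to_x3 (hyp-entry.S)
 *   sp + 16: guest x0, x1     pushed by save_x0_to_x3
 *   sp + 32: vcpu, host_ctxt  pushed by __guest_enter, popped by __guest_exit
 */
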
diff --git a/arch/arm64/kvm/hyp/fpsimd.S b/arch/arm64/kvm/hyp/fpsimd.S
new file mode 100644 (file)
index 0000000..da3f22c
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/fpsimdmacros.h>
+
+       .text
+       .pushsection    .hyp.text, "ax"
+
+ENTRY(__fpsimd_save_state)
+       fpsimd_save     x0, 1
+       ret
+ENDPROC(__fpsimd_save_state)
+
+ENTRY(__fpsimd_restore_state)
+       fpsimd_restore  x0, 1
+       ret
+ENDPROC(__fpsimd_restore_state)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
new file mode 100644 (file)
index 0000000..1bdeee7
--- /dev/null
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/alternative.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpufeature.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+       .text
+       .pushsection    .hyp.text, "ax"
+
+.macro save_x0_to_x3
+       stp     x0, x1, [sp, #-16]!
+       stp     x2, x3, [sp, #-16]!
+.endm
+
+.macro restore_x0_to_x3
+       ldp     x2, x3, [sp], #16
+       ldp     x0, x1, [sp], #16
+.endm
+
+.macro do_el2_call
+       /*
+        * Shuffle the parameters before calling the function
+        * pointed to in x0. Assumes parameters in x[1,2,3].
+        */
+       sub     sp, sp, #16
+       str     lr, [sp]
+       mov     lr, x0
+       mov     x0, x1
+       mov     x1, x2
+       mov     x2, x3
+       blr     lr
+       ldr     lr, [sp]
+       add     sp, sp, #16
+.endm
+
+ENTRY(__vhe_hyp_call)
+       do_el2_call
+       /*
+        * We used to rely on having an exception return to get
+        * an implicit isb. In the E2H case, we don't have it anymore.
+        * Rather than changing all the leaf functions, just do it here
+        * before returning to the rest of the kernel.
+        */
+       isb
+       ret
+ENDPROC(__vhe_hyp_call)
+
+el1_sync:                              // Guest trapped into EL2
+       save_x0_to_x3
+
+       mrs     x1, esr_el2
+       lsr     x2, x1, #ESR_ELx_EC_SHIFT
+
+       cmp     x2, #ESR_ELx_EC_HVC64
+       b.ne    el1_trap
+
+       mrs     x3, vttbr_el2           // If vttbr is valid, the 64bit guest
+       cbnz    x3, el1_trap            // called HVC
+
+       /* Here, we're pretty sure the host called HVC. */
+       restore_x0_to_x3
+
+       /* Check for __hyp_get_vectors */
+       cbnz    x0, 1f
+       mrs     x0, vbar_el2
+       b       2f
+
+1:
+       /*
+        * Perform the EL2 call
+        */
+       kern_hyp_va     x0
+       do_el2_call
+
+2:     eret
+
+el1_trap:
+       /*
+        * x1: ESR
+        * x2: ESR_EC
+        */
+
+       /* Guest accessed VFP/SIMD registers, save host, restore Guest */
+       cmp     x2, #ESR_ELx_EC_FP_ASIMD
+       b.eq    __fpsimd_guest_restore
+
+       cmp     x2, #ESR_ELx_EC_DABT_LOW
+       mov     x0, #ESR_ELx_EC_IABT_LOW
+       ccmp    x2, x0, #4, ne
+       b.ne    1f              // Not an abort we care about
+
+       /* This is an abort. Check for permission fault */
+alternative_if_not ARM64_WORKAROUND_834220
+       and     x2, x1, #ESR_ELx_FSC_TYPE
+       cmp     x2, #FSC_PERM
+       b.ne    1f              // Not a permission fault
+alternative_else
+       nop                     // Use the permission fault path to
+       nop                     // check for a valid S1 translation,
+       nop                     // regardless of the ESR value.
+alternative_endif
+
+       /*
+        * Check for Stage-1 page table walk, which is guaranteed
+        * to give a valid HPFAR_EL2.
+        */
+       tbnz    x1, #7, 1f      // S1PTW is set
+
+       /* Preserve PAR_EL1 */
+       mrs     x3, par_el1
+       stp     x3, xzr, [sp, #-16]!
+
+       /*
+        * Permission fault, HPFAR_EL2 is invalid.
+        * Resolve the IPA the hard way using the guest VA.
+        * Stage-1 translation already validated the memory access rights.
+        * As such, we can use the EL1 translation regime, and don't have
+        * to distinguish between EL0 and EL1 access.
+        */
+       mrs     x2, far_el2
+       at      s1e1r, x2
+       isb
+
+       /* Read result */
+       mrs     x3, par_el1
+       ldp     x0, xzr, [sp], #16      // Restore PAR_EL1 from the stack
+       msr     par_el1, x0
+       tbnz    x3, #0, 3f              // Bail out if we failed the translation
+       ubfx    x3, x3, #12, #36        // Extract IPA
+       lsl     x3, x3, #4              // and present it like HPFAR
+       b       2f
+
+1:     mrs     x3, hpfar_el2
+       mrs     x2, far_el2
+
+2:     mrs     x0, tpidr_el2
+       str     w1, [x0, #VCPU_ESR_EL2]
+       str     x2, [x0, #VCPU_FAR_EL2]
+       str     x3, [x0, #VCPU_HPFAR_EL2]
+
+       mov     x1, #ARM_EXCEPTION_TRAP
+       b       __guest_exit
+
+       /*
+        * Translation failed. Just return to the guest and
+        * let it fault again. Another CPU is probably playing
+        * behind our back.
+        */
+3:     restore_x0_to_x3
+
+       eret
+
+el1_irq:
+       save_x0_to_x3
+       mrs     x0, tpidr_el2
+       mov     x1, #ARM_EXCEPTION_IRQ
+       b       __guest_exit
+
+ENTRY(__hyp_do_panic)
+       mov     lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+                     PSR_MODE_EL1h)
+       msr     spsr_el2, lr
+       ldr     lr, =panic
+       msr     elr_el2, lr
+       eret
+ENDPROC(__hyp_do_panic)
+
+.macro invalid_vector  label, target = __hyp_panic
+       .align  2
+\label:
+       b \target
+ENDPROC(\label)
+.endm
+
+       /* None of these should ever happen */
+       invalid_vector  el2t_sync_invalid
+       invalid_vector  el2t_irq_invalid
+       invalid_vector  el2t_fiq_invalid
+       invalid_vector  el2t_error_invalid
+       invalid_vector  el2h_sync_invalid
+       invalid_vector  el2h_irq_invalid
+       invalid_vector  el2h_fiq_invalid
+       invalid_vector  el2h_error_invalid
+       invalid_vector  el1_sync_invalid
+       invalid_vector  el1_irq_invalid
+       invalid_vector  el1_fiq_invalid
+       invalid_vector  el1_error_invalid
+
+       .ltorg
+
+       .align 11
+
+ENTRY(__kvm_hyp_vector)
+       ventry  el2t_sync_invalid               // Synchronous EL2t
+       ventry  el2t_irq_invalid                // IRQ EL2t
+       ventry  el2t_fiq_invalid                // FIQ EL2t
+       ventry  el2t_error_invalid              // Error EL2t
+
+       ventry  el2h_sync_invalid               // Synchronous EL2h
+       ventry  el2h_irq_invalid                // IRQ EL2h
+       ventry  el2h_fiq_invalid                // FIQ EL2h
+       ventry  el2h_error_invalid              // Error EL2h
+
+       ventry  el1_sync                        // Synchronous 64-bit EL1
+       ventry  el1_irq                         // IRQ 64-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 64-bit EL1
+       ventry  el1_error_invalid               // Error 64-bit EL1
+
+       ventry  el1_sync                        // Synchronous 32-bit EL1
+       ventry  el1_irq                         // IRQ 32-bit EL1
+       ventry  el1_fiq_invalid                 // FIQ 32-bit EL1
+       ventry  el1_error_invalid               // Error 32-bit EL1
+ENDPROC(__kvm_hyp_vector)
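
When HPFAR_EL2 can't be trusted (a permission fault with S1PTW clear), the handler above resolves the IPA by hand: AT S1E1R walks the guest's Stage-1 tables, PAR_EL1 reports the result, and an ubfx/lsl pair re-shapes it into HPFAR_EL2 format. In C, that bit surgery amounts to the following sketch (fixed shifts as in the code; the helper name is illustrative):

#include <stdint.h>

static uint64_t par_to_hpfar(uint64_t par)
{
        uint64_t pfn = (par >> 12) & ((1ULL << 36) - 1); /* PA[47:12] */

        return pfn << 4;        /* HPFAR_EL2.FIPA lives at bits [39:4] */
}
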
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
new file mode 100644 (file)
index 0000000..fb27517
--- /dev/null
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
+#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
+                                                     + PAGE_OFFSET)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond)                   \
+typeof(orig) * __hyp_text fname(void)                                  \
+{                                                                      \
+       typeof(alt) *val = orig;                                        \
+       asm volatile(ALTERNATIVE("nop           \n",                    \
+                                "mov   %0, %1  \n",                    \
+                                cond)                                  \
+                    : "+r" (val) : "r" (alt));                         \
+       return val;                                                     \
+}
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+                       struct kvm_guest_debug_arch *dbg,
+                       struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+                          struct kvm_guest_debug_arch *dbg,
+                          struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+static inline bool __fpsimd_enabled(void)
+{
+       return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
+
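
hyp_alternate_select is the C replacement for the alternative_if_not blocks of the old assembly: fname is emitted as a tiny function that returns orig, and when cond is detected at boot the alternatives framework patches the embedded nop into a mov that substitutes alt. A runnable toy model of the resulting behaviour — the ternary stands in for boot-time code patching, and the names mirror the vgic callers in switch.c below:

#include <stdbool.h>
#include <stdio.h>

typedef void vgic_save_fn(int cpu);

static void v2_save(int cpu) { printf("vgic-v2 save on cpu %d\n", cpu); }
static void v3_save(int cpu) { printf("vgic-v3 save on cpu %d\n", cpu); }

static bool has_sysreg_gic;     /* models ARM64_HAS_SYSREG_GIC_CPUIF */

static vgic_save_fn *vgic_call_save_state(void)
{
        /* the real version pays this cost once, at patch time */
        return has_sysreg_gic ? v3_save : v2_save;
}

int main(void)
{
        vgic_call_save_state()(0);      /* the call style used by switch.c */
        return 0;
}
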
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
new file mode 100644 (file)
index 0000000..ca8f5a5
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
+{
+       u64 val;
+
+       /*
+        * We are about to set CPTR_EL2.TFP to trap all floating point
+        * register accesses to EL2; however, the ARM ARM clearly states that
+        * traps are only taken to EL2 if the operation would not otherwise
+        * trap to EL1.  Therefore, always make sure that for 32-bit guests,
+        * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
+        */
+       val = vcpu->arch.hcr_el2;
+       if (!(val & HCR_RW)) {
+               write_sysreg(1 << 30, fpexc32_el2);
+               isb();
+       }
+       write_sysreg(val, hcr_el2);
+       /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
+       write_sysreg(1 << 15, hstr_el2);
+       write_sysreg(CPTR_EL2_TTA | CPTR_EL2_TFP, cptr_el2);
+       write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+       write_sysreg(HCR_RW, hcr_el2);
+       write_sysreg(0, hstr_el2);
+       write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
+       write_sysreg(0, cptr_el2);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+       write_sysreg(0, vttbr_el2);
+}
+
+static hyp_alternate_select(__vgic_call_save_state,
+                           __vgic_v2_save_state, __vgic_v3_save_state,
+                           ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static hyp_alternate_select(__vgic_call_restore_state,
+                           __vgic_v2_restore_state, __vgic_v3_restore_state,
+                           ARM64_HAS_SYSREG_GIC_CPUIF);
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+       __vgic_call_save_state()(vcpu);
+       write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+       u64 val;
+
+       val = read_sysreg(hcr_el2);
+       val |=  HCR_INT_OVERRIDE;
+       val |= vcpu->arch.irq_lines;
+       write_sysreg(val, hcr_el2);
+
+       __vgic_call_restore_state()(vcpu);
+}
+
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpu_context *host_ctxt;
+       struct kvm_cpu_context *guest_ctxt;
+       bool fp_enabled;
+       u64 exit_code;
+
+       vcpu = kern_hyp_va(vcpu);
+       write_sysreg(vcpu, tpidr_el2);
+
+       host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+       guest_ctxt = &vcpu->arch.ctxt;
+
+       __sysreg_save_state(host_ctxt);
+       __debug_cond_save_host_state(vcpu);
+
+       __activate_traps(vcpu);
+       __activate_vm(vcpu);
+
+       __vgic_restore_state(vcpu);
+       __timer_restore_state(vcpu);
+
+       /*
+        * We must restore the 32-bit state before the sysregs, thanks
+        * to Cortex-A57 erratum #852523.
+        */
+       __sysreg32_restore_state(vcpu);
+       __sysreg_restore_state(guest_ctxt);
+       __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+
+       /* Jump in the fire! */
+       exit_code = __guest_enter(vcpu, host_ctxt);
+       /* And we're baaack! */
+
+       fp_enabled = __fpsimd_enabled();
+
+       __sysreg_save_state(guest_ctxt);
+       __sysreg32_save_state(vcpu);
+       __timer_save_state(vcpu);
+       __vgic_save_state(vcpu);
+
+       __deactivate_traps(vcpu);
+       __deactivate_vm(vcpu);
+
+       __sysreg_restore_state(host_ctxt);
+
+       if (fp_enabled) {
+               __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
+               __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
+       }
+
+       __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
+       __debug_cond_restore_host_state(vcpu);
+
+       return exit_code;
+}
+
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+       unsigned long str_va = (unsigned long)__hyp_panic_string;
+       u64 spsr = read_sysreg(spsr_el2);
+       u64 elr = read_sysreg(elr_el2);
+       u64 par = read_sysreg(par_el1);
+
+       if (read_sysreg(vttbr_el2)) {
+               struct kvm_vcpu *vcpu;
+               struct kvm_cpu_context *host_ctxt;
+
+               vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
+               host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+               __deactivate_traps(vcpu);
+               __deactivate_vm(vcpu);
+               __sysreg_restore_state(host_ctxt);
+       }
+
+       /* Call panic for real */
+       __hyp_do_panic(hyp_kern_va(str_va),
+                      spsr,  elr,
+                      read_sysreg(esr_el2),   read_sysreg(far_el2),
+                      read_sysreg(hpfar_el2), par,
+                      (void *)read_sysreg(tpidr_el2));
+
+       unreachable();
+}
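
The fp_enabled check closes the loop on lazy FP/SIMD switching: __activate_traps sets CPTR_EL2.TFP, the guest's first FP/SIMD access traps to __fpsimd_guest_restore (entry.S above), which clears the trap and swaps register files, and __guest_run writes guest state back only if that trap actually fired. A runnable toy model of the protocol — every name and variable here is illustrative:

#include <stdbool.h>
#include <stdio.h>

static bool fp_trapped;         /* models CPTR_EL2.TFP */
static int host_fp, guest_fp;   /* models the two saved FP register files */
static int cpu_fp = 42;         /* models the live FP registers (host value) */

static void enter_guest(void)
{
        fp_trapped = true;      /* guest FP use will trap first */
}

static void guest_uses_fp(void) /* models the ESR_ELx_EC_FP_ASIMD trap */
{
        if (fp_trapped) {
                fp_trapped = false;
                host_fp = cpu_fp;       /* save host state only on demand */
                cpu_fp = guest_fp;
        }
}

static void exit_guest(void)    /* models the fp_enabled path above */
{
        if (!fp_trapped) {      /* guest really touched FP */
                guest_fp = cpu_fp;
                cpu_fp = host_fp;
        }
}

int main(void)
{
        enter_guest();
        exit_guest();           /* guest never used FP: nothing moved */
        printf("host fp untouched: %d\n", cpu_fp);

        enter_guest();
        guest_uses_fp();        /* first FP instruction traps */
        exit_guest();           /* now the swap-back happens */
        printf("host fp restored: %d\n", cpu_fp);
        return 0;
}
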
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
new file mode 100644 (file)
index 0000000..4256309
--- /dev/null
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* ctxt is already in the HYP VA space */
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+       ctxt->sys_regs[MPIDR_EL1]       = read_sysreg(vmpidr_el2);
+       ctxt->sys_regs[CSSELR_EL1]      = read_sysreg(csselr_el1);
+       ctxt->sys_regs[SCTLR_EL1]       = read_sysreg(sctlr_el1);
+       ctxt->sys_regs[ACTLR_EL1]       = read_sysreg(actlr_el1);
+       ctxt->sys_regs[CPACR_EL1]       = read_sysreg(cpacr_el1);
+       ctxt->sys_regs[TTBR0_EL1]       = read_sysreg(ttbr0_el1);
+       ctxt->sys_regs[TTBR1_EL1]       = read_sysreg(ttbr1_el1);
+       ctxt->sys_regs[TCR_EL1]         = read_sysreg(tcr_el1);
+       ctxt->sys_regs[ESR_EL1]         = read_sysreg(esr_el1);
+       ctxt->sys_regs[AFSR0_EL1]       = read_sysreg(afsr0_el1);
+       ctxt->sys_regs[AFSR1_EL1]       = read_sysreg(afsr1_el1);
+       ctxt->sys_regs[FAR_EL1]         = read_sysreg(far_el1);
+       ctxt->sys_regs[MAIR_EL1]        = read_sysreg(mair_el1);
+       ctxt->sys_regs[VBAR_EL1]        = read_sysreg(vbar_el1);
+       ctxt->sys_regs[CONTEXTIDR_EL1]  = read_sysreg(contextidr_el1);
+       ctxt->sys_regs[TPIDR_EL0]       = read_sysreg(tpidr_el0);
+       ctxt->sys_regs[TPIDRRO_EL0]     = read_sysreg(tpidrro_el0);
+       ctxt->sys_regs[TPIDR_EL1]       = read_sysreg(tpidr_el1);
+       ctxt->sys_regs[AMAIR_EL1]       = read_sysreg(amair_el1);
+       ctxt->sys_regs[CNTKCTL_EL1]     = read_sysreg(cntkctl_el1);
+       ctxt->sys_regs[PAR_EL1]         = read_sysreg(par_el1);
+       ctxt->sys_regs[MDSCR_EL1]       = read_sysreg(mdscr_el1);
+
+       ctxt->gp_regs.regs.sp           = read_sysreg(sp_el0);
+       ctxt->gp_regs.regs.pc           = read_sysreg(elr_el2);
+       ctxt->gp_regs.regs.pstate       = read_sysreg(spsr_el2);
+       ctxt->gp_regs.sp_el1            = read_sysreg(sp_el1);
+       ctxt->gp_regs.elr_el1           = read_sysreg(elr_el1);
+       ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+       write_sysreg(ctxt->sys_regs[MPIDR_EL1],   vmpidr_el2);
+       write_sysreg(ctxt->sys_regs[CSSELR_EL1],  csselr_el1);
+       write_sysreg(ctxt->sys_regs[SCTLR_EL1],   sctlr_el1);
+       write_sysreg(ctxt->sys_regs[ACTLR_EL1],   actlr_el1);
+       write_sysreg(ctxt->sys_regs[CPACR_EL1],   cpacr_el1);
+       write_sysreg(ctxt->sys_regs[TTBR0_EL1],   ttbr0_el1);
+       write_sysreg(ctxt->sys_regs[TTBR1_EL1],   ttbr1_el1);
+       write_sysreg(ctxt->sys_regs[TCR_EL1],     tcr_el1);
+       write_sysreg(ctxt->sys_regs[ESR_EL1],     esr_el1);
+       write_sysreg(ctxt->sys_regs[AFSR0_EL1],   afsr0_el1);
+       write_sysreg(ctxt->sys_regs[AFSR1_EL1],   afsr1_el1);
+       write_sysreg(ctxt->sys_regs[FAR_EL1],     far_el1);
+       write_sysreg(ctxt->sys_regs[MAIR_EL1],    mair_el1);
+       write_sysreg(ctxt->sys_regs[VBAR_EL1],    vbar_el1);
+       write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
+       write_sysreg(ctxt->sys_regs[TPIDR_EL0],   tpidr_el0);
+       write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
+       write_sysreg(ctxt->sys_regs[TPIDR_EL1],   tpidr_el1);
+       write_sysreg(ctxt->sys_regs[AMAIR_EL1],   amair_el1);
+       write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
+       write_sysreg(ctxt->sys_regs[PAR_EL1],     par_el1);
+       write_sysreg(ctxt->sys_regs[MDSCR_EL1],   mdscr_el1);
+
+       write_sysreg(ctxt->gp_regs.regs.sp,     sp_el0);
+       write_sysreg(ctxt->gp_regs.regs.pc,     elr_el2);
+       write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
+       write_sysreg(ctxt->gp_regs.sp_el1,      sp_el1);
+       write_sysreg(ctxt->gp_regs.elr_el1,     elr_el1);
+       write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+}
+
+void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
+{
+       u64 *spsr, *sysreg;
+
+       if (read_sysreg(hcr_el2) & HCR_RW)
+               return;
+
+       spsr = vcpu->arch.ctxt.gp_regs.spsr;
+       sysreg = vcpu->arch.ctxt.sys_regs;
+
+       spsr[KVM_SPSR_ABT] = read_sysreg(spsr_abt);
+       spsr[KVM_SPSR_UND] = read_sysreg(spsr_und);
+       spsr[KVM_SPSR_IRQ] = read_sysreg(spsr_irq);
+       spsr[KVM_SPSR_FIQ] = read_sysreg(spsr_fiq);
+
+       sysreg[DACR32_EL2] = read_sysreg(dacr32_el2);
+       sysreg[IFSR32_EL2] = read_sysreg(ifsr32_el2);
+
+       if (__fpsimd_enabled())
+               sysreg[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
+
+       if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+               sysreg[DBGVCR32_EL2] = read_sysreg(dbgvcr32_el2);
+}
+
+void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
+{
+       u64 *spsr, *sysreg;
+
+       if (read_sysreg(hcr_el2) & HCR_RW)
+               return;
+
+       spsr = vcpu->arch.ctxt.gp_regs.spsr;
+       sysreg = vcpu->arch.ctxt.sys_regs;
+
+       write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
+       write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
+       write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
+       write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);
+
+       write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
+       write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);
+
+       if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
+               write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
+}
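
Every accessor in this file reduces to a single mrs/msr via the read_sysreg()/write_sysreg() helpers pulled in from asm/sysreg.h (also touched by this merge). Their approximate shape — approximate, because the exact stringification and constraint spelling in this tree may differ:

#define read_sysreg(r) ({                                       \
        u64 __val;                                              \
        asm volatile("mrs %0, " #r : "=r" (__val));             \
        __val;                                                  \
})

#define write_sysreg(v, r) do {                                 \
        u64 __val = (u64)(v);                                   \
        asm volatile("msr " #r ", %0" : : "r" (__val));         \
} while (0)
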
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/arch/arm64/kvm/hyp/timer-sr.c
new file mode 100644 (file)
index 0000000..1051e5d
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <clocksource/arm_arch_timer.h>
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       u64 val;
+
+       if (kvm->arch.timer.enabled) {
+               timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
+               timer->cntv_cval = read_sysreg(cntv_cval_el0);
+       }
+
+       /* Disable the virtual timer */
+       write_sysreg(0, cntv_ctl_el0);
+
+       /* Allow physical timer/counter access for the host */
+       val = read_sysreg(cnthctl_el2);
+       val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+       write_sysreg(val, cnthctl_el2);
+
+       /* Clear cntvoff for the host */
+       write_sysreg(0, cntvoff_el2);
+}
+
+void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+       struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+       u64 val;
+
+       /*
+        * Disallow physical timer access for the guest,
+        * but allow physical counter access.
+        */
+       val = read_sysreg(cnthctl_el2);
+       val &= ~CNTHCTL_EL1PCEN;
+       val |= CNTHCTL_EL1PCTEN;
+       write_sysreg(val, cnthctl_el2);
+
+       if (kvm->arch.timer.enabled) {
+               write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
+               write_sysreg(timer->cntv_cval, cntv_cval_el0);
+               isb();
+               write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+       }
+}
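
The cnthctl_el2 handling above is symmetric: two architected trap bits are flipped between a host policy (physical timer and counter both accessible) and a guest policy (counter readable, timer registers trapped to EL2). A standalone sketch of that policy, with the bit positions hard-coded from the CNTHCTL_EL2 layout rather than taken from kernel headers:

#include <stdint.h>

#define CNTHCTL_EL1PCTEN (UINT32_C(1) << 0)  /* EL1/EL0 physical counter access */
#define CNTHCTL_EL1PCEN  (UINT32_C(1) << 1)  /* EL1/EL0 physical timer access */

/* Host: open both gates. Guest: keep the counter readable but trap
 * every physical timer register access to EL2. */
static uint32_t cnthctl_policy(uint32_t val, int for_host)
{
        if (for_host)
                return val | CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
        return (val & ~CNTHCTL_EL1PCEN) | CNTHCTL_EL1PCTEN;
}
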
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
new file mode 100644 (file)
index 0000000..2a7e0d8
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hyp.h"
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+       dsb(ishst);
+
+       /* Switch to requested VMID */
+       kvm = kern_hyp_va(kvm);
+       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+       isb();
+
+       /*
+        * We could do so much better if we had the VA as well.
+        * Instead, we invalidate Stage-2 for this IPA, and the
+        * whole of Stage-1. Weep...
+        */
+       ipa >>= 12;
+       asm volatile("tlbi ipas2e1is, %0" : : "r" (ipa));
+
+       /*
+        * We have to ensure completion of the invalidation at Stage-2,
+        * since a table walk on another CPU could refill a TLB with a
+        * complete (S1 + S2) walk based on the old Stage-2 mapping if
+        * the Stage-1 invalidation happened first.
+        */
+       dsb(ish);
+       asm volatile("tlbi vmalle1is" : : );
+       dsb(ish);
+       isb();
+
+       write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+                                                           phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+       dsb(ishst);
+
+       /* Switch to requested VMID */
+       kvm = kern_hyp_va(kvm);
+       write_sysreg(kvm->arch.vttbr, vttbr_el2);
+       isb();
+
+       asm volatile("tlbi vmalls12e1is" : : );
+       dsb(ish);
+       isb();
+
+       write_sysreg(0, vttbr_el2);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+       dsb(ishst);
+       asm volatile("tlbi alle1is      \n"
+                    "ic ialluis          ": : );
+       dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
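
The __alias() pattern used three times above gives each flush routine two names: the static definition carries the __hyp_text section placement, while the alias provides the extern symbol that the rest of KVM links against, at the same address. A self-contained sketch of the mechanism, with __alias spelled out the way compiler-gcc.h defines it and hypothetical function names:

#include <stdio.h>

#define __alias(symbol) __attribute__((alias(#symbol)))

/* The static function holds the implementation (and, in the kernel,
 * the section attribute); the alias is the externally visible symbol. */
static void do_flush(void)
{
        puts("flushed");
}

__alias(do_flush) void public_flush(void);

int main(void)
{
        public_flush();         /* same code, second name */
        return 0;
}
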
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
new file mode 100644 (file)
index 0000000..e717612
--- /dev/null
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+       void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+       u32 eisr0, eisr1, elrsr0, elrsr1;
+       int i, nr_lr;
+
+       if (!base)
+               return;
+
+       nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+       cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+       cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+       eisr0  = readl_relaxed(base + GICH_EISR0);
+       elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+       if (unlikely(nr_lr > 32)) {
+               eisr1  = readl_relaxed(base + GICH_EISR1);
+               elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+       } else {
+               eisr1 = elrsr1 = 0;
+       }
+#ifdef CONFIG_CPU_BIG_ENDIAN
+       cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
+       cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+       cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
+       cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+       cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);
+
+       writel_relaxed(0, base + GICH_HCR);
+
+       for (i = 0; i < nr_lr; i++)
+               cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+       struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+       struct vgic_dist *vgic = &kvm->arch.vgic;
+       void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+       int i, nr_lr;
+
+       if (!base)
+               return;
+
+       writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+       writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+       writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+
+       nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+       for (i = 0; i < nr_lr; i++)
+               writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
+}
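
One subtlety in the save path above: GICH_EISR and GICH_ELRSR are each a pair of banked 32-bit registers that software stores as a single u64, and the half order is swapped on big-endian kernels so that 32-bit accesses to the same storage still pick up the intended half. The packing rule alone, as a sketch:

#include <stdint.h>

/* word0 holds status for list registers 0-31, word1 for 32-63; the
 * big-endian case mirrors the #ifdef CONFIG_CPU_BIG_ENDIAN branch. */
static uint64_t pack_status(uint32_t word0, uint32_t word1, int big_endian)
{
        if (big_endian)
                return ((uint64_t)word0 << 32) | word1;
        return ((uint64_t)word1 << 32) | word0;
}
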
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
new file mode 100644 (file)
index 0000000..9142e08
--- /dev/null
@@ -0,0 +1,228 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic-v3.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_mmu.h>
+
+#include "hyp.h"
+
+#define vtr_to_max_lr_idx(v)           ((v) & 0xf)
+#define vtr_to_nr_pri_bits(v)          (((u32)(v) >> 29) + 1)
+
+#define read_gicreg(r)                                                 \
+       ({                                                              \
+               u64 reg;                                                \
+               asm volatile("mrs_s %0, " __stringify(r) : "=r" (reg)); \
+               reg;                                                    \
+       })
+
+#define write_gicreg(v,r)                                              \
+       do {                                                            \
+               u64 __val = (v);                                        \
+               asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
+       } while (0)
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+       u64 val;
+       u32 max_lr_idx, nr_pri_bits;
+
+       /*
+        * Make sure stores to the GIC via the memory mapped interface
+        * are now visible to the system register interface.
+        */
+       dsb(st);
+
+       cpu_if->vgic_vmcr  = read_gicreg(ICH_VMCR_EL2);
+       cpu_if->vgic_misr  = read_gicreg(ICH_MISR_EL2);
+       cpu_if->vgic_eisr  = read_gicreg(ICH_EISR_EL2);
+       cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
+
+       write_gicreg(0, ICH_HCR_EL2);
+       val = read_gicreg(ICH_VTR_EL2);
+       max_lr_idx = vtr_to_max_lr_idx(val);
+       nr_pri_bits = vtr_to_nr_pri_bits(val);
+
+       switch (max_lr_idx) {
+       case 15:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
+       case 14:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
+       case 13:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
+       case 12:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
+       case 11:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
+       case 10:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
+       case 9:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
+       case 8:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
+       case 7:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
+       case 6:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
+       case 5:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
+       case 4:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
+       case 3:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
+       case 2:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
+       case 1:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
+       case 0:
+               cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
+       }
+
+       switch (nr_pri_bits) {
+       case 7:
+               cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+               cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+       case 6:
+               cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+       default:
+               cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+       }
+
+       switch (nr_pri_bits) {
+       case 7:
+               cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+               cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+       case 6:
+               cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+       default:
+               cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+       }
+
+       val = read_gicreg(ICC_SRE_EL2);
+       write_gicreg(val | ICC_SRE_EL2_ENABLE, ICC_SRE_EL2);
+       isb(); /* Make sure ENABLE is set at EL2 before setting SRE at EL1 */
+       write_gicreg(1, ICC_SRE_EL1);
+}
+
+void __hyp_text __vgic_v3_restore_state(struct kvm_vcpu *vcpu)
+{
+       struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+       u64 val;
+       u32 max_lr_idx, nr_pri_bits;
+
+       /*
+        * VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
+        * Group0 interrupt (as generated in GICv2 mode) to be
+        * delivered as a FIQ to the guest, with potentially fatal
+        * consequences. So we must make sure that ICC_SRE_EL1 has
+        * actually been programmed with the value we want before
+        * starting to mess with the rest of the GIC.
+        */
+       write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
+       isb();
+
+       write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+       write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
+
+       val = read_gicreg(ICH_VTR_EL2);
+       max_lr_idx = vtr_to_max_lr_idx(val);
+       nr_pri_bits = vtr_to_nr_pri_bits(val);
+
+       switch (nr_pri_bits) {
+       case 7:
+                write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+                write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+       case 6:
+                write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+       default:
+                write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+       }
+
+       switch (nr_pri_bits) {
+       case 7:
+                write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+                write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+       case 6:
+                write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+       default:
+                write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+       }
+
+       switch (max_lr_idx) {
+       case 15:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
+       case 14:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
+       case 13:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
+       case 12:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
+       case 11:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
+       case 10:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
+       case 9:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
+       case 8:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
+       case 7:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
+       case 6:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
+       case 5:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
+       case 4:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
+       case 3:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
+       case 2:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
+       case 1:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
+       case 0:
+               write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+       }
+
+       /*
+        * Ensure that the above will have reached the
+        * (re)distributors. This ensures the guest will read the
+        * correct values from the memory-mapped interface.
+        */
+       isb();
+       dsb(sy);
+
+       /*
+        * Prevent the guest from touching the GIC system registers if
+        * SRE isn't enabled for GICv3 emulation.
+        */
+       if (!cpu_if->vgic_sre) {
+               write_gicreg(read_gicreg(ICC_SRE_EL2) & ~ICC_SRE_EL2_ENABLE,
+                            ICC_SRE_EL2);
+       }
+}
+
+static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
+{
+       return read_gicreg(ICH_VTR_EL2);
+}
+
+__alias(__vgic_v3_read_ich_vtr_el2) u64 __vgic_v3_get_ich_vtr_el2(void);
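
Everything in the fall-through switches above is sized by two fields of ICH_VTR_EL2, decoded by the macros at the top of the file: ListRegs encodes the number of implemented list registers minus one, PRIbits the number of priority bits minus one. The decoding as standalone C (mirroring the macros, not the kernel header):

#include <stdint.h>

/* Each switch enters at the highest implemented index and deliberately
 * falls through, touching only the registers that actually exist. */
static unsigned max_lr_idx(uint64_t vtr)  { return vtr & 0xf; }
static unsigned nr_pri_bits(uint64_t vtr) { return ((uint32_t)vtr >> 29) + 1; }
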
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index d2650e84faf2f53f2afbdbd15e1f54d217e3fdb8..eec3598b4184077b83b5a1f24321891cb110f5bb 100644 (file)
@@ -29,6 +29,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
@@ -219,9 +220,9 @@ static bool trap_debug_regs(struct kvm_vcpu *vcpu,
  * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
  * hyp.S code switches between host and guest values in future.
  */
-static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
-                             struct sys_reg_params *p,
-                             u64 *dbg_reg)
+static void reg_to_dbg(struct kvm_vcpu *vcpu,
+                      struct sys_reg_params *p,
+                      u64 *dbg_reg)
 {
        u64 val = p->regval;
 
@@ -234,18 +235,18 @@ static inline void reg_to_dbg(struct kvm_vcpu *vcpu,
        vcpu->arch.debug_flags |= KVM_ARM64_DEBUG_DIRTY;
 }
 
-static inline void dbg_to_reg(struct kvm_vcpu *vcpu,
-                             struct sys_reg_params *p,
-                             u64 *dbg_reg)
+static void dbg_to_reg(struct kvm_vcpu *vcpu,
+                      struct sys_reg_params *p,
+                      u64 *dbg_reg)
 {
        p->regval = *dbg_reg;
        if (p->is_32bit)
                p->regval &= 0xffffffffUL;
 }
 
-static inline bool trap_bvr(struct kvm_vcpu *vcpu,
-                           struct sys_reg_params *p,
-                           const struct sys_reg_desc *rd)
+static bool trap_bvr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
 {
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
@@ -279,15 +280,15 @@ static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
 }
 
-static inline void reset_bvr(struct kvm_vcpu *vcpu,
-                            const struct sys_reg_desc *rd)
+static void reset_bvr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
 {
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
 }
 
-static inline bool trap_bcr(struct kvm_vcpu *vcpu,
-                           struct sys_reg_params *p,
-                           const struct sys_reg_desc *rd)
+static bool trap_bcr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
 {
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];
 
@@ -322,15 +323,15 @@ static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
 }
 
-static inline void reset_bcr(struct kvm_vcpu *vcpu,
-                            const struct sys_reg_desc *rd)
+static void reset_bcr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
 {
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
 }
 
-static inline bool trap_wvr(struct kvm_vcpu *vcpu,
-                           struct sys_reg_params *p,
-                           const struct sys_reg_desc *rd)
+static bool trap_wvr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
 {
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];
 
@@ -365,15 +366,15 @@ static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
 }
 
-static inline void reset_wvr(struct kvm_vcpu *vcpu,
-                            const struct sys_reg_desc *rd)
+static void reset_wvr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
 {
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
 }
 
-static inline bool trap_wcr(struct kvm_vcpu *vcpu,
-                           struct sys_reg_params *p,
-                           const struct sys_reg_desc *rd)
+static bool trap_wcr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
 {
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];
 
@@ -407,8 +408,8 @@ static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
        return 0;
 }
 
-static inline void reset_wcr(struct kvm_vcpu *vcpu,
-                            const struct sys_reg_desc *rd)
+static void reset_wcr(struct kvm_vcpu *vcpu,
+                     const struct sys_reg_desc *rd)
 {
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
 }
@@ -722,9 +723,9 @@ static bool trap_debug32(struct kvm_vcpu *vcpu,
  * system is in.
  */
 
-static inline bool trap_xvr(struct kvm_vcpu *vcpu,
-                           struct sys_reg_params *p,
-                           const struct sys_reg_desc *rd)
+static bool trap_xvr(struct kvm_vcpu *vcpu,
+                    struct sys_reg_params *p,
+                    const struct sys_reg_desc *rd)
 {
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];
 
diff --git a/arch/arm64/kvm/vgic-v2-switch.S b/arch/arm64/kvm/vgic-v2-switch.S
deleted file mode 100644 (file)
index 3f00071..0000000
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_mmu.h>
-
-       .text
-       .pushsection    .hyp.text, "ax"
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-ENTRY(__save_vgic_v2_state)
-__save_vgic_v2_state:
-       /* Get VGIC VCTRL base into x2 */
-       ldr     x2, [x0, #VCPU_KVM]
-       kern_hyp_va     x2
-       ldr     x2, [x2, #KVM_VGIC_VCTRL]
-       kern_hyp_va     x2
-       cbz     x2, 2f          // disabled
-
-       /* Compute the address of struct vgic_cpu */
-       add     x3, x0, #VCPU_VGIC_CPU
-
-       /* Save all interesting registers */
-       ldr     w5, [x2, #GICH_VMCR]
-       ldr     w6, [x2, #GICH_MISR]
-       ldr     w7, [x2, #GICH_EISR0]
-       ldr     w8, [x2, #GICH_EISR1]
-       ldr     w9, [x2, #GICH_ELRSR0]
-       ldr     w10, [x2, #GICH_ELRSR1]
-       ldr     w11, [x2, #GICH_APR]
-CPU_BE(        rev     w5,  w5  )
-CPU_BE(        rev     w6,  w6  )
-CPU_BE(        rev     w7,  w7  )
-CPU_BE(        rev     w8,  w8  )
-CPU_BE(        rev     w9,  w9  )
-CPU_BE(        rev     w10, w10 )
-CPU_BE(        rev     w11, w11 )
-
-       str     w5, [x3, #VGIC_V2_CPU_VMCR]
-       str     w6, [x3, #VGIC_V2_CPU_MISR]
-CPU_LE(        str     w7, [x3, #VGIC_V2_CPU_EISR] )
-CPU_LE(        str     w8, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_LE(        str     w9, [x3, #VGIC_V2_CPU_ELRSR] )
-CPU_LE(        str     w10, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE(        str     w7, [x3, #(VGIC_V2_CPU_EISR + 4)] )
-CPU_BE(        str     w8, [x3, #VGIC_V2_CPU_EISR] )
-CPU_BE(        str     w9, [x3, #(VGIC_V2_CPU_ELRSR + 4)] )
-CPU_BE(        str     w10, [x3, #VGIC_V2_CPU_ELRSR] )
-       str     w11, [x3, #VGIC_V2_CPU_APR]
-
-       /* Clear GICH_HCR */
-       str     wzr, [x2, #GICH_HCR]
-
-       /* Save list registers */
-       add     x2, x2, #GICH_LR0
-       ldr     w4, [x3, #VGIC_CPU_NR_LR]
-       add     x3, x3, #VGIC_V2_CPU_LR
-1:     ldr     w5, [x2], #4
-CPU_BE(        rev     w5, w5 )
-       str     w5, [x3], #4
-       sub     w4, w4, #1
-       cbnz    w4, 1b
-2:
-       ret
-ENDPROC(__save_vgic_v2_state)
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-ENTRY(__restore_vgic_v2_state)
-__restore_vgic_v2_state:
-       /* Get VGIC VCTRL base into x2 */
-       ldr     x2, [x0, #VCPU_KVM]
-       kern_hyp_va     x2
-       ldr     x2, [x2, #KVM_VGIC_VCTRL]
-       kern_hyp_va     x2
-       cbz     x2, 2f          // disabled
-
-       /* Compute the address of struct vgic_cpu */
-       add     x3, x0, #VCPU_VGIC_CPU
-
-       /* We only restore a minimal set of registers */
-       ldr     w4, [x3, #VGIC_V2_CPU_HCR]
-       ldr     w5, [x3, #VGIC_V2_CPU_VMCR]
-       ldr     w6, [x3, #VGIC_V2_CPU_APR]
-CPU_BE(        rev     w4, w4 )
-CPU_BE(        rev     w5, w5 )
-CPU_BE(        rev     w6, w6 )
-
-       str     w4, [x2, #GICH_HCR]
-       str     w5, [x2, #GICH_VMCR]
-       str     w6, [x2, #GICH_APR]
-
-       /* Restore list registers */
-       add     x2, x2, #GICH_LR0
-       ldr     w4, [x3, #VGIC_CPU_NR_LR]
-       add     x3, x3, #VGIC_V2_CPU_LR
-1:     ldr     w5, [x3], #4
-CPU_BE(        rev     w5, w5 )
-       str     w5, [x2], #4
-       sub     w4, w4, #1
-       cbnz    w4, 1b
-2:
-       ret
-ENDPROC(__restore_vgic_v2_state)
-
-       .popsection
diff --git a/arch/arm64/kvm/vgic-v3-switch.S b/arch/arm64/kvm/vgic-v3-switch.S
deleted file mode 100644 (file)
index 3c20730..0000000
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * Copyright (C) 2012,2013 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program.  If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <linux/irqchip/arm-gic-v3.h>
-
-#include <asm/assembler.h>
-#include <asm/memory.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-
-       .text
-       .pushsection    .hyp.text, "ax"
-
-/*
- * We store LRs in reverse order to let the CPU deal with streaming
- * access. Use this macro to make it look saner...
- */
-#define LR_OFFSET(n)   (VGIC_V3_CPU_LR + (15 - n) * 8)
-
-/*
- * Save the VGIC CPU state into memory
- * x0: Register pointing to VCPU struct
- * Do not corrupt x1!!!
- */
-.macro save_vgic_v3_state
-       // Compute the address of struct vgic_cpu
-       add     x3, x0, #VCPU_VGIC_CPU
-
-       // Make sure stores to the GIC via the memory mapped interface
-       // are now visible to the system register interface
-       dsb     st
-
-       // Save all interesting registers
-       mrs_s   x5, ICH_VMCR_EL2
-       mrs_s   x6, ICH_MISR_EL2
-       mrs_s   x7, ICH_EISR_EL2
-       mrs_s   x8, ICH_ELSR_EL2
-
-       str     w5, [x3, #VGIC_V3_CPU_VMCR]
-       str     w6, [x3, #VGIC_V3_CPU_MISR]
-       str     w7, [x3, #VGIC_V3_CPU_EISR]
-       str     w8, [x3, #VGIC_V3_CPU_ELRSR]
-
-       msr_s   ICH_HCR_EL2, xzr
-
-       mrs_s   x21, ICH_VTR_EL2
-       mvn     w22, w21
-       ubfiz   w23, w22, 2, 4  // w23 = (15 - ListRegs) * 4
-
-       adr     x24, 1f
-       add     x24, x24, x23
-       br      x24
-
-1:
-       mrs_s   x20, ICH_LR15_EL2
-       mrs_s   x19, ICH_LR14_EL2
-       mrs_s   x18, ICH_LR13_EL2
-       mrs_s   x17, ICH_LR12_EL2
-       mrs_s   x16, ICH_LR11_EL2
-       mrs_s   x15, ICH_LR10_EL2
-       mrs_s   x14, ICH_LR9_EL2
-       mrs_s   x13, ICH_LR8_EL2
-       mrs_s   x12, ICH_LR7_EL2
-       mrs_s   x11, ICH_LR6_EL2
-       mrs_s   x10, ICH_LR5_EL2
-       mrs_s   x9, ICH_LR4_EL2
-       mrs_s   x8, ICH_LR3_EL2
-       mrs_s   x7, ICH_LR2_EL2
-       mrs_s   x6, ICH_LR1_EL2
-       mrs_s   x5, ICH_LR0_EL2
-
-       adr     x24, 1f
-       add     x24, x24, x23
-       br      x24
-
-1:
-       str     x20, [x3, #LR_OFFSET(15)]
-       str     x19, [x3, #LR_OFFSET(14)]
-       str     x18, [x3, #LR_OFFSET(13)]
-       str     x17, [x3, #LR_OFFSET(12)]
-       str     x16, [x3, #LR_OFFSET(11)]
-       str     x15, [x3, #LR_OFFSET(10)]
-       str     x14, [x3, #LR_OFFSET(9)]
-       str     x13, [x3, #LR_OFFSET(8)]
-       str     x12, [x3, #LR_OFFSET(7)]
-       str     x11, [x3, #LR_OFFSET(6)]
-       str     x10, [x3, #LR_OFFSET(5)]
-       str     x9, [x3, #LR_OFFSET(4)]
-       str     x8, [x3, #LR_OFFSET(3)]
-       str     x7, [x3, #LR_OFFSET(2)]
-       str     x6, [x3, #LR_OFFSET(1)]
-       str     x5, [x3, #LR_OFFSET(0)]
-
-       tbnz    w21, #29, 6f    // 6 bits
-       tbz     w21, #30, 5f    // 5 bits
-                               // 7 bits
-       mrs_s   x20, ICH_AP0R3_EL2
-       str     w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
-       mrs_s   x19, ICH_AP0R2_EL2
-       str     w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
-6:     mrs_s   x18, ICH_AP0R1_EL2
-       str     w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
-5:     mrs_s   x17, ICH_AP0R0_EL2
-       str     w17, [x3, #VGIC_V3_CPU_AP0R]
-
-       tbnz    w21, #29, 6f    // 6 bits
-       tbz     w21, #30, 5f    // 5 bits
-                               // 7 bits
-       mrs_s   x20, ICH_AP1R3_EL2
-       str     w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
-       mrs_s   x19, ICH_AP1R2_EL2
-       str     w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
-6:     mrs_s   x18, ICH_AP1R1_EL2
-       str     w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
-5:     mrs_s   x17, ICH_AP1R0_EL2
-       str     w17, [x3, #VGIC_V3_CPU_AP1R]
-
-       // Restore SRE_EL1 access and re-enable SRE at EL1.
-       mrs_s   x5, ICC_SRE_EL2
-       orr     x5, x5, #ICC_SRE_EL2_ENABLE
-       msr_s   ICC_SRE_EL2, x5
-       isb
-       mov     x5, #1
-       msr_s   ICC_SRE_EL1, x5
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- * x0: Register pointing to VCPU struct
- */
-.macro restore_vgic_v3_state
-       // Compute the address of struct vgic_cpu
-       add     x3, x0, #VCPU_VGIC_CPU
-
-       // Restore all interesting registers
-       ldr     w4, [x3, #VGIC_V3_CPU_HCR]
-       ldr     w5, [x3, #VGIC_V3_CPU_VMCR]
-       ldr     w25, [x3, #VGIC_V3_CPU_SRE]
-
-       msr_s   ICC_SRE_EL1, x25
-
-       // make sure SRE is valid before writing the other registers
-       isb
-
-       msr_s   ICH_HCR_EL2, x4
-       msr_s   ICH_VMCR_EL2, x5
-
-       mrs_s   x21, ICH_VTR_EL2
-
-       tbnz    w21, #29, 6f    // 6 bits
-       tbz     w21, #30, 5f    // 5 bits
-                               // 7 bits
-       ldr     w20, [x3, #(VGIC_V3_CPU_AP1R + 3*4)]
-       msr_s   ICH_AP1R3_EL2, x20
-       ldr     w19, [x3, #(VGIC_V3_CPU_AP1R + 2*4)]
-       msr_s   ICH_AP1R2_EL2, x19
-6:     ldr     w18, [x3, #(VGIC_V3_CPU_AP1R + 1*4)]
-       msr_s   ICH_AP1R1_EL2, x18
-5:     ldr     w17, [x3, #VGIC_V3_CPU_AP1R]
-       msr_s   ICH_AP1R0_EL2, x17
-
-       tbnz    w21, #29, 6f    // 6 bits
-       tbz     w21, #30, 5f    // 5 bits
-                               // 7 bits
-       ldr     w20, [x3, #(VGIC_V3_CPU_AP0R + 3*4)]
-       msr_s   ICH_AP0R3_EL2, x20
-       ldr     w19, [x3, #(VGIC_V3_CPU_AP0R + 2*4)]
-       msr_s   ICH_AP0R2_EL2, x19
-6:     ldr     w18, [x3, #(VGIC_V3_CPU_AP0R + 1*4)]
-       msr_s   ICH_AP0R1_EL2, x18
-5:     ldr     w17, [x3, #VGIC_V3_CPU_AP0R]
-       msr_s   ICH_AP0R0_EL2, x17
-
-       and     w22, w21, #0xf
-       mvn     w22, w21
-       ubfiz   w23, w22, 2, 4  // w23 = (15 - ListRegs) * 4
-
-       adr     x24, 1f
-       add     x24, x24, x23
-       br      x24
-
-1:
-       ldr     x20, [x3, #LR_OFFSET(15)]
-       ldr     x19, [x3, #LR_OFFSET(14)]
-       ldr     x18, [x3, #LR_OFFSET(13)]
-       ldr     x17, [x3, #LR_OFFSET(12)]
-       ldr     x16, [x3, #LR_OFFSET(11)]
-       ldr     x15, [x3, #LR_OFFSET(10)]
-       ldr     x14, [x3, #LR_OFFSET(9)]
-       ldr     x13, [x3, #LR_OFFSET(8)]
-       ldr     x12, [x3, #LR_OFFSET(7)]
-       ldr     x11, [x3, #LR_OFFSET(6)]
-       ldr     x10, [x3, #LR_OFFSET(5)]
-       ldr     x9, [x3, #LR_OFFSET(4)]
-       ldr     x8, [x3, #LR_OFFSET(3)]
-       ldr     x7, [x3, #LR_OFFSET(2)]
-       ldr     x6, [x3, #LR_OFFSET(1)]
-       ldr     x5, [x3, #LR_OFFSET(0)]
-
-       adr     x24, 1f
-       add     x24, x24, x23
-       br      x24
-
-1:
-       msr_s   ICH_LR15_EL2, x20
-       msr_s   ICH_LR14_EL2, x19
-       msr_s   ICH_LR13_EL2, x18
-       msr_s   ICH_LR12_EL2, x17
-       msr_s   ICH_LR11_EL2, x16
-       msr_s   ICH_LR10_EL2, x15
-       msr_s   ICH_LR9_EL2,  x14
-       msr_s   ICH_LR8_EL2,  x13
-       msr_s   ICH_LR7_EL2,  x12
-       msr_s   ICH_LR6_EL2,  x11
-       msr_s   ICH_LR5_EL2,  x10
-       msr_s   ICH_LR4_EL2,   x9
-       msr_s   ICH_LR3_EL2,   x8
-       msr_s   ICH_LR2_EL2,   x7
-       msr_s   ICH_LR1_EL2,   x6
-       msr_s   ICH_LR0_EL2,   x5
-
-       // Ensure that the above will have reached the
-       // (re)distributors. This ensures the guest will read
-       // the correct values from the memory-mapped interface.
-       isb
-       dsb     sy
-
-       // Prevent the guest from touching the GIC system registers
-       // if SRE isn't enabled for GICv3 emulation
-       cbnz    x25, 1f
-       mrs_s   x5, ICC_SRE_EL2
-       and     x5, x5, #~ICC_SRE_EL2_ENABLE
-       msr_s   ICC_SRE_EL2, x5
-1:
-.endm
-
-ENTRY(__save_vgic_v3_state)
-       save_vgic_v3_state
-       ret
-ENDPROC(__save_vgic_v3_state)
-
-ENTRY(__restore_vgic_v3_state)
-       restore_vgic_v3_state
-       ret
-ENDPROC(__restore_vgic_v3_state)
-
-ENTRY(__vgic_v3_get_ich_vtr_el2)
-       mrs_s   x0, ICH_VTR_EL2
-       ret
-ENDPROC(__vgic_v3_get_ich_vtr_el2)
-
-       .popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 85a542b2157521da47b39e2b2bff0180cf5b665a..8292784d44c95508c50be40b201454fafc488d2e 100644 (file)
@@ -24,6 +24,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/hwcap.h>
 #include <asm/pgtable.h>
+#include <asm/pgtable-hwdef.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative.h>
 
@@ -63,62 +64,50 @@ ENTRY(cpu_do_suspend)
        mrs     x2, tpidr_el0
        mrs     x3, tpidrro_el0
        mrs     x4, contextidr_el1
-       mrs     x5, mair_el1
-       mrs     x6, cpacr_el1
-       mrs     x7, ttbr1_el1
-       mrs     x8, tcr_el1
-       mrs     x9, vbar_el1
-       mrs     x10, mdscr_el1
-       mrs     x11, oslsr_el1
-       mrs     x12, sctlr_el1
+       mrs     x5, cpacr_el1
+       mrs     x6, tcr_el1
+       mrs     x7, vbar_el1
+       mrs     x8, mdscr_el1
+       mrs     x9, oslsr_el1
+       mrs     x10, sctlr_el1
        stp     x2, x3, [x0]
-       stp     x4, x5, [x0, #16]
-       stp     x6, x7, [x0, #32]
-       stp     x8, x9, [x0, #48]
-       stp     x10, x11, [x0, #64]
-       str     x12, [x0, #80]
+       stp     x4, xzr, [x0, #16]
+       stp     x5, x6, [x0, #32]
+       stp     x7, x8, [x0, #48]
+       stp     x9, x10, [x0, #64]
        ret
 ENDPROC(cpu_do_suspend)
 
 /**
  * cpu_do_resume - restore CPU register context
  *
- * x0: Physical address of context pointer
- * x1: ttbr0_el1 to be restored
- *
- * Returns:
- *     sctlr_el1 value in x0
+ * x0: Address of context pointer
  */
 ENTRY(cpu_do_resume)
-       /*
-        * Invalidate local tlb entries before turning on MMU
-        */
-       tlbi    vmalle1
        ldp     x2, x3, [x0]
        ldp     x4, x5, [x0, #16]
-       ldp     x6, x7, [x0, #32]
-       ldp     x8, x9, [x0, #48]
-       ldp     x10, x11, [x0, #64]
-       ldr     x12, [x0, #80]
+       ldp     x6, x8, [x0, #32]
+       ldp     x9, x10, [x0, #48]
+       ldp     x11, x12, [x0, #64]
        msr     tpidr_el0, x2
        msr     tpidrro_el0, x3
        msr     contextidr_el1, x4
-       msr     mair_el1, x5
        msr     cpacr_el1, x6
-       msr     ttbr0_el1, x1
-       msr     ttbr1_el1, x7
-       tcr_set_idmap_t0sz x8, x7
+
+       /* Don't change t0sz here, mask those bits when restoring */
+       mrs     x5, tcr_el1
+       bfi     x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
+
        msr     tcr_el1, x8
        msr     vbar_el1, x9
        msr     mdscr_el1, x10
+       msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
         */
        ubfx    x11, x11, #1, #1
        msr     oslar_el1, x11
        reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
-       mov     x0, x12
-       dsb     nsh             // Make sure local tlb invalidation completed
        isb
        ret
 ENDPROC(cpu_do_resume)
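
The bfi sequence in the resume path is the interesting change above: rather than restoring the saved TCR_EL1 wholesale, it grafts the currently live T0SZ field into the saved value, so the setting chosen at boot survives resume. The equivalent bit manipulation in C (field offset and width hard-coded from the arm64 TCR layout; a sketch, not kernel code):

#include <stdint.h>

#define TCR_T0SZ_OFFSET 0
#define TCR_TxSZ_WIDTH  6
#define TCR_T0SZ_MASK   (((UINT64_C(1) << TCR_TxSZ_WIDTH) - 1) << TCR_T0SZ_OFFSET)

/* Equivalent of "bfi x8, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH": keep
 * the saved TCR except for T0SZ, which comes from the live register. */
static uint64_t merge_tcr(uint64_t saved, uint64_t live)
{
        return (saved & ~TCR_T0SZ_MASK) | (live & TCR_T0SZ_MASK);
}
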
diff --git a/arch/h8300/include/asm/thread_info.h b/arch/h8300/include/asm/thread_info.h
index b408fe660cf8ceab685de783220a2db991b4dfc3..3cef06875f5ca32930c06b07db027e86f362ce94 100644 (file)
@@ -31,7 +31,6 @@ struct thread_info {
        int                cpu;                 /* cpu we're on */
        int                preempt_count;       /* 0 => preemptable, <0 => BUG */
        mm_segment_t            addr_limit;
-       struct restart_block restart_block;
 };
 
 /*
@@ -44,9 +43,6 @@ struct thread_info {
        .cpu =          0,                      \
        .preempt_count = INIT_PREEMPT_COUNT,    \
        .addr_limit     = KERNEL_DS,            \
-       .restart_block  = {                     \
-               .fn = do_no_restart_syscall,    \
-       },                                      \
 }
 
 #define init_thread_info       (init_thread_union.thread_info)
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index 380fffd081b2d3c8c4fb962aca8bc0efbd5783bb..036ad04edd2d8aa2d72c5be62d8ce0752ccc9d6a 100644 (file)
@@ -79,7 +79,7 @@ restore_sigcontext(struct sigcontext *usc, int *pd0)
        unsigned int er0;
 
        /* Always make any pending restarted system calls return -EINTR */
-       current_thread_info()->restart_block.fn = do_no_restart_syscall;
+       current->restart_block.fn = do_no_restart_syscall;
 
        /* restore passed registers */
 #define COPY(r)  do { err |= get_user(regs->r, &usc->sc_##r); } while (0)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 3251b206e55ab03964a6f2337be20877f315de77..4298aeb1e20f3ef9ebc2b477d411ffc0cdbf35ef 100644 (file)
@@ -752,15 +752,15 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
 
-       if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
+       if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
+               kvm_clear_c0_guest_status(cop0, ST0_ERL);
+               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
+       } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
 
-       } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
-               kvm_clear_c0_guest_status(cop0, ST0_ERL);
-               vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
@@ -822,7 +822,7 @@ static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
        bool user;
 
        /* No need to flush for entries which are already invalid */
-       if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+       if (!((tlb->tlb_lo0 | tlb->tlb_lo1) & MIPS3_PG_V))
                return;
        /* User address space doesn't need flushing for KSeg2/3 changes */
        user = tlb->tlb_hi < KVM_GUEST_KSEG0;
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index fbafa0d0e2bf865db726d15f9c4beb4817afe86c..a86b19fccb63bada4d94cc6e4615c19b919743c7 100644 (file)
@@ -106,8 +106,6 @@ linux_gateway_entry:
        mtsp    %r0,%sr4                        /* get kernel space into sr4 */
        mtsp    %r0,%sr5                        /* get kernel space into sr5 */
        mtsp    %r0,%sr6                        /* get kernel space into sr6 */
-       mfsp    %sr7,%r1                        /* save user sr7 */
-       mtsp    %r1,%sr3                        /* and store it in sr3 */
 
 #ifdef CONFIG_64BIT
        /* for now we can *always* set the W bit on entry to the syscall
@@ -133,6 +131,14 @@ linux_gateway_entry:
        depdi   0, 31, 32, %r21
 1:     
 #endif
+
+       /* We use a rsm/ssm pair to prevent sr3 from being clobbered
+        * by external interrupts.
+        */
+       mfsp    %sr7,%r1                        /* save user sr7 */
+       rsm     PSW_SM_I, %r0                   /* disable interrupts */
+       mtsp    %r1,%sr3                        /* and store it in sr3 */
+
        mfctl   %cr30,%r1
        xor     %r1,%r30,%r30                   /* ye olde xor trick */
        xor     %r1,%r30,%r1
@@ -147,6 +153,7 @@ linux_gateway_entry:
         */
 
        mtsp    %r0,%sr7                        /* get kernel space into sr7 */
+       ssm     PSW_SM_I, %r0                   /* enable interrupts */
        STREGM  %r1,FRAME_SIZE(%r30)            /* save r1 (usp) here for now */
        mfctl   %cr30,%r1                       /* get task ptr in %r1 */
        LDREG   TI_TASK(%r1),%r1
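
The rsm/ssm bracket added above is an interrupts-off critical section written in assembly; the same shape in kernel C terms would be roughly the following (a loose analogy with hypothetical names, since the real sequence runs before a usable kernel stack exists):

unsigned long flags;

local_irq_save(flags);          /* rsm PSW_SM_I: no interrupt can run */
stashed_space = user_space_id;  /* the copy that must not be clobbered */
enter_kernel_space();           /* hypothetical */
local_irq_restore(flags);       /* ssm PSW_SM_I */
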
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 737c0d0b53ac43dcdc41f09b0a3177cf8b4e389b..b38fd081b22235500b4b43cad1e11da9ea294e91 100644 (file)
@@ -376,7 +376,7 @@ static int fpr_get(struct task_struct *target, const struct user_regset *regset,
 
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
                                   &target->thread.fp_state, 0, -1);
@@ -404,7 +404,7 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
        return 0;
 #else
        BUILD_BUG_ON(offsetof(struct thread_fp_state, fpscr) !=
-                    offsetof(struct thread_fp_state, fpr[32][0]));
+                    offsetof(struct thread_fp_state, fpr[32]));
 
        return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &target->thread.fp_state, 0, -1);
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index dbe64f27280e34138dc5163b587579db35d3c768..7402eb4b509d3e601df3a254557fc134eaa1cc43 100644 (file)
@@ -339,7 +339,7 @@ do {                                                                        \
 #define __get_user_asm_u64(x, ptr, retval, errret) \
         __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
 #define __get_user_asm_ex_u64(x, ptr) \
-        __get_user_asm_ex(x, ptr, "q", "", "=r")
+        __get_user_asm_ex(x, ptr, "q", "", "=&r")
 #endif
 
 #define __get_user_size(x, ptr, size, retval, errret)                  \
@@ -386,13 +386,13 @@ do {                                                                      \
        __chk_user_ptr(ptr);                                            \
        switch (size) {                                                 \
        case 1:                                                         \
-               __get_user_asm_ex(x, ptr, "b", "b", "=q");              \
+               __get_user_asm_ex(x, ptr, "b", "b", "=&q");             \
                break;                                                  \
        case 2:                                                         \
-               __get_user_asm_ex(x, ptr, "w", "w", "=r");              \
+               __get_user_asm_ex(x, ptr, "w", "w", "=&r");             \
                break;                                                  \
        case 4:                                                         \
-               __get_user_asm_ex(x, ptr, "l", "k", "=r");              \
+               __get_user_asm_ex(x, ptr, "l", "k", "=&r");             \
                break;                                                  \
        case 8:                                                         \
                __get_user_asm_ex_u64(x, ptr);                          \
@@ -406,7 +406,7 @@ do {                                                                        \
        asm volatile("1:        mov"itype" %1,%"rtype"0\n"              \
                     "2:\n"                                             \
                     _ASM_EXTABLE_EX(1b, 2b)                            \
-                    : ltype(x) : "m" (__m(addr)))
+                    : ltype(x) : "m" (__m(addr)), "0" (0))
 
 #define __put_user_nocheck(x, ptr, size)                       \
 ({                                                             \
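
The constraint changes above are about register allocation in inline asm: "=&r" marks the output as early-clobbered, so it cannot share a register with an input that is still needed, and the extra "0" (0) input pre-loads the output register with zero so a faulting access leaves a defined value behind. The early-clobber failure mode in a minimal standalone example (x86 AT&T syntax, not the kernel macro itself):

/* out is written before b is consumed; with plain "=r" the compiler
 * may allocate out and b to the same register, and the subtraction
 * would then read garbage. "=&r" forbids that sharing. */
int twice_minus(int a, int b)
{
        int out;

        asm("movl %1, %0\n\t"
            "addl %0, %0\n\t"
            "subl %2, %0"
            : "=&r" (out)
            : "r" (a), "r" (b));
        return out;             /* 2*a - b */
}
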
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b9b09fec173bf2fedb0b9ee122b3c4668eb6e471..5fa652c16a50beb890662a56be394b6beb46d1b2 100644 (file)
@@ -5033,7 +5033,7 @@ done_prefixes:
        /* Decode and fetch the destination operand: register or memory. */
        rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
-       if (ctxt->rip_relative)
+       if (ctxt->rip_relative && likely(ctxt->memopp))
                ctxt->memopp->addr.mem.ea = address_mask(ctxt,
                                        ctxt->memopp->addr.mem.ea + ctxt->_eip);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d7cb9577fa31fbe0f06e305933c0caf6c9645eb0..685ef431a41d1df9ae19fb59617ac3a956c284ef 100644 (file)
@@ -7252,10 +7252,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
+       void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+
        kvmclock_reset(vcpu);
 
-       free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
        kvm_x86_ops->vcpu_free(vcpu);
+       free_cpumask_var(wbinvd_dirty_mask);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
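
The reordering above is a use-after-free fix: vcpu_free() releases the structure that contains wbinvd_dirty_mask, so the pointer must be read out first and freed afterwards. The same container-versus-member ordering rule in a generic sketch (hypothetical names):

#include <stdlib.h>

struct box {
        void *part;
};

extern void backend_free(struct box *b);        /* hypothetical; frees *b */

void box_destroy(struct box *b)
{
        void *part = b->part;   /* read the member first ... */

        backend_free(b);        /* ... because this frees the container */
        free(part);             /* the saved pointer is still valid */
}
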
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index cb5e266a8bf752297eaf6fc0d822c9398239fadf..1e56ff58345982b99185db7f25d598f46875c300 100644 (file)
@@ -1113,7 +1113,7 @@ static void __init xen_cleanhighmap(unsigned long vaddr,
 
        /* NOTE: The loop is more greedy than the cleanup_highmap variant.
         * We include the PMD passed in on _both_ boundaries. */
-       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PAGE_SIZE));
+       for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                        pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
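
The bound fix above is pointer arithmetic: level2_kernel_pgt points at pmd_t entries, so adding PAGE_SIZE advances PAGE_SIZE entries (eight pages' worth with 8-byte entries), while PTRS_PER_PMD stops exactly at the end of the one-page table. In standalone form:

#include <stdint.h>

typedef uint64_t pmd_t;                 /* stand-in for the kernel type */
#define PAGE_SIZE    4096UL
#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))        /* 512 entries */

/* Pointer arithmetic counts elements, not bytes: pgt + PAGE_SIZE lands
 * 4096 entries past the table; pgt + PTRS_PER_PMD is its true end. */
static pmd_t *table_end(pmd_t *pgt)
{
        return pgt + PTRS_PER_PMD;
}
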
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index bcf0a9420619fa34a1dd7c17088b846693ae197d..363db309ef79d54322628720e0c0e3e370575c60 100644 (file)
@@ -1003,7 +1003,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 
 
 static struct binder_ref *binder_get_ref(struct binder_proc *proc,
-                                        uint32_t desc, bool need_strong_ref)
+                                        u32 desc, bool need_strong_ref)
 {
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;
@@ -1290,8 +1290,10 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle,
-                                               fp->type == BINDER_TYPE_HANDLE);
+                       struct binder_ref *ref;
+
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
 
                        if (ref == NULL) {
                                pr_err("transaction release %d bad handle %d\n",
@@ -1597,8 +1599,10 @@ static void binder_transaction(struct binder_proc *proc,
                } break;
                case BINDER_TYPE_HANDLE:
                case BINDER_TYPE_WEAK_HANDLE: {
-                       struct binder_ref *ref = binder_get_ref(proc, fp->handle,
-                                               fp->type == BINDER_TYPE_HANDLE);
+                       struct binder_ref *ref;
+
+                       ref = binder_get_ref(proc, fp->handle,
+                                            fp->type == BINDER_TYPE_HANDLE);
 
                        if (ref == NULL) {
                                binder_user_error("%d:%d got transaction with invalid handle, %d\n",
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index d2406fe2553396a807a169ddd93b656a4876a537..090183f812beb18dee9b88bbeff7da41801dcf40 100644 (file)
@@ -1533,19 +1533,29 @@ static void remove_port_data(struct port *port)
        spin_lock_irq(&port->inbuf_lock);
        /* Remove unused data this port might have received. */
        discard_port_data(port);
+       spin_unlock_irq(&port->inbuf_lock);
 
        /* Remove buffers we queued up for the Host to send us data in. */
-       while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
-               free_buf(buf, true);
-       spin_unlock_irq(&port->inbuf_lock);
+       do {
+               spin_lock_irq(&port->inbuf_lock);
+               buf = virtqueue_detach_unused_buf(port->in_vq);
+               spin_unlock_irq(&port->inbuf_lock);
+               if (buf)
+                       free_buf(buf, true);
+       } while (buf);
 
        spin_lock_irq(&port->outvq_lock);
        reclaim_consumed_buffers(port);
+       spin_unlock_irq(&port->outvq_lock);
 
        /* Free pending buffers from the out-queue. */
-       while ((buf = virtqueue_detach_unused_buf(port->out_vq)))
-               free_buf(buf, true);
-       spin_unlock_irq(&port->outvq_lock);
+       do {
+               spin_lock_irq(&port->outvq_lock);
+               buf = virtqueue_detach_unused_buf(port->out_vq);
+               spin_unlock_irq(&port->outvq_lock);
+               if (buf)
+                       free_buf(buf, true);
+       } while (buf);
 }
 
 /*
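
The loop rewrites above narrow the lock scope because free_buf() may sleep: each buffer is detached under the spinlock, then released with the lock dropped. The detach-then-free idiom reduced to its skeleton (hypothetical helpers; a sketch of the locking shape only):

for (;;) {
        spin_lock_irq(&q->lock);
        item = detach_one(q);           /* hypothetical */
        spin_unlock_irq(&q->lock);
        if (!item)
                break;
        free_one(item);                 /* hypothetical, may sleep */
}
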
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index f4ea80d602f73bd6bc5adac2a32bbbe2b58de716..b9d2f76a0cf7a036a2794436db34887bf75590fe 100644 (file)
@@ -73,13 +73,13 @@ struct rfc2734_header {
 
 #define fwnet_get_hdr_lf(h)            (((h)->w0 & 0xc0000000) >> 30)
 #define fwnet_get_hdr_ether_type(h)    (((h)->w0 & 0x0000ffff))
-#define fwnet_get_hdr_dg_size(h)       (((h)->w0 & 0x0fff0000) >> 16)
+#define fwnet_get_hdr_dg_size(h)       ((((h)->w0 & 0x0fff0000) >> 16) + 1)
 #define fwnet_get_hdr_fg_off(h)                (((h)->w0 & 0x00000fff))
 #define fwnet_get_hdr_dgl(h)           (((h)->w1 & 0xffff0000) >> 16)
 
-#define fwnet_set_hdr_lf(lf)           ((lf)  << 30)
+#define fwnet_set_hdr_lf(lf)           ((lf) << 30)
 #define fwnet_set_hdr_ether_type(et)   (et)
-#define fwnet_set_hdr_dg_size(dgs)     ((dgs) << 16)
+#define fwnet_set_hdr_dg_size(dgs)     (((dgs) - 1) << 16)
 #define fwnet_set_hdr_fg_off(fgo)      (fgo)
 
 #define fwnet_set_hdr_dgl(dgl)         ((dgl) << 16)
@@ -578,6 +578,9 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
        int retval;
        u16 ether_type;
 
+       if (len <= RFC2374_UNFRAG_HDR_SIZE)
+               return 0;
+
        hdr.w0 = be32_to_cpu(buf[0]);
        lf = fwnet_get_hdr_lf(&hdr);
        if (lf == RFC2374_HDR_UNFRAG) {
@@ -602,7 +605,12 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                return fwnet_finish_incoming_packet(net, skb, source_node_id,
                                                    is_broadcast, ether_type);
        }
+
        /* A datagram fragment has been received, now the fun begins. */
+
+       if (len <= RFC2374_FRAG_HDR_SIZE)
+               return 0;
+
        hdr.w1 = ntohl(buf[1]);
        buf += 2;
        len -= RFC2374_FRAG_HDR_SIZE;
@@ -614,7 +622,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
                fg_off = fwnet_get_hdr_fg_off(&hdr);
        }
        datagram_label = fwnet_get_hdr_dgl(&hdr);
-       dg_size = fwnet_get_hdr_dg_size(&hdr); /* ??? + 1 */
+       dg_size = fwnet_get_hdr_dg_size(&hdr);
+
+       if (fg_off + len > dg_size)
+               return 0;
 
        spin_lock_irqsave(&dev->lock, flags);
 
@@ -722,6 +733,22 @@ static void fwnet_receive_packet(struct fw_card *card, struct fw_request *r,
        fw_send_response(card, r, rcode);
 }
 
+static int gasp_source_id(__be32 *p)
+{
+       return be32_to_cpu(p[0]) >> 16;
+}
+
+static u32 gasp_specifier_id(__be32 *p)
+{
+       return (be32_to_cpu(p[0]) & 0xffff) << 8 |
+              (be32_to_cpu(p[1]) & 0xff000000) >> 24;
+}
+
+static u32 gasp_version(__be32 *p)
+{
+       return be32_to_cpu(p[1]) & 0xffffff;
+}
+
 static void fwnet_receive_broadcast(struct fw_iso_context *context,
                u32 cycle, size_t header_length, void *header, void *data)
 {
@@ -731,9 +758,6 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
        __be32 *buf_ptr;
        int retval;
        u32 length;
-       u16 source_node_id;
-       u32 specifier_id;
-       u32 ver;
        unsigned long offset;
        unsigned long flags;
 
@@ -750,22 +774,17 @@ static void fwnet_receive_broadcast(struct fw_iso_context *context,
 
        spin_unlock_irqrestore(&dev->lock, flags);
 
-       specifier_id =    (be32_to_cpu(buf_ptr[0]) & 0xffff) << 8
-                       | (be32_to_cpu(buf_ptr[1]) & 0xff000000) >> 24;
-       ver = be32_to_cpu(buf_ptr[1]) & 0xffffff;
-       source_node_id = be32_to_cpu(buf_ptr[0]) >> 16;
-
-       if (specifier_id == IANA_SPECIFIER_ID &&
-           (ver == RFC2734_SW_VERSION
+       if (length > IEEE1394_GASP_HDR_SIZE &&
+           gasp_specifier_id(buf_ptr) == IANA_SPECIFIER_ID &&
+           (gasp_version(buf_ptr) == RFC2734_SW_VERSION
 #if IS_ENABLED(CONFIG_IPV6)
-            || ver == RFC3146_SW_VERSION
+            || gasp_version(buf_ptr) == RFC3146_SW_VERSION
 #endif
-           )) {
-               buf_ptr += 2;
-               length -= IEEE1394_GASP_HDR_SIZE;
-               fwnet_incoming_packet(dev, buf_ptr, length, source_node_id,
+           ))
+               fwnet_incoming_packet(dev, buf_ptr + 2,
+                                     length - IEEE1394_GASP_HDR_SIZE,
+                                     gasp_source_id(buf_ptr),
                                      context->card->generation, true);
-       }
 
        packet.payload_length = dev->rcv_buffer_size;
        packet.interrupt = 1;
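
Two themes run through the fwnet changes above: the on-wire dg_size field is biased by one (it stores length - 1), which the accessor macros now hide so the old "??? + 1" confusion cannot recur, and every length derived from packet contents is bounds-checked before use. The biased-field handling as a sketch:

#include <stdint.h>

/* RFC 2734 stores (datagram size - 1) in w0 bits [27:16]; keeping the
 * +1/-1 inside the accessors lets callers work in real byte counts. */
static unsigned get_dg_size(uint32_t w0)
{
        return ((w0 & 0x0fff0000) >> 16) + 1;
}

static uint32_t set_dg_size(uint32_t w0, unsigned dg_size)
{
        return (w0 & ~UINT32_C(0x0fff0000)) |
               (((dg_size - 1) << 16) & 0x0fff0000);
}
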
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cf478fe6b335bc2cde8da7ae7f6695f9576f38ef..49a3a1185bb607ed45297fe4dea5ff2c6344f37f 100644 (file)
@@ -173,6 +173,9 @@ config QCOM_SCM_64
        def_bool y
        depends on QCOM_SCM && ARM64
 
+config HAVE_ARM_SMCCC
+       bool
+
 source "drivers/firmware/broadcom/Kconfig"
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index d24f35d74b27079afeae5c08d601c3bcd899dee6..11bfee8b79a9f65418bcbe53dfc708edb220e69d 100644 (file)
@@ -13,6 +13,8 @@
 
 #define pr_fmt(fmt) "psci: " fmt
 
+#include <linux/arm-smccc.h>
+#include <linux/cpuidle.h>
 #include <linux/errno.h>
 #include <linux/linkage.h>
 #include <linux/of.h>
 #include <linux/printk.h>
 #include <linux/psci.h>
 #include <linux/reboot.h>
+#include <linux/slab.h>
 #include <linux/suspend.h>
 
 #include <uapi/linux/psci.h>
 
+#include <asm/cpuidle.h>
 #include <asm/cputype.h>
 #include <asm/system_misc.h>
 #include <asm/smp_plat.h>
@@ -58,8 +62,6 @@ struct psci_operations psci_ops;
 
 typedef unsigned long (psci_fn)(unsigned long, unsigned long,
                                unsigned long, unsigned long);
-asmlinkage psci_fn __invoke_psci_fn_hvc;
-asmlinkage psci_fn __invoke_psci_fn_smc;
 static psci_fn *invoke_psci_fn;
 
 enum psci_function {
@@ -107,6 +109,26 @@ bool psci_power_state_is_valid(u32 state)
        return !(state & ~valid_mask);
 }
 
+static unsigned long __invoke_psci_fn_hvc(unsigned long function_id,
+                       unsigned long arg0, unsigned long arg1,
+                       unsigned long arg2)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_hvc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+       return res.a0;
+}
+
+static unsigned long __invoke_psci_fn_smc(unsigned long function_id,
+                       unsigned long arg0, unsigned long arg1,
+                       unsigned long arg2)
+{
+       struct arm_smccc_res res;
+
+       arm_smccc_smc(function_id, arg0, arg1, arg2, 0, 0, 0, 0, &res);
+       return res.a0;
+}
+
 static int psci_to_linux_errno(int errno)
 {
        switch (errno) {
@@ -225,6 +247,123 @@ static int __init psci_features(u32 psci_func_id)
                              psci_func_id, 0, 0);
 }
 
+#ifdef CONFIG_CPU_IDLE
+static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);
+
+static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu)
+{
+       int i, ret, count = 0;
+       u32 *psci_states;
+       struct device_node *state_node;
+
+       /*
+        * If the PSCI cpu_suspend function hook has not been
+        * initialized, idle states must not be enabled, so bail out.
+        */
+       if (!psci_ops.cpu_suspend)
+               return -EOPNOTSUPP;
+
+       /* Count idle states */
+       while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
+                                             count))) {
+               count++;
+               of_node_put(state_node);
+       }
+
+       if (!count)
+               return -ENODEV;
+
+       psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
+       if (!psci_states)
+               return -ENOMEM;
+
+       for (i = 0; i < count; i++) {
+               u32 state;
+
+               state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
+
+               ret = of_property_read_u32(state_node,
+                                          "arm,psci-suspend-param",
+                                          &state);
+               if (ret) {
+                       pr_warn(" * %s missing arm,psci-suspend-param property\n",
+                               state_node->full_name);
+                       of_node_put(state_node);
+                       goto free_mem;
+               }
+
+               of_node_put(state_node);
+               pr_debug("psci-power-state %#x index %d\n", state, i);
+               if (!psci_power_state_is_valid(state)) {
+                       pr_warn("Invalid PSCI power state %#x\n", state);
+                       ret = -EINVAL;
+                       goto free_mem;
+               }
+               psci_states[i] = state;
+       }
+       /* Idle states parsed correctly, initialize per-cpu pointer */
+       per_cpu(psci_power_state, cpu) = psci_states;
+       return 0;
+
+free_mem:
+       kfree(psci_states);
+       return ret;
+}
+
+int psci_cpu_init_idle(unsigned int cpu)
+{
+       struct device_node *cpu_node;
+       int ret;
+
+       cpu_node = of_get_cpu_node(cpu, NULL);
+       if (!cpu_node)
+               return -ENODEV;
+
+       ret = psci_dt_cpu_init_idle(cpu_node, cpu);
+
+       of_node_put(cpu_node);
+
+       return ret;
+}
+
+static int psci_suspend_finisher(unsigned long index)
+{
+       u32 *state = __this_cpu_read(psci_power_state);
+
+       return psci_ops.cpu_suspend(state[index - 1],
+                                   virt_to_phys(cpu_resume));
+}
+
+int psci_cpu_suspend_enter(unsigned long index)
+{
+       int ret;
+       u32 *state = __this_cpu_read(psci_power_state);
+       /*
+        * Idle state index 0 corresponds to wfi; this function should
+        * never be called with it from the cpu_suspend operations.
+        */
+       if (WARN_ON_ONCE(!index))
+               return -EINVAL;
+
+       if (!psci_power_state_loses_context(state[index - 1]))
+               ret = psci_ops.cpu_suspend(state[index - 1], 0);
+       else
+               ret = cpu_suspend(index, psci_suspend_finisher);
+
+       return ret;
+}
+
+/* ARM specific CPU idle operations */
+#ifdef CONFIG_ARM
+static struct cpuidle_ops psci_cpuidle_ops __initdata = {
+       .suspend = psci_cpu_suspend_enter,
+       .init = psci_dt_cpu_init_idle,
+};
+
+CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+#endif
+#endif
+
 static int psci_system_suspend(unsigned long unused)
 {
        return invoke_psci_fn(PSCI_FN_NATIVE(1_0, SYSTEM_SUSPEND),
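
The cpuidle glue added above splits state entry by type: shallow retention states call the firmware directly, while states that lose context go through cpu_suspend() so the kernel saves and restores CPU state around the PSCI call. A sketch of the expected caller, assuming the usual arm64 cpuidle conventions (the wrapper below is hypothetical):

    /* Hypothetical caller: state 0 is architectural WFI and never
     * reaches PSCI; deeper states take psci_cpu_suspend_enter(). */
    static int example_enter_idle_state(int index)
    {
            if (index == 0) {
                    cpu_do_idle();  /* plain wfi, no firmware call */
                    return 0;
            }
            return psci_cpu_suspend_enter(index);
    }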
index 21aacc1f45c1fd7afded7f6088c8a365e9005224..7f85c2c1d68156a4d91bd4b18332df6f1886fb7c 100644 (file)
@@ -265,15 +265,27 @@ static int amdgpu_atombios_dp_get_dp_link_config(struct drm_connector *connector
        unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
        unsigned lane_num, i, max_pix_clock;
 
-       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-                       max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+       if (amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+           ENCODER_OBJECT_ID_NUTMEG) {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       max_pix_clock = (lane_num * 270000 * 8) / bpp;
                        if (max_pix_clock >= pix_clock) {
                                *dp_lanes = lane_num;
-                               *dp_rate = link_rates[i];
+                               *dp_rate = 270000;
                                return 0;
                        }
                }
+       } else {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                               max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                               if (max_pix_clock >= pix_clock) {
+                                       *dp_lanes = lane_num;
+                                       *dp_rate = link_rates[i];
+                                       return 0;
+                               }
+                       }
+               }
        }
 
        return -EINVAL;
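
Both branches rely on the same payload-bandwidth test; the rewrite changes only the search order (for ordinary sinks, the lowest link rate that fits is now preferred before adding lanes) and pins NUTMEG DP-to-VGA bridges to the single 270000 kHz link rate they support. The shared check, restated as a hypothetical helper:

    /* Hypothetical restatement of the test above: link_rate is the
     * per-lane link clock in kHz, each clock carries 8 data bits per
     * lane, bpp is bits per pixel. */
    static unsigned example_dp_max_pix_clock(unsigned lane_num,
                                             unsigned link_rate,
                                             unsigned bpp)
    {
            return (lane_num * link_rate * 8) / bpp;
    }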
index 2485fb652716852f601a342596648af78d1a273b..7cb2815e815e156aa9db7c89f164af42e96aef35 100644 (file)
@@ -909,6 +909,7 @@ static void drm_dp_destroy_port(struct kref *kref)
                /* no need to clean up vcpi
                 * as if we have no connector we never setup a vcpi */
                drm_dp_port_teardown_pdt(port, port->pdt);
+               port->pdt = DP_PEER_DEVICE_NONE;
        }
        kfree(port);
 }
@@ -1154,7 +1155,9 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
                        drm_dp_put_port(port);
                        goto out;
                }
-               if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+               if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
+                    port->pdt == DP_PEER_DEVICE_SST_SINK) &&
+                   port->port_num >= DP_MST_LOGICAL_PORT_0) {
                        port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
                        drm_mode_connector_set_tile_property(port->connector);
                }
@@ -2872,6 +2875,7 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
                mgr->cbs->destroy_connector(mgr, port->connector);
 
                drm_dp_port_teardown_pdt(port, port->pdt);
+               port->pdt = DP_PEER_DEVICE_NONE;
 
                if (!port->input && port->vcpi.vcpi > 0) {
                        drm_dp_mst_reset_vcpi_slots(mgr, port);
index 7f55ba6771c6b94e5f45bee6bdec078c27c74f5b..011211e4167d41dce17d56e8baee0b168b37e7b6 100644 (file)
@@ -101,7 +101,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
        return 0;
 
 err:
-       list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
+       list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
                if (subdrv->close)
                        subdrv->close(dev, subdrv->dev, file);
        }
index 44ee72e04df9e953bafe64cdfdadf2f01c1f9bce..b5760851195cfbcb0cfca10a88a154b38436c0b6 100644 (file)
@@ -315,15 +315,27 @@ int radeon_dp_get_dp_link_config(struct drm_connector *connector,
        unsigned max_lane_num = drm_dp_max_lane_count(dpcd);
        unsigned lane_num, i, max_pix_clock;
 
-       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
-               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
-                       max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+       if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
+           ENCODER_OBJECT_ID_NUTMEG) {
+               for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                       max_pix_clock = (lane_num * 270000 * 8) / bpp;
                        if (max_pix_clock >= pix_clock) {
                                *dp_lanes = lane_num;
-                               *dp_rate = link_rates[i];
+                               *dp_rate = 270000;
                                return 0;
                        }
                }
+       } else {
+               for (i = 0; i < ARRAY_SIZE(link_rates) && link_rates[i] <= max_link_rate; i++) {
+                       for (lane_num = 1; lane_num <= max_lane_num; lane_num <<= 1) {
+                               max_pix_clock = (lane_num * link_rates[i] * 8) / bpp;
+                               if (max_pix_clock >= pix_clock) {
+                                       *dp_lanes = lane_num;
+                                       *dp_rate = link_rates[i];
+                                       return 0;
+                               }
+                       }
+               }
        }
 
        return -EINVAL;
index 158872eb78e41ae94276b1abdad3fdce76ef463f..a3a321208fd88aaad3b5e7db27765e0130661de8 100644 (file)
@@ -1396,9 +1396,7 @@ static void cayman_pcie_gart_fini(struct radeon_device *rdev)
 void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
                              int ring, u32 cp_int_cntl)
 {
-       u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
-
-       WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+       WREG32(SRBM_GFX_CNTL, RINGID(ring));
        WREG32(CP_INT_CNTL, cp_int_cntl);
 }
 
index db64e0062689b076842b9710c33e4660c96e9985..3b0c229d7dcd23ffb7184ad79e2ebaa8f001cca9 100644 (file)
@@ -105,7 +105,7 @@ radeon_dp_aux_transfer_native(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg
 
        tmp &= AUX_HPD_SEL(0x7);
        tmp |= AUX_HPD_SEL(chan->rec.hpd);
-       tmp |= AUX_EN | AUX_LS_READ_EN | AUX_HPD_DISCON(0x1);
+       tmp |= AUX_EN | AUX_LS_READ_EN;
 
        WREG32(AUX_CONTROL + aux_offset[instance], tmp);
 
index 472e0771832eef0ab248f664ccf16d3730fcc0e4..10191b935937c55ea125ace1a55d344bbe4bf146 100644 (file)
@@ -2999,6 +2999,49 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
        int i;
        struct si_dpm_quirk *p = si_dpm_quirk_list;
 
+       /* limit all SI kickers */
+       if (rdev->family == CHIP_PITCAIRN) {
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->device == 0x6810) ||
+                   (rdev->pdev->device == 0x6811) ||
+                   (rdev->pdev->device == 0x6816) ||
+                   (rdev->pdev->device == 0x6817) ||
+                   (rdev->pdev->device == 0x6806))
+                       max_mclk = 120000;
+       } else if (rdev->family == CHIP_VERDE) {
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0x87) ||
+                   (rdev->pdev->device == 0x6820) ||
+                   (rdev->pdev->device == 0x6821) ||
+                   (rdev->pdev->device == 0x6822) ||
+                   (rdev->pdev->device == 0x6823) ||
+                   (rdev->pdev->device == 0x682A) ||
+                   (rdev->pdev->device == 0x682B)) {
+                       max_sclk = 75000;
+                       max_mclk = 80000;
+               }
+       } else if (rdev->family == CHIP_OLAND) {
+               if ((rdev->pdev->revision == 0xC7) ||
+                   (rdev->pdev->revision == 0x80) ||
+                   (rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->device == 0x6604) ||
+                   (rdev->pdev->device == 0x6605)) {
+                       max_sclk = 75000;
+                       max_mclk = 80000;
+               }
+       } else if (rdev->family == CHIP_HAINAN) {
+               if ((rdev->pdev->revision == 0x81) ||
+                   (rdev->pdev->revision == 0x83) ||
+                   (rdev->pdev->revision == 0xC3) ||
+                   (rdev->pdev->device == 0x6664) ||
+                   (rdev->pdev->device == 0x6665) ||
+                   (rdev->pdev->device == 0x6667)) {
+                       max_sclk = 75000;
+                       max_mclk = 80000;
+               }
+       }
        /* Apply dpm quirks */
        while (p && p->chip_device != 0) {
                if (rdev->pdev->vendor == p->chip_vendor &&
@@ -3011,16 +3054,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
                }
                ++p;
        }
-       /* limit mclk on all R7 370 parts for stability */
-       if (rdev->pdev->device == 0x6811 &&
-           rdev->pdev->revision == 0x81)
-               max_mclk = 120000;
-       /* limit sclk/mclk on Jet parts for stability */
-       if (rdev->pdev->device == 0x6665 &&
-           rdev->pdev->revision == 0xc3) {
-               max_sclk = 75000;
-               max_mclk = 80000;
-       }
 
        if (rps->vce_active) {
                rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
index 909ab0176ef2313a382309c7319c9920a6995cf1..e370306241658bf25976069768bc807334cdc0d0 100644 (file)
 #define USB_DEVICE_ID_ATEN_4PORTKVM    0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC   0x2208
 #define USB_DEVICE_ID_ATEN_CS682       0x2213
+#define USB_DEVICE_ID_ATEN_CS692       0x8021
 
 #define USB_VENDOR_ID_ATMEL            0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
index dc8e6adf95a4357ffaf9121a2da8ddc9d2700223..6ca6ab00fa93ffdb57f288bd130ce13f2f89d040 100644 (file)
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS692, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
index 7994ec2e4151b085bbcb596d8ddbd4d8765b939e..41f5896224bd4723db7f87179c7466df423238a7 100644 (file)
@@ -283,10 +283,14 @@ static void heartbeat_onchannelcallback(void *context)
        u8 *hbeat_txf_buf = util_heartbeat.recv_buffer;
        struct icmsg_negotiate *negop = NULL;
 
-       vmbus_recvpacket(channel, hbeat_txf_buf,
-                        PAGE_SIZE, &recvlen, &requestid);
+       while (1) {
+               vmbus_recvpacket(channel, hbeat_txf_buf,
+                                PAGE_SIZE, &recvlen, &requestid);
+
+               if (!recvlen)
+                       break;
 
-       if (recvlen > 0) {
                icmsghdrp = (struct icmsg_hdr *)&hbeat_txf_buf[
                                sizeof(struct vmbuspipe_hdr)];
 
index 4233f5695352fdc951c54c54dc6fbb9de926d26e..3c38029e3fe976cb4ce854dd3d811a849d94aa66 100644 (file)
@@ -105,7 +105,7 @@ struct slimpro_i2c_dev {
        struct mbox_chan *mbox_chan;
        struct mbox_client mbox_client;
        struct completion rd_complete;
-       u8 dma_buffer[I2C_SMBUS_BLOCK_MAX];
+       u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */
        u32 *resp_msg;
 };
 
index ba8eb087f224655d1853c1192371759cf4d5e9f0..d625167357cc828c6be92ba89676404b0d15db75 100644 (file)
@@ -1876,6 +1876,7 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
        /* add the driver to the list of i2c drivers in the driver core */
        driver->driver.owner = owner;
        driver->driver.bus = &i2c_bus_type;
+       INIT_LIST_HEAD(&driver->clients);
 
        /* When registration returns, the driver core
         * will have called probe() for all matching-but-unbound devices.
@@ -1886,7 +1887,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
 
        pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
-       INIT_LIST_HEAD(&driver->clients);
        /* Walk the adapters that are already present */
        i2c_for_each_dev(driver, __process_new_driver);
 
index f4bfb4b2d50a356336fdab08124f52085a82f24f..073246c7d1634eef411c1bf07d7f72ab47cad369 100644 (file)
@@ -877,6 +877,13 @@ static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
                },
        },
+       {
+               /* Schenker XMG C504 - Elantech touchpad */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "XMG"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "C504"),
+               },
+       },
        { }
 };
 
index f2a363a89629902350649cb818f6159ecda1afd9..115bd3846c3f060e431e7410a6e2b0a1fd162c47 100644 (file)
@@ -1288,6 +1288,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
 
                        dm_bio_restore(bd, bio);
                        bio_record->details.bi_bdev = NULL;
+                       bio->bi_error = 0;
 
                        queue_bio(ms, bio, rw);
                        return DM_ENDIO_INCOMPLETE;
index 84aa8b1d048031d26f76d11972e8ba1e3449dfde..3384a3eef917bc3a98f4a1598e5629f705c309e2 100644 (file)
@@ -2260,8 +2260,6 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->bs)
                bioset_free(md->bs);
 
-       cleanup_srcu_struct(&md->io_barrier);
-
        if (md->disk) {
                spin_lock(&_minor_lock);
                md->disk->private_data = NULL;
@@ -2273,6 +2271,8 @@ static void cleanup_mapped_device(struct mapped_device *md)
        if (md->queue)
                blk_cleanup_queue(md->queue);
 
+       cleanup_srcu_struct(&md->io_barrier);
+
        if (md->bdev) {
                bdput(md->bdev);
                md->bdev = NULL;
index 222367cc8c815ba9214e9e9c25a8482f7967a772..524660510599cb43daa4b3fbb2f34b60da7b39ef 100644 (file)
@@ -352,17 +352,27 @@ int genwqe_alloc_sync_sgl(struct genwqe_dev *cd, struct genwqe_sgl *sgl,
                if (copy_from_user(sgl->lpage, user_addr + user_size -
                                   sgl->lpage_size, sgl->lpage_size)) {
                        rc = -EFAULT;
-                       goto err_out1;
+                       goto err_out2;
                }
        }
        return 0;
 
+ err_out2:
+       __genwqe_free_consistent(cd, PAGE_SIZE, sgl->lpage,
+                                sgl->lpage_dma_addr);
+       sgl->lpage = NULL;
+       sgl->lpage_dma_addr = 0;
  err_out1:
        __genwqe_free_consistent(cd, PAGE_SIZE, sgl->fpage,
                                 sgl->fpage_dma_addr);
+       sgl->fpage = NULL;
+       sgl->fpage_dma_addr = 0;
  err_out:
        __genwqe_free_consistent(cd, sgl->sgl_size, sgl->sgl,
                                 sgl->sgl_dma_addr);
+       sgl->sgl = NULL;
+       sgl->sgl_dma_addr = 0;
+       sgl->sgl_size = 0;
        return -ENOMEM;
 }
 
index bae680c648ffc9dcd188eefac028028f4f0a2d2f..396d75d9fb11cce88647e8343a7f595a465026c5 100644 (file)
@@ -972,11 +972,13 @@ static bool mei_txe_check_and_ack_intrs(struct mei_device *dev, bool do_ack)
        hisr = mei_txe_br_reg_read(hw, HISR_REG);
 
        aliveness = mei_txe_aliveness_get(dev);
-       if (hhisr & IPC_HHIER_SEC && aliveness)
+       if (hhisr & IPC_HHIER_SEC && aliveness) {
                ipc_isr = mei_txe_sec_reg_read_silent(hw,
                                SEC_IPC_HOST_INT_STATUS_REG);
-       else
+       } else {
                ipc_isr = 0;
+               hhisr &= ~IPC_HHIER_SEC;
+       }
 
        generated = generated ||
                (hisr & HISR_INT_STS_MSK) ||
index 81bdeeb05a4d23426ecf39119a54d544eb8342f9..7dcfb1d5034fba36f02405807387a049ba37dd28 100644 (file)
@@ -59,12 +59,13 @@ int dw_mci_pltfm_register(struct platform_device *pdev,
        host->pdata = pdev->dev.platform_data;
 
        regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       /* Get registers' physical base address */
-       host->phy_regs = regs->start;
        host->regs = devm_ioremap_resource(&pdev->dev, regs);
        if (IS_ERR(host->regs))
                return PTR_ERR(host->regs);
 
+       /* Get registers' physical base address */
+       host->phy_regs = regs->start;
+
        platform_set_drvdata(pdev, host);
        return dw_mci_probe(host);
 }
index 990898b9dc7289f881ecf357573bbc1d79bbf6ad..bba7dd1b5ebf19f03be99c1aeb0a6f291e7fbae2 100644 (file)
@@ -513,10 +513,11 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                        unsigned long long ec = be64_to_cpu(ech->ec);
                        unmap_peb(ai, pnum);
                        dbg_bld("Adding PEB to free: %i", pnum);
+
                        if (err == UBI_IO_FF_BITFLIPS)
-                               add_aeb(ai, free, pnum, ec, 1);
-                       else
-                               add_aeb(ai, free, pnum, ec, 0);
+                               scrub = 1;
+
+                       add_aeb(ai, free, pnum, ec, scrub);
                        continue;
                } else if (err == 0 || err == UBI_IO_BITFLIPS) {
                        dbg_bld("Found non empty PEB:%i in pool", pnum);
@@ -748,11 +749,11 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
                             fmvhdr->vol_type,
                             be32_to_cpu(fmvhdr->last_eb_bytes));
 
-               if (!av)
-                       goto fail_bad;
-               if (PTR_ERR(av) == -EINVAL) {
-                       ubi_err(ubi, "volume (ID %i) already exists",
-                               fmvhdr->vol_id);
+               if (IS_ERR(av)) {
+                       if (PTR_ERR(av) == -EEXIST)
+                               ubi_err(ubi, "volume (ID %i) already exists",
+                                       fmvhdr->vol_id);
+
                        goto fail_bad;
                }
 
index ca5ac5d6f4e6ca6d87ce029b7f4e06e8653e2bac..49056c33be74d05e19d2a60f6003b3c51fccbef0 100644 (file)
@@ -18142,14 +18142,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
 
        rtnl_lock();
 
-       /* We needn't recover from permanent error */
-       if (state == pci_channel_io_frozen)
-               tp->pcierr_recovery = true;
-
        /* We probably don't have netdev yet */
        if (!netdev || !netif_running(netdev))
                goto done;
 
+       /* We needn't recover from permanent error */
+       if (state == pci_channel_io_frozen)
+               tp->pcierr_recovery = true;
+
        tg3_phy_stop(tp);
 
        tg3_netif_stop(tp);
@@ -18246,7 +18246,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
 
        rtnl_lock();
 
-       if (!netif_running(netdev))
+       if (!netdev || !netif_running(netdev))
                goto done;
 
        tg3_full_lock(tp, 0);
index f6147ffc7fbca76f4f6f512caa9e9612a32cf00a..ab716042bdd2245d9d05d4079430f573d88fe9a2 100644 (file)
@@ -944,11 +944,11 @@ fec_restart(struct net_device *ndev)
         * enet-mac reset will reset mac address registers too,
         * so need to reconfigure it.
         */
-       if (fep->quirks & FEC_QUIRK_ENET_MAC) {
-               memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
-               writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
-               writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
-       }
+       memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+       writel((__force u32)cpu_to_be32(temp_mac[0]),
+              fep->hwp + FEC_ADDR_LOW);
+       writel((__force u32)cpu_to_be32(temp_mac[1]),
+              fep->hwp + FEC_ADDR_HIGH);
 
        /* Clear any outstanding interrupt. */
        writel(0xffffffff, fep->hwp + FEC_IEVENT);
index 8c44cf6ff7a2f16e96bd91f577a028d5dee9e754..23a0388100834cfa59223788aee289bae0c42053 100644 (file)
@@ -540,7 +540,7 @@ static inline void  smc_rcv(struct net_device *dev)
 #define smc_special_lock(lock, flags)          spin_lock_irqsave(lock, flags)
 #define smc_special_unlock(lock, flags)        spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock, flags)       (flags == flags)
+#define smc_special_trylock(lock, flags)       ((void)flags, true)
 #define smc_special_lock(lock, flags)          do { flags = 0; } while (0)
 #define smc_special_unlock(lock, flags)        do { flags = 0; } while (0)
 #endif
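
(flags == flags) referenced flags only to suppress an unused-variable warning, and modern compilers flag the self-comparison as tautological. The replacement keeps the reference but says so explicitly via the comma operator. A standalone illustration with a hypothetical name:

    /* The left operand of the comma operator is evaluated and
     * discarded, so the macro "uses" its argument yet always
     * yields true. */
    #define example_always_true(x)  ((void)(x), true)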
index 69e31e2a68fcc5782f8553eabbfe588158765d17..4827c6987ac3f87234e7b8acb64bb9d6bbd03c94 100644 (file)
@@ -440,7 +440,7 @@ static struct sk_buff **geneve_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, gh_len);
        skb_gro_postpull_rcsum(skb, gh, gh_len);
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 003780901628760e733b07b0fc7cf43e87e373f2..6fa8e165878e1d9968e2665e0e7bcd2ca0406e25 100644 (file)
@@ -593,7 +593,7 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head,
                }
        }
 
-       pp = eth_gro_receive(head, skb);
+       pp = call_gro_receive(eth_gro_receive, head, skb);
 
 out:
        skb_gro_remcsum_cleanup(skb, &grc);
index ed01c0172e4a5f8b45e0a382b789f0cd20d60987..07dd81586c52b6761a9683b30872e36842f965ce 100644 (file)
@@ -127,8 +127,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node,
        }
 
        /* Need adjust the alignment to satisfy the CMA requirement */
-       if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool"))
-               align = max(align, (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+       if (IS_ENABLED(CONFIG_CMA) && of_flat_dt_is_compatible(node, "shared-dma-pool")) {
+               unsigned long order =
+                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
+
+               align = max(align, (phys_addr_t)PAGE_SIZE << order);
+       }
 
        prop = of_get_flat_dt_prop(node, "alloc-ranges", &len);
        if (prop) {
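
The kernel's max() rejects operands of mismatched types at compile time, and MAX_ORDER - 1 (an int constant) does not necessarily match the type of pageblock_order on every configuration, so the comparison is spelled with max_t and an explicit type. The same pattern appears in the mm/cma.c hunk near the end of this section. A minimal sketch:

    /* Hypothetical helper mirroring the fixed computation: promote both
     * operands explicitly, then shift PAGE_SIZE as a phys_addr_t so the
     * result cannot truncate on 32-bit. */
    static phys_addr_t example_cma_alignment(void)
    {
            unsigned long order = max_t(unsigned long, MAX_ORDER - 1,
                                        pageblock_order);

            return (phys_addr_t)PAGE_SIZE << order;
    }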
index 7831bc6b51dddb66960f8ef97fde2da1ce33572a..ec84ff8ad1b4a59ebbd9c93ea62edb2f714460f6 100644 (file)
@@ -321,6 +321,8 @@ int pwmchip_remove(struct pwm_chip *chip)
        unsigned int i;
        int ret = 0;
 
+       pwmchip_sysfs_unexport_children(chip);
+
        mutex_lock(&pwm_lock);
 
        for (i = 0; i < chip->npwm; i++) {
index 9c90886f41234153d044d24f94a9ae1e82efbd83..c20163b929911d386a4a226dbdc528704eadd451 100644 (file)
@@ -350,6 +350,24 @@ void pwmchip_sysfs_unexport(struct pwm_chip *chip)
        }
 }
 
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+       struct device *parent;
+       unsigned int i;
+
+       parent = class_find_device(&pwm_class, NULL, chip,
+                                  pwmchip_sysfs_match);
+       if (!parent)
+               return;
+
+       for (i = 0; i < chip->npwm; i++) {
+               struct pwm_device *pwm = &chip->pwms[i];
+
+               if (test_bit(PWMF_EXPORTED, &pwm->flags))
+                       pwm_unexport_child(parent, pwm);
+       }
+}
+
 static int __init pwm_sysfs_init(void)
 {
        return class_register(&pwm_class);
index 8db9f3a5844d3948e3528570299f323db265f86d..7aa01c1960ea99f01b946c5fa2d714c99ea39247 100644 (file)
@@ -2545,18 +2545,9 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
        struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
        struct CommandControlBlock *ccb;
        int target = cmd->device->id;
-       int lun = cmd->device->lun;
-       uint8_t scsicmd = cmd->cmnd[0];
        cmd->scsi_done = done;
        cmd->host_scribble = NULL;
        cmd->result = 0;
-       if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
-               if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
-                       cmd->result = (DID_NO_CONNECT << 16);
-               }
-               cmd->scsi_done(cmd);
-               return 0;
-       }
        if (target == 16) {
                /* virtual device for iop message transfer */
                arcmsr_handle_virtual_command(acb, cmd);
index ef4ff03242ea13d825a3ba1993eb9489911d8af4..aaf7da07a358cb85a915943f1a5821572a53ef1c 100644 (file)
@@ -1923,7 +1923,7 @@ struct megasas_instance_template {
 };
 
 #define MEGASAS_IS_LOGICAL(scp)                                                \
-       (scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1
+       ((scp->device->channel < MEGASAS_MAX_PD_CHANNELS) ? 0 : 1)
 
 #define MEGASAS_DEV_INDEX(scp)                                         \
        (((scp->device->channel % 2) * MEGASAS_MAX_DEV_PER_CHANNEL) +   \
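
Without the added outer parentheses, MEGASAS_IS_LOGICAL expanded to a bare ?: expression that could absorb surrounding operators; the queue-command hunk below uses it on the right-hand side of &&, which is exactly where the old form misparses. A self-contained demonstration with hypothetical macro names:

    #define EXAMPLE_BAD(x)   (x) < 8 ? 0 : 1
    #define EXAMPLE_GOOD(x)  (((x) < 8) ? 0 : 1)

    /*
     * With flag == 0:
     *   flag && EXAMPLE_BAD(4)  expands to  flag && (4) < 8 ? 0 : 1,
     *   which parses as  ((flag && 4 < 8) ? 0 : 1)  and yields 1;
     *   flag && EXAMPLE_GOOD(4) stays  flag && 0  and yields 0.
     */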
index 278e10cd771f65eca88a87d500924e82f856de7c..17c440b9d086afa341a386c69099c05fceea8b55 100644 (file)
@@ -1688,16 +1688,13 @@ megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
                goto out_done;
        }
 
-       switch (scmd->cmnd[0]) {
-       case SYNCHRONIZE_CACHE:
-               /*
-                * FW takes care of flush cache on its own
-                * No need to send it down
-                */
+       /*
+        * FW takes care of flushing its cache on its own for a Virtual
+        * Disk, so there is no need to send SYNCHRONIZE_CACHE down for a
+        * VD. For JBOD, pass SYNCHRONIZE_CACHE through to the FW.
+        */
+       if ((scmd->cmnd[0] == SYNCHRONIZE_CACHE) && MEGASAS_IS_LOGICAL(scmd)) {
                scmd->result = DID_OK << 16;
                goto out_done;
-       default:
-               break;
        }
 
        if (instance->instancet->build_and_issue_cmd(instance, scmd)) {
index d09d60293c272663b5b9b84f27ce2fab3e61a6aa..e357a393d56e0908c74149d310f2244fadd23656 100644 (file)
@@ -4981,6 +4981,7 @@ static void __exit scsi_debug_exit(void)
        bus_unregister(&pseudo_lld_bus);
        root_device_unregister(pseudo_primary);
 
+       vfree(map_storep);
        vfree(dif_storep);
        vfree(fake_storep);
 }
index 136ebaaa9cc094b9faa275ae019d6e145d4001cd..5ab54ef4f30428f72fc22d694f176ef126416dfd 100644 (file)
@@ -872,10 +872,15 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
        if (new_cols == vc->vc_cols && new_rows == vc->vc_rows)
                return 0;
 
+       if (new_screen_size > (4 << 20))
+               return -EINVAL;
        newscreen = kmalloc(new_screen_size, GFP_USER);
        if (!newscreen)
                return -ENOMEM;
 
+       if (vc == sel_cons)
+               clear_selection();
+
        old_rows = vc->vc_rows;
        old_row_size = vc->vc_size_row;
 
@@ -1173,7 +1178,7 @@ static void csi_J(struct vc_data *vc, int vpar)
                        break;
                case 3: /* erase scroll-back buffer (and whole display) */
                        scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
-                                   vc->vc_screenbuf_size >> 1);
+                                   vc->vc_screenbuf_size);
                        set_origin(vc);
                        if (CON_IS_VISIBLE(vc))
                                update_screen(vc);
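
The csi_J change hinges on scr_memsetw() taking its count in bytes, not in 16-bit screen cells, so the old >> 1 cleared only half of the buffer. A simplified sketch of the helper's semantics, based on the generic include/linux/vt_buffer.h fallback (the real version writes through scr_writew()):

    static inline void example_scr_memsetw(u16 *s, u16 c,
                                           unsigned int count)
    {
            count /= 2;     /* bytes -> u16 cells */
            while (count--)
                    *s++ = c;
    }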
index fb79dca9484b5afed9318e15c0859e43dbc99718..5ae2b7d3a74a346f023706142057f30566089992 100644 (file)
@@ -2845,7 +2845,7 @@ err3:
        kfree(dwc->setup_buf);
 
 err2:
-       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
                        dwc->ep0_trb, dwc->ep0_trb_addr);
 
 err1:
@@ -2869,7 +2869,7 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
 
        kfree(dwc->setup_buf);
 
-       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
+       dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb) * 2,
                        dwc->ep0_trb, dwc->ep0_trb_addr);
 
        dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
index 74e9f5b5a45dd64d3cd4c3934d06a1637f1af6ef..e4920e5e1d647ff92c5118545309f1eb99396c36 100644 (file)
@@ -743,7 +743,8 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
 
        /* throttle highspeed IRQ rate back slightly */
        if (gadget_is_dualspeed(dev->gadget) &&
-                        (dev->gadget->speed == USB_SPEED_HIGH)) {
+                        (dev->gadget->speed == USB_SPEED_HIGH) &&
+                        !list_empty(&dev->tx_reqs)) {
                dev->tx_qlen++;
                if (dev->tx_qlen == (dev->qmult/2)) {
                        req->no_interrupt = 0;
index 760cb57e954efeb8977c3a8e55c9351390e3b67a..9d1192aea9d0c9ea81e2a2cbd6dda2483267da3f 100644 (file)
@@ -72,7 +72,7 @@
 static const char      hcd_name [] = "ohci_hcd";
 
 #define        STATECHANGE_DELAY       msecs_to_jiffies(300)
-#define        IO_WATCHDOG_DELAY       msecs_to_jiffies(250)
+#define        IO_WATCHDOG_DELAY       msecs_to_jiffies(275)
 
 #include "ohci.h"
 #include "pci-quirks.h"
index 1da876605e4d9d863274643885a6f9da7b8b5ee4..b9d6940479daed5a5a3fb71e029e622770fdfef4 100644 (file)
@@ -1157,7 +1157,7 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
                                xhci_set_link_state(xhci, port_array, wIndex,
                                                        XDEV_RESUME);
                                spin_unlock_irqrestore(&xhci->lock, flags);
-                               msleep(20);
+                               msleep(USB_RESUME_TIMEOUT);
                                spin_lock_irqsave(&xhci->lock, flags);
                                xhci_set_link_state(xhci, port_array, wIndex,
                                                        XDEV_U0);
@@ -1401,7 +1401,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
 
        if (need_usb2_u3_exit) {
                spin_unlock_irqrestore(&xhci->lock, flags);
-               msleep(20);
+               msleep(USB_RESUME_TIMEOUT);
                spin_lock_irqsave(&xhci->lock, flags);
        }
 
index 963867c2c1d55b29745c742ea61863e183ef9987..cf147ccac7d365091e77443cabe1f7f2ddc1dd1a 100644 (file)
@@ -45,6 +45,7 @@
 
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI     0x8c31
 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI  0x9c31
+#define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI       0x9cb1
 #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI            0x22b5
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI                0xa12f
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI       0x9d2f
@@ -154,7 +155,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
        }
        if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
-               pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
+               (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI ||
+                pdev->device == PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_XHCI)) {
                xhci->quirks |= XHCI_SPURIOUS_REBOOT;
                xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
        }
index 6eccded3bc338dcb1940d42a4729d9685458a2d9..976195e748a355ee3c3d9694d4b4b21e910a7775 100644 (file)
@@ -845,7 +845,9 @@ static int cp210x_tiocmget(struct tty_struct *tty)
        unsigned int control;
        int result;
 
-       cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+       result = cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1);
+       if (result)
+               return result;
 
        result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0)
                |((control & CONTROL_RTS) ? TIOCM_RTS : 0)
index 8c48c9d83d48c8f3bfa3abc808badc2bedab16c0..494167fe6a2c5666c9df3e1b08f7c8e00e965861 100644 (file)
@@ -986,7 +986,8 @@ static const struct usb_device_id id_table_combined[] = {
        /* ekey Devices */
        { USB_DEVICE(FTDI_VID, FTDI_EKEY_CONV_USB_PID) },
        /* Infineon Devices */
-       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC1798_PID, 1) },
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_TC2X7_PID, 1) },
        /* GE Healthcare devices */
        { USB_DEVICE(GE_HEALTHCARE_VID, GE_HEALTHCARE_NEMO_TRACKER_PID) },
        /* Active Research (Actisense) devices */
index f87a938cf00571eb69edbd8d625f58041384d5fa..21011c0a4c6401ffd942e83040f3df53099d9e91 100644 (file)
 /*
  * Infineon Technologies
  */
-#define INFINEON_VID           0x058b
-#define INFINEON_TRIBOARD_PID  0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_VID                   0x058b
+#define INFINEON_TRIBOARD_TC1798_PID   0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+#define INFINEON_TRIBOARD_TC2X7_PID    0x0043 /* DAS JTAG TriBoard TC2X7 V1.0 */
 
 /*
  * Acton Research Corp.
index a0ca291bc07f7377c43deebbccc31f9268f4db35..e7e29c797824fc3b93c97cccb0643604ed31f651 100644 (file)
@@ -1077,7 +1077,8 @@ static int usb_serial_probe(struct usb_interface *interface,
 
        serial->disconnected = 0;
 
-       usb_serial_console_init(serial->port[0]->minor);
+       if (num_ports > 0)
+               usb_serial_console_init(serial->port[0]->minor);
 exit:
        module_put(type->driver.owner);
        return 0;
index dc2b94142f53251560e0734b769f5bf5f7f1feb0..a01a41a4126934c4b9af04853e409011aec3bbce 100644 (file)
@@ -548,7 +548,8 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
 
        if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+               if (!vq->event)
+                       vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
 
 }
@@ -580,7 +581,8 @@ unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
         * entry. Always do both to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+               if (!vq->event)
+                       vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
        vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
        END_USE(vq);
@@ -648,10 +650,11 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
         * more to do. */
        /* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
         * either clear the flags bit or point the event index at the next
-        * entry. Always do both to keep code simple. */
+        * entry. Always update the event index to keep code simple. */
        if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
                vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
-               vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
+               if (!vq->event)
+                       vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
        }
        /* TODO: tune this threshold */
        bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;
@@ -770,7 +773,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
        /* No callback?  Tell other side not to bother us. */
        if (!callback) {
                vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
-               vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
+               if (!vq->event)
+                       vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
        }
 
        /* Put everything in free lists. */
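
All four hunks apply one rule: when the event-index feature has been negotiated (vq->event), interrupt suppression is communicated through the used_event index, so the shadow copy of the avail flags is mirrored into guest-visible memory only when the feature is off. The repeated pattern as a hypothetical helper (vring_virtqueue is the file-local queue structure):

    static void example_sync_avail_flags(struct vring_virtqueue *vq,
                                         struct virtqueue *_vq)
    {
            /* with event-index on, the device ignores avail->flags */
            if (!vq->event)
                    vq->vring.avail->flags =
                            cpu_to_virtio16(_vq->vdev,
                                            vq->avail_flags_shadow);
    }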
index 1415f6d5863342ea35fcaf125b6484a121861b77..f7441193bf35aee7565d73f4a9d4e618ab00ea6d 100644 (file)
@@ -2696,14 +2696,12 @@ static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
                                             int index, int error)
 {
        struct btrfs_log_ctx *ctx;
+       struct btrfs_log_ctx *safe;
 
-       if (!error) {
-               INIT_LIST_HEAD(&root->log_ctxs[index]);
-               return;
-       }
-
-       list_for_each_entry(ctx, &root->log_ctxs[index], list)
+       list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
+               list_del_init(&ctx->list);
                ctx->log_ret = error;
+       }
 
        INIT_LIST_HEAD(&root->log_ctxs[index]);
 }
@@ -2944,13 +2942,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
        mutex_unlock(&root->log_mutex);
 
 out_wake_log_root:
-       /*
-        * We needn't get log_mutex here because we are sure all
-        * the other tasks are blocked.
-        */
+       mutex_lock(&log_root_tree->log_mutex);
        btrfs_remove_all_log_ctxs(log_root_tree, index2, ret);
 
-       mutex_lock(&log_root_tree->log_mutex);
        log_root_tree->log_transid_committed++;
        atomic_set(&log_root_tree->log_commit[index2], 0);
        mutex_unlock(&log_root_tree->log_mutex);
@@ -2961,10 +2955,8 @@ out_wake_log_root:
        if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
                wake_up(&log_root_tree->log_commit_wait[index2]);
 out:
-       /* See above. */
-       btrfs_remove_all_log_ctxs(root, index1, ret);
-
        mutex_lock(&root->log_mutex);
+       btrfs_remove_all_log_ctxs(root, index1, ret);
        root->log_transid_committed++;
        atomic_set(&root->log_commit[index1], 0);
        mutex_unlock(&root->log_mutex);
index ea0dd9ee138d2209cdd2017ff52070faf5116754..63a0d0ba36de11ef0a606c2d098663b750b2e282 100644 (file)
@@ -139,6 +139,8 @@ static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
                len -= bytes;
        }
 
+       if (!error)
+               error = vfs_fsync(new_file, 0);
        fput(new_file);
 out_fput:
        fput(old_file);
index e49bd2808bf3e397b9b499fe4d6036ad7600a71b..f5d5ee43ae6eb775a59f0d2deee5ba907531f45a 100644 (file)
@@ -350,7 +350,7 @@ static unsigned int vfs_dent_type(uint8_t type)
  */
 static int ubifs_readdir(struct file *file, struct dir_context *ctx)
 {
-       int err;
+       int err = 0;
        struct qstr nm;
        union ubifs_key key;
        struct ubifs_dent_node *dent;
@@ -452,14 +452,20 @@ out:
        kfree(file->private_data);
        file->private_data = NULL;
 
-       if (err != -ENOENT) {
+       if (err != -ENOENT)
                ubifs_err(c, "cannot find next direntry, error %d", err);
-               return err;
-       }
+       else
+               /*
+                * -ENOENT is a non-fatal error in this context; the TNC uses
+                * it to indicate that the cursor moved past the current
+                * directory and readdir() has to stop.
+                */
+               err = 0;
+
        /* 2 is a special value indicating that there are no more direntries */
        ctx->pos = 2;
-       return 0;
+       return err;
 }
 
 /* Free saved readdir() state when the directory is closed */
index 3cc3cf7674746f279fcb0acf4eae27d49d089814..ac9a003dd29acf80d7c766788cb40d1f98d9132c 100644 (file)
@@ -191,8 +191,7 @@ xfs_dquot_buf_verify_crc(
        if (mp->m_quotainfo)
                ndquots = mp->m_quotainfo->qi_dqperchunk;
        else
-               ndquots = xfs_calc_dquots_per_chunk(
-                                       XFS_BB_TO_FSB(mp, bp->b_length));
+               ndquots = xfs_calc_dquots_per_chunk(bp->b_length);
 
        for (i = 0; i < ndquots; i++, d++) {
                if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
index 9916d0e4eff505f18cf5e30ad52dbe2e37b3fa3b..25d0914481a26d04a001a91216df6fd68c52e87c 100644 (file)
 #define ARCH_TIMER_CTRL_IT_MASK                (1 << 1)
 #define ARCH_TIMER_CTRL_IT_STAT                (1 << 2)
 
+#define CNTHCTL_EL1PCTEN               (1 << 0)
+#define CNTHCTL_EL1PCEN                        (1 << 1)
+#define CNTHCTL_EVNTEN                 (1 << 2)
+#define CNTHCTL_EVNTDIR                        (1 << 3)
+#define CNTHCTL_EVNTI                  (0xF << 4)
+
 enum arch_timer_reg {
        ARCH_TIMER_REG_CTRL,
        ARCH_TIMER_REG_TVAL,
index d2f41477f8ae77600a8683890b3615766b9a3701..13a3d537811b9f7d12b3fa892b8312330f89d70f 100644 (file)
@@ -279,6 +279,12 @@ struct vgic_v2_cpu_if {
        u32             vgic_lr[VGIC_V2_MAX_LRS];
 };
 
+/*
+ * LRs are stored in reverse order in memory. Make sure we index them
+ * correctly.
+ */
+#define VGIC_V3_LR_INDEX(lr)           (VGIC_V3_MAX_LRS - 1 - lr)
+
 struct vgic_v3_cpu_if {
 #ifdef CONFIG_KVM_ARM_VGIC_V3
        u32             vgic_hcr;
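
The save/restore code fills the LR array from the top down, so list register n lives at array slot VGIC_V3_MAX_LRS - 1 - n. A hypothetical accessor showing the macro in use (assuming the v3 LRs are held as u64, as in this kernel's struct vgic_v3_cpu_if):

    static u64 example_read_lr(const struct vgic_v3_cpu_if *cpu_if, int lr)
    {
            return cpu_if->vgic_lr[VGIC_V3_LR_INDEX(lr)];
    }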
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
new file mode 100644 (file)
index 0000000..b5abfda
--- /dev/null
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2015, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __LINUX_ARM_SMCCC_H
+#define __LINUX_ARM_SMCCC_H
+
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+/*
+ * This file provides common defines for ARM SMC Calling Convention as
+ * specified in
+ * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html
+ */
+
+#define ARM_SMCCC_STD_CALL             0
+#define ARM_SMCCC_FAST_CALL            1
+#define ARM_SMCCC_TYPE_SHIFT           31
+
+#define ARM_SMCCC_SMC_32               0
+#define ARM_SMCCC_SMC_64               1
+#define ARM_SMCCC_CALL_CONV_SHIFT      30
+
+#define ARM_SMCCC_OWNER_MASK           0x3F
+#define ARM_SMCCC_OWNER_SHIFT          24
+
+#define ARM_SMCCC_FUNC_MASK            0xFFFF
+
+#define ARM_SMCCC_IS_FAST_CALL(smc_val)        \
+       ((smc_val) & (ARM_SMCCC_FAST_CALL << ARM_SMCCC_TYPE_SHIFT))
+#define ARM_SMCCC_IS_64(smc_val) \
+       ((smc_val) & (ARM_SMCCC_SMC_64 << ARM_SMCCC_CALL_CONV_SHIFT))
+#define ARM_SMCCC_FUNC_NUM(smc_val)    ((smc_val) & ARM_SMCCC_FUNC_MASK)
+#define ARM_SMCCC_OWNER_NUM(smc_val) \
+       (((smc_val) >> ARM_SMCCC_OWNER_SHIFT) & ARM_SMCCC_OWNER_MASK)
+
+#define ARM_SMCCC_CALL_VAL(type, calling_convention, owner, func_num) \
+       (((type) << ARM_SMCCC_TYPE_SHIFT) | \
+       ((calling_convention) << ARM_SMCCC_CALL_CONV_SHIFT) | \
+       (((owner) & ARM_SMCCC_OWNER_MASK) << ARM_SMCCC_OWNER_SHIFT) | \
+       ((func_num) & ARM_SMCCC_FUNC_MASK))
+
+#define ARM_SMCCC_OWNER_ARCH           0
+#define ARM_SMCCC_OWNER_CPU            1
+#define ARM_SMCCC_OWNER_SIP            2
+#define ARM_SMCCC_OWNER_OEM            3
+#define ARM_SMCCC_OWNER_STANDARD       4
+#define ARM_SMCCC_OWNER_TRUSTED_APP    48
+#define ARM_SMCCC_OWNER_TRUSTED_APP_END        49
+#define ARM_SMCCC_OWNER_TRUSTED_OS     50
+#define ARM_SMCCC_OWNER_TRUSTED_OS_END 63
+
+/**
+ * struct arm_smccc_res - Result from SMC/HVC call
+ * @a0-a3: result values from registers 0 to 3
+ */
+struct arm_smccc_res {
+       unsigned long a0;
+       unsigned long a1;
+       unsigned long a2;
+       unsigned long a3;
+};
+
+/**
+ * arm_smccc_smc() - make SMC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make SMC calls following the SMC Calling
+ * Convention. The contents of the supplied parameters are copied to
+ * registers 0 to 7 prior to the SMC instruction. The return values are
+ * updated with the contents of registers 0 to 3 on return from the SMC
+ * instruction.
+ */
+asmlinkage void arm_smccc_smc(unsigned long a0, unsigned long a1,
+                       unsigned long a2, unsigned long a3, unsigned long a4,
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res);
+
+/**
+ * arm_smccc_hvc() - make HVC calls
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This function is used to make HVC calls following the SMC Calling
+ * Convention. The contents of the supplied parameters are copied to
+ * registers 0 to 7 prior to the HVC instruction. The return values are
+ * updated with the contents of registers 0 to 3 on return from the HVC
+ * instruction.
+ */
+asmlinkage void arm_smccc_hvc(unsigned long a0, unsigned long a1,
+                       unsigned long a2, unsigned long a3, unsigned long a4,
+                       unsigned long a5, unsigned long a6, unsigned long a7,
+                       struct arm_smccc_res *res);
+
+#endif /*__LINUX_ARM_SMCCC_H*/
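
The encoding macros assemble a 32-bit function ID from the call-type, convention, owner, and function-number fields defined by the SMCCC document. A hypothetical example of building and issuing a fast SMC32 call (function number 0x10 and the wrapper below are made up for illustration):

    /* Compose the ID once, at compile time. */
    #define EXAMPLE_FN_ID                                           \
            ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,                 \
                               ARM_SMCCC_SMC_32,                    \
                               ARM_SMCCC_OWNER_STANDARD, 0x10)

    static unsigned long example_smc(void)
    {
            struct arm_smccc_res res;

            arm_smccc_smc(EXAMPLE_FN_ID, 0, 0, 0, 0, 0, 0, 0, &res);
            return res.a0;  /* primary return value */
    }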
index 79aaa9fc1a15d6ed7caac2b147ae030b18da060a..d5277fc3ce2eea2470891f360031f694d92b3996 100644 (file)
@@ -103,5 +103,5 @@ struct mfc_cache {
 struct rtmsg;
 extern int ipmr_get_route(struct net *net, struct sk_buff *skb,
                          __be32 saddr, __be32 daddr,
-                         struct rtmsg *rtm, int nowait);
+                         struct rtmsg *rtm, int nowait, u32 portid);
 #endif
index 66982e7640514389098c140a39665f95261071d8..f831155dc7d1201f5b2ef899a51a1e3a45f768e8 100644 (file)
@@ -115,7 +115,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-                          struct rtmsg *rtm, int nowait);
+                          struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
index 12b4d54a8ffaccb2616d848830c83fd025b01a2e..9d6025703f736746b11890ab9bbb9571ecc09831 100644 (file)
@@ -2003,7 +2003,10 @@ struct napi_gro_cb {
        /* Used in foo-over-udp, set in udp[46]_gro_receive */
        u8      is_ipv6:1;
 
-       /* 7 bit hole */
+       /* Number of gro_receive callbacks this packet already went through */
+       u8 recursion_counter:4;
+
+       /* 3 bit hole */
 
        /* used to support CHECKSUM_COMPLETE for tunneling protocols */
        __wsum  csum;
@@ -2014,6 +2017,25 @@ struct napi_gro_cb {
 
 #define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
 
+#define GRO_RECURSION_LIMIT 15
+static inline int gro_recursion_inc_test(struct sk_buff *skb)
+{
+       return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
+}
+
+typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
+static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
+                                               struct sk_buff **head,
+                                               struct sk_buff *skb)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb);
+}
+
 struct packet_type {
        __be16                  type;   /* This is really htons(ether_type). */
        struct net_device       *dev;   /* NULL is wildcarded here           */
@@ -2059,6 +2081,22 @@ struct udp_offload {
        struct udp_offload_callbacks callbacks;
 };
 
+typedef struct sk_buff **(*gro_receive_udp_t)(struct sk_buff **,
+                                             struct sk_buff *,
+                                             struct udp_offload *);
+static inline struct sk_buff **call_gro_receive_udp(gro_receive_udp_t cb,
+                                                   struct sk_buff **head,
+                                                   struct sk_buff *skb,
+                                                   struct udp_offload *uoff)
+{
+       if (unlikely(gro_recursion_inc_test(skb))) {
+               NAPI_GRO_CB(skb)->flush |= 1;
+               return NULL;
+       }
+
+       return cb(head, skb, uoff);
+}
+
 /* often modified stats are per cpu, other are shared (netdev->stats) */
 struct pcpu_sw_netstats {
        u64     rx_packets;
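
Both wrappers implement the same guard: each encapsulation layer increments a 4-bit per-skb counter before chaining to the next gro_receive handler, and once the nesting depth reaches GRO_RECURSION_LIMIT the packet is flushed instead of recursed into, which matches the converted geneve and vxlan call sites earlier in this diff. A hypothetical driver-side use:

    /* inner_gro_receive stands in for whatever next-layer callback the
     * driver resolved; the helper accounts the recursion per skb. */
    static struct sk_buff **inner_gro_receive(struct sk_buff **head,
                                              struct sk_buff *skb);

    static struct sk_buff **example_tunnel_gro_receive(struct sk_buff **head,
                                                       struct sk_buff *skb)
    {
            return call_gro_receive(inner_gro_receive, head, skb);
    }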
index 12c4865457adc3d0412c573b710feec7d3d6fd81..393efe2edf9afb9d8a38d1a16201e78c64881026 100644 (file)
@@ -24,6 +24,9 @@ bool psci_tos_resident_on(int cpu);
 bool psci_power_state_loses_context(u32 state);
 bool psci_power_state_is_valid(u32 state);
 
+int psci_cpu_init_idle(unsigned int cpu);
+int psci_cpu_suspend_enter(unsigned long index);
+
 struct psci_operations {
        int (*cpu_suspend)(u32 state, unsigned long entry_point);
        int (*cpu_off)(u32 state);
index cfc3ed46cad20a26ffa131b51e25927ac6045f4c..aa8736d5b2f38ba1319a9f030da47ef18f537761 100644 (file)
@@ -331,6 +331,7 @@ static inline void pwm_remove_table(struct pwm_lookup *table, size_t num)
 #ifdef CONFIG_PWM_SYSFS
 void pwmchip_sysfs_export(struct pwm_chip *chip);
 void pwmchip_sysfs_unexport(struct pwm_chip *chip);
+void pwmchip_sysfs_unexport_children(struct pwm_chip *chip);
 #else
 static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 {
@@ -339,6 +340,10 @@ static inline void pwmchip_sysfs_export(struct pwm_chip *chip)
 static inline void pwmchip_sysfs_unexport(struct pwm_chip *chip)
 {
 }
+
+static inline void pwmchip_sysfs_unexport_children(struct pwm_chip *chip)
+{
+}
 #endif /* CONFIG_PWM_SYSFS */
 
 #endif /* __LINUX_PWM_H */
index 4f3ef345f4c2efa31f2794011589e2540a99ae9c..f78c3a52529bc7885db8a01d5a3d604a99b87128 100644 (file)
@@ -554,7 +554,7 @@ int ip_options_rcv_srr(struct sk_buff *skb);
  */
 
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
-void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
+void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int tlen, int offset);
 int ip_cmsg_send(struct net *net, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
 int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
@@ -576,7 +576,7 @@ void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
 
 static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
 {
-       ip_cmsg_recv_offset(msg, skb, 0);
+       ip_cmsg_recv_offset(msg, skb, 0, 0);
 }
 
 bool icmp_global_allow(void);
index 86df0835f6b58517c671e200779d9045200cb5d3..e5bba897d20649223bf393b94a66677177e0f554 100644 (file)
@@ -408,6 +408,15 @@ bool tcf_destroy(struct tcf_proto *tp, bool force);
 void tcf_destroy_chain(struct tcf_proto __rcu **fl);
 int skb_do_redirect(struct sk_buff *);
 
+static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+       return G_TC_AT(skb->tc_verd) & AT_INGRESS;
+#else
+       return false;
+#endif
+}
+
 /* Reset all TX qdiscs greater then index of a device.  */
 static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
 {
index 2d663ee8494daa5b3d8968542bc014472d361187..8f77df63a8f46670d8ff9be2be3b62d4e76032f8 100644 (file)
@@ -1426,6 +1426,16 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
        if (!sk_has_account(sk))
                return;
        sk->sk_forward_alloc += size;
+
+       /* Avoid a possible overflow.
+        * TCP send queues can make this happen if sk_mem_reclaim()
+        * is not called and more than 2 GBytes are released at once.
+        *
+        * If we reach 2 MBytes, reclaim 1 MByte right now; there is
+        * no need to hold that much forward allocation anyway.
+        */
+       if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+               __sk_mem_reclaim(sk, 1 << 20);
 }
 
 static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
index 3eb02a1d6d8cca26f00e5d5c6399436100b172b8..a2fad11894ffd16e5640ad3501eed3aca4b60438 100644 (file)
@@ -344,7 +344,7 @@ struct rtnexthop {
 #define RTNH_F_OFFLOAD         8       /* offloaded route */
 #define RTNH_F_LINKDOWN                16      /* carrier-down on nexthop */
 
-#define RTNH_COMPARE_MASK      (RTNH_F_DEAD | RTNH_F_LINKDOWN)
+#define RTNH_COMPARE_MASK      (RTNH_F_DEAD | RTNH_F_LINKDOWN | RTNH_F_OFFLOAD)
 
 /* Macros to handle nexthops */
 
index e4552a3cbf418a666a55a462707aba46b0d66960..f48196a7414c0628504daa321f8d6a857eab37d7 100644 (file)
@@ -236,6 +236,9 @@ static int cgroup_addrm_files(struct cgroup_subsys_state *css,
  */
 static bool cgroup_ssid_enabled(int ssid)
 {
+       if (CGROUP_SUBSYS_COUNT == 0)
+               return false;
+
        return static_key_enabled(cgroup_subsys_enabled_key[ssid]);
 }
 
index b2dd4d999900a26edd9cd26fb952b84b98ee411f..27946975eff004f210d563e73218f1fe6d0b9a9e 100644 (file)
@@ -280,13 +280,7 @@ static ssize_t pm_wakeup_irq_show(struct kobject *kobj,
        return pm_wakeup_irq ? sprintf(buf, "%u\n", pm_wakeup_irq) : -ENODATA;
 }
 
-static ssize_t pm_wakeup_irq_store(struct kobject *kobj,
-                                       struct kobj_attribute *attr,
-                                       const char *buf, size_t n)
-{
-       return -EINVAL;
-}
-power_attr(pm_wakeup_irq);
+power_attr_ro(pm_wakeup_irq);
 
 #else /* !CONFIG_PM_SLEEP_DEBUG */
 static inline void pm_print_times_init(void) {}
@@ -564,14 +558,7 @@ static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
        return show_trace_dev_match(buf, PAGE_SIZE);
 }
 
-static ssize_t
-pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
-                        const char *buf, size_t n)
-{
-       return -EINVAL;
-}
-
-power_attr(pm_trace_dev_match);
+power_attr_ro(pm_trace_dev_match);
 
 #endif /* CONFIG_PM_TRACE */
 
index caadb566e82bb51a5348d6ba67a73bda8c99f37d..efe1b3b17c88d0eb793fa84ceec00e5b31604af4 100644 (file)
@@ -77,6 +77,15 @@ static struct kobj_attribute _name##_attr = {        \
        .store  = _name##_store,                \
 }
 
+#define power_attr_ro(_name) \
+static struct kobj_attribute _name##_attr = {  \
+       .attr   = {                             \
+               .name = __stringify(_name),     \
+               .mode = S_IRUGO,                \
+       },                                      \
+       .show   = _name##_show,                 \
+}
+
 /* Preferred image size in bytes (default 500 MB) */
 extern unsigned long image_size;
 /* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
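
The expansion is mechanical; power_attr_ro(pm_wakeup_irq) from the hunk above becomes roughly:

        static struct kobj_attribute pm_wakeup_irq_attr = {
                .attr   = {
                        .name = "pm_wakeup_irq",
                        .mode = S_IRUGO,        /* 0444 */
                },
                .show   = pm_wakeup_irq_show,
        };

With no .store method and no write bit in the mode, writes are now rejected before they reach the driver, instead of by a stub returning -EINVAL.
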
index 12cd989dadf639c3276ca228fef1431284c862ec..160e1006640d585f417ae37ecab304e407971e67 100644 (file)
 
 #define HIBERNATE_SIG  "S1SUSPEND"
 
+/*
+ * When reading an {un,}compressed image, we may restore pages in place,
+ * in which case some architectures need these pages cleaned before they
+ * can be executed. We don't know which pages these may be, so clean the lot.
+ */
+static bool clean_pages_on_read;
+static bool clean_pages_on_decompress;
+
 /*
  *     The swap map is a data structure used for keeping track of each page
  *     written to a swap partition.  It consists of many swap_map_page
@@ -241,6 +249,9 @@ static void hib_end_io(struct bio *bio)
 
        if (bio_data_dir(bio) == WRITE)
                put_page(page);
+       else if (clean_pages_on_read)
+               flush_icache_range((unsigned long)page_address(page),
+                                  (unsigned long)page_address(page) + PAGE_SIZE);
 
        if (bio->bi_error && !hb->error)
                hb->error = bio->bi_error;
@@ -1049,6 +1060,7 @@ static int load_image(struct swap_map_handle *handle,
 
        hib_init_batch(&hb);
 
+       clean_pages_on_read = true;
        printk(KERN_INFO "PM: Loading image data pages (%u pages)...\n",
                nr_to_read);
        m = nr_to_read / 10;
@@ -1124,6 +1136,10 @@ static int lzo_decompress_threadfn(void *data)
                d->unc_len = LZO_UNC_SIZE;
                d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len,
                                               d->unc, &d->unc_len);
+               if (clean_pages_on_decompress)
+                       flush_icache_range((unsigned long)d->unc,
+                                          (unsigned long)d->unc + d->unc_len);
+
                atomic_set(&d->stop, 1);
                wake_up(&d->done);
        }
@@ -1189,6 +1205,8 @@ static int load_image_lzo(struct swap_map_handle *handle,
        }
        memset(crc, 0, offsetof(struct crc_data, go));
 
+       clean_pages_on_decompress = true;
+
        /*
         * Start the decompression threads.
         */
index ea506eb18cd6b2cff00606a6c1e1387b48099ffa..bd0e1412475eb872dd354999d3120a55cce925be 100644 (file)
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -183,7 +183,8 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
                return -EINVAL;
 
        /* ensure minimal alignment required by mm core */
-       alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+       alignment = PAGE_SIZE <<
+                       max_t(unsigned long, MAX_ORDER - 1, pageblock_order);
 
        /* alignment should be aligned with order_per_bit */
        if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
@@ -266,8 +267,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
         * migratetype page by the page allocator's buddy algorithm. In that
         * case, you couldn't get contiguous memory, which is not what we want.
         */
-       alignment = max(alignment,
-               (phys_addr_t)PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order));
+       alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
+                         max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
        base = ALIGN(base, alignment);
        size = ALIGN(size, alignment);
        limit &= ~(alignment - 1);
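
The max_t() switch is a type fix, not a behavioural one: depending on configuration, pageblock_order and MAX_ORDER - 1 can have different types, and the kernel's max() refuses mismatched operands. Conceptually (simplified from include/linux/kernel.h):

        /* max() demands identical types; max_t() casts both sides first */
        #define max_t(type, x, y)       max((type)(x), (type)(y))
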
index afc71ea9a381f853faf65a05edc7f411a378d7e5..5d8dffd5b57c83575b2daf4b9ded58b63c5c6f6e 100644 (file)
@@ -554,6 +554,8 @@ int __list_lru_init(struct list_lru *lru, bool memcg_aware,
        err = memcg_init_list_lru(lru, memcg_aware);
        if (err) {
                kfree(lru->node);
+               /* Do this so a list_lru_destroy() doesn't crash: */
+               lru->node = NULL;
                goto out;
        }
 
index 1e50d37ee132b3768ecb052486e0dd62f2a0d066..17dfe70f3309778e92d6968ed87b15d4baa7897a 100644 (file)
@@ -2055,6 +2055,15 @@ retry:
                     current->flags & PF_EXITING))
                goto force;
 
+       /*
+        * Prevent unbounded recursion when reclaim operations need to
+        * allocate memory. This might exceed the limits temporarily,
+        * but we prefer facilitating memory reclaim and getting back
+        * under the limit over triggering OOM kills in these cases.
+        */
+       if (unlikely(current->flags & PF_MEMALLOC))
+               goto force;
+
        if (unlikely(task_in_memcg_oom(current)))
                goto nomem;
 
index 0838e9f02b11e778afe0a3da51698f54c54d5051..de1c59d8daa32ac3908e549cfdfd2df16f99b669 100644 (file)
@@ -2910,7 +2910,9 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
                                            sc.may_writepage,
                                            sc.gfp_mask);
 
+       current->flags |= PF_MEMALLOC;
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+       current->flags &= ~PF_MEMALLOC;
 
        trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
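
This hunk and the memcontrol.c one above work as a pair; a condensed sketch of the protocol:

        current->flags |= PF_MEMALLOC;          /* "this task is reclaim" */
        nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
        current->flags &= ~PF_MEMALLOC;

        /* ...while in try_charge(), an allocation made during reclaim
         * bypasses the limit rather than recursing into reclaim again:
         */
        if (unlikely(current->flags & PF_MEMALLOC))
                goto force;
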
 
index d2cd9de4b7241dcc8c1df5fd7832b3b317799923..ad8d6e6b87cab9c2c03c9e317513880d1b755b95 100644 (file)
@@ -659,7 +659,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(*vhdr));
        skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
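
call_gro_receive() itself is not part of this excerpt; in the corresponding upstream fix it is a thin wrapper that bounds nesting via the recursion_counter initialised in the net/core/dev.c hunk below, roughly:

        #define GRO_RECURSION_LIMIT 15

        static inline int gro_recursion_inc_test(struct sk_buff *skb)
        {
                return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
        }

        typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **,
                                                  struct sk_buff *);

        static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
                                                        struct sk_buff **head,
                                                        struct sk_buff *skb)
        {
                if (unlikely(gro_recursion_inc_test(skb))) {
                        NAPI_GRO_CB(skb)->flush |= 1;   /* abort GRO for this skb */
                        return NULL;
                }
                return cb(head, skb);
        }

Every gro_receive callback that can re-enter the GRO stack (vlan, ethernet, ipv4/ipv6, gre, fou/gue, udp) is converted the same way in the hunks that follow.
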
index 9542e84a9455145dabcf7fc4c795f37d29958d6a..d80c15d028fe2a65e0a61d0d2fad447ecad13102 100644 (file)
@@ -951,13 +951,12 @@ static void br_multicast_enable(struct bridge_mcast_own_query *query)
                mod_timer(&query->timer, jiffies);
 }
 
-void br_multicast_enable_port(struct net_bridge_port *port)
+static void __br_multicast_enable_port(struct net_bridge_port *port)
 {
        struct net_bridge *br = port->br;
 
-       spin_lock(&br->multicast_lock);
        if (br->multicast_disabled || !netif_running(br->dev))
-               goto out;
+               return;
 
        br_multicast_enable(&port->ip4_own_query);
 #if IS_ENABLED(CONFIG_IPV6)
@@ -965,8 +964,14 @@ void br_multicast_enable_port(struct net_bridge_port *port)
 #endif
        if (port->multicast_router == 2 && hlist_unhashed(&port->rlist))
                br_multicast_add_router(br, port);
+}
 
-out:
+void br_multicast_enable_port(struct net_bridge_port *port)
+{
+       struct net_bridge *br = port->br;
+
+       spin_lock(&br->multicast_lock);
+       __br_multicast_enable_port(port);
        spin_unlock(&br->multicast_lock);
 }
 
@@ -1905,8 +1910,9 @@ static void br_multicast_start_querier(struct net_bridge *br,
 
 int br_multicast_toggle(struct net_bridge *br, unsigned long val)
 {
-       int err = 0;
        struct net_bridge_mdb_htable *mdb;
+       struct net_bridge_port *port;
+       int err = 0;
 
        spin_lock_bh(&br->multicast_lock);
        if (br->multicast_disabled == !val)
@@ -1934,10 +1940,9 @@ rollback:
                        goto rollback;
        }
 
-       br_multicast_start_querier(br, &br->ip4_own_query);
-#if IS_ENABLED(CONFIG_IPV6)
-       br_multicast_start_querier(br, &br->ip6_own_query);
-#endif
+       br_multicast_open(br);
+       list_for_each_entry(port, &br->port_list, list)
+               __br_multicast_enable_port(port);
 
 unlock:
        spin_unlock_bh(&br->multicast_lock);
index 0989fea88c4480ed88086c628dd7d7cf832e37eb..b3fa4b86ab4ce59d4fe1a0c42060f59ab649bc0a 100644 (file)
@@ -2836,6 +2836,7 @@ struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *d
        }
        return head;
 }
+EXPORT_SYMBOL_GPL(validate_xmit_skb_list);
 
 static void qdisc_pkt_len_init(struct sk_buff *skb)
 {
@@ -4240,6 +4241,7 @@ static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff
                NAPI_GRO_CB(skb)->flush = 0;
                NAPI_GRO_CB(skb)->free = 0;
                NAPI_GRO_CB(skb)->encap_mark = 0;
+               NAPI_GRO_CB(skb)->recursion_counter = 0;
                NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
 
                /* Setup for GRO checksum validation */
@@ -5204,6 +5206,7 @@ static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
 
 static int __netdev_adjacent_dev_insert(struct net_device *dev,
                                        struct net_device *adj_dev,
+                                       u16 ref_nr,
                                        struct list_head *dev_list,
                                        void *private, bool master)
 {
@@ -5213,7 +5216,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
        adj = __netdev_find_adj(adj_dev, dev_list);
 
        if (adj) {
-               adj->ref_nr++;
+               adj->ref_nr += ref_nr;
                return 0;
        }
 
@@ -5223,7 +5226,7 @@ static int __netdev_adjacent_dev_insert(struct net_device *dev,
 
        adj->dev = adj_dev;
        adj->master = master;
-       adj->ref_nr = 1;
+       adj->ref_nr = ref_nr;
        adj->private = private;
        dev_hold(adj_dev);
 
@@ -5262,6 +5265,7 @@ free_adj:
 
 static void __netdev_adjacent_dev_remove(struct net_device *dev,
                                         struct net_device *adj_dev,
+                                        u16 ref_nr,
                                         struct list_head *dev_list)
 {
        struct netdev_adjacent *adj;
@@ -5274,10 +5278,10 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
                BUG();
        }
 
-       if (adj->ref_nr > 1) {
-               pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
-                        adj->ref_nr-1);
-               adj->ref_nr--;
+       if (adj->ref_nr > ref_nr) {
+               pr_debug("%s to %s ref_nr-%d = %d\n", dev->name, adj_dev->name,
+                        ref_nr, adj->ref_nr-ref_nr);
+               adj->ref_nr -= ref_nr;
                return;
        }
 
@@ -5296,21 +5300,22 @@ static void __netdev_adjacent_dev_remove(struct net_device *dev,
 
 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
                                            struct net_device *upper_dev,
+                                           u16 ref_nr,
                                            struct list_head *up_list,
                                            struct list_head *down_list,
                                            void *private, bool master)
 {
        int ret;
 
-       ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
-                                          master);
+       ret = __netdev_adjacent_dev_insert(dev, upper_dev, ref_nr, up_list,
+                                          private, master);
        if (ret)
                return ret;
 
-       ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
-                                          false);
+       ret = __netdev_adjacent_dev_insert(upper_dev, dev, ref_nr, down_list,
+                                          private, false);
        if (ret) {
-               __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
+               __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
                return ret;
        }
 
@@ -5318,9 +5323,10 @@ static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
 }
 
 static int __netdev_adjacent_dev_link(struct net_device *dev,
-                                     struct net_device *upper_dev)
+                                     struct net_device *upper_dev,
+                                     u16 ref_nr)
 {
-       return __netdev_adjacent_dev_link_lists(dev, upper_dev,
+       return __netdev_adjacent_dev_link_lists(dev, upper_dev, ref_nr,
                                                &dev->all_adj_list.upper,
                                                &upper_dev->all_adj_list.lower,
                                                NULL, false);
@@ -5328,17 +5334,19 @@ static int __netdev_adjacent_dev_link(struct net_device *dev,
 
 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
                                               struct net_device *upper_dev,
+                                              u16 ref_nr,
                                               struct list_head *up_list,
                                               struct list_head *down_list)
 {
-       __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
-       __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
+       __netdev_adjacent_dev_remove(dev, upper_dev, ref_nr, up_list);
+       __netdev_adjacent_dev_remove(upper_dev, dev, ref_nr, down_list);
 }
 
 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
-                                        struct net_device *upper_dev)
+                                        struct net_device *upper_dev,
+                                        u16 ref_nr)
 {
-       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev, ref_nr,
                                           &dev->all_adj_list.upper,
                                           &upper_dev->all_adj_list.lower);
 }
@@ -5347,17 +5355,17 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
                                                struct net_device *upper_dev,
                                                void *private, bool master)
 {
-       int ret = __netdev_adjacent_dev_link(dev, upper_dev);
+       int ret = __netdev_adjacent_dev_link(dev, upper_dev, 1);
 
        if (ret)
                return ret;
 
-       ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
+       ret = __netdev_adjacent_dev_link_lists(dev, upper_dev, 1,
                                               &dev->adj_list.upper,
                                               &upper_dev->adj_list.lower,
                                               private, master);
        if (ret) {
-               __netdev_adjacent_dev_unlink(dev, upper_dev);
+               __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
                return ret;
        }
 
@@ -5367,8 +5375,8 @@ static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
                                                   struct net_device *upper_dev)
 {
-       __netdev_adjacent_dev_unlink(dev, upper_dev);
-       __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
+       __netdev_adjacent_dev_unlink(dev, upper_dev, 1);
+       __netdev_adjacent_dev_unlink_lists(dev, upper_dev, 1,
                                           &dev->adj_list.upper,
                                           &upper_dev->adj_list.lower);
 }
@@ -5420,7 +5428,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
                list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
                        pr_debug("Interlinking %s with %s, non-neighbour\n",
                                 i->dev->name, j->dev->name);
-                       ret = __netdev_adjacent_dev_link(i->dev, j->dev);
+                       ret = __netdev_adjacent_dev_link(i->dev, j->dev, i->ref_nr);
                        if (ret)
                                goto rollback_mesh;
                }
@@ -5430,7 +5438,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
                pr_debug("linking %s's upper device %s with %s\n",
                         upper_dev->name, i->dev->name, dev->name);
-               ret = __netdev_adjacent_dev_link(dev, i->dev);
+               ret = __netdev_adjacent_dev_link(dev, i->dev, i->ref_nr);
                if (ret)
                        goto rollback_upper_mesh;
        }
@@ -5439,7 +5447,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        list_for_each_entry(i, &dev->all_adj_list.lower, list) {
                pr_debug("linking %s's lower device %s with %s\n", dev->name,
                         i->dev->name, upper_dev->name);
-               ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
+               ret = __netdev_adjacent_dev_link(i->dev, upper_dev, i->ref_nr);
                if (ret)
                        goto rollback_lower_mesh;
        }
@@ -5453,7 +5461,7 @@ rollback_lower_mesh:
        list_for_each_entry(i, &dev->all_adj_list.lower, list) {
                if (i == to_i)
                        break;
-               __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+               __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
        }
 
        i = NULL;
@@ -5463,7 +5471,7 @@ rollback_upper_mesh:
        list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
                if (i == to_i)
                        break;
-               __netdev_adjacent_dev_unlink(dev, i->dev);
+               __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
        }
 
        i = j = NULL;
@@ -5475,7 +5483,7 @@ rollback_mesh:
                list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
                        if (i == to_i && j == to_j)
                                break;
-                       __netdev_adjacent_dev_unlink(i->dev, j->dev);
+                       __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
                }
                if (i == to_i)
                        break;
@@ -5559,16 +5567,16 @@ void netdev_upper_dev_unlink(struct net_device *dev,
         */
        list_for_each_entry(i, &dev->all_adj_list.lower, list)
                list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
-                       __netdev_adjacent_dev_unlink(i->dev, j->dev);
+                       __netdev_adjacent_dev_unlink(i->dev, j->dev, i->ref_nr);
 
        /* also remove the device itself from the lower/upper device
         * lists
         */
        list_for_each_entry(i, &dev->all_adj_list.lower, list)
-               __netdev_adjacent_dev_unlink(i->dev, upper_dev);
+               __netdev_adjacent_dev_unlink(i->dev, upper_dev, i->ref_nr);
 
        list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
-               __netdev_adjacent_dev_unlink(dev, i->dev);
+               __netdev_adjacent_dev_unlink(dev, i->dev, i->ref_nr);
 
        call_netdevice_notifiers_info(NETDEV_CHANGEUPPER, dev,
                                      &changeupper_info.info);
index 4da4d51a2ccfd6e8b8833c35cc0604991536ed68..b6327601f97974b7f7d1ccf31c3f0cfb9306d4fb 100644 (file)
 #define M_NETIF_RECEIVE        1       /* Inject packets into stack */
 
 /* If lock -- protects updating of if_list */
-#define   if_lock(t)           spin_lock(&(t->if_lock));
-#define   if_unlock(t)           spin_unlock(&(t->if_lock));
+#define   if_lock(t)           mutex_lock(&(t->if_lock));
+#define   if_unlock(t)           mutex_unlock(&(t->if_lock));
 
 /* Used to help with determining the pkts on receive */
 #define PKTGEN_MAGIC 0xbe9be955
@@ -422,7 +422,7 @@ struct pktgen_net {
 };
 
 struct pktgen_thread {
-       spinlock_t if_lock;             /* for list of devices */
+       struct mutex if_lock;           /* for list of devices */
        struct list_head if_list;       /* All device here */
        struct list_head th_list;
        struct task_struct *tsk;
@@ -2002,11 +2002,13 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
 {
        struct pktgen_thread *t;
 
+       mutex_lock(&pktgen_thread_lock);
+
        list_for_each_entry(t, &pn->pktgen_threads, th_list) {
                struct pktgen_dev *pkt_dev;
 
-               rcu_read_lock();
-               list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
+               if_lock(t);
+               list_for_each_entry(pkt_dev, &t->if_list, list) {
                        if (pkt_dev->odev != dev)
                                continue;
 
@@ -2021,8 +2023,9 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
                                       dev->name);
                        break;
                }
-               rcu_read_unlock();
+               if_unlock(t);
        }
+       mutex_unlock(&pktgen_thread_lock);
 }
 
 static int pktgen_device_event(struct notifier_block *unused,
@@ -2278,7 +2281,7 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
 
 static inline void set_pkt_overhead(struct pktgen_dev *pkt_dev)
 {
-       pkt_dev->pkt_overhead = LL_RESERVED_SPACE(pkt_dev->odev);
+       pkt_dev->pkt_overhead = 0;
        pkt_dev->pkt_overhead += pkt_dev->nr_labels*sizeof(u32);
        pkt_dev->pkt_overhead += VLAN_TAG_SIZE(pkt_dev);
        pkt_dev->pkt_overhead += SVLAN_TAG_SIZE(pkt_dev);
@@ -2769,13 +2772,13 @@ static void pktgen_finalize_skb(struct pktgen_dev *pkt_dev, struct sk_buff *skb,
 }
 
 static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
-                                       struct pktgen_dev *pkt_dev,
-                                       unsigned int extralen)
+                                       struct pktgen_dev *pkt_dev)
 {
+       unsigned int extralen = LL_RESERVED_SPACE(dev);
        struct sk_buff *skb = NULL;
-       unsigned int size = pkt_dev->cur_pkt_size + 64 + extralen +
-                           pkt_dev->pkt_overhead;
+       unsigned int size;
 
+       size = pkt_dev->cur_pkt_size + 64 + extralen + pkt_dev->pkt_overhead;
        if (pkt_dev->flags & F_NODE) {
                int node = pkt_dev->node >= 0 ? pkt_dev->node : numa_node_id();
 
@@ -2788,8 +2791,9 @@ static struct sk_buff *pktgen_alloc_skb(struct net_device *dev,
                 skb = __netdev_alloc_skb(dev, size, GFP_NOWAIT);
        }
 
+       /* the caller prefetches from skb->data and reserves headroom for the mac header */
        if (likely(skb))
-               skb_reserve(skb, LL_RESERVED_SPACE(dev));
+               skb_reserve(skb, extralen - 16);
 
        return skb;
 }
@@ -2822,16 +2826,14 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
        mod_cur_headers(pkt_dev);
        queue_map = pkt_dev->cur_queue_map;
 
-       datalen = (odev->hard_header_len + 16) & ~0xf;
-
-       skb = pktgen_alloc_skb(odev, pkt_dev, datalen);
+       skb = pktgen_alloc_skb(odev, pkt_dev);
        if (!skb) {
                sprintf(pkt_dev->result, "No memory");
                return NULL;
        }
 
        prefetchw(skb->data);
-       skb_reserve(skb, datalen);
+       skb_reserve(skb, 16);
 
        /*  Reserve for ethernet and IP header  */
        eth = (__u8 *) skb_push(skb, 14);
@@ -2951,7 +2953,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
        mod_cur_headers(pkt_dev);
        queue_map = pkt_dev->cur_queue_map;
 
-       skb = pktgen_alloc_skb(odev, pkt_dev, 16);
+       skb = pktgen_alloc_skb(odev, pkt_dev);
        if (!skb) {
                sprintf(pkt_dev->result, "No memory");
                return NULL;
@@ -3727,7 +3729,7 @@ static int __net_init pktgen_create_thread(int cpu, struct pktgen_net *pn)
                return -ENOMEM;
        }
 
-       spin_lock_init(&t->if_lock);
+       mutex_init(&t->if_lock);
        t->cpu = cpu;
 
        INIT_LIST_HEAD(&t->if_list);
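
Headroom bookkeeping after this change, shown for the IPv4 path:

        /*
         * old: skb_reserve(LL_RESERVED_SPACE(dev)) in pktgen_alloc_skb(),
         *      plus skb_reserve((hard_header_len + 16) & ~0xf) in fill,
         *      with pkt_overhead also starting at LL_RESERVED_SPACE(dev).
         * new: skb_reserve(LL_RESERVED_SPACE(dev) - 16) in alloc, plus
         *      skb_reserve(16) in fill: the total stays
         *      LL_RESERVED_SPACE(dev), while pkt_overhead starts at 0 and
         *      no longer double-counts the link-layer reserve.
         */
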
index 9e63f252a89ead08fda157e716607938452c0b49..de85d4e1cf43c10016e159248a94e7fd544e4c04 100644 (file)
@@ -436,7 +436,7 @@ struct sk_buff **eth_gro_receive(struct sk_buff **head,
 
        skb_gro_pull(skb, sizeof(*eh));
        skb_gro_postpull_rcsum(skb, eh, sizeof(*eh));
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index caa6f158a0775d5dd86b6e76198dbca5290c33b0..68bf7bdf7fdb71a7e5abd54e8826e1cfb67e52ec 100644 (file)
@@ -1387,7 +1387,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
        skb_gro_pull(skb, sizeof(*iph));
        skb_set_transport_header(skb, skb_gro_offset(skb));
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 08d7de55e57edf23a8b1425e3dda1b6d51fb671b..08d8ee12453801653860354220a60fe043252d4d 100644 (file)
@@ -201,7 +201,7 @@ static struct sk_buff **fou_gro_receive(struct sk_buff **head,
        if (!ops || !ops->callbacks.gro_receive)
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
@@ -360,7 +360,7 @@ static struct sk_buff **gue_gro_receive(struct sk_buff **head,
        if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive))
                goto out_unlock;
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index e603004c1af8293f0d8b919a5c1f2023bc4ceebd..79ae0d7becbf522250f31e84674a515b5ed5a7d4 100644 (file)
@@ -219,7 +219,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
        /* Adjusted NAPI_GRO_CB(skb)->csum after skb_gro_pull() */
        skb_gro_postpull_rcsum(skb, greh, grehlen);
 
-       pp = ptype->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index a50124260f5a4aaa98a3e4a582dbcbdbc236e370..9ce202549e7a1d7de0dbc02f45aef0ed901c4e90 100644 (file)
@@ -98,7 +98,7 @@ static void ip_cmsg_recv_retopts(struct msghdr *msg, struct sk_buff *skb)
 }
 
 static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
-                                 int offset)
+                                 int tlen, int offset)
 {
        __wsum csum = skb->csum;
 
@@ -106,7 +106,9 @@ static void ip_cmsg_recv_checksum(struct msghdr *msg, struct sk_buff *skb,
                return;
 
        if (offset != 0)
-               csum = csum_sub(csum, csum_partial(skb->data, offset, 0));
+               csum = csum_sub(csum,
+                               csum_partial(skb->data + tlen,
+                                            offset, 0));
 
        put_cmsg(msg, SOL_IP, IP_CHECKSUM, sizeof(__wsum), &csum);
 }
@@ -152,7 +154,7 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
 }
 
 void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
-                        int offset)
+                        int tlen, int offset)
 {
        struct inet_sock *inet = inet_sk(skb->sk);
        unsigned int flags = inet->cmsg_flags;
@@ -215,7 +217,7 @@ void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb,
        }
 
        if (flags & IP_CMSG_CHECKSUM)
-               ip_cmsg_recv_checksum(msg, skb, offset);
+               ip_cmsg_recv_checksum(msg, skb, tlen, offset);
 }
 EXPORT_SYMBOL(ip_cmsg_recv_offset);
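
The new tlen argument is the transport header length, so the checksum fix-up skips header bytes that the message data does not cover. The udp.c hunk later in this merge is the canonical caller:

        if (inet->cmsg_flags)
                ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
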
 
index 9d1e555496e35fa878cb61491336391b93c5183f..8e77786549c614a8aa0d979dbab37cdb84d4cea8 100644 (file)
@@ -2192,7 +2192,7 @@ static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
 
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
                   __be32 saddr, __be32 daddr,
-                  struct rtmsg *rtm, int nowait)
+                  struct rtmsg *rtm, int nowait, u32 portid)
 {
        struct mfc_cache *cache;
        struct mr_table *mrt;
@@ -2237,6 +2237,7 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_push(skb2, sizeof(struct iphdr));
                skb_reset_network_header(skb2);
                iph = ip_hdr(skb2);
index fb54659320d86af6a08355ebe2783ec4be3c97fe..39483512a54180b6bfec09f997f2d40bd123db87 100644 (file)
@@ -2499,7 +2499,8 @@ static int rt_fill_info(struct net *net,  __be32 dst, __be32 src, u32 table_id,
                    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
                        int err = ipmr_get_route(net, skb,
                                                 fl4->saddr, fl4->daddr,
-                                                r, nowait);
+                                                r, nowait, portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index b1784c897e6cdc781036914b709096fc09415e10..46123369144ffa5e7b4cbfea0355efa867c1d996 100644 (file)
@@ -97,11 +97,11 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low
                container_of(table->data, struct net, ipv4.ping_group_range.range);
        unsigned int seq;
        do {
-               seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);
+               seq = read_seqbegin(&net->ipv4.ping_group_range.lock);
 
                *low = data[0];
                *high = data[1];
-       } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
+       } while (read_seqretry(&net->ipv4.ping_group_range.lock, seq));
 }
 
 /* Update system visible IP port range */
@@ -110,10 +110,10 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t hig
        kgid_t *data = table->data;
        struct net *net =
                container_of(table->data, struct net, ipv4.ping_group_range.range);
-       write_seqlock(&net->ipv4.ip_local_ports.lock);
+       write_seqlock(&net->ipv4.ping_group_range.lock);
        data[0] = low;
        data[1] = high;
-       write_sequnlock(&net->ipv4.ip_local_ports.lock);
+       write_sequnlock(&net->ipv4.ping_group_range.lock);
 }
 
 /* Validate changes from /proc interface. */
index 2bf1110aa2ae74ee04728626334cc464a8cac060..35e97ff3054a8de4b4988c5ec70ea0f46fbea20d 100644 (file)
@@ -2325,10 +2325,9 @@ static void DBGUNDO(struct sock *sk, const char *msg)
        }
 #if IS_ENABLED(CONFIG_IPV6)
        else if (sk->sk_family == AF_INET6) {
-               struct ipv6_pinfo *np = inet6_sk(sk);
                pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
                         msg,
-                        &np->daddr, ntohs(inet->inet_dport),
+                        &sk->sk_v6_daddr, ntohs(inet->inet_dport),
                         tp->snd_cwnd, tcp_left_out(tp),
                         tp->snd_ssthresh, tp->prior_ssthresh,
                         tp->packets_out);
index 9eb81a4b0da20b55229d488f5e4e8a69e7f282d2..ca3731721d81dac1b73ef94aee51ac2b4e3834e4 100644 (file)
@@ -1950,12 +1950,14 @@ static int tcp_mtu_probe(struct sock *sk)
        len = 0;
        tcp_for_write_queue_from_safe(skb, next, sk) {
                copy = min_t(int, skb->len, probe_size - len);
-               if (nskb->ip_summed)
+               if (nskb->ip_summed) {
                        skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-               else
-                       nskb->csum = skb_copy_and_csum_bits(skb, 0,
-                                                           skb_put(nskb, copy),
-                                                           copy, nskb->csum);
+               } else {
+                       __wsum csum = skb_copy_and_csum_bits(skb, 0,
+                                                            skb_put(nskb, copy),
+                                                            copy, 0);
+                       nskb->csum = csum_block_add(nskb->csum, csum, len);
+               }
 
                if (skb->len <= copy) {
                        /* We've eaten all the data from this skb.
@@ -2569,7 +2571,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         * copying overhead: fragmentation, tunneling, mangling etc.
         */
        if (atomic_read(&sk->sk_wmem_alloc) >
-           min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+           min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+                 sk->sk_sndbuf))
                return -EAGAIN;
 
        if (skb_still_in_host_queue(sk, skb))
index defc9cad1797eb696653784fb1560652eb23d8d0..381a035fcfa1ca57e28ddbd33a94f5ab6344112d 100644 (file)
@@ -1343,7 +1343,7 @@ try_again:
                *addr_len = sizeof(*sin);
        }
        if (inet->cmsg_flags)
-               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr));
+               ip_cmsg_recv_offset(msg, skb, sizeof(struct udphdr), off);
 
        err = copied;
        if (flags & MSG_TRUNC)
index 0e36e56dfd225ad3757e14445f9364ecff33ff9b..6396f1c80ae9ef36469acf69c4ba496ffd85dec8 100644 (file)
@@ -339,8 +339,8 @@ unflush:
        skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
        skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
        NAPI_GRO_CB(skb)->proto = uo_priv->offload->ipproto;
-       pp = uo_priv->offload->callbacks.gro_receive(head, skb,
-                                                    uo_priv->offload);
+       pp = call_gro_receive_udp(uo_priv->offload->callbacks.gro_receive,
+                                 head, skb, uo_priv->offload);
 
 out_unlock:
        rcu_read_unlock();
index 563a91f15f68e706362a403364f5b13b9f3b3fc6..1e541578a66d3eb63d864884156903909c4990e0 100644 (file)
@@ -2943,7 +2943,7 @@ static void init_loopback(struct net_device *dev)
                                 * lo device down, release this obsolete dst and
                                 * reallocate a new router for ifa.
                                 */
-                               if (sp_ifa->rt->dst.obsolete > 0) {
+                               if (!atomic_read(&sp_ifa->rt->rt6i_ref)) {
                                        ip6_rt_put(sp_ifa->rt);
                                        sp_ifa->rt = NULL;
                                } else {
index 4650c6824783ea29f687780aa61669a82e889858..17430f3410737fc1ae3fd0bd3375d18017454772 100644 (file)
@@ -886,7 +886,6 @@ static int ip6gre_xmit_other(struct sk_buff *skb, struct net_device *dev)
                encap_limit = t->parms.encap_limit;
 
        memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6));
-       fl6.flowi6_proto = skb->protocol;
 
        err = ip6gre_xmit2(skb, dev, 0, &fl6, encap_limit, &mtu);
 
index 82e9f30760283aca2f3d9468573aea607bd5bf6b..efe6268b8bc3e279def9b5322c8fee2c3c5ec92f 100644 (file)
@@ -247,7 +247,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
 
        skb_gro_postpull_rcsum(skb, iph, nlen);
 
-       pp = ops->callbacks.gro_receive(head, skb);
+       pp = call_gro_receive(ops->callbacks.gro_receive, head, skb);
 
 out_unlock:
        rcu_read_unlock();
index 3991b21e24ad359e64c415e4108c485cf31a49cc..e8878886eba425fe39c4b95ae399d0bf551f30cd 100644 (file)
@@ -246,6 +246,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
        hash = HASH(&any, local);
        for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
                if (ipv6_addr_equal(local, &t->parms.laddr) &&
+                   ipv6_addr_any(&t->parms.raddr) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
@@ -253,6 +254,7 @@ ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_
        hash = HASH(remote, &any);
        for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
                if (ipv6_addr_equal(remote, &t->parms.raddr) &&
+                   ipv6_addr_any(&t->parms.laddr) &&
                    (t->dev->flags & IFF_UP))
                        return t;
        }
index e207cb2468dab0799422b2fc4620d729ed09a44b..d9843e5a667fe76c62eea32cfeae892b1a2663ef 100644 (file)
@@ -2276,8 +2276,8 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
        return 1;
 }
 
-int ip6mr_get_route(struct net *net,
-                   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
+int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
+                   int nowait, u32 portid)
 {
        int err;
        struct mr6_table *mrt;
@@ -2322,6 +2322,7 @@ int ip6mr_get_route(struct net *net,
                        return -ENOMEM;
                }
 
+               NETLINK_CB(skb2).portid = portid;
                skb_reset_transport_header(skb2);
 
                skb_put(skb2, sizeof(struct ipv6hdr));
index aed4f305f5f60d33097f2421dbb515966d3b43f7..46476a3af2ad68682c964060004ace0d7097b20b 100644 (file)
@@ -3128,7 +3128,9 @@ static int rt6_fill_node(struct net *net,
        if (iif) {
 #ifdef CONFIG_IPV6_MROUTE
                if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
-                       int err = ip6mr_get_route(net, skb, rtm, nowait);
+                       int err = ip6mr_get_route(net, skb, rtm, nowait,
+                                                 portid);
+
                        if (err <= 0) {
                                if (!nowait) {
                                        if (err == 0)
index f58632cc45dc1e2367f2f1f7131a7e215f0d9dda..b831f3eb55a40f9de32a3cf30ba34e9dd853ec23 100644 (file)
@@ -1180,6 +1180,16 @@ out:
        return NULL;
 }
 
+static void tcp_v6_restore_cb(struct sk_buff *skb)
+{
+       /* We need to move header back to the beginning if xfrm6_policy_check()
+        * and tcp_v6_fill_cb() are going to be called again.
+        * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
+        */
+       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
+               sizeof(struct inet6_skb_parm));
+}
+
 /* The socket must have it's spinlock held when we get
  * here, unless it is a TCP_LISTEN socket.
  *
@@ -1309,6 +1319,7 @@ ipv6_pktoptions:
                        np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
                if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
                        skb_set_owner_r(opt_skb, sk);
+                       tcp_v6_restore_cb(opt_skb);
                        opt_skb = xchg(&np->pktoptions, opt_skb);
                } else {
                        __kfree_skb(opt_skb);
@@ -1342,15 +1353,6 @@ static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
        TCP_SKB_CB(skb)->sacked = 0;
 }
 
-static void tcp_v6_restore_cb(struct sk_buff *skb)
-{
-       /* We need to move header back to the beginning if xfrm6_policy_check()
-        * and tcp_v6_fill_cb() are going to be called again.
-        */
-       memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
-               sizeof(struct inet6_skb_parm));
-}
-
 static int tcp_v6_rcv(struct sk_buff *skb)
 {
        const struct tcphdr *th;
index 2415e55aaf8f590825f95cb236a3c1a496f4dbc2..1207379c1cce3a214afef4c2a39230360aa4bdc7 100644 (file)
@@ -498,7 +498,8 @@ try_again:
 
        if (is_udp4) {
                if (inet->cmsg_flags)
-                       ip_cmsg_recv(msg, skb);
+                       ip_cmsg_recv_offset(msg, skb,
+                                           sizeof(struct udphdr), off);
        } else {
                if (np->rxopt.all)
                        ip6_datagram_recv_specific_ctl(sk, msg, skb);
index a3bb8f7f5fc57739c31a58a7a24377d260e12da2..2b528389409f62e75aaa4f0a3794d4af558d48fe 100644 (file)
@@ -2203,16 +2203,22 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
        if (!(status->rx_flags & IEEE80211_RX_AMSDU))
                return RX_CONTINUE;
 
-       if (ieee80211_has_a4(hdr->frame_control) &&
-           rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-           !rx->sdata->u.vlan.sta)
-               return RX_DROP_UNUSABLE;
+       if (unlikely(ieee80211_has_a4(hdr->frame_control))) {
+               switch (rx->sdata->vif.type) {
+               case NL80211_IFTYPE_AP_VLAN:
+                       if (!rx->sdata->u.vlan.sta)
+                               return RX_DROP_UNUSABLE;
+                       break;
+               case NL80211_IFTYPE_STATION:
+                       if (!rx->sdata->u.mgd.use_4addr)
+                               return RX_DROP_UNUSABLE;
+                       break;
+               default:
+                       return RX_DROP_UNUSABLE;
+               }
+       }
 
-       if (is_multicast_ether_addr(hdr->addr1) &&
-           ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
-             rx->sdata->u.vlan.sta) ||
-            (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
-             rx->sdata->u.mgd.use_4addr)))
+       if (is_multicast_ether_addr(hdr->addr1))
                return RX_DROP_UNUSABLE;
 
        skb->dev = dev;
index 7a5fa0c98377643aadcb047184eb7a32c4463c5f..28fc283c1ec1fbf4b5049b8d52e087b48628aae6 100644 (file)
@@ -2557,7 +2557,7 @@ static int netlink_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
        /* Record the max length of recvmsg() calls for future allocations */
        nlk->max_recvmsg_len = max(nlk->max_recvmsg_len, len);
        nlk->max_recvmsg_len = min_t(size_t, nlk->max_recvmsg_len,
-                                    16384);
+                                    SKB_WITH_OVERHEAD(32768));
 
        copied = data_skb->len;
        if (len < copied) {
@@ -2810,14 +2810,13 @@ static int netlink_dump(struct sock *sk)
        if (alloc_min_size < nlk->max_recvmsg_len) {
                alloc_size = nlk->max_recvmsg_len;
                skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-                                       GFP_KERNEL |
-                                       __GFP_NOWARN |
-                                       __GFP_NORETRY);
+                                       (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM) |
+                                       __GFP_NOWARN | __GFP_NORETRY);
        }
        if (!skb) {
                alloc_size = alloc_min_size;
                skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
-                                       GFP_KERNEL);
+                                       (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
        }
        if (!skb)
                goto errout_skb;
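
Two details, both from the generic definitions rather than this diff: SKB_WITH_OVERHEAD() subtracts the skb_shared_info tail so buffer plus metadata fit the stated size,

        /* include/linux/skbuff.h */
        #define SKB_WITH_OVERHEAD(X)    \
                ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

and clearing __GFP_DIRECT_RECLAIM means these large, multi-order allocations fail fast instead of stalling in reclaim; the alloc_min_size retry below then picks up the slack.
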
index a86f26d05bc22e378ef96943f957724bbaca4140..34e4fcfd240be1896029a059b564668aa8cba859 100644 (file)
@@ -249,7 +249,7 @@ static void __fanout_link(struct sock *sk, struct packet_sock *po);
 static int packet_direct_xmit(struct sk_buff *skb)
 {
        struct net_device *dev = skb->dev;
-       netdev_features_t features;
+       struct sk_buff *orig_skb = skb;
        struct netdev_queue *txq;
        int ret = NETDEV_TX_BUSY;
 
@@ -257,9 +257,8 @@ static int packet_direct_xmit(struct sk_buff *skb)
                     !netif_carrier_ok(dev)))
                goto drop;
 
-       features = netif_skb_features(skb);
-       if (skb_needs_linearize(skb, features) &&
-           __skb_linearize(skb))
+       skb = validate_xmit_skb_list(skb, dev);
+       if (skb != orig_skb)
                goto drop;
 
        txq = skb_get_tx_queue(dev, skb);
@@ -279,7 +278,7 @@ static int packet_direct_xmit(struct sk_buff *skb)
        return ret;
 drop:
        atomic_long_inc(&dev->tx_dropped);
-       kfree_skb(skb);
+       kfree_skb_list(skb);
        return NET_XMIT_DROP;
 }
 
@@ -3855,6 +3854,7 @@ static int packet_notifier(struct notifier_block *this,
                                }
                                if (msg == NETDEV_UNREGISTER) {
                                        packet_cached_dev_reset(po);
+                                       fanout_release(sk);
                                        po->ifindex = -1;
                                        if (po->prot_hook.dev)
                                                dev_put(po->prot_hook.dev);
index 796785e0bf96b0e65f598d3b2dad8256485d034a..d7edba4536bd22258cdcb302ce1c863a6082c890 100644 (file)
@@ -33,6 +33,12 @@ static int tcf_vlan(struct sk_buff *skb, const struct tc_action *a,
        bstats_update(&v->tcf_bstats, skb);
        action = v->tcf_action;
 
+       /* Ensure 'data' points at the mac_header prior to calling the VLAN
+        * manipulating functions.
+        */
+       if (skb_at_tc_ingress(skb))
+               skb_push_rcsum(skb, skb->mac_len);
+
        switch (v->tcfv_action) {
        case TCA_VLAN_ACT_POP:
                err = skb_vlan_pop(skb);
@@ -54,6 +60,9 @@ drop:
        action = TC_ACT_SHOT;
        v->tcf_qstats.drops++;
 unlock:
+       if (skb_at_tc_ingress(skb))
+               skb_pull_rcsum(skb, skb->mac_len);
+
        spin_unlock(&v->tcf_lock);
        return action;
 }
index a75864d93142153bfff4ab765620e10bcfab3e96..ecc1904e454f052d58a1d121c35a3a07aa1c6a48 100644 (file)
@@ -315,7 +315,8 @@ replay:
                        if (err == 0) {
                                struct tcf_proto *next = rtnl_dereference(tp->next);
 
-                               tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+                               tfilter_notify(net, skb, n, tp,
+                                              t->tcm_handle, RTM_DELTFILTER);
                                if (tcf_destroy(tp, false))
                                        RCU_INIT_POINTER(*back, next);
                        }
index 22c2bf367d7e8c7025065f33eabfd7e93a7f4021..29c7c43de1086e79e1888bd9398f9b24d2b634a8 100644 (file)
@@ -3426,6 +3426,12 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
                        return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
                                                  commands);
 
+               /* Report violation if chunk len overflows */
+               ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
+               if (ch_end > skb_tail_pointer(skb))
+                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
+                                                 commands);
+
                /* Now that we know we at least have a chunk header,
                 * do things that are type appropriate.
                 */
@@ -3457,12 +3463,6 @@ sctp_disposition_t sctp_sf_ootb(struct net *net,
                        }
                }
 
-               /* Report violation if chunk len overflows */
-               ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length));
-               if (ch_end > skb_tail_pointer(skb))
-                       return sctp_sf_violation_chunklen(net, ep, asoc, type, arg,
-                                                 commands);
-
                ch = (sctp_chunkhdr_t *) ch_end;
        } while (ch_end < skb_tail_pointer(skb));
 
index be1489fc3234bd2ed0281d50f7e11a2a205d38d1..402817be3873f0d2eb5236949a2e4aec7e1cef74 100644 (file)
@@ -4371,7 +4371,7 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
                                  int __user *optlen)
 {
-       if (len <= 0)
+       if (len == 0)
                return -EINVAL;
        if (len > sizeof(struct sctp_event_subscribe))
                len = sizeof(struct sctp_event_subscribe);
@@ -5972,6 +5972,9 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
        if (get_user(len, optlen))
                return -EFAULT;
 
+       if (len < 0)
+               return -EINVAL;
+
        lock_sock(sk);
 
        switch (optname) {
index f0611a6368cd2572188f9a066291b9c8d717f95d..b9f531c9e4fa753d326752b63dc2cf599579ffeb 100644 (file)
@@ -181,7 +181,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
        struct timespec now;
        unsigned long timo;
        key_ref_t key_ref, skey_ref;
-       char xbuf[12];
+       char xbuf[16];
        int rc;
 
        struct keyring_search_context ctx = {
index 22dbfa563919fca4ace4d3d50b7c822008c7bb34..5baf8b56b6e7604637bca89eae54ca63ce4e6232 100644 (file)
@@ -956,7 +956,7 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
        status = azx_readb(chip, RIRBSTS);
        if (status & RIRB_INT_MASK) {
                if (status & RIRB_INT_RESPONSE) {
-                       if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
+                       if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
                                udelay(80);
                        snd_hdac_bus_update_rirb(bus);
                }
@@ -1055,11 +1055,6 @@ int azx_bus_init(struct azx *chip, const char *model,
        if (chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)
                bus->core.corbrp_self_clear = true;
 
-       if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) {
-               dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
-               bus->needs_damn_long_delay = 1;
-       }
-
        if (chip->driver_caps & AZX_DCAPS_4K_BDLE_BOUNDARY)
                bus->core.align_bdle_4k = true;
 
index 7b635d68cfe1a2d76efd9826b2020c10f8c834a3..b17539537b2e0b90cb55f95a6d3e0689003822a8 100644 (file)
@@ -32,8 +32,8 @@
 #define AZX_DCAPS_NO_MSI       (1 << 9)        /* No MSI support */
 #define AZX_DCAPS_SNOOP_MASK   (3 << 10)       /* snoop type mask */
 #define AZX_DCAPS_SNOOP_OFF    (1 << 12)       /* snoop default off */
-#define AZX_DCAPS_RIRB_DELAY   (1 << 13)       /* Long delay in read loop */
-#define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14)     /* Put a delay before read */
+/* 13 unused */
+/* 14 unused */
 #define AZX_DCAPS_CTX_WORKAROUND (1 << 15)     /* X-Fi workaround */
 #define AZX_DCAPS_POSFIX_LPIB  (1 << 16)       /* Use LPIB as default */
 #define AZX_DCAPS_POSFIX_VIA   (1 << 17)       /* Use VIACOMBO as default */
index d4671973d8890626555eac72e24a10f74cea8423..ad4a1e9a3ae1be732bd2fa602cd01eea511a7160 100644 (file)
@@ -334,8 +334,7 @@ enum {
 
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
-       (AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI | /*AZX_DCAPS_ALIGN_BUFSIZE |*/ \
-        AZX_DCAPS_NO_64BIT | AZX_DCAPS_CORBRP_SELF_CLEAR |\
+       (AZX_DCAPS_NO_MSI | AZX_DCAPS_CORBRP_SELF_CLEAR |\
         AZX_DCAPS_SNOOP_TYPE(NVIDIA))
 
 #define AZX_DCAPS_PRESET_CTHDA \
@@ -1637,6 +1636,11 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
                return err;
        }
 
+       if (chip->driver_type == AZX_DRIVER_NVIDIA) {
+               dev_dbg(chip->card->dev, "Enable delay in RIRB handling\n");
+               chip->bus.needs_damn_long_delay = 1;
+       }
+
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
        if (err < 0) {
                dev_err(card->dev, "Error creating device [card]!\n");
@@ -1720,6 +1724,10 @@ static int azx_first_init(struct azx *chip)
                }
        }
 
+       /* NVidia hardware normally only supports up to 40 bits of DMA */
+       if (chip->pci->vendor == PCI_VENDOR_ID_NVIDIA)
+               dma_bits = 40;
+
        /* disable 64bit DMA address on some devices */
        if (chip->driver_caps & AZX_DCAPS_NO_64BIT) {
                dev_dbg(card->dev, "Disabling 64bit DMA\n");
@@ -2406,14 +2414,12 @@ static const struct pci_device_id azx_ids[] = {
          .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8,
          .class_mask = 0xffffff,
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
-         AZX_DCAPS_NO_64BIT |
-         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+         AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
 #else
        /* this entry seems still valid -- i.e. without emu20kx chip */
        { PCI_DEVICE(0x1102, 0x0009),
          .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
-         AZX_DCAPS_NO_64BIT |
-         AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
+         AZX_DCAPS_NO_64BIT | AZX_DCAPS_POSFIX_LPIB },
 #endif
        /* CM8888 */
        { PCI_DEVICE(0x13f6, 0x5011),
index 58c0aad372842125be5529d07aecbbe98e2c8859..17fd81736d3d67f6a6fb67ef1ad624a6d96d93a9 100644 (file)
@@ -464,6 +464,8 @@ static int hda_tegra_create(struct snd_card *card,
        if (err < 0)
                return err;
 
+       chip->bus.needs_damn_long_delay = 1;
+
        err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
        if (err < 0) {
                dev_err(card->dev, "Error creating device\n");
@@ -481,8 +483,7 @@ MODULE_DEVICE_TABLE(of, hda_tegra_match);
 
 static int hda_tegra_probe(struct platform_device *pdev)
 {
-       const unsigned int driver_flags = AZX_DCAPS_RIRB_DELAY |
-                                         AZX_DCAPS_CORBRP_SELF_CLEAR;
+       const unsigned int driver_flags = AZX_DCAPS_CORBRP_SELF_CLEAR;
        struct snd_card *card;
        struct azx *chip;
        struct hda_tegra *hda;
index b1fa50aed88878b535a39b4c8f2016c5e361c198..f0986cac82f1a5fb464e786b992f312345c3b6f8 100644 (file)
@@ -5793,8 +5793,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 #define ALC295_STANDARD_PINS \
        {0x12, 0xb7a60130}, \
        {0x14, 0x90170110}, \
-       {0x17, 0x21014020}, \
-       {0x18, 0x21a19030}, \
        {0x21, 0x04211020}
 
 #define ALC298_STANDARD_PINS \
@@ -5840,10 +5838,18 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x14, 0x90170110},
                {0x1b, 0x02011020},
                {0x21, 0x0221101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170110},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221101f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x01014020},
                {0x21, 0x0221103f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x14, 0x90170130},
+               {0x1b, 0x01011020},
+               {0x21, 0x0221103f}),
        SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                {0x14, 0x90170130},
                {0x1b, 0x02011020},
@@ -6021,7 +6027,13 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                ALC292_STANDARD_PINS,
                {0x13, 0x90a60140}),
        SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-               ALC295_STANDARD_PINS),
+               ALC295_STANDARD_PINS,
+               {0x17, 0x21014020},
+               {0x18, 0x21a19030}),
+       SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC295_STANDARD_PINS,
+               {0x17, 0x21014040},
+               {0x18, 0x21a19050}),
        SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC298_STANDARD_PINS,
                {0x17, 0x90170110}),
index c60a776e815d72f14b9b6345f2e8a0266f8ec1b6..8a59d4782a0f4d3c33b3e6840cbe265ba8ee4406 100644 (file)
@@ -2907,6 +2907,23 @@ AU0828_DEVICE(0x2040, 0x7260, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7213, "Hauppauge", "HVR-950Q"),
 AU0828_DEVICE(0x2040, 0x7270, "Hauppauge", "HVR-950Q"),
 
+/* Syntek STK1160 */
+{
+       .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+                      USB_DEVICE_ID_MATCH_INT_CLASS |
+                      USB_DEVICE_ID_MATCH_INT_SUBCLASS,
+       .idVendor = 0x05e1,
+       .idProduct = 0x0408,
+       .bInterfaceClass = USB_CLASS_AUDIO,
+       .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Syntek",
+               .product_name = "STK1160",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_AUDIO_ALIGN_TRANSFER
+       }
+},
+
 /* Digidesign Mbox */
 {
        /* Thanks to Clemens Ladisch <clemens@ladisch.de> */
index 34846e71fdbdd13ad946daae10f70f75f2a395cf..74c265e0ffa0f3088a88b07e353254694559c668 100644 (file)
@@ -423,7 +423,7 @@ $(LIBTRACEEVENT)-clean:
        $(call QUIET_CLEAN, libtraceevent)
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) O=$(OUTPUT) clean >/dev/null
 
-install-traceevent-plugins: $(LIBTRACEEVENT)
+install-traceevent-plugins: libtraceevent_plugins
        $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) install_plugins
 
 $(LIBAPI): fixdep FORCE
index 487d6357b7e75039ff914077eb11d890f938e51d..453eafd4dd6e5fa3f48b458e2fd62df0792c3554 100644 (file)
@@ -28,6 +28,7 @@
 
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
 
 /* These are for GICv2 emulation only */
 #define GICH_LR_PHYSID_CPUID           (7UL << GICH_LR_PHYSID_CPUID_SHIFT)
 #define ICH_LR_VIRTUALID_MASK          (BIT_ULL(32) - 1)
 
-/*
- * LRs are stored in reverse order in memory. make sure we index them
- * correctly.
- */
-#define LR_INDEX(lr)                   (VGIC_V3_MAX_LRS - 1 - lr)
-
 static u32 ich_vtr_el2;
 
 static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
 {
        struct vgic_lr lr_desc;
-       u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)];
+       u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)];
 
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
                lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
@@ -111,7 +106,7 @@ static void vgic_v3_set_lr(struct kvm_vcpu *vcpu, int lr,
                lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
        }
 
-       vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[LR_INDEX(lr)] = lr_val;
+       vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val;
 
        if (!(lr_desc.state & LR_STATE_MASK))
                vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
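
The LR_INDEX() macro deleted above presumably lives on in a shared header (note the new asm/kvm_asm.h include in this hunk) as VGIC_V3_LR_INDEX(), with the same reverse-order formula:

        /* LRs are stored in reverse order in memory */
        #define VGIC_V3_LR_INDEX(lr)    (VGIC_V3_MAX_LRS - 1 - lr)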