BACKPORT: arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1
author	Catalin Marinas <catalin.marinas@arm.com>	Fri, 1 Jul 2016 15:53:00 +0000 (16:53 +0100)
committer	Amit Pundir <amit.pundir@linaro.org>	Mon, 16 Jan 2017 09:20:50 +0000 (14:50 +0530)
This patch adds the uaccess macros/functions to disable access to user
space by setting TTBR0_EL1 to a reserved zeroed page. Since the value
written to TTBR0_EL1 must be a physical address, for simplicity this
patch introduces a reserved_ttbr0 page at a constant offset from
swapper_pg_dir. The uaccess_disable code uses the TTBR1_EL1 value
adjusted by the reserved_ttbr0 offset.
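
In C terms, the disable path is simply (a minimal sketch mirroring the
__uaccess_ttbr0_disable() helper added by the diff below):

        /*
         * reserved_ttbr0 sits immediately after swapper_pg_dir, so its
         * physical address can be derived from TTBR1_EL1 by a fixed
         * offset, without a virt_to_phys() lookup.
         */
        unsigned long ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;

        write_sysreg(ttbr, ttbr0_el1);  /* TTBR0_EL1 -> zeroed page */
        isb();                          /* synchronize the register update */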

Enabling access to user space is done by restoring TTBR0_EL1 with the
value from the struct thread_info ttbr0 variable. Interrupts must be disabled
during the uaccess_ttbr0_enable code to ensure the atomicity of the
thread_info.ttbr0 read and TTBR0_EL1 write. This patch also moves the
get_thread_info asm macro from entry.S to assembler.h for reuse in the
uaccess_ttbr0_* macros.
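
The enable path, again sketched in C after the __uaccess_ttbr0_enable()
helper in the diff below:

        unsigned long flags;

        /*
         * IRQs off: a context switch between the thread_info.ttbr0 read
         * and the TTBR0_EL1 write could follow an ASID roll-over that
         * updates ttbr0, leaving a stale value in the register.
         */
        local_irq_save(flags);
        write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
        isb();
        local_irq_restore(flags);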

Cc: Will Deacon <will.deacon@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Bug: 31432001
Change-Id: I54ada623160cb47f5762e0e39a5e84a75252dbfd
(cherry picked from commit 4b65a5db362783ab4b04ca1c1d2ad70ed9b0ba2a)
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/kernel-pgtable.h
arch/arm64/include/asm/thread_info.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/cpufeature.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/vmlinux.lds.S

diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 7b2a8925ac86426fdd1cdc5ddaab2a38982d4837..d8855ca6068a35ba2873d37b016396b407895d48 100644
        msr     daifclr, #2
        .endm
 
+       .macro  save_and_disable_irq, flags
+       mrs     \flags, daif
+       msr     daifset, #2
+       .endm
+
+       .macro  restore_irq, flags
+       msr     daif, \flags
+       .endm
+
 /*
  * Enable and disable debug exceptions.
  */
@@ -371,6 +380,13 @@ alternative_endif
        movk    \reg, :abs_g0_nc:\val
        .endm
 
+/*
+ * Return the current thread_info.
+ */
+       .macro  get_thread_info, rd
+       mrs     \rd, sp_el0
+       .endm
+
 /*
  * Errata workaround post TTBR0_EL1 update.
  */
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 1695f77d8bf2fa73f3ed464c4cb3f6d9f75da382..8e1f826caf992966c18ed9c696ea95021a312e63 100644
@@ -189,6 +189,12 @@ static inline bool system_supports_mixed_endian_el0(void)
        return id_aa64mmfr0_mixed_endian_el0(read_system_reg(SYS_ID_AA64MMFR0_EL1));
 }
 
+static inline bool system_uses_ttbr0_pan(void)
+{
+       return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
+               !cpus_have_cap(ARM64_HAS_PAN);
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 7e51d1b57c0c56461a9be0fb825fa84a9faeea8b..7803343e5881fbd7b2f635b25082d3e91d2583f8 100644
@@ -19,6 +19,7 @@
 #ifndef __ASM_KERNEL_PGTABLE_H
 #define __ASM_KERNEL_PGTABLE_H
 
+#include <asm/pgtable.h>
 #include <asm/sparsemem.h>
 
 /*
 #define SWAPPER_DIR_SIZE       (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE)
 #define IDMAP_DIR_SIZE         (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+#define RESERVED_TTBR0_SIZE    (PAGE_SIZE)
+#else
+#define RESERVED_TTBR0_SIZE    (0)
+#endif
+
 /* Initial memory map size */
 #if ARM64_SWAPPER_USES_SECTION_MAPS
 #define SWAPPER_BLOCK_SHIFT    SECTION_SHIFT
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index abd64bd1f6d9f0160a3122555cf23be1a30f87eb..794d22603f04ffd5e8d176c450d449566173b1d2 100644
@@ -48,6 +48,9 @@ struct thread_info {
        unsigned long           flags;          /* low level flags */
        mm_segment_t            addr_limit;     /* address limit */
        struct task_struct      *task;          /* main task structure */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       u64                     ttbr0;          /* saved TTBR0_EL1 */
+#endif
        int                     preempt_count;  /* 0 => preemptable, <0 => bug */
        int                     cpu;            /* cpu */
 };
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 8259eded53dca542336ed5a352be5c6961e5086d..955c6e58a624efb5ce949da0dfca4d87f66c0ef7 100644
@@ -19,6 +19,7 @@
 #define __ASM_UACCESS_H
 
 #include <asm/alternative.h>
+#include <asm/kernel-pgtable.h>
 #include <asm/sysreg.h>
 
 #ifndef __ASSEMBLY__
@@ -129,16 +130,71 @@ static inline void set_fs(mm_segment_t fs)
 /*
  * User access enabling/disabling.
  */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+static inline void __uaccess_ttbr0_disable(void)
+{
+       unsigned long ttbr;
+
+       /* reserved_ttbr0 placed at the end of swapper_pg_dir */
+       ttbr = read_sysreg(ttbr1_el1) + SWAPPER_DIR_SIZE;
+       write_sysreg(ttbr, ttbr0_el1);
+       isb();
+}
+
+static inline void __uaccess_ttbr0_enable(void)
+{
+       unsigned long flags;
+
+       /*
+        * Disable interrupts to avoid preemption between reading the 'ttbr0'
+        * variable and the MSR. A context switch could trigger an ASID
+        * roll-over and an update of 'ttbr0'.
+        */
+       local_irq_save(flags);
+       write_sysreg(current_thread_info()->ttbr0, ttbr0_el1);
+       isb();
+       local_irq_restore(flags);
+}
+
+static inline bool uaccess_ttbr0_disable(void)
+{
+       if (!system_uses_ttbr0_pan())
+               return false;
+       __uaccess_ttbr0_disable();
+       return true;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+       if (!system_uses_ttbr0_pan())
+               return false;
+       __uaccess_ttbr0_enable();
+       return true;
+}
+#else
+static inline bool uaccess_ttbr0_disable(void)
+{
+       return false;
+}
+
+static inline bool uaccess_ttbr0_enable(void)
+{
+       return false;
+}
+#endif
+
 #define __uaccess_disable(alt)                                         \
 do {                                                                   \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,                  \
-                       CONFIG_ARM64_PAN));                             \
+       if (!uaccess_ttbr0_disable())                                   \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,          \
+                               CONFIG_ARM64_PAN));                     \
 } while (0)
 
 #define __uaccess_enable(alt)                                          \
 do {                                                                   \
-       asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,                  \
-                       CONFIG_ARM64_PAN));                             \
+       if (!uaccess_ttbr0_enable())                                    \
+               asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,          \
+                               CONFIG_ARM64_PAN));                     \
 } while (0)
 
 static inline void uaccess_disable(void)
@@ -369,16 +425,56 @@ extern __must_check long strnlen_user(const char __user *str, long n);
 #include <asm/assembler.h>
 
 /*
- * User access enabling/disabling macros. These are no-ops when UAO is
- * present.
+ * User access enabling/disabling macros.
+ */
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       .macro  __uaccess_ttbr0_disable, tmp1
+       mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
+       msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
+       isb
+       .endm
+
+       .macro  __uaccess_ttbr0_enable, tmp1
+       get_thread_info \tmp1
+       ldr     \tmp1, [\tmp1, #TSK_TI_TTBR0]   // load saved TTBR0_EL1
+       msr     ttbr0_el1, \tmp1                // set the non-PAN TTBR0_EL1
+       isb
+       .endm
+
+       .macro  uaccess_ttbr0_disable, tmp1
+alternative_if_not ARM64_HAS_PAN
+       __uaccess_ttbr0_disable \tmp1
+alternative_else_nop_endif
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+alternative_if_not ARM64_HAS_PAN
+       save_and_disable_irq \tmp2              // avoid preemption
+       __uaccess_ttbr0_enable \tmp1
+       restore_irq \tmp2
+alternative_else_nop_endif
+       .endm
+#else
+       .macro  uaccess_ttbr0_disable, tmp1
+       .endm
+
+       .macro  uaccess_ttbr0_enable, tmp1, tmp2
+       .endm
+#endif
+
+/*
+ * These macros are no-ops when UAO is present.
  */
        .macro  uaccess_disable_not_uao, tmp1
+       uaccess_ttbr0_disable \tmp1
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(1)
 alternative_else_nop_endif
        .endm
 
        .macro  uaccess_enable_not_uao, tmp1, tmp2
+       uaccess_ttbr0_enable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(0)
 alternative_else_nop_endif
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 2bb17bd556f8dd5114e52b7fec90bf926cd012c7..c9ea87198789771cc5d3662d43a54fa0aedc3514 100644
@@ -40,6 +40,9 @@ int main(void)
   DEFINE(TI_ADDR_LIMIT,                offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,              offsetof(struct thread_info, task));
   DEFINE(TI_CPU,               offsetof(struct thread_info, cpu));
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+  DEFINE(TSK_TI_TTBR0,         offsetof(struct thread_info, ttbr0));
+#endif
   BLANK();
   DEFINE(THREAD_CPU_CONTEXT,   offsetof(struct task_struct, thread.cpu_context));
   BLANK();
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index eda7d5915fbb209bf4057cd1f621beadd8c02606..cdf1dca6413385f7ac2bbaa1faeff8198b8d4d92 100644
@@ -46,6 +46,7 @@ unsigned int compat_elf_hwcap2 __read_mostly;
 #endif
 
 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
+EXPORT_SYMBOL(cpu_hwcaps);
 
 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
        {                                               \
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 367e850ece4d56922b3a517ce1656e7adf547490..8aa564f58b6131a337d673909e46f9f5dbaac51c 100644
@@ -187,10 +187,6 @@ alternative_endif
        eret                                    // return to kernel
        .endm
 
-       .macro  get_thread_info, rd
-       mrs     \rd, sp_el0
-       .endm
-
        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp
 
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 0ea9a97467a6f606be6ef8618d5ded3a9fbf867e..8cfd5ab377434b2ccafd5b8d7c4701386faf8a95 100644
@@ -318,14 +318,14 @@ __create_page_tables:
         * dirty cache lines being evicted.
         */
        mov     x0, x25
-       add     x1, x26, #SWAPPER_DIR_SIZE
+       add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        bl      __inval_cache_range
 
        /*
         * Clear the idmap and swapper page tables.
         */
        mov     x0, x25
-       add     x6, x26, #SWAPPER_DIR_SIZE
+       add     x6, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
 1:     stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
        stp     xzr, xzr, [x0], #16
@@ -404,7 +404,7 @@ __create_page_tables:
         * tables again to remove any speculatively loaded cache lines.
         */
        mov     x0, x25
-       add     x1, x26, #SWAPPER_DIR_SIZE
+       add     x1, x26, #SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE
        dmb     sy
        bl      __inval_cache_range
 
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9442a51d3540558a363e095bd31c6e3fcf9fb119..7a5228c7abdd7cc9c8226df887a2ec9e7b4d053d 100644
@@ -194,6 +194,11 @@ SECTIONS
        swapper_pg_dir = .;
        . += SWAPPER_DIR_SIZE;
 
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       reserved_ttbr0 = .;
+       . += RESERVED_TTBR0_SIZE;
+#endif
+
        _end = .;
 
        STABS_DEBUG