From 7e73e2156c5ff4a2219ac832e58c7193775e3f8f Mon Sep 17 00:00:00 2001
From: Sami Tolvanen <samitolvanen@google.com>
Date: Wed, 14 Dec 2016 12:32:25 -0800
Subject: [PATCH] Revert "FROMLIST: arm64: Disable TTBR0_EL1 during normal kernel execution"

This reverts commit 5775ca34829caf0664c8ccc02fd0e93cb6022e0f.

Bug: 31432001
Change-Id: I9b07c2f01bc2bcfed51f60ab487034639f5e1960
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
 arch/arm64/include/asm/efi.h         | 26 +----------
 arch/arm64/include/asm/mmu_context.h | 51 ++++++---------------
 arch/arm64/include/asm/ptrace.h      |  2 -
 arch/arm64/kernel/entry.S            | 67 ----------------------------
 arch/arm64/kernel/setup.c            |  9 ----
 arch/arm64/mm/context.c              |  7 +--
 6 files changed, 16 insertions(+), 146 deletions(-)

diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 932f5a56d1a6..8e88a696c9cb 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_EFI_H
 #define _ASM_EFI_H
 
-#include <asm/cpufeature.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/neon.h>
@@ -70,30 +69,7 @@ int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
-	__switch_mm(mm);
-
-	if (system_uses_ttbr0_pan()) {
-		if (mm != current->active_mm) {
-			/*
-			 * Update the current thread's saved ttbr0 since it is
-			 * restored as part of a return from exception. Set
-			 * the hardware TTBR0_EL1 using cpu_switch_mm()
-			 * directly to enable potential errata workarounds.
-			 */
-			update_saved_ttbr0(current, mm);
-			cpu_switch_mm(mm->pgd, mm);
-		} else {
-			/*
-			 * Defer the switch to the current thread's TTBR0_EL1
-			 * until uaccess_enable(). Restore the current
-			 * thread's saved ttbr0 corresponding to its active_mm
-			 * (if different from init_mm).
-			 */
-			cpu_set_reserved_ttbr0();
-			if (current->active_mm != &init_mm)
-				update_saved_ttbr0(current, current->active_mm);
-		}
-	}
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 4a32fd5f101d..a00f7cf35bbd 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -23,7 +23,6 @@
 #include <linux/sched.h>
 
 #include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -114,7 +113,7 @@ static inline void cpu_uninstall_idmap(void)
 	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 
-	if (mm != &init_mm && !system_uses_ttbr0_pan())
+	if (mm != &init_mm)
 		cpu_switch_mm(mm->pgd, mm);
 }
 
@@ -174,27 +173,21 @@ enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-static inline void update_saved_ttbr0(struct task_struct *tsk,
-				      struct mm_struct *mm)
-{
-	if (system_uses_ttbr0_pan()) {
-		BUG_ON(mm->pgd == swapper_pg_dir);
-		task_thread_info(tsk)->ttbr0 =
-			virt_to_phys(mm->pgd) | ASID(mm) << 48;
-	}
-}
-#else
-static inline void update_saved_ttbr0(struct task_struct *tsk,
-				      struct mm_struct *mm)
-{
-}
-#endif
-
-static inline void __switch_mm(struct mm_struct *next)
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned. No registers are touched. We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
 {
 	unsigned int cpu = smp_processor_id();
 
+	if (prev == next)
+		return;
+
 	/*
 	 * init_mm.pgd does not contain any user mappings and it is always
 	 * active for kernel addresses in TTBR1. Just set the reserved TTBR0.
@@ -207,23 +200,7 @@ static inline void __switch_mm(struct mm_struct *next)
 	check_and_switch_context(next, cpu);
 }
 
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
-{
-	if (prev != next)
-		__switch_mm(next);
-
-	/*
-	 * Update the saved TTBR0_EL1 of the scheduled-in task as the previous
-	 * value may have not been initialised yet (activate_mm caller) or the
-	 * ASID has changed since the last run (following the context switch
-	 * of another thread of the same process).
-	 */
-	update_saved_ttbr0(tsk, next);
-}
-
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, current)
+#define activate_mm(prev,next)	switch_mm(prev, next, NULL)
 
 #endif
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 5eedfd83acc7..1528d52eb8c0 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,8 +21,6 @@
 
 #include <uapi/asm/ptrace.h>
 
-#define _PSR_PAN_BIT		22
-
 /* Current Exception Level values, as contained in CurrentEL */
 #define CurrentEL_EL1	(1 << 2)
 #define CurrentEL_EL2	(2 << 2)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8606895240ba..8aa564f58b61 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -29,9 +29,7 @@
 #include <asm/esr.h>
 #include <asm/irq.h>
 #include <asm/memory.h>
-#include <asm/ptrace.h>
 #include <asm/thread_info.h>
-#include <asm/uaccess.h>
 #include <asm/unistd.h>
 
 /*
@@ -110,34 +108,6 @@
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
 	stp	lr, x21, [sp, #S_LR]
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	/*
-	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
-	 * EL0, there is no need to check the state of TTBR0_EL1 since
-	 * accesses are always enabled.
-	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
-	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
-	 * user mappings.
-	 */
-alternative_if_not ARM64_HAS_PAN
-	nop
-alternative_else
-	b	1f				// skip TTBR0 PAN
-alternative_endif
-
-	.if	\el != 0
-	mrs	x21, ttbr0_el1
-	tst	x21, #0xffff << 48		// Check for the reserved ASID
-	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
-	b.eq	1f				// TTBR0 access already disabled
-	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
-	.endif
-
-	uaccess_ttbr0_disable x21
-1:
-#endif
-
 	stp	x22, x23, [sp, #S_PC]
 
 	/*
@@ -174,42 +144,6 @@ alternative_endif
 	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
 	.if	\el == 0
 	ct_user_enter
-	.endif
-
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	/*
-	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
-	 * PAN bit checking.
-	 */
-alternative_if_not ARM64_HAS_PAN
-	nop
-alternative_else
-	b	2f				// skip TTBR0 PAN
-alternative_endif
-
-	.if	\el != 0
-	tbnz	x22, #_PSR_PAN_BIT, 1f		// Skip re-enabling TTBR0 access if previously disabled
-	.endif
-
-	uaccess_ttbr0_enable x0
-
-	.if	\el == 0
-	/*
-	 * Enable errata workarounds only if returning to user. The only
-	 * workaround currently required for TTBR0_EL1 changes are for the
-	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
-	 * corruption).
-	 */
-	post_ttbr0_update_workaround
-	.endif
-1:
-	.if	\el != 0
-	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
-	.endif
-2:
-#endif
-
-	.if	\el == 0
 	ldr	x23, [sp, #S_SP]		// load return stack pointer
 	msr	sp_el0, x23
 #ifdef CONFIG_ARM64_ERRATUM_845719
@@ -231,7 +165,6 @@ alternative_else
 alternative_endif
 #endif
 	.endif
-
 	msr	elr_el1, x21			// set up the return data
 	msr	spsr_el1, x22
 	ldp	x0, x1, [sp, #16 * 0]
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 0153c0d8ddb1..e8afbfd313bc 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -346,15 +346,6 @@ void __init setup_arch(char **cmdline_p)
 	smp_init_cpus();
 	smp_build_mpidr_hash();
 
-#ifdef CONFIG_ARM64_SW_TTBR0_PAN
-	/*
-	 * Make sure init_thread_info.ttbr0 always generates translation
-	 * faults in case uaccess_enable() is inadvertently called by the init
-	 * thread.
-	 */
-	init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
-#endif
-
 #ifdef CONFIG_VT
 #if defined(CONFIG_VGA_CONSOLE)
 	conswitchp = &vga_con;
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 25128089c386..7275628ba59f 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -182,12 +182,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
-	/*
-	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
-	 * emulating PAN.
-	 */
-	if (!system_uses_ttbr0_pan())
-		cpu_switch_mm(mm->pgd, mm);
+	cpu_switch_mm(mm->pgd, mm);
 }
 
 static int asids_init(void)
-- 
2.34.1
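
Reviewer's note: the behaviour restored above is the pre-SW-PAN flow, in
which switch_mm() programs TTBR0_EL1 directly whenever the mm actually
changes. The reverted patch instead parked TTBR0_EL1 on the reserved
(zero page) tables on every context switch and deferred the real switch
to uaccess_enable(), emulating PAN on CPUs without the ARMv8.1 feature.
The stand-alone C model below is only a simplified sketch of that
difference, not kernel code: mm_struct, hw_ttbr0, saved_ttbr0 and
RESERVED_TTBR0 are stand-ins for the real mm, the TTBR0_EL1 register,
thread_info->ttbr0 and the empty_zero_page tables.

#include <stdio.h>

struct mm_struct { unsigned long pgd_phys; };

static unsigned long hw_ttbr0;		/* stand-in for TTBR0_EL1 */
static unsigned long saved_ttbr0;	/* stand-in for thread_info->ttbr0 */
#define RESERVED_TTBR0 0UL		/* stand-in for the zero-page tables */

/* Pre-SW-PAN flow (restored by the revert): write the register directly. */
static void switch_mm_direct(struct mm_struct *prev, struct mm_struct *next)
{
	if (prev == next)
		return;
	hw_ttbr0 = next->pgd_phys;	/* models cpu_switch_mm() */
}

/*
 * SW-PAN flow (removed by the revert): park TTBR0 on the reserved
 * tables and only record the real value; uaccess_enable() would then
 * install it around explicit user accesses.
 */
static void switch_mm_sw_pan(struct mm_struct *prev, struct mm_struct *next)
{
	if (prev == next)
		return;
	hw_ttbr0 = RESERVED_TTBR0;
	saved_ttbr0 = next->pgd_phys;	/* models update_saved_ttbr0() */
}

int main(void)
{
	struct mm_struct a = { 0x1000 }, b = { 0x2000 };

	switch_mm_direct(&a, &b);
	printf("direct: TTBR0_EL1=%#lx\n", hw_ttbr0);

	switch_mm_sw_pan(&b, &a);
	printf("sw-pan: TTBR0_EL1=%#lx saved=%#lx\n", hw_ttbr0, saved_ttbr0);
	return 0;
}

In the SW-PAN flow the hardware register points at mappings-free tables
between uaccess windows, which is why the reverted patch also had to
save and restore that state across exceptions; dropping that
bookkeeping is what removes the 67 lines from entry.S above.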