From: Alex Shi
Date: Tue, 20 Sep 2016 02:17:00 +0000 (+0800)
Subject: Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4
X-Git-Tag: firefly_0821_release~176^2~4^2~31
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=d2d693d1ba7d93ec7c5db8aca2da29a4c91f6782;p=firefly-linux-kernel-4.4.55.git

Merge remote-tracking branch 'lts/linux-4.4.y' into linux-linaro-lsk-v4.4

Conflicts:
	set ARM64_WORKAROUND_CAVIUM_27456 to 12 in
	arch/arm64/include/asm/cpufeature.h and
	add asm/memory.h in arch/arm64/kernel/entry.S
---

d2d693d1ba7d93ec7c5db8aca2da29a4c91f6782
diff --cc arch/arm64/include/asm/cpufeature.h
index 37a53fc6b384,8136afc9df0d..876fe0622204
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@@ -30,11 -30,9 +30,13 @@@
 #define ARM64_HAS_LSE_ATOMICS 5
 #define ARM64_WORKAROUND_CAVIUM_23154 6
 #define ARM64_WORKAROUND_834220 7
-#define ARM64_WORKAROUND_CAVIUM_27456 8
+#define ARM64_HAS_NO_HW_PREFETCH 8
+#define ARM64_HAS_UAO 9
+#define ARM64_ALT_PAN_NOT_UAO 10
+
+#define ARM64_NCAPS 11
++#define ARM64_WORKAROUND_CAVIUM_27456 12
+
-#define ARM64_NCAPS 9

 #ifndef __ASSEMBLY__
diff --cc arch/arm64/kernel/entry.S
index 1f7f5a2b61bf,5a3753d09e20..588c8e1778d4
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@@ -27,7 -27,7 +27,8 @@@
 #include
 #include
 #include
+#include
+ #include
 #include
 #include
@@@ -89,15 -89,18 +90,21 @@@
 .if \el == 0
 mrs x21, sp_el0
- get_thread_info tsk // Ensure MDSCR_EL1.SS is clear,
+ mov tsk, sp
+ and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
 ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
 disable_step_tsk x19, x20 // exceptions when scheduling.
+
+ mov x29, xzr // fp pointed to user-space
 .else
 add x21, sp, #S_FRAME_SIZE
- .endif
+ get_thread_info tsk
+ /* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+ ldr x20, [tsk, #TI_ADDR_LIMIT]
+ str x20, [sp, #S_ORIG_ADDR_LIMIT]
+ mov x20, #TASK_SIZE_64
+ str x20, [tsk, #TI_ADDR_LIMIT]
+ .endif /* \el == 0 */
 mrs x22, elr_el1
 mrs x23, spsr_el1
 stp lr, x21, [sp, #S_LR]
diff --cc arch/arm64/mm/mmu.c
index cd4177a1781d,653735a8c58a..8fc302d84e1f
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@@ -775,73 -679,21 +775,73 @@@ void *__init __fixmap_remap_fdt(phys_ad
 dt_virt = (void *)dt_virt_base + offset;

 /* map the first chunk so we can read the size from the header */
- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- SWAPPER_BLOCK_SIZE, prot);
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE),
+ dt_virt_base, SWAPPER_BLOCK_SIZE, prot);

- if (fdt_check_header(dt_virt) != 0)
+ if (fdt_magic(dt_virt) != FDT_MAGIC)
 return NULL;

- size = fdt_totalsize(dt_virt);
- if (size > MAX_FDT_SIZE)
+ *size = fdt_totalsize(dt_virt);
+ if (*size > MAX_FDT_SIZE)
 return NULL;

- if (offset + size > SWAPPER_BLOCK_SIZE)
- create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
- round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);
+ if (offset + *size > SWAPPER_BLOCK_SIZE)
+ create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
+ round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot);

- memblock_reserve(dt_phys, size);
+ return dt_virt;
+}
+
+void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
+{
+ void *dt_virt;
+ int size;
+
+ dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);
+ if (!dt_virt)
+ return NULL;
+ memblock_reserve(dt_phys, size);
 return dt_virt;
 }
+
+int __init arch_ioremap_pud_supported(void)
+{
+ /* only 4k granule supports level 1 block mappings */
+ return IS_ENABLED(CONFIG_ARM64_4K_PAGES);
+}
+
+int __init arch_ioremap_pmd_supported(void)
+{
+ return 1;
+}
+
+int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+{
+ BUG_ON(phys & ~PUD_MASK);
+ set_pud(pud, __pud(phys | PUD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ return 1;
+}
+
+int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+{
+ BUG_ON(phys & ~PMD_MASK);
+ set_pmd(pmd, __pmd(phys | PMD_TYPE_SECT | pgprot_val(mk_sect_prot(prot))));
+ return 1;
+}
+
+int pud_clear_huge(pud_t *pud)
+{
+ if (!pud_sect(*pud))
+ return 0;
+ pud_clear(pud);
+ return 1;
+}
+
+int pmd_clear_huge(pmd_t *pmd)
+{
+ if (!pmd_sect(*pmd))
+ return 0;
+ pmd_clear(pmd);
+ return 1;
+}
diff --cc arch/arm64/mm/proc.S
index 0c19534a901e,18201e9e8cc7..a92738e8b1eb
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@@ -137,36 -139,20 +139,46 @@@ ENTRY(cpu_do_switch_mm
 bfi x0, x1, #48, #16 // set the ASID
 msr ttbr0_el1, x0 // set TTBR0
 isb
+ alternative_if_not ARM64_WORKAROUND_CAVIUM_27456
 ret
+ nop
+ nop
+ nop
+ alternative_else
+ ic iallu
+ dsb nsh
+ isb
+ ret
+ alternative_endif
 ENDPROC(cpu_do_switch_mm)

- .section ".text.init", #alloc, #execinstr
+ .pushsection ".idmap.text", "ax"
+/*
+ * void idmap_cpu_replace_ttbr1(phys_addr_t new_pgd)
+ *
+ * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
+ * called by anything else. It can only be executed from a TTBR0 mapping.
+ */
+ENTRY(idmap_cpu_replace_ttbr1)
+ mrs x2, daif
+ msr daifset, #0xf
+
+ adrp x1, empty_zero_page
+ msr ttbr1_el1, x1
+ isb
+
+ tlbi vmalle1
+ dsb nsh
+ isb
+
+ msr ttbr1_el1, x0
+ isb
+
+ msr daif, x2
+
+ ret
+ENDPROC(idmap_cpu_replace_ttbr1)
+ .popsection

 /*
 * __cpu_setup