Merge branch 'linux-linaro-lsk-v4.4' into linux-linaro-lsk-v4.4-android
author    Alex Shi <alex.shi@linaro.org>
          Fri, 31 Mar 2017 01:07:26 +0000 (09:07 +0800)
committer Alex Shi <alex.shi@linaro.org>
          Fri, 31 Mar 2017 01:07:26 +0000 (09:07 +0800)
12 files changed:
arch/arm64/Makefile
arch/arm64/include/asm/spinlock.h
arch/arm64/kernel/hibernate.c
arch/arm64/kernel/stacktrace.c
arch/arm64/kernel/traps.c
arch/arm64/mm/hugetlbpage.c
arch/arm64/mm/proc.S
arch/s390/Kconfig
arch/s390/mm/init.c
drivers/base/power/opp/core.c
drivers/hwtracing/coresight/coresight-tmc.c
kernel/events/core.c

diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 101632379b8b054dd5186616c9dba550511f6767..26ce8c9a473c3402b01e7b234661de31f27c4374 100644
@@ -61,7 +61,9 @@ head-y                := arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
 ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+                int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+                rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
 else
 TEXT_OFFSET := 0x00080000
 endif
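
The new expression scales the randomization granularity with the configured page size: "^" is exponentiation in awk, so it picks a random page-aligned offset in [0, 2MiB) instead of one of 512 fixed 4K slots. A minimal standalone sketch of the same arithmetic, assuming CONFIG_ARM64_PAGE_SHIFT = 12 (4K pages); this program and its names are illustrative, not part of the patch:

    /* Pick a random page-aligned TEXT_OFFSET below 2 MiB. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
            const unsigned long page_size = 1UL << 12;    /* 2 ^ PAGE_SHIFT */
            const unsigned long window = 2 * 1024 * 1024; /* 2 MiB */
            const unsigned long slots = window / page_size;

            srand(time(NULL));
            /* awk's int(slots * rand()) is roughly rand() % slots here. */
            printf("0x%06lx\n", (rand() % slots) * page_size);
            return 0;
    }
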
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 43a66881fd57c1b753639aee87958a6b347d4675..73f5d548bba1fb19678ad25bd02d503ca595c85c 100644
@@ -31,6 +31,12 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
        unsigned int tmp;
        arch_spinlock_t lockval;
 
+       /*
+        * Ensure prior spin_lock operations to other locks have completed
+        * on this CPU before we test whether "lock" is locked.
+        */
+       smp_mb();
+
        asm volatile(
 "      sevl\n"
 "1:    wfe\n"
@@ -152,6 +158,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
+       smp_mb(); /* See arch_spin_unlock_wait */
        return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
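The barriers address a store-to-load reordering hazard: arm64's spin_lock() ends in a store-release, which does not order that store against later loads, so a subsequent spin_is_locked() or spin_unlock_wait() on a different lock could read stale state. A userspace analogue of the hazard using C11 atomics, the classic store-buffering litmus test; the thread bodies and names are illustrative, not kernel code:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int lock_a, lock_b;
    static int r1, r2;

    static void *t1(void *arg)
    {
            atomic_store_explicit(&lock_a, 1, memory_order_relaxed); /* "take" A */
            atomic_thread_fence(memory_order_seq_cst);  /* the added smp_mb() */
            r1 = atomic_load_explicit(&lock_b, memory_order_relaxed);
            return NULL;
    }

    static void *t2(void *arg)
    {
            atomic_store_explicit(&lock_b, 1, memory_order_relaxed); /* "take" B */
            atomic_thread_fence(memory_order_seq_cst);
            r2 = atomic_load_explicit(&lock_a, memory_order_relaxed);
            return NULL;
    }

    int main(void)
    {
            pthread_t a, b;
            pthread_create(&a, NULL, t1, NULL);
            pthread_create(&b, NULL, t2, NULL);
            pthread_join(a, NULL);
            pthread_join(b, NULL);
            /* With the fences, r1 == 0 && r2 == 0 is forbidden. */
            printf("r1=%d r2=%d\n", r1, r2);
            return 0;
    }
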
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index f8df75d740f4891a018aa84e02cb7c2988f3ff84..6dd18140ebb81d1349b718ba84cd94abcef7373f 100644
@@ -34,6 +34,7 @@
 #include <asm/pgtable-hwdef.h>
 #include <asm/sections.h>
 #include <asm/suspend.h>
+#include <asm/sysreg.h>
 #include <asm/virt.h>
 
 /*
@@ -216,12 +217,22 @@ static int create_safe_exec_page(void *src_start, size_t length,
        set_pte(pte, __pte(virt_to_phys((void *)dst) |
                         pgprot_val(PAGE_KERNEL_EXEC)));
 
-       /* Load our new page tables */
-       asm volatile("msr       ttbr0_el1, %0;"
-                    "isb;"
-                    "tlbi      vmalle1is;"
-                    "dsb       ish;"
-                    "isb" : : "r"(virt_to_phys(pgd)));
+       /*
+        * Load our new page tables. A strict BBM approach requires that we
+        * ensure that TLBs are free of any entries that may overlap with the
+        * global mappings we are about to install.
+        *
+        * For a real hibernate/resume cycle TTBR0 currently points to a zero
+        * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+        * runtime services), while for a userspace-driven test_resume cycle it
+        * points to userspace page tables (and we must point it at a zero page
+        * ourselves). Elsewhere we only (un)install the idmap with preemption
+        * disabled, so T0SZ should be as required regardless.
+        */
+       cpu_set_reserved_ttbr0();
+       local_flush_tlb_all();
+       write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+       isb();
 
        *phys_dst_addr = virt_to_phys((void *)dst);
 
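Annotated, the replacement is a textbook break-before-make sequence for switching the translation base (this restates the four lines added above using the patch's own helpers; it is not standalone-compilable code):

    cpu_set_reserved_ttbr0();      /* 1. break: TTBR0 -> the reserved zero page */
    local_flush_tlb_all();         /* 2. flush: no stale/overlapping entries survive */
    write_sysreg(virt_to_phys(pgd), ttbr0_el1);  /* 3. make: install the new tables */
    isb();                         /* 4. synchronize before fetching through them */
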
@@ -387,6 +398,38 @@ int swsusp_arch_resume(void)
        void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
                                          void *, phys_addr_t, phys_addr_t);
 
+       /*
+        * Restoring the memory image will overwrite the ttbr1 page tables.
+        * Create a second copy of just the linear map, and use this when
+        * restoring.
+        */
+       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+       if (!tmp_pg_dir) {
+               pr_err("Failed to allocate memory for temporary page tables.");
+               rc = -ENOMEM;
+               goto out;
+       }
+       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+       if (rc)
+               goto out;
+
+       /*
+        * Since we only copied the linear map, we need to find restore_pblist's
+        * linear map address.
+        */
+       lm_restore_pblist = LMADDR(restore_pblist);
+
+       /*
+        * We need a zero page that is zero before & after resume in order
+        * to break before make on the ttbr1 page tables.
+        */
+       zero_page = (void *)get_safe_page(GFP_ATOMIC);
+       if (!zero_page) {
+               pr_err("Failed to allocate zero page.");
+               rc = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Locate the exit code in the bottom-but-one page, so that *NULL
         * still has disastrous effects.
@@ -412,27 +455,6 @@ int swsusp_arch_resume(void)
         */
        __flush_dcache_area(hibernate_exit, exit_size);
 
-       /*
-        * Restoring the memory image will overwrite the ttbr1 page tables.
-        * Create a second copy of just the linear map, and use this when
-        * restoring.
-        */
-       tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
-       if (!tmp_pg_dir) {
-               pr_err("Failed to allocate memory for temporary page tables.");
-               rc = -ENOMEM;
-               goto out;
-       }
-       rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
-       if (rc)
-               goto out;
-
-       /*
-        * Since we only copied the linear map, we need to find restore_pblist's
-        * linear map address.
-        */
-       lm_restore_pblist = LMADDR(restore_pblist);
-
        /*
         * KASLR will cause the el2 vectors to be in a different location in
         * the resumed kernel. Load hibernate's temporary copy into el2.
@@ -447,12 +469,6 @@ int swsusp_arch_resume(void)
                __hyp_set_vectors(el2_vectors);
        }
 
-       /*
-        * We need a zero page that is zero before & after resume in order
-        * to break before make on the ttbr1 page tables.
-        */
-       zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
        hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
                       resume_hdr.reenter_kernel, lm_restore_pblist,
                       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
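
The moves all have one shape: every get_safe_page() allocation now happens before the first irreversible step, and the zero-page allocation gains the error check it was missing, so a failed allocation can still unwind through the out label and return -ENOMEM. A standalone userspace sketch of the pattern, with malloc/calloc standing in for get_safe_page(GFP_ATOMIC):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int arch_resume(void)
    {
            int rc = 0;
            void *tmp_tables = malloc(4096);
            void *zero_page = calloc(1, 4096);

            if (!tmp_tables || !zero_page) {
                    fprintf(stderr, "Failed to allocate.\n");
                    rc = -ENOMEM;
                    goto out;       /* nothing irreversible has happened yet */
            }

            /* ... only now overwrite live state / relocate the exit code ... */
    out:
            free(zero_page);        /* free(NULL) is a no-op */
            free(tmp_tables);
            return rc;
    }

    int main(void)
    {
            return arch_resume() ? 1 : 0;
    }
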
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index cfd46c227c8cbd7c57c9b88c1d6189404f0bdcfa..a99eff9afc1f2ecb815d2b70763e58456d980723 100644
@@ -43,6 +43,9 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        unsigned long fp = frame->fp;
        unsigned long irq_stack_ptr;
 
+       if (!tsk)
+               tsk = current;
+
        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
@@ -67,7 +70,7 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        frame->pc = *(unsigned long *)(fp + 8);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       if (tsk && tsk->ret_stack &&
+       if (tsk->ret_stack &&
                        (frame->pc == (unsigned long)return_to_handler)) {
                /*
                 * This is a case where function graph tracer has
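
unwind_frame() now normalizes a NULL tsk to current once at entry, which lets the CONFIG_FUNCTION_GRAPH_TRACER path drop its per-use NULL test before dereferencing tsk->ret_stack (the same reshuffle is applied to dump_backtrace() in traps.c below). The shape of the fix as a small standalone sketch; the struct and names are invented for illustration:

    #include <stdio.h>

    struct task { const char *name; };

    static struct task current_task = { "current" };

    static void dump_task(struct task *tsk)
    {
            if (!tsk)                  /* default the parameter first ... */
                    tsk = &current_task;

            printf("%s\n", tsk->name); /* ... so every later use is safe */
    }

    int main(void)
    {
            dump_task(NULL);           /* prints "current" instead of crashing */
            return 0;
    }
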
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index f5c82c76cf7cbd0983751dcb671788ad2c11ad9c..a1cfcaa562a22a5fa0181f6c57a4cbea68bf7da1 100644
@@ -149,6 +149,11 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        unsigned long irq_stack_ptr;
        int skip;
 
+       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+       if (!tsk)
+               tsk = current;
+
        /*
         * Switching between stacks is valid when tracing current and in
         * non-preemptible context.
@@ -158,11 +163,6 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
        else
                irq_stack_ptr = 0;
 
-       pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
-       if (!tsk)
-               tsk = current;
-
        if (tsk == current) {
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_stack_pointer;
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index da30529bb1f65c9e3d5408b2e28ab31bc2283211..019f13637fae50a5df5c2b59947c33a2cc9ad9a5 100644
@@ -51,20 +51,8 @@ static int find_num_contig(struct mm_struct *mm, unsigned long addr,
        *pgsize = PAGE_SIZE;
        if (!pte_cont(pte))
                return 1;
-       if (!pgd_present(*pgd)) {
-               VM_BUG_ON(!pgd_present(*pgd));
-               return 1;
-       }
        pud = pud_offset(pgd, addr);
-       if (!pud_present(*pud)) {
-               VM_BUG_ON(!pud_present(*pud));
-               return 1;
-       }
        pmd = pmd_offset(pud, addr);
-       if (!pmd_present(*pmd)) {
-               VM_BUG_ON(!pmd_present(*pmd));
-               return 1;
-       }
        if ((pte_t *)pmd == ptep) {
                *pgsize = PMD_SIZE;
                return CONT_PMDS;
@@ -212,7 +200,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
                /* save the 1st pte to return */
                pte = ptep_get_and_clear(mm, addr, cpte);
-               for (i = 1; i < ncontig; ++i) {
+               for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
                        /*
                         * If HW_AFDBM is enabled, then the HW could
                         * turn on the dirty bit for any of the page
@@ -250,8 +238,8 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                pfn = pte_pfn(*cpte);
                ncontig = find_num_contig(vma->vm_mm, addr, cpte,
                                          *cpte, &pgsize);
-               for (i = 0; i < ncontig; ++i, ++cpte) {
-                       changed = ptep_set_access_flags(vma, addr, cpte,
+               for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
+                       changed |= ptep_set_access_flags(vma, addr, cpte,
                                                        pfn_pte(pfn,
                                                                hugeprot),
                                                        dirty);
@@ -273,7 +261,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 
                cpte = huge_pte_offset(mm, addr);
                ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
-               for (i = 0; i < ncontig; ++i, ++cpte)
+               for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
                        ptep_set_wrprotect(mm, addr, cpte);
        } else {
                ptep_set_wrprotect(mm, addr, ptep);
@@ -291,7 +279,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
                cpte = huge_pte_offset(vma->vm_mm, addr);
                ncontig = find_num_contig(vma->vm_mm, addr, cpte,
                                          *cpte, &pgsize);
-               for (i = 0; i < ncontig; ++i, ++cpte)
+               for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
                        ptep_clear_flush(vma, addr, cpte);
        } else {
                ptep_clear_flush(vma, addr, ptep);
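
The loop hunks all fix the same two bugs: the iteration must advance addr by pgsize so each contiguous PTE is operated on at its own address, and huge_ptep_set_access_flags() must OR the per-entry results together rather than keep only the last one. A standalone sketch of both fixes; the constants and helper are made up:

    #include <stdbool.h>
    #include <stdio.h>

    #define NCONTIG 4
    #define PGSIZE  0x1000UL

    /* Stand-in for ptep_set_access_flags(): true if this entry changed. */
    static bool update_entry(unsigned long addr)
    {
            printf("updating entry at 0x%lx\n", addr);
            return addr == 0x2000;   /* pretend only one entry changed */
    }

    int main(void)
    {
            unsigned long addr = 0x1000;
            bool changed = false;
            int i;

            for (i = 0; i < NCONTIG; ++i, addr += PGSIZE) /* was: addr never advanced */
                    changed |= update_entry(addr);        /* was: changed = ... */

            printf("changed = %d\n", changed);
            return 0;
    }
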
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8292784d44c95508c50be40b201454fafc488d2e..d88a2a80ada88c081915b6d40269c6e3a0914b45 100644
@@ -100,7 +100,16 @@ ENTRY(cpu_do_resume)
 
        msr     tcr_el1, x8
        msr     vbar_el1, x9
+
+       /*
+        * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+        * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+        * exception. Mask them until local_dbg_restore() in cpu_suspend()
+        * resets them.
+        */
+       disable_dbg
        msr     mdscr_el1, x10
+
        msr     sctlr_el1, x12
        /*
         * Restore oslsr_el1 by writing oslar_el1
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 60530fd93d6d11368731fedfe145afb6826c7e59..06176328f83d61415ef509a2d97bb8abdc8e1959 100644
@@ -62,6 +62,9 @@ config PCI_QUIRKS
 config ARCH_SUPPORTS_UPROBES
        def_bool y
 
+config DEBUG_RODATA
+       def_bool y
+
 config S390
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index feff9caf89b5cf81b26bcf2e82d9f71b2ca890e2..91376d9e42861190f1329b8a90da8b25ba1a89be 100644
@@ -109,6 +109,13 @@ void __init paging_init(void)
        free_area_init_nodes(max_zone_pfns);
 }
 
+void mark_rodata_ro(void)
+{
+       /* Text and rodata are already protected. Nothing to do here. */
+       pr_info("Write protecting the kernel read-only data: %luk\n",
+               ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+}
+
 void __init mem_init(void)
 {
        if (MACHINE_HAS_TLB_LC)
@@ -127,9 +134,6 @@ void __init mem_init(void)
        setup_zero_pages();     /* Setup zeroed pages. */
 
        mem_init_print_info(NULL);
-       printk("Write protected kernel read-only data: %#lx - %#lx\n",
-              (unsigned long)&_stext,
-              PFN_ALIGN((unsigned long)&_eshared) - 1);
 }
 
 void free_initmem(void)
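
With the new def_bool y DEBUG_RODATA symbol, the generic boot path expects the architecture to provide mark_rodata_ro(); on s390 the protection is applied earlier in boot, so the hook only reports the range. Roughly, the generic side has this shape (a simplified sketch, not the exact upstream code):

    #ifdef CONFIG_DEBUG_RODATA
    extern void mark_rodata_ro(void);

    /* Called once, just before init is handed control. */
    static void mark_readonly(void)
    {
            mark_rodata_ro();
    }
    #else
    static void mark_readonly(void) { }
    #endif
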
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 433b60092972d56abba55897158d6c22156cf631..d8f4cc22856c924b1be7bf1aa97f175b6579c554 100644
@@ -259,9 +259,6 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev)
        reg = opp_table->regulator;
        if (IS_ERR(reg)) {
                /* Regulator may not be required for device */
-               if (reg)
-                       dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
-                               PTR_ERR(reg));
                rcu_read_unlock();
                return 0;
        }
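
The deleted message fired for every device that simply has no regulator: an optional regulator is stored as an error pointer, so its absence is not a failure worth logging, and the function just reports zero latency. A userspace sketch of the "optional resource as error pointer" pattern, with a hand-rolled IS_ERR() analogue (the kernel's real one lives in <linux/err.h>):

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static unsigned long max_volt_latency(void *regulator)
    {
            if (IS_ERR(regulator))
                    return 0;   /* optional: absence means "no constraint" */
            /* ... otherwise query the regulator's voltage ramp time ... */
            return 42;
    }

    int main(void)
    {
            /* No error spam for an absent regulator, just 0. */
            printf("%lu\n", max_volt_latency(ERR_PTR(-ENODEV)));
            return 0;
    }
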
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 9e02ac963cd0e3fbb38f9ee5e3fd082719f38bf7..3978cbb6b038c814ff839f084b157560ae1e68dc 100644
@@ -388,9 +388,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
 err_misc_register:
        coresight_unregister(drvdata->csdev);
 err_devm_kzalloc:
-       if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
-               dma_free_coherent(dev, drvdata->size,
-                               drvdata->vaddr, drvdata->paddr);
        return ret;
 }
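
The deleted block freed a DMA buffer on a path where it is not valid to free it; the goto-unwind convention is that each label undoes only the steps that had already succeeded before any jump to it. The convention as a standalone sketch:

    #include <stdlib.h>

    int probe(void)
    {
            void *a, *b;

            a = malloc(16);
            if (!a)
                    return -1;       /* nothing to unwind yet */

            b = malloc(16);
            if (!b)
                    goto err_free_a; /* unwind only what succeeded: just 'a' */

            /* ... register the device, keep both buffers ... */
            return 0;

    err_free_a:
            free(a);
            return -1;
    }
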
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fe9420e421b0e1082e32c17690560e2882cd1fcf..19f34aa0eb68a73f8482328fa9fea85abb84a482 100644
@@ -5823,7 +5823,7 @@ static int __perf_pmu_output_stop(void *info)
 {
        struct perf_event *event = info;
        struct pmu *pmu = event->pmu;
-       struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+       struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
        struct remote_output ro = {
                .rb     = event->rb,
        };
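
__perf_pmu_output_stop() runs as a cross-CPU function call, so preemption is already disabled and the plain this_cpu_ptr() is sufficient; get_cpu_ptr() additionally disables preemption and must be paired with put_cpu_ptr(), which this function never did, leaving the preempt count unbalanced on return. A toy model of the imbalance, with hand-rolled counters standing in for the real preempt machinery:

    #include <stdio.h>

    static int preempt_count;
    static int cpu_var;

    /* get_cpu_ptr() implies preempt_disable(); this_cpu_ptr() does not. */
    static int *get_cpu_ptr_sim(int *p)  { preempt_count++; return p; }
    static int *this_cpu_ptr_sim(int *p) { return p; }

    static void cross_call(int *(*accessor)(int *))
    {
            preempt_count++;     /* IPI context: preemption already off */
            accessor(&cpu_var);  /* the buggy version never "puts" */
            preempt_count--;     /* return from the cross call */
    }

    int main(void)
    {
            cross_call(this_cpu_ptr_sim);
            printf("after fix:  preempt_count=%d\n", preempt_count); /* 0 */

            cross_call(get_cpu_ptr_sim);
            printf("before fix: preempt_count=%d\n", preempt_count); /* 1: leaked */
            return 0;
    }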