# The byte offset of the kernel image in RAM from the start of RAM.
ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
-TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%03x000\n", int(512 * rand())}')
+TEXT_OFFSET := $(shell awk "BEGIN {srand(); printf \"0x%06x\n\", \
+ int(2 * 1024 * 1024 / (2 ^ $(CONFIG_ARM64_PAGE_SHIFT)) * \
+ rand()) * (2 ^ $(CONFIG_ARM64_PAGE_SHIFT))}")
else
TEXT_OFFSET := 0x00080000
endif
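
The old expression always produced a 4K-aligned offset (note the "0x%03x000" format), which is not necessarily PAGE_SIZE-aligned on 16K/64K-page kernels; the replacement picks a random multiple of the page size below 2MiB. A minimal user-space C sketch of the same arithmetic, assuming a hypothetical PAGE_SHIFT of 16 (64K pages):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	const unsigned int page_shift = 16;		/* assumed: 64K pages */
	const unsigned long page_size = 1UL << page_shift;
	const unsigned long pages = (2 * 1024 * 1024) / page_size;

	srand(time(NULL));
	/* Random page index in [0, pages), scaled back up to bytes, so the
	 * result is always page-aligned and below 2MiB. */
	printf("0x%06lx\n", (rand() % pages) * page_size);
	return 0;
}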
unsigned int tmp;
arch_spinlock_t lockval;
+ /*
+ * Ensure prior spin_lock operations to other locks have completed
+ * on this CPU before we test whether "lock" is locked.
+ */
+ smp_mb();
+
asm volatile(
" sevl\n"
"1: wfe\n"
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
+ smp_mb(); /* See arch_spin_unlock_wait */
return !arch_spin_value_unlocked(READ_ONCE(*lock));
}
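
Both barriers exist for the same reason: a CPU that has just taken one lock must not have its read of another lock's state satisfied early. A minimal user-space sketch of the store-buffering hazard being closed, using C11 atomics (names invented for illustration):

#include <stdatomic.h>
#include <stdbool.h>

atomic_int lock_a, lock_b;	/* stand-ins for two arch_spinlock_t */

/* CPU 0 takes lock_a and then asks whether lock_b is held; CPU 1 runs
 * the mirrored sequence. Without the fence (the smp_mb() above), each
 * CPU's load can be satisfied before its own store is visible, and both
 * can conclude the other lock is free. */
bool cpu0_sees_b_locked(void)
{
	atomic_store_explicit(&lock_a, 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* plays the smp_mb() role */
	return atomic_load_explicit(&lock_b, memory_order_relaxed) != 0;
}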
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/suspend.h>
+#include <asm/sysreg.h>
#include <asm/virt.h>
/*
set_pte(pte, __pte(virt_to_phys((void *)dst) |
pgprot_val(PAGE_KERNEL_EXEC)));
- /* Load our new page tables */
- asm volatile("msr ttbr0_el1, %0;"
- "isb;"
- "tlbi vmalle1is;"
- "dsb ish;"
- "isb" : : "r"(virt_to_phys(pgd)));
+ /*
+ * Load our new page tables. A strict BBM approach requires that we
+ * ensure that TLBs are free of any entries that may overlap with the
+ * global mappings we are about to install.
+ *
+ * For a real hibernate/resume cycle TTBR0 currently points to a zero
+ * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
+ * runtime services), while for a userspace-driven test_resume cycle it
+ * points to userspace page tables (and we must point it at a zero page
+ * ourselves). Elsewhere we only (un)install the idmap with preemption
+ * disabled, so T0SZ should be as required regardless.
+ */
+ cpu_set_reserved_ttbr0();
+ local_flush_tlb_all();
+ write_sysreg(virt_to_phys(pgd), ttbr0_el1);
+ isb();
*phys_dst_addr = virt_to_phys((void *)dst);
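
For reference, the break-before-make sequence the hunk installs, with the role of each step annotated (the wrapper name is hypothetical; the calls are exactly those used above):

static void install_ttbr0_strict_bbm(pgd_t *pgd)
{
	cpu_set_reserved_ttbr0();	/* break: point TTBR0 at the zero page */
	local_flush_tlb_all();		/* no stale entries can now conflict  */
	write_sysreg(virt_to_phys(pgd), ttbr0_el1);	/* make: new tables   */
	isb();				/* synchronize the register write     */
}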
void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
void *, phys_addr_t, phys_addr_t);
+ /*
+ * Restoring the memory image will overwrite the ttbr1 page tables.
+ * Create a second copy of just the linear map, and use this when
+ * restoring.
+ */
+ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
+ if (!tmp_pg_dir) {
+ pr_err("Failed to allocate memory for temporary page tables.");
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
+ if (rc)
+ goto out;
+
+ /*
+ * Since we only copied the linear map, we need to find restore_pblist's
+ * linear map address.
+ */
+ lm_restore_pblist = LMADDR(restore_pblist);
+
+ /*
+ * We need a zero page that is zero before & after resume in order to
+ * break before make on the ttbr1 page tables.
+ */
+ zero_page = (void *)get_safe_page(GFP_ATOMIC);
+ if (!zero_page) {
+ pr_err("Failed to allocate zero page.");
+ rc = -ENOMEM;
+ goto out;
+ }
+
/*
* Locate the exit code in the bottom-but-one page, so that *NULL
* still has disastrous effects.
*/
__flush_dcache_area(hibernate_exit, exit_size);
- /*
- * Restoring the memory image will overwrite the ttbr1 page tables.
- * Create a second copy of just the linear map, and use this when
- * restoring.
- */
- tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
- if (!tmp_pg_dir) {
- pr_err("Failed to allocate memory for temporary page tables.");
- rc = -ENOMEM;
- goto out;
- }
- rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0);
- if (rc)
- goto out;
-
- /*
- * Since we only copied the linear map, we need to find restore_pblist's
- * linear map address.
- */
- lm_restore_pblist = LMADDR(restore_pblist);
-
/*
* KASLR will cause the el2 vectors to be in a different location in
* the resumed kernel. Load hibernate's temporary copy into el2.
__hyp_set_vectors(el2_vectors);
}
- /*
- * We need a zero page that is zero before & after resume in order to
- * to break before make on the ttbr1 page tables.
- */
- zero_page = (void *)get_safe_page(GFP_ATOMIC);
-
hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
resume_hdr.reenter_kernel, lm_restore_pblist,
resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
unsigned long fp = frame->fp;
unsigned long irq_stack_ptr;
+ if (!tsk)
+ tsk = current;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
frame->pc = *(unsigned long *)(fp + 8);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- if (tsk && tsk->ret_stack &&
+ if (tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
/*
* This is a case where function graph tracer has
unsigned long irq_stack_ptr;
int skip;
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+ if (!tsk)
+ tsk = current;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
else
irq_stack_ptr = 0;
- pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
-
- if (!tsk)
- tsk = current;
-
if (tsk == current) {
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
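
Both stacktrace hunks apply the same normalization: default a NULL task pointer to current once, before any dereference, which is what lets the FUNCTION_GRAPH_TRACER hunk drop its own NULL guard. A reduced sketch of the pattern (types and names invented for illustration):

#include <stddef.h>

struct task { void *ret_stack; };
extern struct task *current_task;	/* stand-in for "current" */

void unwind(struct task *tsk)
{
	if (!tsk)
		tsk = current_task;	/* normalize before any use */

	/* Safe without a NULL check, exactly as in the hunk that turns
	 * "tsk && tsk->ret_stack" into "tsk->ret_stack". */
	if (tsk->ret_stack) {
		/* ... unwind through the tracer trampoline ... */
	}
}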
*pgsize = PAGE_SIZE;
if (!pte_cont(pte))
return 1;
- if (!pgd_present(*pgd)) {
- VM_BUG_ON(!pgd_present(*pgd));
- return 1;
- }
pud = pud_offset(pgd, addr);
- if (!pud_present(*pud)) {
- VM_BUG_ON(!pud_present(*pud));
- return 1;
- }
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd)) {
- VM_BUG_ON(!pmd_present(*pmd));
- return 1;
- }
if ((pte_t *)pmd == ptep) {
*pgsize = PMD_SIZE;
return CONT_PMDS;
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
/* save the 1st pte to return */
pte = ptep_get_and_clear(mm, addr, cpte);
- for (i = 1; i < ncontig; ++i) {
+ for (i = 1, addr += pgsize; i < ncontig; ++i, addr += pgsize) {
/*
* If HW_AFDBM is enabled, then the HW could
* turn on the dirty bit for any of the page
pfn = pte_pfn(*cpte);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte) {
- changed = ptep_set_access_flags(vma, addr, cpte,
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
+ changed |= ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
dirty);
cpte = huge_pte_offset(mm, addr);
ncontig = find_num_contig(mm, addr, cpte, *cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_set_wrprotect(mm, addr, cpte);
} else {
ptep_set_wrprotect(mm, addr, ptep);
cpte = huge_pte_offset(vma->vm_mm, addr);
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
- for (i = 0; i < ncontig; ++i, ++cpte)
+ for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize)
ptep_clear_flush(vma, addr, cpte);
} else {
ptep_clear_flush(vma, addr, ptep);
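
All four hugetlb hunks fix the same pattern: the loops advanced the pte pointer (or index) but left addr pointing at the first page, so every per-entry operation targeted the same address. A small runnable sketch of the corrected loop shape (values assumed for illustration):

#include <stdio.h>

static void touch_entry(unsigned long addr, int idx)
{
	printf("entry %d covers address 0x%lx\n", idx, addr);
}

int main(void)
{
	unsigned long addr = 0x400000;		/* assumed range start */
	const unsigned long pgsize = 0x10000;	/* assumed 64K granule */
	const int ncontig = 4;

	/* Index and address advance in lockstep, one pgsize step per
	 * contiguous entry; before the fix, addr never moved. */
	for (int i = 0; i < ncontig; ++i, addr += pgsize)
		touch_entry(addr, i);
	return 0;
}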
msr tcr_el1, x8
msr vbar_el1, x9
+
+ /*
+ * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking
+ * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug
+ * exception. Mask them until local_dbg_restore() in cpu_suspend()
+ * resets them.
+ */
+ disable_dbg
msr mdscr_el1, x10
+
msr sctlr_el1, x12
/*
* Restore oslsr_el1 by writing oslar_el1
config ARCH_SUPPORTS_UPROBES
def_bool y
+config DEBUG_RODATA
+ def_bool y
+
config S390
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
free_area_init_nodes(max_zone_pfns);
}
+void mark_rodata_ro(void)
+{
+ /* Text and rodata are already protected. Nothing to do here. */
+ pr_info("Write protecting the kernel read-only data: %luk\n",
+ ((unsigned long)&_eshared - (unsigned long)&_stext) >> 10);
+}
+
void __init mem_init(void)
{
if (MACHINE_HAS_TLB_LC)
setup_zero_pages(); /* Setup zeroed pages. */
mem_init_print_info(NULL);
- printk("Write protected kernel read-only data: %#lx - %#lx\n",
- (unsigned long)&_stext,
- PFN_ALIGN((unsigned long)&_eshared) - 1);
}
void free_initmem(void)
reg = opp_table->regulator;
if (IS_ERR(reg)) {
/* Regulator may not be required for device */
- if (reg)
- dev_err(dev, "%s: Invalid regulator (%ld)\n", __func__,
- PTR_ERR(reg));
rcu_read_unlock();
return 0;
}
err_misc_register:
coresight_unregister(drvdata->csdev);
err_devm_kzalloc:
- if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
- dma_free_coherent(dev, drvdata->size,
- drvdata->vaddr, drvdata->paddr);
return ret;
}
{
struct perf_event *event = info;
struct pmu *pmu = event->pmu;
- struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
struct remote_output ro = {
.rb = event->rb,
};
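
The swap is correct because this function runs as an IPI handler, where preemption is already disabled: get_cpu_ptr() would disable preemption a second time and expects a matching put_cpu_ptr(), which this code never issued. A sketch of the two idioms side by side (the per-CPU variable is hypothetical):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, counter);	/* hypothetical per-CPU data */

/* Preemptible context: get_cpu_ptr() disables preemption around the
 * access and must be balanced with put_cpu_ptr(). */
static void bump_from_task(void)
{
	int *c = get_cpu_ptr(&counter);
	(*c)++;
	put_cpu_ptr(&counter);
}

/* IPI/interrupt context, as above: preemption is already off, so
 * this_cpu_ptr() suffices and keeps the preempt count balanced. */
static void bump_from_ipi(void *info)
{
	(*this_cpu_ptr(&counter))++;
}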