arm64: mm: ensure patched kernel text is fetched from PoU
author		Will Deacon <will.deacon@arm.com>
		Tue, 4 Aug 2015 16:49:36 +0000 (17:49 +0100)
committer	Will Deacon <will.deacon@arm.com>
		Wed, 5 Aug 2015 09:05:20 +0000 (10:05 +0100)
The arm64 booting document requires that the bootloader has cleaned the
kernel image to the PoC. However, when a CPU re-enters the kernel due to
either a CPU hotplug "on" event or resuming from a low-power state (e.g.
cpuidle), the kernel text may in fact be dirty at the PoU due to things
like alternative patching or even module loading.
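
For context, runtime patching only has to make the new text coherent to
the PoU, not the PoC. A minimal sketch of that maintenance (modelled on
the kernel's __flush_icache_range, with x0 standing in for the VA of a
patched cache line) shows why the PoC copy can be left stale:

	dc	cvau, x0	// clean D-cache line by VA, but only to the PoU
	dsb	ish		// make the clean visible to instruction fetch
	ic	ivau, x0	// invalidate I-cache line by VA to the PoU
	dsb	ish		// complete the I-cache invalidation
	isb			// resynchronise this CPU's instruction stream

Nothing in that sequence cleans to the PoC, so a CPU that comes back up
fetching from memory can still observe the pre-patch instructions.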

Thanks to I-cache speculation with the MMU off, stale instructions could
be fetched prior to enabling the MMU, potentially leading to crashes
when executing regions of code that have been modified at runtime.

This patch addresses the issue by ensuring that the local I-cache is
invalidated immediately after a CPU has enabled its MMU but before
jumping out of the identity mapping. Any stale instructions fetched from
the PoC will then be discarded and refetched correctly from the PoU.
Patching kernel text executed prior to the MMU being enabled is
prohibited, so the early entry code will always be clean.
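
Both hunks below add the same sequence; an annotated restatement (x27
and x3 being the virtual-address jump targets in head.S and sleep.S
respectively):

	msr	sctlr_el1, x0	// enable the MMU
	isb			// synchronise the new translation regime
	ic	iallu		// invalidate the entire local I-cache
	dsb	nsh		// the op is CPU-local, so a non-shareable barrier suffices
	isb			// flush any stale speculatively-fetched instructions
	br	x27		// only now leave the identity mapping

The standalone "ic iallu" in __cpu_setup (third hunk) is dropped: it ran
with the MMU still off, so stale lines could simply be speculated back
in before the MMU was enabled. It is superseded by the post-enable
invalidation above.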

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
arch/arm64/kernel/head.S
arch/arm64/kernel/sleep.S
arch/arm64/mm/proc.S

diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 3a06541739976d506e90791c538fa5c0f2b8dc07..a055be6125cf592d06e957df0b1125d409c9a5cf 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -634,5 +634,13 @@ __enable_mmu:
        isb
        msr     sctlr_el1, x0
        isb
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
        br      x27
 ENDPROC(__enable_mmu)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index fb3128ea3a4f5dfe4ab4824a1894c0aa8a01352f..f586f7c875e29295b6094efd4967e26cb6d01a99 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -133,6 +133,14 @@ ENTRY(cpu_resume_mmu)
        ldr     x3, =cpu_resume_after_mmu
        msr     sctlr_el1, x0           // restore sctlr_el1
        isb
+       /*
+        * Invalidate the local I-cache so that any instructions fetched
+        * speculatively from the PoC are discarded, since they may have
+        * been dynamically patched at the PoU.
+        */
+       ic      iallu
+       dsb     nsh
+       isb
        br      x3                      // global jump to virtual address
 ENDPROC(cpu_resume_mmu)
        .popsection
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 34da270f9e344e0493f2903f7c8e5cc885fd92c0..6e8765a2bddd0a17c5adc8b65391a81846665a69 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -146,7 +146,6 @@ ENDPROC(cpu_do_switch_mm)
  *     value of the SCTLR_EL1 register.
  */
 ENTRY(__cpu_setup)
-       ic      iallu                           // I+BTB cache invalidate
        tlbi    vmalle1is                       // invalidate I + D TLBs
        dsb     ish