ARM: 5757/1: Thumb-2: Correct "mov.w pc, lr" instruction which is unpredictable
author Catalin Marinas <catalin.marinas@arm.com>
Mon, 12 Oct 2009 16:31:20 +0000 (17:31 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Wed, 14 Oct 2009 09:33:05 +0000 (10:33 +0100)
The 32-bit wide variant of "mov pc, reg" in Thumb-2 is unpredictable,
causing improper handling of undefined instructions not caught by the
kernel. This patch adds a movw_pc macro for such situations (currently
only used in call_fpe).
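
For illustration (not part of the patch): under CONFIG_THUMB2_KERNEL the
W() macro forces the 32-bit instruction encoding, and per the ARM ARM the
32-bit MOV (register) encoding with the PC as destination is the
unpredictable form, while the 16-bit encoding remains architecturally
valid:

	W(mov)	pc, lr		@ assembles to "mov.w pc, lr" - UNPREDICTABLE
	mov	pc, lr		@ 16-bit encoding - valid, branches to lr

The call_fpe jump table is entered by a computed branch that assumes
4-byte entries, so the Thumb-2 movw_pc variant pads the 16-bit mov with a
nop to keep each entry one word wide.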

Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-header.S

arch/arm/kernel/entry-armv.S
index 322410be573ca027bd8e03da67e134492423974d..0022b4d57f8b8c9e736df3e896ce1a06b4835aba 100644
@@ -608,33 +608,33 @@ call_fpe:
  THUMB(        add     pc, r8                  )
        nop
 
-       W(mov)  pc, lr                          @ CP#0
+       movw_pc lr                              @ CP#0
        W(b)    do_fpe                          @ CP#1 (FPE)
        W(b)    do_fpe                          @ CP#2 (FPE)
-       W(mov)  pc, lr                          @ CP#3
+       movw_pc lr                              @ CP#3
 #ifdef CONFIG_CRUNCH
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
 #else
-       W(mov)  pc, lr                          @ CP#4
-       W(mov)  pc, lr                          @ CP#5
-       W(mov)  pc, lr                          @ CP#6
+       movw_pc lr                              @ CP#4
+       movw_pc lr                              @ CP#5
+       movw_pc lr                              @ CP#6
 #endif
-       W(mov)  pc, lr                          @ CP#7
-       W(mov)  pc, lr                          @ CP#8
-       W(mov)  pc, lr                          @ CP#9
+       movw_pc lr                              @ CP#7
+       movw_pc lr                              @ CP#8
+       movw_pc lr                              @ CP#9
 #ifdef CONFIG_VFP
        W(b)    do_vfp                          @ CP#10 (VFP)
        W(b)    do_vfp                          @ CP#11 (VFP)
 #else
-       W(mov)  pc, lr                          @ CP#10 (VFP)
-       W(mov)  pc, lr                          @ CP#11 (VFP)
+       movw_pc lr                              @ CP#10 (VFP)
+       movw_pc lr                              @ CP#11 (VFP)
 #endif
-       W(mov)  pc, lr                          @ CP#12
-       W(mov)  pc, lr                          @ CP#13
-       W(mov)  pc, lr                          @ CP#14 (Debug)
-       W(mov)  pc, lr                          @ CP#15 (Control)
+       movw_pc lr                              @ CP#12
+       movw_pc lr                              @ CP#13
+       movw_pc lr                              @ CP#14 (Debug)
+       movw_pc lr                              @ CP#15 (Control)
 
 #ifdef CONFIG_NEON
        .align  6
arch/arm/kernel/entry-header.S
index ac34c0d9384b000c39eec3f63763756f086ee8a7..7e9ed1eea40a63d3a72e898b46278ef38911c741 100644
        mov     \rd, sp, lsr #13
        mov     \rd, \rd, lsl #13
        .endm
+
+       @
+       @ 32-bit wide "mov pc, reg"
+       @
+       .macro  movw_pc, reg
+       mov     pc, \reg
+       .endm
 #else  /* CONFIG_THUMB2_KERNEL */
        .macro  svc_exit, rpsr
        clrex                                   @ clear the exclusive monitor
        lsr     \rd, \rd, #13
        mov     \rd, \rd, lsl #13
        .endm
+
+       @
+       @ 32-bit wide "mov pc, reg"
+       @
+       .macro  movw_pc, reg
+       mov     pc, \reg
+       nop
+       .endm
 #endif /* !CONFIG_THUMB2_KERNEL */
 
 /*
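
As a usage sketch (illustrative, condensed from the call_fpe hunk above;
not additional patched code): the dispatcher enters the table with a
computed branch at a 4-byte stride, so every entry, whether a movw_pc
return or a W(b) handler branch, must occupy exactly one word:

	THUMB(	add	pc, r8			)	@ r8: CP number scaled to the 4-byte stride
	nop

	movw_pc	lr				@ CP#0: 16-bit mov + 16-bit nop = one word
	W(b)	do_fpe				@ CP#1 (FPE): forced 32-bit branch = one word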