b 1b
#endif
+__und_fault:
+ @ Correct the PC such that it is pointing at the instruction
+ @ which caused the fault. If the faulting instruction was ARM
+ @ the PC will be pointing at the next instruction, and we have
+ @ to subtract 4. Otherwise, it is Thumb, and the PC will be
+ @ pointing at the second half of the Thumb instruction, so we
+ @ have to subtract 2.
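+ @ On entry r0 = struct pt_regs *regs and r1 = the PC
+ @ correction to apply, as set up by the callers below (4 for
+ @ ARM, 2 for Thumb). For example, an undefined ARM instruction
+ @ at 0x8000 traps with regs->ARM_pc == 0x8004; subtracting 4
+ @ rewinds the saved PC to the faulting instruction.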
+ ldr r2, [r0, #S_PC]
+ sub r2, r2, r1
+ str r2, [r0, #S_PC]
+ b do_undefinstr
+ENDPROC(__und_fault)
+
.align 5
__und_svc:
#ifdef CONFIG_KPROBES
@
@ r0 - instruction
@
-#ifndef CONFIG_THUMB2_KERNEL
+#ifndef CONFIG_THUMB2_KERNEL
ldr r0, [r4, #-4]
#else
+ mov r1, #2 @ Thumb: PC correction to apply
ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
- ldrhhs r9, [r4] @ bottom 16 bits
- orrhs r0, r9, r0, lsl #16
+ blo __und_svc_fault
+ ldrh r9, [r4] @ bottom 16 bits
+ add r4, r4, #2
+ str r4, [sp, #S_PC] @ it's a 2x16bit instr, update
+ orr r0, r9, r0, lsl #16
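+ @ r0 = the two 16-bit halves merged into the full 32-bit
+ @ Thumb instruction, as call_fpe expects (r0 - instruction)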
#endif
- adr r9, BSYM(1f)
+ adr r9, BSYM(__und_svc_finish)
mov r2, r4
bl call_fpe
+ mov r1, #4 @ PC correction to apply
+__und_svc_fault:
mov r0, sp @ struct pt_regs *regs
- bl do_undefinstr
+ bl __und_fault
@
@ IRQs off again before pulling preserved data off the stack
@
-1: disable_irq_notrace
+__und_svc_finish:
+ disable_irq_notrace
@
@ restore SPSR and restart the instruction
mov r2, r4
mov r3, r5
+ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
+ @ faulting instruction depending on Thumb mode.
+ @ r3 = regs->ARM_cpsr
@
- @ fall through to the emulation code, which returns using r9 if
- @ it has emulated the instruction, or the more conventional lr
- @ if we are to treat this as a real undefined instruction
- @
- @ r0 - instruction
+ @ The emulation code returns using r9 if it has emulated the
+ @ instruction, or the more conventional lr if we are to treat
+ @ this as a real undefined instruction
@
adr r9, BSYM(ret_from_exception)
- adr lr, BSYM(__und_usr_unknown)
+
tst r3, #PSR_T_BIT @ Thumb mode?
- itet eq @ explicit IT needed for the 1f label
- subeq r4, r2, #4 @ ARM instr at LR - 4
- subne r4, r2, #2 @ Thumb instr at LR - 2
-1: ldreqt r0, [r4]
+ bne __und_usr_thumb
+ sub r4, r2, #4 @ ARM instr at LR - 4
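+ @ ldrt performs the load with user-mode privileges; if it
+ @ faults, the out-of-line fixup further down recovers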
+1: ldrt r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
- reveq r0, r0 @ little endian instruction
+ rev r0, r0 @ little endian instruction
#endif
- beq call_fpe
+ @ r0 = 32-bit ARM instruction which caused the exception
+ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
+ @ r4 = PC value for the faulting instruction
+ @ lr = 32-bit undefined instruction function
+ adr lr, BSYM(__und_usr_fault_32)
+ b call_fpe
+
+__und_usr_thumb:
@ Thumb instruction
+ sub r4, r2, #2 @ First half of Thumb instr at LR - 2
#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
/*
* Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
* can never be supported in a single kernel, this code is not applicable at
* all when __LINUX_ARM_ARCH__ < 6. This allows simplifying assumptions to be
* made about .arch directives.
*/
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
ldr r5, .LCcpu_architecture
ldr r5, [r5]
cmp r5, #CPU_ARCH_ARMv7
- blo __und_usr_unknown
+ blo __und_usr_fault_16 @ 16bit undefined instruction
/*
* The following code won't get run unless the running CPU really is v7, so
* coding round the lack of ldrht on older arches is pointless. Temporarily
* override the assembler target arch with the minimum required instead:
*/
.arch armv6t2
#endif
-2:
- ARM( ldrht r5, [r4], #2 )
- THUMB( ldrht r5, [r4] )
- THUMB( add r4, r4, #2 )
+2: ldrht r5, [r4]
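+ @ ldrht is the user-mode halfword load; faults here are
+ @ recovered by the same out-of-line fixup as the ldrt above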
cmp r5, #0xe800 @ 32bit instruction if xx != 0
- blo __und_usr_unknown
-3: ldrht r0, [r4]
+ blo __und_usr_fault_16 @ 16bit undefined instruction
+3: ldrht r0, [r2]
add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
+ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
orr r0, r0, r5, lsl #16
+ adr lr, BSYM(__und_usr_fault_32)
+ @ r0 = the two 16-bit Thumb instructions which caused the exception
+ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
+ @ r4 = PC value for the first 16-bit Thumb instruction
+ @ lr = 32-bit undefined instruction function
#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
 .arch armv6k
#else
 .arch armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
- b __und_usr_unknown
+ b __und_usr_fault_16
#endif
- UNWIND(.fnend )
+ UNWIND(.fnend)
ENDPROC(__und_usr)
- @
- @ fallthrough to call_fpe
- @
-
/*
- * The out of line fixup for the ldrt above.
+ * The out of line fixup for the ldrt instructions above.
*/
.pushsection .fixup, "ax"
.align 2
* NEON handler code.
*
* Emulators may wish to make use of the following registers:
- * r0 = instruction opcode.
- * r2 = PC+4
+ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+ * r2 = PC value to resume execution after successful emulation
* r9 = normal "successful" return address
- * r10 = this threads thread_info structure.
+ * r10 = this thread's thread_info structure
* lr = unrecognised instruction return address
+ * IRQs disabled, FIQs enabled.
*/
@
@ Fall-through from Thumb-2 __und_usr
mov pc, lr
ENDPROC(no_fp)
-__und_usr_unknown:
- enable_irq
+__und_usr_fault_32:
+ mov r1, #4 @ PC correction for a 32-bit instruction
+ b 1f
+__und_usr_fault_16:
+ mov r1, #2 @ PC correction for a 16-bit instruction
+1: enable_irq
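+ @ IRQs are off on entry here (see the emulator entry
+ @ conditions above); re-enable them before __und_fault drops
+ @ into do_undefinstr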
mov r0, sp
adr lr, BSYM(ret_from_exception)
- b do_undefinstr
-ENDPROC(__und_usr_unknown)
+ b __und_fault
+ENDPROC(__und_usr_fault_32)
+ENDPROC(__und_usr_fault_16)
.align 5
__pabt_usr:
@ VFP hardware support entry point.
@
-@ r0 = faulted instruction
-@ r2 = faulted PC+4
-@ r9 = successful return
+@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
+@ r2 = PC value to resume execution after successful emulation
+@ r9 = normal "successful" return address
@ r10 = vfp_state union
@ r11 = CPU number
-@ lr = failure return
-
+@ lr = unrecognised instruction return address
+@ IRQs enabled.
ENTRY(vfp_support_entry)
DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
@ exception before retrying branch
@ out before setting an FPEXC that
@ stops us reading stuff
- VFPFMXR FPEXC, r1 @ restore FPEXC last
- sub r2, r2, #4
- str r2, [sp, #S_PC] @ retry the instruction
+ VFPFMXR FPEXC, r1 @ Restore FPEXC last
+ sub r2, r2, #4 @ Retry current instruction - if Thumb
+ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
+ @ else it's one 32-bit instruction, so
+ @ always subtract 4 from the following
+ @ instruction address.
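+ @ e.g. a trap on a VFP instruction at
+ @ 0x8000 saves regs->ARM_pc == 0x8004;
+ @ writing back 0x8000 re-executes it
+ @ on return.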
#ifdef CONFIG_PREEMPT
get_thread_info r10
ldr r4, [r10, #TI_PREEMPT] @ get preempt count