sh: TLB protection violation exception optimizations.
author     Paul Mundt <lethal@linux-sh.org>
Fri, 14 Aug 2009 17:49:40 +0000 (02:49 +0900)
committer  Paul Mundt <lethal@linux-sh.org>
Fri, 14 Aug 2009 17:49:40 +0000 (02:49 +0900)
This adds a bit of rework so that TLB protection violations skip the
TLB miss fastpath and go directly into do_page_fault(), as these faults
require slow-path handling.
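
For orientation, a minimal, self-contained C sketch of the resulting split
is shown below. It is illustrative only: handle_tlbmiss() and do_page_fault()
are the names from this patch, while the *_model() functions, the stand-in
struct fake_regs and the pte_present flag are hypothetical.

    #include <stdio.h>

    /*
     * Illustrative model only, not the SH kernel code.  The real
     * handle_tlbmiss() walks the page tables and reloads the TLB with
     * interrupts still disabled; do_page_fault() is the full slow path.
     */
    struct fake_regs { unsigned long pc; };  /* stand-in for struct pt_regs */

    /* Fastpath: succeeds only if a valid translation already exists. */
    static int handle_tlbmiss_model(struct fake_regs *regs,
                                    unsigned long writeaccess,
                                    unsigned long address, int pte_present)
    {
            (void)regs; (void)writeaccess; (void)address;
            if (!pte_present)
                    return 1;  /* nonzero: caller falls back to slow path */
            /* ...the real handler would reload the TLB entry here... */
            return 0;
    }

    /* Slow path: may fault the page in, deliver signals, and so on. */
    static void do_page_fault_model(struct fake_regs *regs,
                                    unsigned long writeaccess,
                                    unsigned long address)
    {
            printf("slow path: addr=%#lx write=%lu pc=%#lx\n",
                   address, writeaccess, regs->pc);
    }

    int main(void)
    {
            struct fake_regs regs = { .pc = 0x8c001000UL };

            /* TLB miss with a resident page: handled in the fastpath. */
            if (handle_tlbmiss_model(&regs, 0, 0x10000000UL, 1))
                    do_page_fault_model(&regs, 0, 0x10000000UL);

            /*
             * Protection violation: with this patch the assembly stubs
             * no longer try the fastpath at all and branch straight to
             * the slow path.
             */
            do_page_fault_model(&regs, 1, 0x10001000UL);
            return 0;
    }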

Based on an earlier patch by SUGIOKA Toshinobu.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/kernel/cpu/sh3/entry.S
arch/sh/mm/fault_32.c

arch/sh/kernel/cpu/sh3/entry.S
index 3cb531f233f24cd27edcf1f8aedd7d86201c0d5c..bbaf2bd118e748176fa1b6ab9d40a2ad54f846af 100644
@@ -113,34 +113,33 @@ OFF_TRA   =  (16*4+6*4)
 #if defined(CONFIG_MMU)
        .align  2
 ENTRY(tlb_miss_load)
-       bra     call_dpf
+       bra     call_handle_tlbmiss
         mov    #0, r5
 
        .align  2
 ENTRY(tlb_miss_store)
-       bra     call_dpf
+       bra     call_handle_tlbmiss
         mov    #1, r5
 
        .align  2
 ENTRY(initial_page_write)
-       bra     call_dpf
+       bra     call_handle_tlbmiss
         mov    #1, r5
 
        .align  2
 ENTRY(tlb_protection_violation_load)
-       bra     call_dpf
+       bra     call_do_page_fault
         mov    #0, r5
 
        .align  2
 ENTRY(tlb_protection_violation_store)
-       bra     call_dpf
+       bra     call_do_page_fault
         mov    #1, r5
 
-call_dpf:
+call_handle_tlbmiss:
        mov.l   1f, r0
        mov     r5, r8
        mov.l   @r0, r6
-       mov     r6, r9
        mov.l   2f, r0
        sts     pr, r10
        jsr     @r0
@@ -151,16 +150,25 @@ call_dpf:
         lds    r10, pr
        rts
         nop
-0:     mov.l   3f, r0
-       mov     r9, r6
+0:
        mov     r8, r5
+call_do_page_fault:
+       mov.l   1f, r0
+       mov.l   @r0, r6
+
+       sti
+
+       mov.l   3f, r0
+       mov.l   4f, r1
+       mov     r15, r4
        jmp     @r0
-        mov    r15, r4
+        lds    r1, pr
 
        .align 2
 1:     .long   MMU_TEA
-2:     .long   __do_page_fault
+2:     .long   handle_tlbmiss
 3:     .long   do_page_fault
+4:     .long   ret_from_exception
 
        .align  2
 ENTRY(address_error_load)
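
The new call_do_page_fault stub reads the faulting address from MMU_TEA,
re-enables interrupts with sti, and jumps to do_page_fault() with pr
preloaded so the handler returns through ret_from_exception. Assuming the
usual SH convention of passing the first three C arguments in r4/r5/r6,
the stub is effectively making the call modelled below (stand-in types,
not kernel code):

    #include <stdio.h>

    /*
     * Hypothetical stand-ins; the real struct pt_regs, sti and
     * ret_from_exception live in the kernel.  Comments note which
     * register the stub loads for each argument.
     */
    struct pt_regs_model { unsigned long sp; };

    static void do_page_fault_model(struct pt_regs_model *regs, /* r4 <- r15     */
                                    unsigned long writeaccess,  /* r5 <- 0 or 1  */
                                    unsigned long address)      /* r6 <- MMU_TEA */
    {
            printf("do_page_fault(regs=%p, write=%lu, addr=%#lx)\n",
                   (void *)regs, writeaccess, address);
            /* returns through ret_from_exception because pr was preloaded */
    }

    int main(void)
    {
            struct pt_regs_model regs = { .sp = 0 };

            /* interrupts are enabled first (the 'sti' in the stub), then the call */
            do_page_fault_model(&regs, 1, 0x10001000UL);
            return 0;
    }
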
arch/sh/mm/fault_32.c
index dbbdeba2cee5e2e508c9bcbb485548f7223fa9cf..41840647f65fcd35d0c0c09b105d163887289559 100644
@@ -318,9 +318,9 @@ do_sigbus:
 /*
  * Called with interrupts disabled.
  */
-asmlinkage int __kprobes __do_page_fault(struct pt_regs *regs,
-                                        unsigned long writeaccess,
-                                        unsigned long address)
+asmlinkage int __kprobes
+handle_tlbmiss(struct pt_regs *regs, unsigned long writeaccess,
+              unsigned long address)
 {
        pgd_t *pgd;
        pud_t *pud;