MIPS: Optimize TLB refill for RI/XI configurations.
Author:     David Daney <ddaney@caviumnetworks.com>
AuthorDate: Thu, 23 Aug 2012 17:02:03 +0000 (10:02 -0700)
Commit:     Ralf Baechle <ralf@linux-mips.org>
CommitDate: Thu, 11 Oct 2012 09:11:20 +0000 (11:11 +0200)
We don't have to do a separate shift to eliminate the software bits;
just rotate them into the fill bits and they will be ignored.
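
For reference, here is a minimal user-space sketch of why the single rotate
suffices. The DEMO_* bit positions and the rotr64() helper are invented for
illustration only; they stand in for the kernel's _PAGE_* layout and the
uasm-emitted drotr, and are not the real pgtable-bits.h values. The bits the
TLB actually consumes (G, PFN, RI, XI) come out identical either way; the
software bits merely wrap into the EntryLo fill field, which a TLB write
ignores.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Invented PTE bit positions for the demo: software-only bits at the
 * bottom, then NO_EXEC/NO_READ, then GLOBAL.  After the rotate, GLOBAL
 * lands on EntryLo bit 0 (G) and NO_EXEC/NO_READ on bits 62/63 (XI/RI). */
#define DEMO_SW_SHIFT       0
#define DEMO_NO_EXEC_SHIFT  2
#define DEMO_NO_READ_SHIFT  3
#define DEMO_GLOBAL_SHIFT   4

/* 64-bit rotate right, the C counterpart of the MIPS drotr instruction. */
static uint64_t rotr64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}

int main(void)
{
	/* A made-up PTE: global, execute-inhibited, both software bits set,
	 * plus an arbitrary PFN above the flag bits. */
	uint64_t pte = (1ULL << DEMO_GLOBAL_SHIFT)
		     | (1ULL << DEMO_NO_EXEC_SHIFT)
		     | (3ULL << DEMO_SW_SHIFT)
		     | (0x1234ULL << (DEMO_GLOBAL_SHIFT + 1));

	/* Old sequence: shift the software bits away, then rotate. */
	uint64_t old_lo = rotr64(pte >> DEMO_NO_EXEC_SHIFT,
				 DEMO_GLOBAL_SHIFT - DEMO_NO_EXEC_SHIFT);

	/* New sequence: a single rotate by ilog2(_PAGE_GLOBAL). */
	uint64_t new_lo = rotr64(pte, DEMO_GLOBAL_SHIFT);

	/* The two results differ only in the software bits, which the
	 * single rotate parks in the fill field just below RI/XI. */
	uint64_t fill = ((1ULL << DEMO_NO_EXEC_SHIFT) - 1)
			<< (64 - DEMO_GLOBAL_SHIFT);

	assert((old_lo & ~fill) == (new_lo & ~fill));
	printf("old entrylo: %#018" PRIx64 "\n", old_lo);
	printf("new entrylo: %#018" PRIx64 "\n", new_lo);
	return 0;
}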

Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/4294/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e09d49256908968edf6d3b018edea0f2a857b84d..658a520364ce76979191defd64c56a1da88899fb 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -599,8 +599,7 @@ static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                                                                  unsigned int reg)
 {
        if (cpu_has_rixi) {
-               UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
-               UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
        } else {
 #ifdef CONFIG_64BIT_PHYS_ADDR
                uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1019,11 +1018,9 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                if (cpu_has_rixi) {
-                       UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-                       UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-                       UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                       UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-                       UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+                       UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
                } else {
                        uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
@@ -1046,13 +1043,11 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        if (cpu_has_rixi) {
-               UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
-               UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
-               UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
                if (r4k_250MHZhwbug())
                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
-               UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
        } else {
                UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
                if (r4k_250MHZhwbug())
@@ -1212,13 +1207,9 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
                UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
        }
        if (cpu_has_rixi) {
-               uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_NO_EXEC));
-               uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_NO_EXEC));
-               uasm_i_drotr(p, even, even,
-                            ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
-               uasm_i_drotr(p, odd, odd,
-                            ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+               uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
        } else {
                uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */