More .set push/pop encapsulation, more eye-friendly code formatting.
author Thiemo Seufer <ths@networkno.de>
Thu, 1 Sep 2005 08:59:55 +0000 (08:59 +0000)
committer Ralf Baechle <ralf@linux-mips.org>
Sat, 29 Oct 2005 18:32:14 +0000 (19:32 +0100)
Signed-off-by: Thiemo Seufer <ths@networkno.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
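
The pattern applied throughout r4kcache.h: instead of each asm block ending with a hard-coded ".set mips0" and ".set reorder" (which silently assumes those were the options in effect beforehand), the option changes are now bracketed with ".set push" / ".set pop", restoring whatever state the surrounding code was assembled with. A minimal sketch of the resulting shape, with a hypothetical op value standing in for the real asm/cacheops.h constants:

	/* Illustrative sketch only, not from the patch: .set push saves the
	 * assembler's option state and .set pop restores it, so the block
	 * stays correct even inside code already assembled with noreorder
	 * or a different ISA level. */
	static inline void cache_op_sketch(unsigned long addr)
	{
		__asm__ __volatile__(
		"	.set	push			\n"	/* save assembler state */
		"	.set	noreorder		\n"	/* no insn reordering   */
		"	.set	mips3			\n"	/* cache insn needs R4k ISA */
		"	cache	%0, %1			\n"
		"	.set	pop			\n"	/* restore saved state  */
		:
		: "i" (0x10 /* hypothetical op value */),
		  "m" (*(unsigned char *)addr));
	}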
include/asm-mips/r4kcache.h

index 5bea49feec66c85dc32395804e4c6157eaef6607..a5ea9d828aee4083ba0bef097cd0ea409af6bf66 100644
@@ -21,7 +21,7 @@
  *
  *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
  *    the index bits from the virtual address.  This breaks with tradition
- *    set by the R4000.  To keep unpleassant surprises from happening we pick
+ *    set by the R4000.  To keep unpleasant surprises from happening we pick
  *    an address in KSEG0 / CKSEG0.
  *  - We need a properly sign extended address for 64-bit code.  To get away
  *    without ifdefs we let the compiler do it by a type cast.
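
A one-line illustration (not from the patch) of the cast trick the comment describes: converting through a signed 32-bit type makes the compiler sign-extend the KSEG0 address, so the same expression yields the canonical form on 64-bit kernels without an ifdef:

	unsigned long base = (unsigned long)(int)0x80000000u;
	/* 32-bit kernel: base == 0x80000000
	 * 64-bit kernel: base == 0xffffffff80000000 (properly sign extended) */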
 
 #define cache_op(op,addr)                                              \
        __asm__ __volatile__(                                           \
+       "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
        "       .set    mips3\n\t                               \n"     \
        "       cache   %0, %1                                  \n"     \
-       "       .set    mips0                                   \n"     \
-       "       .set    reorder"                                        \
+       "       .set    pop                                     \n"     \
        :                                                               \
        : "i" (op), "m" (*(unsigned char *)(addr)))
 
@@ -84,14 +84,14 @@ static inline void flush_scache_line(unsigned long addr)
 static inline void protected_flush_icache_line(unsigned long addr)
 {
        __asm__ __volatile__(
-               ".set noreorder\n\t"
-               ".set mips3\n"
-               "1:\tcache %0,(%1)\n"
-               "2:\t.set mips0\n\t"
-               ".set reorder\n\t"
-               ".section\t__ex_table,\"a\"\n\t"
-               STR(PTR)"\t1b,2b\n\t"
-               ".previous"
+               "       .set    push                    \n"
+               "       .set    noreorder               \n"
+               "       .set    mips3                   \n"
+               "1:     cache   %0, (%1)                \n"
+               "2:     .set    pop                     \n"
+               "       .section __ex_table,\"a\"       \n"
+               "       "STR(PTR)" 1b, 2b               \n"
+               "       .previous"
                :
                : "i" (Hit_Invalidate_I), "r" (addr));
 }
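
The 1:/2: labels and the __ex_table entry are what make the flush "protected": if the cache op at label 1 takes an address error or TLB fault, the fault handler finds the faulting PC in __ex_table and resumes at label 2 instead of oopsing. A rough sketch of that lookup, with a hypothetical function name and details approximating the MIPS fault path of this era:

	/* Hedged sketch, not part of this patch: roughly what the MIPS fault
	 * handler does for a kernel-mode fault on a protected cache op.
	 * search_exception_tables() is the generic kernel helper; insn and
	 * nextinsn are the MIPS exception_table_entry fields pairing the
	 * faulting address (1b above) with its fixup (2b above). */
	static void fixup_protected_cacheop(struct pt_regs *regs)
	{
		const struct exception_table_entry *fixup;

		fixup = search_exception_tables(regs->cp0_epc);
		if (fixup)
			regs->cp0_epc = fixup->nextinsn;	/* resume at 2: */
	}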
@@ -100,19 +100,19 @@ static inline void protected_flush_icache_line(unsigned long addr)
  * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
  * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
  * caches.  We're talking about one cacheline unnecessarily getting invalidated
- * here so the penaltiy isn't overly hard.
+ * here so the penalty isn't overly hard.
  */
 static inline void protected_writeback_dcache_line(unsigned long addr)
 {
        __asm__ __volatile__(
-               ".set noreorder\n\t"
-               ".set mips3\n"
-               "1:\tcache %0,(%1)\n"
-               "2:\t.set mips0\n\t"
-               ".set reorder\n\t"
-               ".section\t__ex_table,\"a\"\n\t"
-               STR(PTR)"\t1b,2b\n\t"
-               ".previous"
+               "       .set    push                    \n"
+               "       .set    noreorder               \n"
+               "       .set    mips3                   \n"
+               "1:     cache   %0, (%1)                \n"
+               "2:     .set    pop                     \n"
+               "       .section __ex_table,\"a\"       \n"
+               "       "STR(PTR)" 1b, 2b               \n"
+               "       .previous"
                :
                : "i" (Hit_Writeback_Inv_D), "r" (addr));
 }
@@ -120,14 +120,14 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
 static inline void protected_writeback_scache_line(unsigned long addr)
 {
        __asm__ __volatile__(
-               ".set noreorder\n\t"
-               ".set mips3\n"
-               "1:\tcache %0,(%1)\n"
-               "2:\t.set mips0\n\t"
-               ".set reorder\n\t"
-               ".section\t__ex_table,\"a\"\n\t"
-               STR(PTR)"\t1b,2b\n\t"
-               ".previous"
+               "       .set    push                    \n"
+               "       .set    noreorder               \n"
+               "       .set    mips3                   \n"
+               "1:     cache   %0, (%1)                \n"
+               "2:     .set    pop                     \n"
+               "       .section __ex_table,\"a\"       \n"
+               "       "STR(PTR)" 1b, 2b               \n"
+               "       .previous"
                :
                : "i" (Hit_Writeback_Inv_SD), "r" (addr));
 }
@@ -142,6 +142,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
 
 #define cache16_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x010(%0)        \n"     \
@@ -160,8 +161,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
        "       cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)        \n"     \
        "       cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)        \n"     \
        "       cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)        \n"     \
-       "       .set mips0                                      \n"     \
-       "       .set reorder                                    \n"     \
+       "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));
@@ -285,6 +285,7 @@ static inline void blast_scache16_page_indexed(unsigned long page)
 
 #define cache32_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x020(%0)        \n"     \
@@ -303,8 +304,7 @@ static inline void blast_scache16_page_indexed(unsigned long page)
        "       cache %1, 0x340(%0); cache %1, 0x360(%0)        \n"     \
        "       cache %1, 0x380(%0); cache %1, 0x3a0(%0)        \n"     \
        "       cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)        \n"     \
-       "       .set mips0                                      \n"     \
-       "       .set reorder                                    \n"     \
+       "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));
@@ -428,6 +428,7 @@ static inline void blast_scache32_page_indexed(unsigned long page)
 
 #define cache64_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x040(%0)        \n"     \
@@ -446,8 +447,7 @@ static inline void blast_scache32_page_indexed(unsigned long page)
        "       cache %1, 0x680(%0); cache %1, 0x6c0(%0)        \n"     \
        "       cache %1, 0x700(%0); cache %1, 0x740(%0)        \n"     \
        "       cache %1, 0x780(%0); cache %1, 0x7c0(%0)        \n"     \
-       "       .set mips0                                      \n"     \
-       "       .set reorder                                    \n"     \
+       "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));
@@ -532,6 +532,7 @@ static inline void blast_scache64_page_indexed(unsigned long page)
 
 #define cache128_unroll32(base,op)                                     \
        __asm__ __volatile__(                                           \
+       "       .set push                                       \n"     \
        "       .set noreorder                                  \n"     \
        "       .set mips3                                      \n"     \
        "       cache %1, 0x000(%0); cache %1, 0x080(%0)        \n"     \
@@ -550,8 +551,7 @@ static inline void blast_scache64_page_indexed(unsigned long page)
        "       cache %1, 0xd00(%0); cache %1, 0xd80(%0)        \n"     \
        "       cache %1, 0xe00(%0); cache %1, 0xe80(%0)        \n"     \
        "       cache %1, 0xf00(%0); cache %1, 0xf80(%0)        \n"     \
-       "       .set mips0                                      \n"     \
-       "       .set reorder                                    \n"     \
+       "       .set pop                                        \n"     \
                :                                                       \
                : "r" (base),                                           \
                  "i" (op));