/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations. Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address. This breaks with the tradition
 *    set by the R4000. To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code. To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0
#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set	mips3					\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))
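/*
 * Illustrative usage (not from the original source): the macro expands to a
 * single CACHE instruction on the line containing 'addr', e.g.
 *
 *	cache_op(Hit_Writeback_Inv_D, addr);
 *
 * The "R" constraint hands the compiler *addr as a memory operand so it can
 * pick a legal base/offset addressing mode; the byte itself is never
 * accessed, only its address is consumed by the CACHE instruction.
 */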
#ifdef CONFIG_MIPS_MT
/*
 * Temporary hacks for SMTC debug. Optionally force single-threaded
 * execution during I-cache flushes.
 */
#define PROTECT_CACHE_FLUSHES 1

#ifdef PROTECT_CACHE_FLUSHES

extern int mt_protiflush;
extern int mt_protdflush;
extern void mt_cflush_lockdown(void);
extern void mt_cflush_release(void);

#define BEGIN_MT_IPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protiflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_IPROT \
	if (mt_protiflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#define BEGIN_MT_DPROT \
	unsigned long flags = 0;			\
	unsigned long mtflags = 0;			\
	if (mt_protdflush) {				\
		local_irq_save(flags);			\
		ehb();					\
		mtflags = dvpe();			\
		mt_cflush_lockdown();			\
	}

#define END_MT_DPROT \
	if (mt_protdflush) {				\
		mt_cflush_release();			\
		evpe(mtflags);				\
		local_irq_restore(flags);		\
	}

#else
#define BEGIN_MT_IPROT
#define BEGIN_MT_DPROT
#define END_MT_IPROT
#define END_MT_DPROT
#endif /* PROTECT_CACHE_FLUSHES */
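/*
 * With CONFIG_MIPS_MT the flush prologue/epilogue macros below wrap each
 * flush body in a loop so it may be repeated (mt_n_iflushes / mt_n_dflushes
 * times) and, via the BEGIN/END_MT_*PROT hooks above, optionally run
 * single-threaded; without MT they degenerate to plain braces.
 */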
#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	BEGIN_MT_IPROT							\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}								\
	END_MT_IPROT

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	BEGIN_MT_DPROT							\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue						\
	}								\
	END_MT_DPROT

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue
#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}
#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips3			\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous			\n"		\
	:							\
	: "i" (op), "r" (addr))
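/*
 * Note (explanatory, not from the original source): label 1 marks the CACHE
 * instruction and label 2 the instruction following it.  The __ex_table
 * entry "1b, 2b" makes the exception fixup code resume at 2 if the access
 * at 1 faults, so the operation silently does nothing on a bad address.
 */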
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		protected_cache_op(Hit_Invalidate_I, addr);
		break;
	}
}
/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  We're talking about one cache line unnecessarily getting
 * invalidated here, so the penalty isn't severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_D, addr);
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}
/*
 * This one is RM7000-specific.
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}
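/*
 * The cacheNN_unroll32 macros below issue 32 CACHE instructions on
 * consecutive lines of NN bytes, i.e. each invocation covers 32 * NN bytes
 * (512 bytes for 16-byte lines, up to 4 Kbytes for 128-byte lines) starting
 * at 'base'.  The blast_* builders further down loop them over a whole
 * cache, a page, or a page worth of indexes.
 */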
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
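/*
 * For illustration (derived from the macro above, not extra source text):
 * the 32-byte D-cache instantiation generates blast_dcache32(),
 * blast_dcache32_page(page) and blast_dcache32_page_indexed(page); the
 * loongson2_ variant generates loongson2_blast_icache32() and friends; the
 * inv_* lines generate invalidate-only helpers such as
 * blast_inv_dcache32_page().
 */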
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )
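/*
 * For illustration (derived from the macro above, not extra source text):
 * these lines generate protected_blast_dcache_range(start, end),
 * protected_blast_icache_range(), protected_loongson2_blast_icache_range(),
 * blast_dcache_range(), blast_inv_dcache_range() and friends, each covering
 * [start, end) rounded out to whole cache lines.
 */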
#endif /* _ASM_R4KCACHE_H */