sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.
author	Paul Mundt <lethal@linux-sh.org>
	Thu, 1 Mar 2007 06:56:31 +0000 (15:56 +0900)
committer	Paul Mundt <lethal@linux-sh.org>
	Mon, 5 Mar 2007 05:13:26 +0000 (14:13 +0900)
Previously this was using a hardcoded 32; use L1_CACHE_BYTES for
cacheline alignment instead.
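
For context, a minimal sketch of how L1_CACHE_BYTES is derived in
include/asm-sh/cache.h. The shift value here is illustrative only; the
real L1_CACHE_SHIFT depends on the CPU subtype:

	/* Illustration only: a shift of 5 yields the 32-byte line size
	 * that the old hardcoded ALIGN(32) assumed. */
	#define L1_CACHE_SHIFT	5
	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)	/* 32 bytes here */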

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/kernel/vmlinux.lds.S
include/asm-sh/cache.h

diff --git a/arch/sh/kernel/vmlinux.lds.S b/arch/sh/kernel/vmlinux.lds.S
index 75de165867a0e75a83ac4220c493c5b10a530bfb..78a6c09875b2b6b3dc17fc18e4855a666de69668 100644
--- a/arch/sh/kernel/vmlinux.lds.S
+++ b/arch/sh/kernel/vmlinux.lds.S
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   .data.page_aligned : { *(.data.page_aligned) }
 
-  . = ALIGN(32);
+  . = ALIGN(L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
   __per_cpu_end = .;
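
The cache.h hunk below is needed because vmlinux.lds.S is run through the
C preprocessor with __ASSEMBLY__ defined; once the linker script includes
<asm/cache.h>, the C-only declarations in the header must be hidden from
it. A sketch of the resulting layout (guard placement as in the hunk):

	/* The L1_CACHE_* macros stay outside the guard, visible to both
	 * C code and the preprocessed linker script. */
	#ifndef __ASSEMBLY__
	struct cache_info { /* ... C-only fields ... */ };
	#endif /* __ASSEMBLY__ */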
diff --git a/include/asm-sh/cache.h b/include/asm-sh/cache.h
index e3a180cf506285dbffe07273c60bc35837f54ce9..9a3cb6ba9d156622d0738be6fd18ba224de9cf4c 100644
--- a/include/asm-sh/cache.h
+++ b/include/asm-sh/cache.h
@@ -21,6 +21,7 @@
 
 #define L1_CACHE_ALIGN(x)      (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
 
+#ifndef __ASSEMBLY__
 struct cache_info {
        unsigned int ways;              /* Number of cache ways */
        unsigned int sets;              /* Number of cache sets */
@@ -47,6 +48,6 @@ struct cache_info {
 
        unsigned long flags;
 };
-
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
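
For reference, a small self-contained userspace example (assumed 32-byte
line size, illustrative values only) of what the L1_CACHE_ALIGN macro
shown in the hunk above computes:

	#include <stdio.h>

	#define L1_CACHE_BYTES	32	/* assumed line size for this sketch */
	#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

	int main(void)
	{
		/* Rounds a size up to the next cache-line boundary. */
		printf("%d\n", L1_CACHE_ALIGN(1));	/* prints 32 */
		printf("%d\n", L1_CACHE_ALIGN(32));	/* prints 32 */
		printf("%d\n", L1_CACHE_ALIGN(33));	/* prints 64 */
		return 0;
	}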