mm: Move ARCH_SLAB_MINALIGN and ARCH_KMALLOC_MINALIGN to <linux/slub_def.h>
Author:     David Woodhouse <dwmw2@infradead.org>
AuthorDate: Wed, 19 May 2010 11:02:14 +0000 (12:02 +0100)
Commit:     Pekka Enberg <penberg@cs.helsinki.fi>
CommitDate: Wed, 19 May 2010 19:03:13 +0000 (22:03 +0300)
Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
include/linux/slub_def.h
mm/slub.c

index 0249d4175bacbb9d7b2a3a86758b8dfd0807a1e7..55695c8d2f8ad954a7575ac11961d1254347b1b3 100644 (file)
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -116,6 +116,14 @@ struct kmem_cache {
 
 #define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
 
+#ifndef ARCH_KMALLOC_MINALIGN
+#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
+#endif
+
+#ifndef ARCH_SLAB_MINALIGN
+#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
+#endif
+
 /*
  * Maximum kmalloc object size handled by SLUB. Larger object allocations
  * are passed through to the page allocator. The page allocator "fastpath"
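With these defaults now living in <linux/slub_def.h> (pulled in through <linux/slab.h>), code outside the allocator can rely on ARCH_KMALLOC_MINALIGN as the minimum alignment every kmalloc() return value is guaranteed to have. A hedged consumer-side sketch follows; the struct and function names are hypothetical and not part of this commit:

#include <linux/slab.h>

/*
 * Hypothetical driver context embedding a buffer that must start on a
 * kmalloc-compatible boundary, e.g. so it can be handed to helpers that
 * assume kmalloc() alignment.
 */
struct demo_ctx {
	int flags;
	u8 buf[64] __aligned(ARCH_KMALLOC_MINALIGN);
};

static struct demo_ctx *demo_ctx_alloc(gfp_t gfp)
{
	/* kmalloc()/kzalloc() already return ARCH_KMALLOC_MINALIGN-aligned memory */
	return kzalloc(sizeof(struct demo_ctx), gfp);
}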
index d2a54fe71ea22519feada61f181e874685c0a51b..c874c3efac293f174f256d9fcd12505f81d78fe2 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
 #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA | SLAB_NOTRACK)
 
-#ifndef ARCH_KMALLOC_MINALIGN
-#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
-#endif
-
-#ifndef ARCH_SLAB_MINALIGN
-#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
-#endif
-
 #define OO_SHIFT       16
 #define OO_MASK                ((1 << OO_SHIFT) - 1)
 #define MAX_OBJS_PER_PAGE      65535 /* since page.objects is u16 */
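The #ifndef guards moved into the header only supply defaults: an architecture that needs stricter alignment (typically so kmalloc() buffers are safe for DMA on non-coherent hardware) can keep defining the macros in one of its own headers, on some architectures asm/cache.h, and the defaults never kick in. A minimal sketch of such an override, with illustrative values rather than ones taken from any particular architecture or from this commit:

/* arch/<arch>/include/asm/cache.h -- illustrative values only */
#define L1_CACHE_SHIFT		6
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

/*
 * Make every kmalloc() allocation start on a cache-line boundary so the
 * buffer does not share a cache line with unrelated data when it is
 * mapped for DMA.
 */
#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES

/* Keep the generic 8-byte minimum for other slab caches. */
#define ARCH_SLAB_MINALIGN	8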