arch/tile: catch up with section naming convention in 2.6.35
author    Chris Metcalf <cmetcalf@tilera.com>
Sun, 27 Feb 2011 23:52:24 +0000 (18:52 -0500)
committer Chris Metcalf <cmetcalf@tilera.com>
Tue, 1 Mar 2011 21:18:52 +0000 (16:18 -0500)
The kernel's section naming convention changed to a double-dot form,
e.g. ".data..page_aligned" instead of the old ".data.page_aligned".
This commit fixes the places in the tile architecture that were still
using the old single-dot names.  One tile-specific section (.init.page)
was dropped in favor of just using an "aligned" attribute on the
staging page table in mm/init.c.

Sam Ravnborg <sam@ravnborg.org> pointed out the generic
__PAGE_ALIGNED_BSS, etc., helpers.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
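
For context, the generic helpers this patch switches to live in
include/linux/linkage.h.  The C-side __page_aligned_bss/__page_aligned_data
supply both the double-dot section name and PAGE_SIZE alignment, while the
assembly-side __PAGE_ALIGNED_BSS/__PAGE_ALIGNED_DATA only switch sections,
which is why head_32.S keeps its explicit ".align PAGE_SIZE".  A minimal
usage sketch (the array name is hypothetical, not from this patch):

    #include <linux/linkage.h>   /* __page_aligned_bss */
    #include <asm/page.h>        /* PAGE_SIZE */

    /* Lands in .bss..page_aligned with PAGE_SIZE alignment, replacing an
     * open-coded __attribute__((aligned(PAGE_SIZE),
     *                           section(".bss.page_aligned"))) annotation. */
    int example_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
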
arch/tile/include/asm/cache.h
arch/tile/kernel/head_32.S
arch/tile/kernel/vmlinux.lds.S
arch/tile/lib/atomic_32.c
arch/tile/mm/init.c

diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
index 08a2815b5e4e7c9b6456b3c8c4d4d165bfc0db6c..392e5333dd8b06a31afdd045298f273451ec4d2e 100644
--- a/arch/tile/include/asm/cache.h
+++ b/arch/tile/include/asm/cache.h
@@ -40,7 +40,7 @@
 #define INTERNODE_CACHE_BYTES   L2_CACHE_BYTES
 
 /* Group together read-mostly things to avoid cache false sharing */
-#define __read_mostly __attribute__((__section__(".data.read_mostly")))
+#define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 /*
  * Attribute for data that is kept read/write coherent until the end of
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 90e7c4435693d1848917b267a28a8770a8d5c64b..05b5f4d54d912d80eeac6c745d505f922a266200 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -133,7 +133,7 @@ ENTRY(_start)
        }
        ENDPROC(_start)
 
-.section ".bss.page_aligned","w"
+__PAGE_ALIGNED_BSS
        .align PAGE_SIZE
 ENTRY(empty_zero_page)
        .fill PAGE_SIZE,1,0
@@ -148,7 +148,7 @@ ENTRY(empty_zero_page)
        .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN)
        .endm
 
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
        .align PAGE_SIZE
 ENTRY(swapper_pg_dir)
        /*
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index 25fdc0c1839a4d6245e34336ffa527e1c09b5e4a..4e211c1bf50035ce35257c3a9d981cf64e5a45d4 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -59,10 +59,7 @@ SECTIONS
 
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_sinitdata) = .;
-  .init.page : AT (ADDR(.init.page) - LOAD_OFFSET) {
-    *(.init.page)
-  } :data =0
-  INIT_DATA_SECTION(16)
+  INIT_DATA_SECTION(16) :data =0
   PERCPU(PAGE_SIZE)
   . = ALIGN(PAGE_SIZE);
   VMLINUX_SYMBOL(_einitdata) = .;
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 7a5cc706ab62cf19dbbcae51fdb5b2a4ea04680a..20c31626f72faa654538e664ad9476a99fc6da8d 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -46,8 +46,7 @@ struct atomic_locks_on_cpu *atomic_lock_ptr[ATOMIC_HASH_L1_SIZE]
 #else /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
 /* This page is remapped on startup to be hash-for-home. */
-int atomic_locks[PAGE_SIZE / sizeof(int) /* Only ATOMIC_HASH_SIZE is used */]
-  __attribute__((aligned(PAGE_SIZE), section(".bss.page_aligned")));
+int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
 
 #endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
 
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 0b9ce69b0ee5e755ea6186e1093269507cae156d..e34597e512dffa3dac797a03c82f66ff1effe9c9 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -445,7 +445,7 @@ static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
 
 /* Temporary page table we use for staging. */
 static pgd_t pgtables[PTRS_PER_PGD]
- __attribute__((section(".init.page")));
+ __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
 
 /*
  * This maps the physical memory to kernel virtual address space, a total