Use macros for .data.page_aligned section.
author     Tim Abbott <tabbott@ksplice.com>
           Sun, 20 Sep 2009 22:14:15 +0000 (18:14 -0400)
committer  Sam Ravnborg <sam@ravnborg.org>
           Mon, 21 Sep 2009 04:27:08 +0000 (06:27 +0200)
This patch changes the remaining direct references to
.data.page_aligned in C and assembly code to use the macros in
include/linux/linkage.h.
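
For reference, the macros in question looked roughly like this in
include/linux/linkage.h at the time (a sketch, not quoted verbatim):

    /* For C code: place the object in .data.page_aligned and
     * align it to a page boundary. */
    #define __page_aligned_data  __section(.data.page_aligned) __aligned(PAGE_SIZE)

    /* For assembly code: emit the .section directive; callers still
     * request alignment themselves, e.g. with .balign PAGE_SIZE. */
    #define __PAGE_ALIGNED_DATA  .section ".data.page_aligned", "aw"

As a side effect, the x86 head_32.S hunk below also normalizes the
section flags from "wa" to the conventional "aw" spelling used by the
macro (gas accepts the flag characters in either order).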

Signed-off-by: Tim Abbott <tabbott@ksplice.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Haavard Skinnemoen <hskinnemoen@atmel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
arch/avr32/mm/init.c
arch/powerpc/kernel/vdso.c
arch/powerpc/kernel/vdso32/vdso32_wrapper.S
arch/powerpc/kernel/vdso64/vdso64_wrapper.S
arch/s390/kernel/vdso.c
arch/s390/kernel/vdso32/vdso32_wrapper.S
arch/s390/kernel/vdso64/vdso64_wrapper.S
arch/x86/include/asm/cache.h
arch/x86/kernel/head_32.S

diff --git a/arch/avr32/mm/init.c b/arch/avr32/mm/init.c
index e819fa69a90ea3d03e3aabae8b8009fe863009a7..cc60d10cf8f7b446a16e1567966f85d4f538dc28 100644
--- a/arch/avr32/mm/init.c
+++ b/arch/avr32/mm/init.c
 #include <asm/setup.h>
 #include <asm/sections.h>
 
-#define __page_aligned __attribute__((section(".data.page_aligned")))
-
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned;
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_data;
 
 struct page *empty_zero_page;
 EXPORT_SYMBOL(empty_zero_page);
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index a0abce251d0a0ae1021e1012fdc9b37fff31c7c6..3faaf29bdb29def5748cf54beef11a285764e438 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -1,3 +1,4 @@
+
 /*
  *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
  *                      <benh@kernel.crashing.org>
@@ -74,7 +75,7 @@ static int vdso_ready;
 static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
 /* Format of the patch table */
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 556f0caa5d842a27885a6c2417787c18315037dc..6e8f507ed32bb5f74f9d9f5574f09636ae15f9a3 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-       .section ".data.page_aligned"
+       __PAGE_ALIGNED_DATA
 
        .globl vdso32_start, vdso32_end
        .balign PAGE_SIZE
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 0529cb9e3b97bfaef233858595f91cebb820ac96..b8553d62b792ffb2953a400fabdf8fc746c62501 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-       .section ".data.page_aligned"
+       __PAGE_ALIGNED_DATA
 
        .globl vdso64_start, vdso64_end
        .balign PAGE_SIZE
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index 45e1708b70fd028fdd9b6b432f156f886855d0ae..45a3e9a7ae21db6f5705eac145fe62790e4c5b0f 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -75,7 +75,7 @@ __setup("vdso=", vdso_setup);
 static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
-} vdso_data_store __attribute__((__section__(".data.page_aligned")));
+} vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = &vdso_data_store.data;
 
 /*
diff --git a/arch/s390/kernel/vdso32/vdso32_wrapper.S b/arch/s390/kernel/vdso32/vdso32_wrapper.S
index 61639a89e70b93e7c7355eb080c7afcf07e79b2d..ae42f8ce350bcb5047e6b553749fa6dc453295c7 100644
--- a/arch/s390/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/s390/kernel/vdso32/vdso32_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-       .section ".data.page_aligned"
+       __PAGE_ALIGNED_DATA
 
        .globl vdso32_start, vdso32_end
        .balign PAGE_SIZE
diff --git a/arch/s390/kernel/vdso64/vdso64_wrapper.S b/arch/s390/kernel/vdso64/vdso64_wrapper.S
index d8e2ac14d564d057bef615266bcf17abf90083cf..c245842b516fdfa490e2c877252730cfc8fb5589 100644
--- a/arch/s390/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/s390/kernel/vdso64/vdso64_wrapper.S
@@ -1,7 +1,8 @@
 #include <linux/init.h>
+#include <linux/linkage.h>
 #include <asm/page.h>
 
-       .section ".data.page_aligned"
+       __PAGE_ALIGNED_DATA
 
        .globl vdso64_start, vdso64_end
        .balign PAGE_SIZE
diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 5d367caa0e36c437e17cb29f76dc285d9b6d4a49..549860d3be8f1e8bb856e084e7fac225e6d53045 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_CACHE_H
 #define _ASM_X86_CACHE_H
 
+#include <linux/linkage.h>
+
 /* L1 cache line size */
 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
@@ -13,7 +15,7 @@
 #ifdef CONFIG_SMP
 #define __cacheline_aligned_in_smp                                     \
        __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))      \
-       __attribute__((__section__(".data.page_aligned")))
+       __page_aligned_data
 #endif
 #endif
 
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 1dac23958427fcf3c9b72e7b59ddd13b1010dd76..218aad7ee76e050202b95a7fc303c19d29697cc9 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -626,7 +626,7 @@ ENTRY(empty_zero_page)
  * This starts the data section.
  */
 #ifdef CONFIG_X86_PAE
-.section ".data.page_aligned","wa"
+__PAGE_ALIGNED_DATA
        /* Page-aligned for the benefit of paravirt? */
        .align PAGE_SIZE_asm
 ENTRY(swapper_pg_dir)