/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * SECTIONS
 * {
 *	...
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	...
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	...
 *	EXCEPTION_TABLE(...)
 *	...
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 * [__nosave_begin, __nosave_end] for the nosave data.
 */
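/*
 * For illustration, a hedged sketch of how an architecture might free
 * [__init_begin, __init_end] once boot is done (modelled on typical
 * arch/<arch>/mm/init.c code of this era; names are illustrative):
 *
 *	extern char __init_begin[], __init_end[];
 *
 *	void free_initmem(void)
 *	{
 *		unsigned long addr;
 *
 *		for (addr = (unsigned long) __init_begin;
 *		     addr < (unsigned long) __init_end; addr += PAGE_SIZE) {
 *			ClearPageReserved(virt_to_page(addr));
 *			init_page_count(virt_to_page(addr));
 *			free_page(addr);
 *			totalram_pages++;
 *		}
 *	}
 */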
#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif
/* Align . to an 8-byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)
/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif
#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif
#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	VMLINUX_SYMBOL(__start_mcount_loc) = .;	\
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif
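/*
 * Each entry in __mcount_loc is the address of an mcount call site
 * recorded at build time. A hedged sketch of the consumer (simplified
 * from kernel/trace/ftrace.c; the real code batches entries and
 * converts the call sites to nops):
 *
 *	extern unsigned long __start_mcount_loc[];
 *	extern unsigned long __stop_mcount_loc[];
 *
 *	void __init ftrace_init_sketch(void)	// illustrative name
 *	{
 *		unsigned long *p;
 *
 *		for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
 *			ftrace_record_ip(*p);	// assumed helper
 *	}
 */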
#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif
#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif
#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif
#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			*(__trace_printk_fmt) /* trace_printk() format pointers */ \
			VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif
#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif
#define DATA_DATA							\
	*(.data)							\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___markers) = .;				\
	*(__markers)							\
	VMLINUX_SYMBOL(__stop___markers) = .;				\
	. = ALIGN(32);							\
	VMLINUX_SYMBOL(__start___tracepoints) = .;			\
	*(__tracepoints)						\
	VMLINUX_SYMBOL(__stop___tracepoints) = .;			\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()
/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data.nosave)							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;
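/*
 * Data is placed here via the __nosavedata attribute; on hibernation,
 * pages in [__nosave_begin, __nosave_end) are excluded from the image
 * (see kernel/power/). A minimal usage sketch:
 *
 *	static int resume_count __nosavedata;	// not overwritten by the
 *						// restored snapshot
 */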
#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data.page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data.read_mostly)

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data.cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data.init_task)
/*
 * Read only Data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	/* PCI fixup sections */					\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
	/* RapidIO route ops */						\
	.rio_route        : AT(ADDR(.rio_route) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_rio_route_ops) = .;		\
		*(.rio_route_ops)					\
		VMLINUX_SYMBOL(__end_rio_route_ops) = .;		\
	}								\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		MCOUNT_REC()						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));
/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)
#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}
/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map. */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)
/* sched.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map. */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;
/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even on the second ld pass when generating System.map. */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;
#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;
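/*
 * A hedged sketch of how the bounds are consumed (simplified from
 * kernel/kprobes.c): planting a probe inside .kprobes.text is refused
 * so kprobe handlers cannot recursively trap themselves.
 *
 *	extern char __kprobes_text_start[], __kprobes_text_end[];
 *
 *	static int in_kprobes_functions(unsigned long addr)
 *	{
 *		return addr >= (unsigned long) __kprobes_text_start &&
 *		       addr <  (unsigned long) __kprobes_text_end;
 *	}
 */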
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}
/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}
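/*
 * The collected table is sorted after linking and binary-searched when
 * a fixable fault occurs. A sketch of the lookup, close to the generic
 * lib/extable.c implementation (comments trimmed):
 *
 *	const struct exception_table_entry *
 *	search_extable(const struct exception_table_entry *first,
 *		       const struct exception_table_entry *last,
 *		       unsigned long value)
 *	{
 *		while (first <= last) {
 *			const struct exception_table_entry *mid;
 *
 *			mid = first + ((last - first) >> 1);
 *			if (mid->insn < value)
 *				first = mid + 1;
 *			else if (mid->insn > value)
 *				last = mid - 1;
 *			else
 *				return mid;
 *		}
 *		return NULL;
 *	}
 */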
/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data.init_task : {						\
		INIT_TASK_DATA(align)					\
	}
#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif
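/*
 * Consumed at boot roughly as follows (cf. do_ctors() in init/main.c;
 * sketch, slightly simplified):
 *
 *	typedef void (*ctor_fn_t)(void);
 *	extern ctor_fn_t __ctors_start[], __ctors_end[];
 *
 *	static void __init do_ctors(void)
 *	{
 *		ctor_fn_t *fn;
 *
 *		for (fn = __ctors_start; fn < __ctors_end; fn++)
 *			(*fn)();
 *	}
 */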
/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	*(.init.rodata)							\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)
#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)
#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)
#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)
/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		VMLINUX_SYMBOL(__bss_start) = .;			\
		*(.bss.page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
		VMLINUX_SYMBOL(__bss_stop) = .;				\
	}
/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\
/* Stabs debugging sections. */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }
#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif
#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif
#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}
#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;
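/*
 * Early "param=value" boot options are matched against this table,
 * roughly as below (hedged sketch of obsolete_checksetup() in
 * init/main.c; error handling omitted):
 *
 *	extern struct obs_kernel_param __setup_start[], __setup_end[];
 *
 *	static int __init checksetup(char *line)
 *	{
 *		struct obs_kernel_param *p;
 *
 *		for (p = __setup_start; p < __setup_end; p++) {
 *			int n = strlen(p->str);
 *
 *			if (!strncmp(line, p->str, n))
 *				return p->setup_func(line + n);
 *		}
 *		return 0;
 *	}
 */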
#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		INITCALLS						\
		VMLINUX_SYMBOL(__initcall_end) = .;
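/*
 * The ordered function pointers collected between __initcall_start and
 * __initcall_end are invoked at boot, roughly (cf. do_initcalls() in
 * init/main.c; sketch):
 *
 *	typedef int (*initcall_t)(void);
 *	extern initcall_t __initcall_start[], __initcall_end[];
 *
 *	static void __init do_initcalls(void)
 *	{
 *		initcall_t *call;
 *
 *		for (call = __initcall_start; call < __initcall_end; call++)
 *			do_one_initcall(*call);
 *	}
 */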
#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;
#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;
#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	VMLINUX_SYMBOL(__initramfs_end) = .;
#else
#define INIT_RAM_FS
#endif
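/*
 * The cpio blob collected in .init.ramfs is unpacked at boot, roughly
 * (hedged sketch of populate_rootfs() in init/initramfs.c):
 *
 *	extern char __initramfs_start[], __initramfs_end[];
 *
 *	static int __init populate_rootfs(void)
 *	{
 *		char *err = unpack_to_rootfs(__initramfs_start,
 *				__initramfs_end - __initramfs_start, 0);
 *		if (err)
 *			panic(err);
 *		return 0;
 *	}
 */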
/**
 * PERCPU_VADDR - define output section for percpu area
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to output section for percpu area.  If @vaddr
 * is not blank, it specifies explicit base address and all percpu
 * symbols will be offset from the given address.  If blank, @vaddr
 * always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * only one word.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(vaddr, phdr)					\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
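/*
 * Example use (hedged; modelled on x86_64's vmlinux.lds.S of this era,
 * which links the percpu area at virtual address 0 into a dedicated
 * PHDR named "percpu"):
 *
 *	PERCPU_VADDR(0, :percpu)
 */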
/**
 * PERCPU - define output section for percpu area, simple version
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr; __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
 * that __per_cpu_load is defined as a relative symbol against
 * .data.percpu, which is required for relocatable x86_32
 * configuration.
 */
#define PERCPU(align)							\
	. = ALIGN(align);						\
	.data.percpu	: AT(ADDR(.data.percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data.percpu.first)					\
		*(.data.percpu.page_aligned)				\
		*(.data.percpu)						\
		*(.data.percpu.shared_aligned)				\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}
/*
 * Definition of the high level *_SECTION macros
 * They will fit only a subset of the architectures
 */

/*
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as page_align if page-aligned data is not used.
 */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
	}
#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}
#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}
#define BSS_SECTION(sbss_align, bss_align)				\
	SBSS(sbss_align)						\
	BSS(bss_align)
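/*
 * Example use of the high level macros in an architecture's
 * vmlinux.lds.S (hedged sketch mirroring the sample at the top of this
 * file; the alignment arguments are illustrative):
 *
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *	BSS_SECTION(0, 0)
 */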