/*
 * Helper macros to support writing architecture specific
 * linker scripts.
 *
 * A minimal linker script has the following content:
 * [This is a sample, architectures may have special requirements]
 *
 * OUTPUT_FORMAT(...)
 * OUTPUT_ARCH(...)
 * ENTRY(...)
 * SECTIONS
 * {
 *	. = START;
 *	__init_begin = .;
 *	HEAD_TEXT_SECTION
 *	INIT_TEXT_SECTION(PAGE_SIZE)
 *	INIT_DATA_SECTION(...)
 *	PERCPU(CACHELINE_SIZE, PAGE_SIZE)
 *	__init_end = .;
 *
 *	_stext = .;
 *	TEXT_SECTION = 0
 *	_etext = .;
 *
 *	_sdata = .;
 *	RO_DATA_SECTION(PAGE_SIZE)
 *	RW_DATA_SECTION(...)
 *	_edata = .;
 *
 *	EXCEPTION_TABLE(...)
 *	NOTES
 *
 *	BSS_SECTION(0, 0, 0)
 *	_end = .;
 *
 *	STABS_DEBUG
 *	DWARF_DEBUG
 *
 *	DISCARDS		// must be the last
 * }
 *
 * [__init_begin, __init_end] is the init section that may be freed after init
 * [_stext, _etext] is the text section
 * [_sdata, _edata] is the data section
 *
 * Some of the included output sections have their own set of constants.
 * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
 *               [__nosave_begin, __nosave_end] for the nosave data
 */

#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef SYMBOL_PREFIX
#define VMLINUX_SYMBOL(sym) sym
#else
#define PASTE2(x,y) x##y
#define PASTE(x,y) PASTE2(x,y)
#define VMLINUX_SYMBOL(sym) PASTE(SYMBOL_PREFIX, sym)
#endif
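
/*
 * Illustrative sketch (not from any particular architecture): a port
 * whose toolchain prefixes C symbols with an underscore could arrange
 * for SYMBOL_PREFIX to be defined as _ before this header is included,
 * so that e.g.
 *
 *	VMLINUX_SYMBOL(__initcall_start) = .;
 *
 * expands to "___initcall_start = .;" in the generated linker script,
 * matching the names the compiler actually emits.
 */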

/* Align . to an 8 byte boundary; this equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/*
 * Align to a 32 byte boundary, equal to the
 * alignment gcc 4.5 uses for a struct
 */
#define STRUCT_ALIGNMENT 32
#define STRUCT_ALIGN() . = ALIGN(STRUCT_ALIGNMENT)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime)
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
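
/*
 * How the pairs above combine, using DEV_* as the example: with
 * CONFIG_HOTPLUG=y, DEV_KEEP(init.data) expands to *(.devinit.data), so
 * DATA_DATA below keeps .devinit.data in the permanent .data section,
 * while DEV_DISCARD(init.data) expands to nothing.  Without
 * CONFIG_HOTPLUG the roles swap: DEV_KEEP() is empty and
 * DEV_DISCARD(init.data), used from INIT_DATA below, places
 * .devinit.data with the other init data so it can be freed after init.
 */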

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
#define MCOUNT_REC()	. = ALIGN(8);				\
			VMLINUX_SYMBOL(__start_mcount_loc) = .; \
			*(__mcount_loc)				\
			VMLINUX_SYMBOL(__stop_mcount_loc) = .;
#else
#define MCOUNT_REC()
#endif

#ifdef CONFIG_TRACE_BRANCH_PROFILING
#define LIKELY_PROFILE()	VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
				*(_ftrace_annotated_branch)			      \
				VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
#else
#define LIKELY_PROFILE()
#endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
#define BRANCH_PROFILE()	VMLINUX_SYMBOL(__start_branch_profile) = .;   \
				*(_ftrace_branch)			      \
				VMLINUX_SYMBOL(__stop_branch_profile) = .;
#else
#define BRANCH_PROFILE()
#endif

#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS()	. = ALIGN(8);					\
			VMLINUX_SYMBOL(__start_ftrace_events) = .;	\
			*(_ftrace_events)				\
			VMLINUX_SYMBOL(__stop_ftrace_events) = .;
#else
#define FTRACE_EVENTS()
#endif

#ifdef CONFIG_TRACING
#define TRACE_PRINTKS() VMLINUX_SYMBOL(__start___trace_bprintk_fmt) = .;      \
			 *(__trace_printk_fmt) /* trace_printk fmt pointers */ \
			 VMLINUX_SYMBOL(__stop___trace_bprintk_fmt) = .;
#else
#define TRACE_PRINTKS()
#endif

#ifdef CONFIG_FTRACE_SYSCALLS
#define TRACE_SYSCALLS() . = ALIGN(8);					\
			 VMLINUX_SYMBOL(__start_syscalls_metadata) = .;	\
			 *(__syscalls_metadata)				\
			 VMLINUX_SYMBOL(__stop_syscalls_metadata) = .;
#else
#define TRACE_SYSCALLS()
#endif


#define KERNEL_DTB()							\
	STRUCT_ALIGN();							\
	VMLINUX_SYMBOL(__dtb_start) = .;				\
	*(.dtb.init.rodata)						\
	VMLINUX_SYMBOL(__dtb_end) = .;

/* .data section */
#define DATA_DATA							\
	*(.data)							\
	*(.ref.data)							\
	*(.data..shared_aligned) /* percpu related */			\
	DEV_KEEP(init.data)						\
	DEV_KEEP(exit.data)						\
	CPU_KEEP(init.data)						\
	CPU_KEEP(exit.data)						\
	MEM_KEEP(init.data)						\
	MEM_KEEP(exit.data)						\
	STRUCT_ALIGN();							\
	*(__tracepoints)						\
	/* implement dynamic printk debug */				\
	. = ALIGN(8);							\
	VMLINUX_SYMBOL(__start___verbose) = .;				\
	*(__verbose)							\
	VMLINUX_SYMBOL(__stop___verbose) = .;				\
	LIKELY_PROFILE()						\
	BRANCH_PROFILE()						\
	TRACE_PRINTKS()

/*
 * Data section helpers
 */
#define NOSAVE_DATA							\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_begin) = .;				\
	*(.data..nosave)						\
	. = ALIGN(PAGE_SIZE);						\
	VMLINUX_SYMBOL(__nosave_end) = .;

#define PAGE_ALIGNED_DATA(page_align)					\
	. = ALIGN(page_align);						\
	*(.data..page_aligned)

#define READ_MOSTLY_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..read_mostly)						\
	. = ALIGN(align);

#define CACHELINE_ALIGNED_DATA(align)					\
	. = ALIGN(align);						\
	*(.data..cacheline_aligned)

#define INIT_TASK_DATA(align)						\
	. = ALIGN(align);						\
	*(.data..init_task)

/*
 * Read-only data
 */
#define RO_DATA_SECTION(align)						\
	. = ALIGN((align));						\
	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rodata) = .;			\
		*(.rodata) *(.rodata.*)					\
		*(__vermagic)		/* Kernel version magic */	\
		. = ALIGN(8);						\
		VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .;		\
		*(__tracepoints_ptrs)	/* Tracepoints: pointer array */\
		VMLINUX_SYMBOL(__stop___tracepoints_ptrs) = .;		\
		*(__markers_strings)	/* Markers: strings */		\
		*(__tracepoints_strings)/* Tracepoints: strings */	\
	}								\
									\
	.rodata1          : AT(ADDR(.rodata1) - LOAD_OFFSET) {		\
		*(.rodata1)						\
	}								\
									\
	BUG_TABLE							\
									\
	JUMP_TABLE							\
									\
	/* PCI quirks */						\
	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;		\
		*(.pci_fixup_early)					\
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .;		\
		*(.pci_fixup_header)					\
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .;		\
		*(.pci_fixup_final)					\
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .;		\
		*(.pci_fixup_enable)					\
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .;		\
		*(.pci_fixup_resume)					\
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .;		\
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .;	\
		*(.pci_fixup_resume_early)				\
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .;	\
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .;		\
		*(.pci_fixup_suspend)					\
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .;		\
	}								\
									\
	/* Built-in firmware blobs */					\
	.builtin_fw        : AT(ADDR(.builtin_fw) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start_builtin_fw) = .;			\
		*(.builtin_fw)						\
		VMLINUX_SYMBOL(__end_builtin_fw) = .;			\
	}								\
									\
	/* RapidIO route ops */						\
	.rio_ops        : AT(ADDR(.rio_ops) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start_rio_switch_ops) = .;		\
		*(.rio_switch_ops)					\
		VMLINUX_SYMBOL(__end_rio_switch_ops) = .;		\
	}								\
									\
	TRACEDATA							\
									\
	/* Kernel symbol table: Normal symbols */			\
	__ksymtab         : AT(ADDR(__ksymtab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ksymtab) = .;			\
		*(__ksymtab)						\
		VMLINUX_SYMBOL(__stop___ksymtab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__ksymtab_gpl     : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .;		\
		*(__ksymtab_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__ksymtab_unused  : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .;		\
		*(__ksymtab_unused)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .;	\
		*(__ksymtab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .;	\
		*(__ksymtab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: Normal symbols */			\
	__kcrctab         : AT(ADDR(__kcrctab) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___kcrctab) = .;			\
		*(__kcrctab)						\
		VMLINUX_SYMBOL(__stop___kcrctab) = .;			\
	}								\
									\
	/* Kernel symbol table: GPL-only symbols */			\
	__kcrctab_gpl     : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .;		\
		*(__kcrctab_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .;		\
	}								\
									\
	/* Kernel symbol table: Normal unused symbols */		\
	__kcrctab_unused  : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .;		\
		*(__kcrctab_unused)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .;		\
	}								\
									\
	/* Kernel symbol table: GPL-only unused symbols */		\
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .;	\
		*(__kcrctab_unused_gpl)					\
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .;	\
	}								\
									\
	/* Kernel symbol table: GPL-future-only symbols */		\
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .;	\
		*(__kcrctab_gpl_future)					\
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .;	\
	}								\
									\
	/* Kernel symbol table: strings */				\
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) {	\
		*(__ksymtab_strings)					\
	}								\
									\
	/* __*init sections */						\
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) {		\
		*(.ref.rodata)						\
		DEV_KEEP(init.rodata)					\
		DEV_KEEP(exit.rodata)					\
		CPU_KEEP(init.rodata)					\
		CPU_KEEP(exit.rodata)					\
		MEM_KEEP(init.rodata)					\
		MEM_KEEP(exit.rodata)					\
	}								\
									\
	/* Built-in module parameters. */				\
	__param : AT(ADDR(__param) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___param) = .;			\
		*(__param)						\
		VMLINUX_SYMBOL(__stop___param) = .;			\
	}								\
									\
	/* Built-in module versions. */					\
	__modver : AT(ADDR(__modver) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start___modver) = .;			\
		*(__modver)						\
		VMLINUX_SYMBOL(__stop___modver) = .;			\
		. = ALIGN((align));					\
		VMLINUX_SYMBOL(__end_rodata) = .;			\
	}								\
	. = ALIGN((align));

/* RODATA & RO_DATA are provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA          RO_DATA_SECTION(4096)
#define RO_DATA(align)  RO_DATA_SECTION(align)

#define SECURITY_INIT							\
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;		\
	}

/* .text section. Map to function alignment to avoid address changes
 * during the second ld pass when generating System.map */
#define TEXT_TEXT							\
		ALIGN_FUNCTION();					\
		*(.text.hot)						\
		*(.text)						\
		*(.ref.text)						\
	DEV_KEEP(init.text)						\
	DEV_KEEP(exit.text)						\
	CPU_KEEP(init.text)						\
	CPU_KEEP(exit.text)						\
	MEM_KEEP(init.text)						\
	MEM_KEEP(exit.text)						\
		*(.text.unlikely)


/* sched.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__sched_text_start) = .;			\
		*(.sched.text)						\
		VMLINUX_SYMBOL(__sched_text_end) = .;

/* spinlock.text is aligned to function alignment to ensure we get the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__lock_text_start) = .;			\
		*(.spinlock.text)					\
		VMLINUX_SYMBOL(__lock_text_end) = .;

#define KPROBES_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__kprobes_text_start) = .;		\
		*(.kprobes.text)					\
		VMLINUX_SYMBOL(__kprobes_text_end) = .;

#define ENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__entry_text_start) = .;			\
		*(.entry.text)						\
		VMLINUX_SYMBOL(__entry_text_end) = .;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define IRQENTRY_TEXT							\
		ALIGN_FUNCTION();					\
		VMLINUX_SYMBOL(__irqentry_text_start) = .;		\
		*(.irqentry.text)					\
		VMLINUX_SYMBOL(__irqentry_text_end) = .;
#else
#define IRQENTRY_TEXT
#endif

/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

#define HEAD_TEXT_SECTION						\
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {		\
		HEAD_TEXT						\
	}

/*
 * Exception table
 */
#define EXCEPTION_TABLE(align)						\
	. = ALIGN(align);						\
	__ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___ex_table) = .;			\
		*(__ex_table)						\
		VMLINUX_SYMBOL(__stop___ex_table) = .;			\
	}

/*
 * Init task
 */
#define INIT_TASK_DATA_SECTION(align)					\
	. = ALIGN(align);						\
	.data..init_task :  AT(ADDR(.data..init_task) - LOAD_OFFSET) {	\
		INIT_TASK_DATA(align)					\
	}

#ifdef CONFIG_CONSTRUCTORS
#define KERNEL_CTORS()	. = ALIGN(8);			   \
			VMLINUX_SYMBOL(__ctors_start) = .; \
			*(.ctors)			   \
			VMLINUX_SYMBOL(__ctors_end) = .;
#else
#define KERNEL_CTORS()
#endif

/* init and exit section handling */
#define INIT_DATA							\
	*(.init.data)							\
	DEV_DISCARD(init.data)						\
	CPU_DISCARD(init.data)						\
	MEM_DISCARD(init.data)						\
	KERNEL_CTORS()							\
	*(.init.rodata)							\
	MCOUNT_REC()							\
	FTRACE_EVENTS()							\
	TRACE_SYSCALLS()						\
	DEV_DISCARD(init.rodata)					\
	CPU_DISCARD(init.rodata)					\
	MEM_DISCARD(init.rodata)					\
	KERNEL_DTB()

#define INIT_TEXT							\
	*(.init.text)							\
	DEV_DISCARD(init.text)						\
	CPU_DISCARD(init.text)						\
	MEM_DISCARD(init.text)

#define EXIT_DATA							\
	*(.exit.data)							\
	DEV_DISCARD(exit.data)						\
	DEV_DISCARD(exit.rodata)					\
	CPU_DISCARD(exit.data)						\
	CPU_DISCARD(exit.rodata)					\
	MEM_DISCARD(exit.data)						\
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT							\
	*(.exit.text)							\
	DEV_DISCARD(exit.text)						\
	CPU_DISCARD(exit.text)						\
	MEM_DISCARD(exit.text)

#define EXIT_CALL							\
	*(.exitcall.exit)

/*
 * bss (Block Started by Symbol) - uninitialized data
 * zeroed during startup
 */
#define SBSS(sbss_align)						\
	. = ALIGN(sbss_align);						\
	.sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {				\
		*(.sbss)						\
		*(.scommon)						\
	}

#define BSS(bss_align)							\
	. = ALIGN(bss_align);						\
	.bss : AT(ADDR(.bss) - LOAD_OFFSET) {				\
		*(.bss..page_aligned)					\
		*(.dynbss)						\
		*(.bss)							\
		*(COMMON)						\
	}

/*
 * DWARF debug sections.
 * Symbols in the DWARF debugging sections are relative to
 * the beginning of the section so we begin them at 0.
 */
#define DWARF_DEBUG							\
		/* DWARF 1 */						\
		.debug          0 : { *(.debug) }			\
		.line           0 : { *(.line) }			\
		/* GNU DWARF 1 extensions */				\
		.debug_srcinfo  0 : { *(.debug_srcinfo) }		\
		.debug_sfnames  0 : { *(.debug_sfnames) }		\
		/* DWARF 1.1 and DWARF 2 */				\
		.debug_aranges  0 : { *(.debug_aranges) }		\
		.debug_pubnames 0 : { *(.debug_pubnames) }		\
		/* DWARF 2 */						\
		.debug_info     0 : { *(.debug_info			\
				.gnu.linkonce.wi.*) }			\
		.debug_abbrev   0 : { *(.debug_abbrev) }		\
		.debug_line     0 : { *(.debug_line) }			\
		.debug_frame    0 : { *(.debug_frame) }			\
		.debug_str      0 : { *(.debug_str) }			\
		.debug_loc      0 : { *(.debug_loc) }			\
		.debug_macinfo  0 : { *(.debug_macinfo) }		\
		/* SGI/MIPS DWARF 2 extensions */			\
		.debug_weaknames 0 : { *(.debug_weaknames) }		\
		.debug_funcnames 0 : { *(.debug_funcnames) }		\
		.debug_typenames 0 : { *(.debug_typenames) }		\
		.debug_varnames  0 : { *(.debug_varnames) }		\

		/* Stabs debugging sections.  */
#define STABS_DEBUG							\
		.stab 0 : { *(.stab) }					\
		.stabstr 0 : { *(.stabstr) }				\
		.stab.excl 0 : { *(.stab.excl) }			\
		.stab.exclstr 0 : { *(.stab.exclstr) }			\
		.stab.index 0 : { *(.stab.index) }			\
		.stab.indexstr 0 : { *(.stab.indexstr) }		\
		.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE							\
	. = ALIGN(8);							\
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___bug_table) = .;		\
		*(__bug_table)						\
		VMLINUX_SYMBOL(__stop___bug_table) = .;			\
	}
#else
#define BUG_TABLE
#endif

#define JUMP_TABLE							\
	. = ALIGN(8);							\
	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__start___jump_table) = .;		\
		*(__jump_table)						\
		VMLINUX_SYMBOL(__stop___jump_table) = .;		\
	}

#ifdef CONFIG_PM_TRACE
#define TRACEDATA							\
	. = ALIGN(4);							\
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(__tracedata_start) = .;			\
		*(.tracedata)						\
		VMLINUX_SYMBOL(__tracedata_end) = .;			\
	}
#else
#define TRACEDATA
#endif

#define NOTES								\
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__start_notes) = .;			\
		*(.note.*)						\
		VMLINUX_SYMBOL(__stop_notes) = .;			\
	}

#define INIT_SETUP(initsetup_align)					\
		. = ALIGN(initsetup_align);				\
		VMLINUX_SYMBOL(__setup_start) = .;			\
		*(.init.setup)						\
		VMLINUX_SYMBOL(__setup_end) = .;

#define INITCALLS							\
	*(.initcallearly.init)						\
	VMLINUX_SYMBOL(__early_initcall_end) = .;			\
	*(.initcall0.init)						\
	*(.initcall0s.init)						\
	*(.initcall1.init)						\
	*(.initcall1s.init)						\
	*(.initcall2.init)						\
	*(.initcall2s.init)						\
	*(.initcall3.init)						\
	*(.initcall3s.init)						\
	*(.initcall4.init)						\
	*(.initcall4s.init)						\
	*(.initcall5.init)						\
	*(.initcall5s.init)						\
	*(.initcallrootfs.init)						\
	*(.initcall6.init)						\
	*(.initcall6s.init)						\
	*(.initcall7.init)						\
	*(.initcall7s.init)

#define INIT_CALLS							\
		VMLINUX_SYMBOL(__initcall_start) = .;			\
		INITCALLS						\
		VMLINUX_SYMBOL(__initcall_end) = .;

#define CON_INITCALL							\
		VMLINUX_SYMBOL(__con_initcall_start) = .;		\
		*(.con_initcall.init)					\
		VMLINUX_SYMBOL(__con_initcall_end) = .;

#define SECURITY_INITCALL						\
		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
		*(.security_initcall.init)				\
		VMLINUX_SYMBOL(__security_initcall_end) = .;

#ifdef CONFIG_BLK_DEV_INITRD
#define INIT_RAM_FS							\
	. = ALIGN(4);							\
	VMLINUX_SYMBOL(__initramfs_start) = .;				\
	*(.init.ramfs)							\
	. = ALIGN(8);							\
	*(.init.ramfs.info)
#else
#define INIT_RAM_FS
#endif

/*
 * Default discarded sections.
 *
 * Some archs want to discard exit text/data at runtime rather than
 * link time due to cross-section references such as alt instructions,
 * bug table, eh_frame, etc.  DISCARDS must be the last of the output
 * section definitions so that such archs can put those sections in
 * earlier output section definitions.
 */
#define DISCARDS							\
	/DISCARD/ : {							\
	EXIT_TEXT							\
	EXIT_DATA							\
	EXIT_CALL							\
	*(.discard)							\
	*(.discard.*)							\
	}
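
/*
 * Sketch of the above (illustrative, not mandated by this header): an
 * architecture that frees .exit.text at runtime would emit EXIT_TEXT
 * into an output section of its own, earlier in its linker script, e.g.
 *
 *	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
 *		EXIT_TEXT
 *	}
 *
 * Because input sections are matched first come first served and
 * DISCARDS comes last, the /DISCARD/ rule then no longer claims those
 * sections and the exit code stays in the image.
 */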

/**
 * PERCPU_VADDR - define output section for percpu area
 * @cacheline: cacheline size
 * @vaddr: explicit base address (optional)
 * @phdr: destination PHDR (optional)
 *
 * Macro which expands to the output section for the percpu area.
 *
 * @cacheline is used to align subsections to avoid false cacheline
 * sharing between subsections for different purposes.
 *
 * If @vaddr is not blank, it specifies an explicit base address and all
 * percpu symbols will be offset from the given address.  If blank,
 * @vaddr always equals @laddr + LOAD_OFFSET.
 *
 * @phdr defines the output PHDR to use if not blank.  Be warned that
 * output PHDR is sticky.  If @phdr is specified, the next output
 * section in the linker script will go there too.  @phdr should have
 * a leading colon.
 *
 * Note that this macro defines __per_cpu_load as an absolute symbol.
 * If there is no need to put the percpu section at a predetermined
 * address, use PERCPU().
 */
#define PERCPU_VADDR(cacheline, vaddr, phdr)				\
	VMLINUX_SYMBOL(__per_cpu_load) = .;				\
	.data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load)		\
				- LOAD_OFFSET) {			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu..readmostly)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	} phdr								\
	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
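
/*
 * Hedged usage sketch (the values are illustrative, not mandated by
 * this header): an architecture wanting zero-based percpu symbols in a
 * dedicated program header could write
 *
 *	PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
 *
 * after declaring a "percpu" PT_LOAD entry in its PHDRS command.  As
 * noted above, the PHDR is sticky, so the next output section typically
 * switches back with an explicit :text (or similar).
 */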

/**
 * PERCPU - define output section for percpu area, simple version
 * @cacheline: cacheline size
 * @align: required alignment
 *
 * Aligns to @align and outputs the output section for the percpu area.
 * This macro doesn't manipulate @vaddr or @phdr, and __per_cpu_load and
 * __per_cpu_start will be identical.
 *
 * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,)
 * except that __per_cpu_load is defined as a relative symbol against
 * .data..percpu, which is required for relocatable x86_32 configurations.
 */
#define PERCPU(cacheline, align)					\
	. = ALIGN(align);						\
	.data..percpu	: AT(ADDR(.data..percpu) - LOAD_OFFSET) {	\
		VMLINUX_SYMBOL(__per_cpu_load) = .;			\
		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
		*(.data..percpu..first)					\
		. = ALIGN(PAGE_SIZE);					\
		*(.data..percpu..page_aligned)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu..readmostly)				\
		. = ALIGN(cacheline);					\
		*(.data..percpu)					\
		*(.data..percpu..shared_aligned)			\
		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
	}
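
/*
 * Hedged usage sketch: an architecture with no special percpu base
 * address requirement would typically place something like
 *
 *	PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 *
 * between __init_begin and __init_end in its linker script (compare the
 * sample script at the top of this file).
 */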


/*
 * Definition of the high level *_SECTION macros.
 * They will fit only a subset of the architectures.
 */


/*
 * Writable data.
 * All sections are combined in a single .data section.
 * The sections following CONSTRUCTORS are arranged so their
 * typical alignment matches.
 * A cacheline is typically, if not always, smaller than a PAGE_SIZE, so
 * the sections that have this restriction (or a similar one)
 * are located before the ones requiring PAGE_SIZE alignment.
 * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
 * matches the requirement of PAGE_ALIGNED_DATA.
 *
 * Use 0 as pagealigned if page-aligned data is not used */
#define RW_DATA_SECTION(cacheline, pagealigned, inittask)		\
	. = ALIGN(PAGE_SIZE);						\
	.data : AT(ADDR(.data) - LOAD_OFFSET) {				\
		INIT_TASK_DATA(inittask)				\
		NOSAVE_DATA						\
		PAGE_ALIGNED_DATA(pagealigned)				\
		CACHELINE_ALIGNED_DATA(cacheline)			\
		READ_MOSTLY_DATA(cacheline)				\
		DATA_DATA						\
		CONSTRUCTORS						\
	}
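
/*
 * Hedged usage sketch (parameter values are illustrative): a typical
 * invocation in an architecture linker script is
 *
 *	RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 *
 * i.e. cacheline-aligned and read-mostly data aligned to the cache
 * line, page-aligned data aligned to a page, and the init task aligned
 * to THREAD_SIZE.  Pass 0 for any alignment whose section is unused.
 */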

#define INIT_TEXT_SECTION(inittext_align)				\
	. = ALIGN(inittext_align);					\
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {		\
		VMLINUX_SYMBOL(_sinittext) = .;				\
		INIT_TEXT						\
		VMLINUX_SYMBOL(_einittext) = .;				\
	}

#define INIT_DATA_SECTION(initsetup_align)				\
	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {		\
		INIT_DATA						\
		INIT_SETUP(initsetup_align)				\
		INIT_CALLS						\
		CON_INITCALL						\
		SECURITY_INITCALL					\
		INIT_RAM_FS						\
	}

#define BSS_SECTION(sbss_align, bss_align, stop_align)			\
	. = ALIGN(sbss_align);						\
	VMLINUX_SYMBOL(__bss_start) = .;				\
	SBSS(sbss_align)						\
	BSS(bss_align)							\
	. = ALIGN(stop_align);						\
	VMLINUX_SYMBOL(__bss_stop) = .;
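
/*
 * Hedged usage sketch: the simplest callers request no extra alignment,
 * as in the sample script at the top of this file:
 *
 *	BSS_SECTION(0, 0, 0)
 *
 * An architecture needing a page-aligned bss would instead pass
 * PAGE_SIZE for bss_align and stop_align.
 */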