/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)
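/*
 * KERNEL_START is defined below as _text, so __PHYS_OFFSET is the physical
 * base of RAM as the kernel sees it: the load address of _text minus the
 * TEXT_OFFSET the image was linked with.
 */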
#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif
#define KERNEL_START	_text
#define KERNEL_END	_end
/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so it must be called at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
/*
 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
 */
_head:
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
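	/*
	 * (That instruction is "add x13, x18, #0x16": its little-endian
	 * encoding is 0x91005a4d, whose first two bytes 0x4d 0x5a read as
	 * the ASCII characters "MZ".)
	 */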
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.globl	__efistub_stext_offset
	.set	__efistub_stext_offset, stext - _head
	.align	3
pe_header:
	.ascii	"PE"
	.short	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - stext			// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	__efistub_stext_offset		// BaseOfCode
extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	__efistub_stext_offset		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes
	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificationTable
	.quad	0				// BaseRelocationTable
	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable. This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0			// VirtualSize
	.long	0			// VirtualAddress
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)

	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	_end - stext		// VirtualSize
	.long	__efistub_stext_offset	// VirtualAddress
	.long	_edata - stext		// SizeOfRawData
	.long	__efistub_stext_offset	// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations (0 for executables)
	.short	0		// NumberOfLineNumbers (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)
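	/*
	 * 0xe0500020 appears to decode as IMAGE_SCN_CNT_CODE (0x00000020) |
	 * IMAGE_SCN_ALIGN_16BYTES (0x00500000) | IMAGE_SCN_MEM_EXECUTE |
	 * IMAGE_SCN_MEM_READ | IMAGE_SCN_MEM_WRITE (0xe0000000).
	 */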
#endif

	/*
	 * EFI will load stext onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that stext is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align	12
ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	mov	x23, xzr			// KASLR offset, defaults to 0
	adrp	x24, __PHYS_OFFSET
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	ldr	x27, 0f				// address to jump to after
						// MMU has been enabled
	adr_l	lr, __enable_mmu		// return (PIC) address
	b	__cpu_setup			// initialise processor
ENDPROC(stext)
	.align	3
0:	.quad	__mmap_switched - (_head - TEXT_OFFSET) + KIMAGE_VADDR
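	/*
	 * The literal above should yield the *virtual* address of
	 * __mmap_switched: its link-time offset from the start of RAM
	 * (_head - TEXT_OFFSET) rebased onto KIMAGE_VADDR. Any KASLR
	 * displacement is carried separately in x23.
	 */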
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)
/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
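/*
 * Rough C equivalent of the macro above (a sketch, not from the source,
 * with tbl treated as a byte address):
 *
 *	idx      = (virt >> shift) & (ptrs - 1);
 *	tbl[idx] = (tbl + PAGE_SIZE) | PMD_TYPE_TABLE;	// link to next level
 *	tbl     += PAGE_SIZE;				// descend
 */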
/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm
/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE	// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
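/*
 * Note: the b.ls loop condition makes the start..end range inclusive, as
 * documented above; each iteration stores one 8-byte block descriptor,
 * then advances the table index by one and the physical address by
 * SWAPPER_BLOCK_SIZE.
 */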
/*
 * Set up the initial page tables. We only set up the barest amount which is
 * required to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x28, lr
	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range
	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b
	ldr	x7, =SWAPPER_MM_MMUFLAGS
	/*
	 * Create the identity mapping.
	 */
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)
#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 *
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
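	/*
	 * Worked example: with 4KB pages and VA_BITS == 39, PGDIR_SHIFT is 30,
	 * so EXTRA_SHIFT == 30 + 12 - 3 == 39 == VA_BITS; the extra level then
	 * resolves VA bits [47:39] using EXTRA_PTRS == 1 << 9 == 512 entries.
	 */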
	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif
	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6
	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov	x0, x26				// swapper_pg_dir
	ldr	x5, =KIMAGE_VADDR
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	ldr	w6, kernel_img_size
	add	x6, x6, x5			// __va(_end)
	mov	x3, x24				// phys offset
	create_block_map x0, x7, x3, x5, x6
	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	ret	x28
ENDPROC(__create_page_tables)
kernel_img_size:
	.long	_end - (_head - TEXT_OFFSET)
/*
 * The following fragment of code is executed with the MMU enabled.
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__mmap_switched:
	mov	x28, lr				// preserve LR
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW
#ifdef CONFIG_RELOCATABLE

	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	adr_l	x8, __dynsym_start		// start of symbol table
	adr_l	x9, __reloc_start		// start of reloc table
	adr_l	x10, __reloc_end		// end of reloc table

0:	cmp	x9, x10
	b.hs	2f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	1f
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b

1:	cmp	w12, #R_AARCH64_ABS64
	b.ne	0b
	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
	add	x14, x15, x23			// relocate
	csel	x15, x14, x15, ne
	add	x15, x13, x15
	str	x15, [x11, x23]
	b	0b
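	/*
	 * For reference: each Elf64_Rela entry is 24 bytes,
	 * { r_offset, r_info, r_addend }, which is why the loop loads two
	 * words with a post-indexed "ldp ..., #24" and the addend from
	 * [x9, #-8]. For R_AARCH64_RELATIVE the patched value is simply
	 * the addend plus the KASLR displacement in x23.
	 */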
2:	adr_l	x8, kimage_vaddr		// make relocated kimage_vaddr
	dc	cvac, x8			// value visible to secondaries
	dsb	sy				// with MMU off
#endif
	adr_l	sp, initial_sp, x4
	mov	x4, sp
	and	x4, x4, #~(THREAD_SIZE - 1)
	msr	sp_el0, x4			// Save thread_info
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x24			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings
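	/*
	 * kimage_voffset is the (possibly randomized) kernel-image virtual
	 * address minus the physical load address; later kernel code can use
	 * it to convert between image virtual and physical addresses.
	 */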
#ifdef CONFIG_RANDOMIZE_BASE
	cbnz	x23, 0f				// already running randomized?
	mov	x0, x21				// pass FDT address in x0
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	mov	x23, x0				// record KASLR offset
	ret	x28				// we must enable KASLR, return
						// to __enable_mmu()
0:
#endif
	b	start_kernel
ENDPROC(__mmap_switched)
/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"

ENTRY(kimage_vaddr)
	.quad	_text - TEXT_OFFSET
/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f

1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret
	/* Hyp configuration. */
2:	mov	x0, #(1 << 31)			// 64-bit EL1
	msr	hcr_el2, x0

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif
	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0
	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif
	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	msr	mdcr_el2, x0			// all PMU counters from EL1
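	/*
	 * The ubfx above should extract PMCR_EL0.N (bits [15:11], the number
	 * of event counters) into MDCR_EL2.HPMN (bits [4:0]), giving EL1
	 * access to all counters while the MDCR_EL2 trap bits stay clear.
	 */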
4:
	/* Stage-2 translation */
	msr	vttbr_el2, xzr
	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0
	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)
/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in x20. See arch/arm64/include/asm/virt.h for more info.
 */
ENTRY(set_cpu_boot_mode_flag)
	adr_l	x1, __boot_cpu_mode
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// This CPU has booted in EL1
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)
/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
	.popsection
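/*
 * Two slots: set_cpu_boot_mode_flag stores BOOT_CPU_MODE_EL2 into the second
 * word for CPUs entering at EL2 and BOOT_CPU_MODE_EL1 into the first for CPUs
 * entering at EL1, so a mixed-EL boot leaves the pair inconsistent and can be
 * detected later (see asm/virt.h).
 */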
/*
 * This provides a "holding pen" in which all secondary cores are held
 * until we're ready for them to initialise.
 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	ldr	x1, =MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)
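/*
 * Release protocol (as used by the spin-table boot method): the boot CPU
 * writes the target CPU's MPIDR hwid into secondary_holding_pen_release and
 * issues an event; each penned CPU wakes from wfe, re-reads the release
 * value, and leaves the pen only when it matches its own hwid.
 */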
/*
 * Secondary entry point that jumps straight into the kernel. Only to
 * be used where CPUs are brought online dynamically by the kernel.
 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 */
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	ldr	x8, kimage_vaddr
	ldr	w9, 0f
	sub	x27, x8, w9, sxtw		// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)
0:	.long	(_text - TEXT_OFFSET) - __secondary_switched
ENTRY(__secondary_switched)
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	ldr_l	x0, secondary_data		// get secondary_data.stack
	mov	sp, x0
	and	x0, x0, #~(THREAD_SIZE - 1)
	msr	sp_el0, x0			// save thread_info
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)
/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x27 = *virtual* address to jump to upon completion
 *
 * Other registers depend on the function called upon completion.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section	".idmap.text", "ax"
ENTRY(__enable_mmu)
	mrs	x18, sctlr_el1			// preserve old SCTLR_EL1 value
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	blr	x27

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x18			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb
	add	x27, x27, x23			// relocated __mmap_switched
#endif
	br	x27
ENDPROC(__enable_mmu)
__no_granule_support:
	wfe
	b	__no_granule_support
ENDPROC(__no_granule_support)