/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


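/*
 * "fpe=" on the command line selects the floating point emulator
 * flavour; the first 8 bytes of the argument are stashed in fpe_type[]
 * for the NWFPE/FastFPE emulator code to examine.
 */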
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
        memcpy(fpe_type, line, 8);
        return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(struct machine_desc *desc);
extern void sanity_check_meminfo(void);
extern void reboot_setup(char *str);
extern void setup_dma_zone(struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
        u32 irq[3];
        u32 abt[3];
        u32 und[3];
} ____cacheline_aligned;

static struct stack stacks[NR_CPUS];

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
struct machine_desc *machine_desc __initdata;

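/*
 * Run-time endianness probe: the union overlays a 4-byte char array on an
 * unsigned long, so the low-order byte read back via (char)endian_test.l
 * is 'l' on a little-endian CPU and 'b' on a big-endian one.  The
 * character is appended to the machine and ELF platform names below.
 */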
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
        {
                .name = "Video RAM",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        },
        {
                .name = "Kernel data",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
        }
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
        {
                .name = "reserved",
                .start = 0x3bc,
                .end = 0x3be,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x378,
                .end = 0x37f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        },
        {
                .name = "reserved",
                .start = 0x278,
                .end = 0x27f,
                .flags = IORESOURCE_IO | IORESOURCE_BUSY
        }
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
        "undefined/unknown",
        "3",
        "4",
        "4T",
        "5",
        "5T",
        "5TE",
        "5TEJ",
        "6TEJ",
        "7",
        "?(11)",
        "?(12)",
        "?(13)",
        "?(14)",
        "?(15)",
        "?(16)",
        "?(17)",
};

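/*
 * Decode the architecture version from the main ID register: pre-CPUID
 * cores are unknown, ARM7-style IDs use the 'T' bit to tell v3 from v4T,
 * older CPUID layouts carry the version in bits [19:16], and an
 * architecture field of 0xf means the revised CPUID scheme, where MMFR0
 * must be consulted (VMSAv7/PMSAv7 => v7, VMSAv6/PMSAv6 => v6).
 */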
static int __get_cpu_architecture(void)
{
        int cpu_arch;

        if ((read_cpuid_id() & 0x0008f000) == 0) {
                cpu_arch = CPU_ARCH_UNKNOWN;
        } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
                cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
                cpu_arch = (read_cpuid_id() >> 16) & 7;
                if (cpu_arch)
                        cpu_arch += CPU_ARCH_ARMv3;
        } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
                unsigned int mmfr0;

                /* Revised CPUID format. Read the Memory Model Feature
                 * Register 0 and check for VMSAv7 or PMSAv7 */
                asm("mrc        p15, 0, %0, c0, c1, 4"
                    : "=r" (mmfr0));
                if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
                    (mmfr0 & 0x000000f0) >= 0x00000030)
                        cpu_arch = CPU_ARCH_ARMv7;
                else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
                         (mmfr0 & 0x000000f0) == 0x00000020)
                        cpu_arch = CPU_ARCH_ARMv6;
                else
                        cpu_arch = CPU_ARCH_UNKNOWN;
        } else
                cpu_arch = CPU_ARCH_UNKNOWN;

        return cpu_arch;
}

int __pure cpu_architecture(void)
{
        BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

        return __cpu_architecture;
}

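/*
 * A virtually-indexed I-cache can alias when one cache way spans more
 * than a page.  On ARMv7 the way size is computed from CCSIDR (after
 * selecting the L1 I-cache via CSSELR) as line size * number of sets; if
 * that exceeds PAGE_SIZE, the virtual index bits reach beyond the page
 * offset and aliases are possible.  ARMv6 encodes the equivalent hint
 * directly in the cache type register.
 */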
static int cpu_has_aliasing_icache(unsigned int arch)
{
        int aliasing_icache;
        unsigned int id_reg, num_sets, line_size;

        /* PIPT caches never alias. */
        if (icache_is_pipt())
                return 0;

        /* arch specifies the register format */
        switch (arch) {
        case CPU_ARCH_ARMv7:
                asm("mcr        p15, 2, %0, c0, c0, 0 @ set CSSELR"
                    : /* No output operands */
                    : "r" (1));
                isb();
                asm("mrc        p15, 1, %0, c0, c0, 0 @ read CCSIDR"
                    : "=r" (id_reg));
                line_size = 4 << ((id_reg & 0x7) + 2);
                num_sets = ((id_reg >> 13) & 0x7fff) + 1;
                aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
                break;
        case CPU_ARCH_ARMv6:
                aliasing_icache = read_cpuid_cachetype() & (1 << 11);
                break;
        default:
                /* I-cache aliases will be handled by D-cache aliasing code */
                aliasing_icache = 0;
        }

        return aliasing_icache;
}

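/*
 * Classify the cache geometry for the cache maintenance code.  Bits
 * [31:29] of the cache type register distinguish the ARMv7 CTR layout
 * (0x4) from the older ARMv6 one; on v7 the L1Ip field (bits [15:14])
 * identifies ASID-tagged VIVT (01) and PIPT (11) instruction caches,
 * while on v6 the Dsize 'P' bit (bit 23) flags a data cache that can
 * alias.
 */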
static void __init cacheid_init(void)
{
        unsigned int arch = cpu_architecture();

        if (arch >= CPU_ARCH_ARMv6) {
                unsigned int cachetype = read_cpuid_cachetype();
                if ((cachetype & (7 << 29)) == 4 << 29) {
                        /* ARMv7 register format */
                        arch = CPU_ARCH_ARMv7;
                        cacheid = CACHEID_VIPT_NONALIASING;
                        switch (cachetype & (3 << 14)) {
                        case (1 << 14):
                                cacheid |= CACHEID_ASID_TAGGED;
                                break;
                        case (3 << 14):
                                cacheid |= CACHEID_PIPT;
                                break;
                        }
                } else {
                        arch = CPU_ARCH_ARMv6;
                        if (cachetype & (1 << 23))
                                cacheid = CACHEID_VIPT_ALIASING;
                        else
                                cacheid = CACHEID_VIPT_NONALIASING;
                }
                if (cpu_has_aliasing_icache(arch))
                        cacheid |= CACHEID_VIPT_I_ALIASING;
        } else {
                cacheid = CACHEID_VIVT;
        }

        printk("CPU: %s data cache, %s instruction cache\n",
                cache_is_vivt() ? "VIVT" :
                cache_is_vipt_aliasing() ? "VIPT aliasing" :
                cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
                cache_is_vivt() ? "VIVT" :
                icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
                icache_is_vipt_aliasing() ? "VIPT aliasing" :
                icache_is_pipt() ? "PIPT" :
                cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
        extern void printascii(const char *);
        char buf[256];
        va_list ap;

        va_start(ap, str);
        vsnprintf(buf, sizeof(buf), str, ap);
        va_end(ap);

#ifdef CONFIG_DEBUG_LL
        printascii(buf);
#endif
        printk("%s", buf);
}

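/*
 * ISAR0[27:24] ("Divide_instrs") advertises hardware integer divide:
 * 1 means SDIV/UDIV in the Thumb instruction set only, 2 means in both
 * ARM and Thumb, so the switch below deliberately falls through from
 * the ARM case into the Thumb one.
 */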
static void __init cpuid_init_hwcaps(void)
{
        unsigned int divide_instrs;

        if (cpu_architecture() < CPU_ARCH_ARMv7)
                return;

        divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

        switch (divide_instrs) {
        case 2:
                elf_hwcap |= HWCAP_IDIVA;
                /* fall through */
        case 1:
                elf_hwcap |= HWCAP_IDIVT;
        }
}

static void __init feat_v6_fixup(void)
{
        int id = read_cpuid_id();

        if ((id & 0xff0f0000) != 0x41070000)
                return;

        /*
         * HWCAP_TLS is available only on 1136 r1p0 and later,
         * see also kuser_get_tls_init.
         */
        if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
                elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
        unsigned int cpu = smp_processor_id();
        struct stack *stk = &stacks[cpu];

        if (cpu >= NR_CPUS) {
                printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
                BUG();
        }

        /*
         * This only works on resume and secondary cores. For booting on the
         * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
         */
        set_my_cpu_offset(per_cpu_offset(cpu));

        cpu_proc_init();

        /*
         * Define the placement constraint for the inline asm directive below.
         * In Thumb-2, msr with an immediate value is not allowed.
         */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC     "r"
#else
#define PLC     "I"
#endif

        /*
         * Setup stacks for re-entrant exception handlers: switch into
         * IRQ, ABT and UND mode in turn (with interrupts masked), point
         * each mode's banked sp at its three-word stack in *stk, then
         * return to SVC mode.
         */
        __asm__ (
        "msr    cpsr_c, %1\n\t"
        "add    r14, %0, %2\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %3\n\t"
        "add    r14, %0, %4\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %5\n\t"
        "add    r14, %0, %6\n\t"
        "mov    sp, r14\n\t"
        "msr    cpsr_c, %7"
            :
            : "r" (stk),
              PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
              "I" (offsetof(struct stack, irq[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
              "I" (offsetof(struct stack, abt[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
              "I" (offsetof(struct stack, und[0])),
              PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
            : "r14");
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

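/*
 * Build the initial logical-to-physical CPU map: logical CPU 0 becomes
 * the booting CPU (affinity level 0 of its MPIDR), and the slot that
 * value would otherwise occupy is swapped with 0, keeping the map a
 * permutation of the physical IDs.
 */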
void __init smp_setup_processor_id(void)
{
        int i;
        u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
        u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        cpu_logical_map(0) = cpu;
        for (i = 1; i < nr_cpu_ids; ++i)
                cpu_logical_map(i) = i == cpu ? 0 : i;

        printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
}

static void __init setup_processor(void)
{
        struct proc_info_list *list;

        /*
         * locate processor in the list of supported processor
         * types.  The linker builds this table for us from the
         * entries in arch/arm/mm/proc-*.S
         */
        list = lookup_processor_type(read_cpuid_id());
        if (!list) {
                printk("CPU configuration botched (ID %08x), unable "
                       "to continue.\n", read_cpuid_id());
                while (1);
        }

        cpu_name = list->cpu_name;
        __cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
        processor = *list->proc;
#endif
#ifdef MULTI_TLB
        cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
        cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
        cpu_cache = *list->cache;
#endif

        printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
               cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
               proc_arch[cpu_architecture()], cr_alignment);

        snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
                 list->arch_name, ENDIANNESS);
        snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
                 list->elf_name, ENDIANNESS);
        elf_hwcap = list->elf_hwcap;

        cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
        elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

        feat_v6_fixup();

        cacheid_init();
        cpu_init();
}

void __init dump_machine_table(void)
{
        struct machine_desc *p;

        early_print("Available machine support:\n\nID (hex)\tNAME\n");
        for_each_machine_desc(p)
                early_print("%08x\t%s\n", p->nr, p->name);

        early_print("\nPlease check your kernel config and/or bootloader.\n");

        while (true)
                /* can't use cpu_relax() here as it may require MMU setup */;
}

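/*
 * Record one bank of RAM in meminfo.  The bank is trimmed to whole
 * pages, and when phys_addr_t is only 32 bits wide any memory that
 * starts beyond the 32-bit physical address space is rejected, and any
 * bank that runs past it is truncated, so that bank->start + bank->size
 * stays representable.
 */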
int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
{
        struct membank *bank = &meminfo.bank[meminfo.nr_banks];
        u64 aligned_start;

        if (meminfo.nr_banks >= NR_BANKS) {
                printk(KERN_CRIT "NR_BANKS too low, "
                        "ignoring memory at 0x%08llx\n", (long long)start);
                return -EINVAL;
        }

        /*
         * Ensure that start/size are aligned to a page boundary.
         * Size is appropriately rounded down, start is rounded up.
         */
        size -= start & ~PAGE_MASK;
        aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
        if (aligned_start > ULONG_MAX) {
                printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
                       "32-bit physical address space\n", (long long)start);
                return -EINVAL;
        }

        if (aligned_start + size > ULONG_MAX) {
                printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
                        "32-bit physical address space\n", (long long)start);
                /*
                 * To ensure bank->start + bank->size is representable in
                 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
                 * This means we lose a page after masking.
                 */
                size = ULONG_MAX - aligned_start;
        }
#endif

        bank->start = aligned_start;
        bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

        /*
         * Reject this bank if it ended up with zero size after the
         * page alignment above.
         */
        if (bank->size == 0)
                return -EINVAL;

        meminfo.nr_banks++;
        return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start, where start
 * and size are "size[KkMm]"; for example "mem=512M@0x20000000"
 * describes a 512MB bank starting at physical address 0x20000000.
 */
static int __init early_mem(char *p)
{
        static int usermem __initdata = 0;
        phys_addr_t size;
        phys_addr_t start;
        char *endp;

        /*
         * If the user specifies memory size, we
         * blow away any automatically generated
         * size.
         */
        if (usermem == 0) {
                usermem = 1;
                meminfo.nr_banks = 0;
        }

        start = PHYS_OFFSET;
        size  = memparse(p, &endp);
        if (*endp == '@')
                start = memparse(endp + 1, NULL);

        arm_add_memory(start, size);

        return 0;
}
early_param("mem", early_mem);

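/*
 * Publish each memblock region as "System RAM" in /proc/iomem, with the
 * kernel code and data resources nested inside whichever region contains
 * them, plus the optional video RAM and legacy parallel port ranges.
 */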
static void __init request_standard_resources(struct machine_desc *mdesc)
{
        struct memblock_region *region;
        struct resource *res;

        kernel_code.start   = virt_to_phys(_text);
        kernel_code.end     = virt_to_phys(_etext - 1);
        kernel_data.start   = virt_to_phys(_sdata);
        kernel_data.end     = virt_to_phys(_end - 1);

        for_each_memblock(memory, region) {
                res = alloc_bootmem_low(sizeof(*res));
                res->name  = "System RAM";
                res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
                res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
                res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

                request_resource(&iomem_resource, res);

                if (kernel_code.start >= res->start &&
                    kernel_code.end <= res->end)
                        request_resource(res, &kernel_code);
                if (kernel_data.start >= res->start &&
                    kernel_data.end <= res->end)
                        request_resource(res, &kernel_data);
        }

        if (mdesc->video_start) {
                video_ram.start = mdesc->video_start;
                video_ram.end   = mdesc->video_end;
                request_resource(&iomem_resource, &video_ram);
        }

        /*
         * Some machines can never have lp0, lp1 or lp2, so only
         * reserve the ports the machine description asks for.
         */
        if (mdesc->reserve_lp0)
                request_resource(&ioport_resource, &lp0);
        if (mdesc->reserve_lp1)
                request_resource(&ioport_resource, &lp1);
        if (mdesc->reserve_lp2)
                request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
        .orig_video_lines       = 30,
        .orig_video_cols        = 80,
        .orig_video_mode        = 0,
        .orig_video_ega_bx      = 0,
        .orig_video_isVGA       = 1,
        .orig_video_points      = 8
};
#endif

static int __init customize_machine(void)
{
        /*
         * Customize platform devices, or add new ones.  On DT-based
         * machines we fall back to populating the machine from the
         * device tree if no callback is provided; otherwise we would
         * always need an init_machine callback.
         */
        if (machine_desc->init_machine)
                machine_desc->init_machine();
#ifdef CONFIG_OF
        else
                of_platform_populate(NULL, of_default_bus_match_table,
                                        NULL, NULL);
#endif
        return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
        if (machine_desc->init_late)
                machine_desc->init_late();
        return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
        unsigned long total;

        total = max_low_pfn - min_low_pfn;
        return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base;
        unsigned long long total_mem;
        int ret;

        total_mem = get_total_mem();
        ret = parse_crashkernel(boot_command_line, total_mem,
                                &crash_size, &crash_base);
        if (ret)
                return;

        ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
        if (ret < 0) {
                printk(KERN_WARNING "crashkernel reservation failed - "
                       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
                return;
        }

        printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
               "for crashkernel (System RAM: %ldMB)\n",
               (unsigned long)(crash_size >> 20),
               (unsigned long)(crash_base >> 20),
               (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
        const struct membank *a = _a, *b = _b;
        long cmp = bank_pfn_start(a) - bank_pfn_start(b);
        return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
        if (is_hyp_mode_available()) {
                pr_info("CPU: All CPU(s) started in HYP mode.\n");
                pr_info("CPU: Virtualization extensions available.\n");
        } else if (is_hyp_mode_mismatched()) {
                pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
                        __boot_cpu_mode & MODE_MASK);
                pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
        } else
                pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
        struct machine_desc *mdesc;

        setup_processor();
        mdesc = setup_machine_fdt(__atags_pointer);
        if (!mdesc)
                mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
        machine_desc = mdesc;
        machine_name = mdesc->name;

        setup_dma_zone(mdesc);

        if (mdesc->restart_mode)
                reboot_setup(&mdesc->restart_mode);

        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code   = (unsigned long) _etext;
        init_mm.end_data   = (unsigned long) _edata;
        init_mm.brk        = (unsigned long) _end;

        /* populate cmd_line too for later use, preserving boot_command_line */
        strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = cmd_line;

        parse_early_param();

        sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
        sanity_check_meminfo();
        arm_memblock_init(&meminfo, mdesc);

        paging_init(mdesc);
        request_standard_resources(mdesc);

        if (mdesc->restart)
                arm_pm_restart = mdesc->restart;

        unflatten_device_tree();

        arm_dt_init_cpu_maps();
#ifdef CONFIG_SMP
        if (is_smp()) {
                smp_set_ops(mdesc->smp);
                smp_init_cpus();
        }
#endif

        if (!is_smp())
                hyp_mode_check();

        reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif

        if (mdesc->init_early)
                mdesc->init_early();
}


static int __init topology_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
                cpuinfo->cpu.hotpluggable = 1;
                register_cpu(&cpuinfo->cpu, cpu);
        }

        return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
        struct proc_dir_entry *res;

        res = proc_mkdir("cpu", NULL);
        if (!res)
                return -ENOMEM;
        return 0;
}
fs_initcall(proc_cpu_init);
#endif

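/*
 * One name per elf_hwcap bit, in bit order: hwcap_str[n] labels bit n of
 * elf_hwcap (HWCAP_SWP is bit 0, HWCAP_HALF bit 1, ...), so this table
 * must be kept in sync with asm/hwcap.h.
 */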
static const char *hwcap_str[] = {
        "swp",
        "half",
        "thumb",
        "26bit",
        "fastmult",
        "fpa",
        "vfp",
        "edsp",
        "java",
        "iwmmxt",
        "crunch",
        "thumbee",
        "neon",
        "vfpv3",
        "vfpv3d16",
        "tls",
        "vfpv4",
        "idiva",
        "idivt",
        NULL
};

static int c_show(struct seq_file *m, void *v)
{
        int i, j;
        u32 cpuid;

        for_each_online_cpu(i) {
                /*
                 * glibc reads /proc/cpuinfo to determine the number of
                 * online processors, looking for lines beginning with
                 * "processor".  Give glibc what it expects.
                 */
                seq_printf(m, "processor\t: %d\n", i);
                cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
                seq_printf(m, "model name\t: %s rev %d (%s)\n",
                           cpu_name, cpuid & 15, elf_platform);

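                /*
                 * BogoMIPS = loops_per_jiffy * HZ / 500000, printed with
                 * two decimal places; the SMP build uses each CPU's own
                 * calibration, the UP build the global loops_per_jiffy.
                 */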
#if defined(CONFIG_SMP)
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
                           (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
                seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
                           loops_per_jiffy / (500000/HZ),
                           (loops_per_jiffy / (5000/HZ)) % 100);
#endif
                /* dump out the processor features */
                seq_puts(m, "Features\t: ");

                for (j = 0; hwcap_str[j]; j++)
                        if (elf_hwcap & (1 << j))
                                seq_printf(m, "%s ", hwcap_str[j]);

                seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
                seq_printf(m, "CPU architecture: %s\n",
                           proc_arch[cpu_architecture()]);

                if ((cpuid & 0x0008f000) == 0x00000000) {
                        /* pre-ARM7 */
                        seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
                } else {
                        if ((cpuid & 0x0008f000) == 0x00007000) {
                                /* ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%02x\n",
                                           (cpuid >> 16) & 127);
                        } else {
                                /* post-ARM7 */
                                seq_printf(m, "CPU variant\t: 0x%x\n",
                                           (cpuid >> 20) & 15);
                        }
                        seq_printf(m, "CPU part\t: 0x%03x\n",
                                   (cpuid >> 4) & 0xfff);
                }
                seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
        }

        seq_printf(m, "Hardware\t: %s\n", machine_name);
        seq_printf(m, "Revision\t: %04x\n", system_rev);
        seq_printf(m, "Serial\t\t: %08x%08x\n",
                   system_serial_high, system_serial_low);

        return 0;
}

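/*
 * /proc/cpuinfo is produced in a single c_show() call: c_start() returns
 * a one-shot dummy token for position 0 and c_next() always ends the
 * sequence.
 */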
static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = c_show
};