/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Hartmut Penner (hp@de.ibm.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "arch/i386/kernel/setup.c"
 *    Copyright (C) 1995, Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#define KMSG_COMPONENT "setup"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/root_dev.h>
#include <linux/console.h>
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
#include <linux/ctype.h>
#include <linux/reboot.h>
#include <linux/topology.h>
#include <linux/ftrace.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/compat.h>

#include <asm/ipl.h>
#include <asm/uaccess.h>
#include <asm/facility.h>
#include <asm/smp.h>
#include <asm/mmu_context.h>
#include <asm/cpcmd.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/ebcdic.h>
#include <asm/kvm_virtio.h>
#include <asm/diag.h>
#include <asm/os_info.h>
#include <asm/sclp.h>
#include "entry.h"

long psw_kernel_bits    = PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_ASC_PRIMARY |
                          PSW_MASK_EA | PSW_MASK_BA;
long psw_user_bits      = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT |
                          PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_MCHECK |
                          PSW_MASK_PSTATE | PSW_ASC_HOME;

/*
 * User copy operations.
 */
struct uaccess_ops uaccess;
EXPORT_SYMBOL(uaccess);

/*
 * Machine setup..
 */
unsigned int console_mode = 0;
EXPORT_SYMBOL(console_mode);

unsigned int console_devno = -1;
EXPORT_SYMBOL(console_devno);

unsigned int console_irq = -1;
EXPORT_SYMBOL(console_irq);

unsigned long elf_hwcap = 0;
char elf_platform[ELF_PLATFORM_SIZE];

struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];

int __initdata memory_end_set;
unsigned long __initdata memory_end;

unsigned long VMALLOC_START;
EXPORT_SYMBOL(VMALLOC_START);

unsigned long VMALLOC_END;
EXPORT_SYMBOL(VMALLOC_END);

struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);

#ifdef CONFIG_64BIT
unsigned long MODULES_VADDR;
unsigned long MODULES_END;
#endif

/* An array with a pointer to the lowcore of every CPU. */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

/*
 * This is set up by the setup routine at boot time.
 * For S390 we need to find out what we have to set up,
 * using address 0x10400 ...
 */

#include <asm/setup.h>

/*
 * condev= and conmode= setup parameters.
 */

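/* Example: condev=0x1f selects device number 0x001f as the console device. */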
static int __init condev_setup(char *str)
{
        int vdev;

        vdev = simple_strtoul(str, &str, 0);
        if (vdev >= 0 && vdev < 65536) {
                console_devno = vdev;
                console_irq = -1;
        }
        return 1;
}

__setup("condev=", condev_setup);

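/*
 * Select the preferred console: under KVM an SCLP VT220 or line-mode
 * terminal if available (hvc as fallback), otherwise ttyS for 3215/SCLP
 * consoles or tty3270 for a 3270 console.
 */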
static void __init set_preferred_console(void)
{
        if (MACHINE_IS_KVM) {
                if (sclp_has_vt220())
                        add_preferred_console("ttyS", 1, NULL);
                else if (sclp_has_linemode())
                        add_preferred_console("ttyS", 0, NULL);
                else
                        add_preferred_console("hvc", 0, NULL);
        } else if (CONSOLE_IS_3215 || CONSOLE_IS_SCLP)
                add_preferred_console("ttyS", 0, NULL);
        else if (CONSOLE_IS_3270)
                add_preferred_console("tty3270", 0, NULL);
}

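/* "conmode=" parameter, e.g. conmode=3270 forces the 3270 console driver. */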
static int __init conmode_setup(char *str)
{
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
        if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
                SET_CONSOLE_SCLP;
#endif
#if defined(CONFIG_TN3215_CONSOLE)
        if (strncmp(str, "3215", 5) == 0)
                SET_CONSOLE_3215;
#endif
#if defined(CONFIG_TN3270_CONSOLE)
        if (strncmp(str, "3270", 5) == 0)
                SET_CONSOLE_3270;
#endif
        set_preferred_console();
        return 1;
}

__setup("conmode=", conmode_setup);

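/*
 * Without an explicit conmode= parameter, ask z/VM CP about the console
 * device and terminal mode (QUERY CONSOLE / QUERY TERM) and pick a
 * matching console driver; on other machines fall back to SCLP.
 */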
static void __init conmode_default(void)
{
        char query_buffer[1024];
        char *ptr;

        if (MACHINE_IS_VM) {
                cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
                console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
                ptr = strstr(query_buffer, "SUBCHANNEL =");
                console_irq = simple_strtoul(ptr + 13, NULL, 16);
                cpcmd("QUERY TERM", query_buffer, 1024, NULL);
                ptr = strstr(query_buffer, "CONMODE");
                /*
                 * Set the conmode to 3215 so that the device recognition
                 * will set the cu_type of the console to 3215. If the
                 * conmode is 3270 and we don't set it back then both
                 * 3215 and the 3270 driver will try to access the console
                 * device (3215 as console and 3270 as normal tty).
                 */
                cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
                if (ptr == NULL) {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                        return;
                }
                if (strncmp(ptr + 8, "3270", 4) == 0) {
#if defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                } else if (strncmp(ptr + 8, "3215", 4) == 0) {
#if defined(CONFIG_TN3215_CONSOLE)
                        SET_CONSOLE_3215;
#elif defined(CONFIG_TN3270_CONSOLE)
                        SET_CONSOLE_3270;
#elif defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                        SET_CONSOLE_SCLP;
#endif
                }
        } else {
#if defined(CONFIG_SCLP_CONSOLE) || defined(CONFIG_SCLP_VT220_CONSOLE)
                SET_CONSOLE_SCLP;
#endif
        }
}

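/*
 * For an FCP (zfcpdump) dump boot, hide all channel devices except the
 * dump device and the console by appending a cio_ignore= entry to the
 * kernel command line.
 */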
#ifdef CONFIG_ZFCPDUMP
static void __init setup_zfcpdump(unsigned int console_devno)
{
        static char str[41];

        if (ipl_info.type != IPL_TYPE_FCP_DUMP)
                return;
        if (OLDMEM_BASE)
                return;
        if (console_devno != -1)
                sprintf(str, " cio_ignore=all,!0.0.%04x,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno, console_devno);
        else
                sprintf(str, " cio_ignore=all,!0.0.%04x",
                        ipl_info.data.fcp.dev_id.devno);
        strcat(boot_command_line, str);
        console_loglevel = 2;
}
#else
static inline void setup_zfcpdump(unsigned int console_devno) {}
#endif /* CONFIG_ZFCPDUMP */

/*
 * Reboot, halt and power_off stubs. They just call _machine_restart,
 * _machine_halt or _machine_power_off.
 */

void machine_restart(char *command)
{
        if ((!in_interrupt() && !in_atomic()) || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_restart(command);
}

void machine_halt(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_halt();
}

void machine_power_off(void)
{
        if (!in_interrupt() || oops_in_progress)
                /*
                 * Only unblank the console if we are called in enabled
                 * context or a bust_spinlocks cleared the way for us.
                 */
                console_unblank();
        _machine_power_off();
}

/*
 * Dummy power off function.
 */
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL_GPL(pm_power_off);

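/* "mem=" parameter, e.g. mem=512M limits the kernel to the first 512 MiB. */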
static int __init early_parse_mem(char *p)
{
        memory_end = memparse(p, &p);
        memory_end_set = 1;
        return 0;
}
early_param("mem", early_parse_mem);

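/* "vmalloc=" parameter, e.g. vmalloc=1G sets the size of the vmalloc area. */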
static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned int s390_user_mode = PRIMARY_SPACE_MODE;
EXPORT_SYMBOL_GPL(s390_user_mode);

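/*
 * Run the kernel in the home address space and user space in the primary
 * address space; user copy then has to switch address spaces explicitly
 * (via mvcos or a page table walk).
 */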
static void __init set_user_mode_primary(void)
{
        psw_kernel_bits = (psw_kernel_bits & ~PSW_MASK_ASC) | PSW_ASC_HOME;
        psw_user_bits = (psw_user_bits & ~PSW_MASK_ASC) | PSW_ASC_PRIMARY;
#ifdef CONFIG_COMPAT
        psw32_user_bits =
                (psw32_user_bits & ~PSW32_MASK_ASC) | PSW32_ASC_PRIMARY;
#endif
        uaccess = MACHINE_HAS_MVCOS ? uaccess_mvcos_switch : uaccess_pt;
}

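/* "user_mode=" parameter: user_mode=primary (the default) or user_mode=home. */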
static int __init early_parse_user_mode(char *p)
{
        if (p && strcmp(p, "primary") == 0)
                s390_user_mode = PRIMARY_SPACE_MODE;
        else if (!p || strcmp(p, "home") == 0)
                s390_user_mode = HOME_SPACE_MODE;
        else
                return 1;
        return 0;
}
early_param("user_mode", early_parse_user_mode);

static void __init setup_addressing_mode(void)
{
        if (s390_user_mode != PRIMARY_SPACE_MODE)
                return;
        set_user_mode_primary();
        if (MACHINE_HAS_MVCOS)
                pr_info("Address spaces switched, mvcos available\n");
        else
                pr_info("Address spaces switched, mvcos not available\n");
}

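/* Stack for the PSW restart handler, set up in setup_lowcore() below. */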
void *restart_stack __attribute__((__section__(".data")));

static void __init setup_lowcore(void)
{
        struct _lowcore *lc;

        /*
         * Setup lowcore for boot cpu
         */
        BUILD_BUG_ON(sizeof(struct _lowcore) != LC_PAGES * 4096);
        lc = __alloc_bootmem_low(LC_PAGES * PAGE_SIZE, LC_PAGES * PAGE_SIZE, 0);
        lc->restart_psw.mask = psw_kernel_bits;
        lc->restart_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
        lc->external_new_psw.mask = psw_kernel_bits |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->external_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
        lc->svc_new_psw.mask = psw_kernel_bits |
                PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
        lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
        lc->program_new_psw.mask = psw_kernel_bits |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->program_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) pgm_check_handler;
        lc->mcck_new_psw.mask = psw_kernel_bits;
        lc->mcck_new_psw.addr =
                PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
        lc->io_new_psw.mask = psw_kernel_bits |
                PSW_MASK_DAT | PSW_MASK_MCHECK;
        lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
        lc->clock_comparator = -1ULL;
        lc->kernel_stack = ((unsigned long) &init_thread_union)
                + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->async_stack = (unsigned long)
                __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0)
                + ASYNC_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->panic_stack = (unsigned long)
                __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0)
                + PAGE_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
        lc->current_task = (unsigned long) init_thread_union.thread_info.task;
        lc->thread_info = (unsigned long) &init_thread_union;
        lc->machine_flags = S390_lowcore.machine_flags;
        lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
        memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
               MAX_FACILITY_BIT/8);
#ifndef CONFIG_64BIT
        if (MACHINE_HAS_IEEE) {
                lc->extended_save_area_addr = (__u32)
                        __alloc_bootmem_low(PAGE_SIZE, PAGE_SIZE, 0);
                /* enable extended save area */
                __ctl_set_bit(14, 29);
        }
#else
        lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0];
#endif
        lc->sync_enter_timer = S390_lowcore.sync_enter_timer;
        lc->async_enter_timer = S390_lowcore.async_enter_timer;
        lc->exit_timer = S390_lowcore.exit_timer;
        lc->user_timer = S390_lowcore.user_timer;
        lc->system_timer = S390_lowcore.system_timer;
        lc->steal_timer = S390_lowcore.steal_timer;
        lc->last_update_timer = S390_lowcore.last_update_timer;
        lc->last_update_clock = S390_lowcore.last_update_clock;
        lc->ftrace_func = S390_lowcore.ftrace_func;

        restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
        restart_stack += ASYNC_SIZE;

        /*
         * Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
         * restart data to the absolute zero lowcore. This is necessary if
         * PSW restart is done on an offline CPU that has lowcore zero.
         */
        lc->restart_stack = (unsigned long) restart_stack;
        lc->restart_fn = (unsigned long) do_restart;
        lc->restart_data = 0;
        lc->restart_source = -1UL;

        /* Setup absolute zero lowcore */
        mem_assign_absolute(S390_lowcore.restart_stack, lc->restart_stack);
        mem_assign_absolute(S390_lowcore.restart_fn, lc->restart_fn);
        mem_assign_absolute(S390_lowcore.restart_data, lc->restart_data);
        mem_assign_absolute(S390_lowcore.restart_source, lc->restart_source);
        mem_assign_absolute(S390_lowcore.restart_psw, lc->restart_psw);

        set_prefix((u32)(unsigned long) lc);
        lowcore_ptr[0] = lc;
}

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource __initdata *standard_resources[] = {
        &code_resource,
        &data_resource,
        &bss_resource,
};

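/*
 * Register the memory chunks and the kernel text/data/bss sections as
 * resources so that they show up in /proc/iomem.
 */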
static void __init setup_resources(void)
{
        struct resource *res, *std_res, *sub_res;
        int i, j;

        code_resource.start = (unsigned long) &_text;
        code_resource.end = (unsigned long) &_etext - 1;
        data_resource.start = (unsigned long) &_etext;
        data_resource.end = (unsigned long) &_edata - 1;
        bss_resource.start = (unsigned long) &__bss_start;
        bss_resource.end = (unsigned long) &__bss_stop - 1;

        for (i = 0; i < MEMORY_CHUNKS; i++) {
                if (!memory_chunk[i].size)
                        continue;
                if (memory_chunk[i].type == CHUNK_OLDMEM ||
                    memory_chunk[i].type == CHUNK_CRASHK)
                        continue;
                res = alloc_bootmem_low(sizeof(*res));
                res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
                switch (memory_chunk[i].type) {
                case CHUNK_READ_WRITE:
                case CHUNK_CRASHK:
                        res->name = "System RAM";
                        break;
                case CHUNK_READ_ONLY:
                        res->name = "System ROM";
                        res->flags |= IORESOURCE_READONLY;
                        break;
                default:
                        res->name = "reserved";
                }
                res->start = memory_chunk[i].addr;
                res->end = res->start + memory_chunk[i].size - 1;
                request_resource(&iomem_resource, res);

                for (j = 0; j < ARRAY_SIZE(standard_resources); j++) {
                        std_res = standard_resources[j];
                        if (std_res->start < res->start ||
                            std_res->start > res->end)
                                continue;
                        if (std_res->end > res->end) {
                                sub_res = alloc_bootmem_low(sizeof(*sub_res));
                                *sub_res = *std_res;
                                sub_res->end = res->end;
                                std_res->start = res->end + 1;
                                request_resource(res, sub_res);
                        } else {
                                request_resource(res, std_res);
                        }
                }
        }
}

unsigned long real_memory_size;
EXPORT_SYMBOL_GPL(real_memory_size);

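/*
 * Align the memory chunks, choose between a 2-, 3- or 4-level kernel
 * address space and compute the final memory_end, vmalloc and vmemmap
 * layout.
 */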
static void __init setup_memory_end(void)
{
        unsigned long vmax, vmalloc_size, tmp;
        int i;

#ifdef CONFIG_ZFCPDUMP
        if (ipl_info.type == IPL_TYPE_FCP_DUMP && !OLDMEM_BASE) {
                memory_end = ZFCPDUMP_HSA_SIZE;
                memory_end_set = 1;
        }
#endif
        real_memory_size = 0;
        memory_end &= PAGE_MASK;

        /*
         * Make sure all chunks are MAX_ORDER aligned so we don't need the
         * extra checks that HOLES_IN_ZONE would require.
         */
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                unsigned long start, end;
                struct mem_chunk *chunk;
                unsigned long align;

                chunk = &memory_chunk[i];
                align = 1UL << (MAX_ORDER + PAGE_SHIFT - 1);
                start = (chunk->addr + align - 1) & ~(align - 1);
                end = (chunk->addr + chunk->size) & ~(align - 1);
                if (start >= end)
                        memset(chunk, 0, sizeof(*chunk));
                else {
                        chunk->addr = start;
                        chunk->size = end - start;
                }
                real_memory_size = max(real_memory_size,
                                       chunk->addr + chunk->size);
        }

        /* Choose kernel address space layout: 2, 3, or 4 levels. */
#ifdef CONFIG_64BIT
        vmalloc_size = VMALLOC_END ?: (128UL << 30) - MODULES_LEN;
        tmp = (memory_end ?: real_memory_size) / PAGE_SIZE;
        tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size;
        if (tmp <= (1UL << 42))
                vmax = 1UL << 42;       /* 3-level kernel page table */
        else
                vmax = 1UL << 53;       /* 4-level kernel page table */
        /* module area is at the end of the kernel address space. */
        MODULES_END = vmax;
        MODULES_VADDR = MODULES_END - MODULES_LEN;
        VMALLOC_END = MODULES_VADDR;
#else
        vmalloc_size = VMALLOC_END ?: 96UL << 20;
        vmax = 1UL << 31;               /* 2-level kernel page table */
        /* vmalloc area is at the end of the kernel address space. */
        VMALLOC_END = vmax;
#endif
        VMALLOC_START = vmax - vmalloc_size;

        /* Split remaining virtual space between 1:1 mapping & vmemmap array */
        tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
        /* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
        tmp = SECTION_ALIGN_UP(tmp);
        tmp = VMALLOC_START - tmp * sizeof(struct page);
        tmp &= ~((vmax >> 11) - 1);     /* align to page table level */
        tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS);
        vmemmap = (struct page *) tmp;

        /* Take care that memory_end is set and <= vmemmap */
        memory_end = min(memory_end ?: real_memory_size, tmp);

        /* Fixup memory chunk array to fit into 0..memory_end */
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                struct mem_chunk *chunk = &memory_chunk[i];

                if (chunk->addr >= memory_end) {
                        memset(chunk, 0, sizeof(*chunk));
                        continue;
                }
                if (chunk->addr + chunk->size > memory_end)
                        chunk->size = memory_end - chunk->addr;
        }
}

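/* Store the address of the vmcoreinfo note in the absolute zero lowcore. */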
static void __init setup_vmcoreinfo(void)
{
        mem_assign_absolute(S390_lowcore.vmcore_info, paddr_vmcoreinfo_note());
}

#ifdef CONFIG_CRASH_DUMP

/*
 * Find suitable location for crashkernel memory
 */
static unsigned long __init find_crash_base(unsigned long crash_size,
                                            char **msg)
{
        unsigned long crash_base;
        struct mem_chunk *chunk;
        int i;

        if (memory_chunk[0].size < crash_size) {
                *msg = "first memory chunk must be at least crashkernel size";
                return 0;
        }
        if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
                return OLDMEM_BASE;

        for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
                chunk = &memory_chunk[i];
                if (chunk->size == 0)
                        continue;
                if (chunk->type != CHUNK_READ_WRITE)
                        continue;
                if (chunk->size < crash_size)
                        continue;
                crash_base = (chunk->addr + chunk->size) - crash_size;
                if (crash_base < crash_size)
                        continue;
                if (crash_base < ZFCPDUMP_HSA_SIZE_MAX)
                        continue;
                if (crash_base < (unsigned long) INITRD_START + INITRD_SIZE)
                        continue;
                return crash_base;
        }
        *msg = "no suitable area found";
        return 0;
}

/*
 * Check if crash_base and crash_size are valid
 */
static int __init verify_crash_base(unsigned long crash_base,
                                    unsigned long crash_size,
                                    char **msg)
{
        struct mem_chunk *chunk;
        int i;

        /*
         * Because we do the swap to zero, we must have at least
         * 'crash_size' bytes of free space below crash_base.
         */
        if (crash_size > crash_base) {
                *msg = "crashkernel offset must be greater than size";
                return -EINVAL;
        }

        /* First memory chunk must be at least crash_size */
        if (memory_chunk[0].size < crash_size) {
                *msg = "first memory chunk must be at least crashkernel size";
                return -EINVAL;
        }
        /* Check if we fit into the respective memory chunk */
        for (i = 0; i < MEMORY_CHUNKS; i++) {
                chunk = &memory_chunk[i];
                if (chunk->size == 0)
                        continue;
                if (crash_base < chunk->addr)
                        continue;
                if (crash_base >= chunk->addr + chunk->size)
                        continue;
                /* we have found the memory chunk */
                if (crash_base + crash_size > chunk->addr + chunk->size) {
                        *msg = "selected memory chunk is too small for "
                                "crashkernel memory";
                        return -EINVAL;
                }
                return 0;
        }
        *msg = "invalid memory range specified";
        return -EINVAL;
}

/*
 * Reserve kdump memory by creating a memory hole in the mem_chunk array
 */
static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size,
                                         int type)
{
        create_mem_hole(memory_chunk, addr, size, type);
}

/*
 * When kdump is enabled, we have to ensure that no memory from
 * the area [0 - crashkernel memory size] and
 * [crashk_res.start - crashk_res.end] is set offline.
 */
static int kdump_mem_notifier(struct notifier_block *nb,
                              unsigned long action, void *data)
{
        struct memory_notify *arg = data;

        if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
                return NOTIFY_BAD;
        if (arg->start_pfn > PFN_DOWN(crashk_res.end))
                return NOTIFY_OK;
        if (arg->start_pfn + arg->nr_pages - 1 < PFN_DOWN(crashk_res.start))
                return NOTIFY_OK;
        return NOTIFY_BAD;
}

static struct notifier_block kdump_mem_nb = {
        .notifier_call = kdump_mem_notifier,
};

#endif

/*
 * Make sure that oldmem, where the dump is stored, is protected
 */
static void reserve_oldmem(void)
{
#ifdef CONFIG_CRASH_DUMP
        if (!OLDMEM_BASE)
                return;

        reserve_kdump_bootmem(OLDMEM_BASE, OLDMEM_SIZE, CHUNK_OLDMEM);
        reserve_kdump_bootmem(OLDMEM_SIZE, memory_end - OLDMEM_SIZE,
                              CHUNK_OLDMEM);
        if (OLDMEM_BASE + OLDMEM_SIZE == real_memory_size)
                saved_max_pfn = PFN_DOWN(OLDMEM_BASE) - 1;
        else
                saved_max_pfn = PFN_DOWN(real_memory_size) - 1;
#endif
}

/*
 * Reserve memory for kdump kernel to be loaded with kexec
 */
static void __init reserve_crashkernel(void)
{
#ifdef CONFIG_CRASH_DUMP
        unsigned long long crash_base, crash_size;
        char *msg = NULL;
        int rc;

        rc = parse_crashkernel(boot_command_line, memory_end, &crash_size,
                               &crash_base);
        if (rc || crash_size == 0)
                return;
        crash_base = ALIGN(crash_base, KEXEC_CRASH_MEM_ALIGN);
        crash_size = ALIGN(crash_size, KEXEC_CRASH_MEM_ALIGN);
        if (register_memory_notifier(&kdump_mem_nb))
                return;
        if (!crash_base)
                crash_base = find_crash_base(crash_size, &msg);
        if (!crash_base) {
                pr_info("crashkernel reservation failed: %s\n", msg);
                unregister_memory_notifier(&kdump_mem_nb);
                return;
        }
        if (verify_crash_base(crash_base, crash_size, &msg)) {
                pr_info("crashkernel reservation failed: %s\n", msg);
                unregister_memory_notifier(&kdump_mem_nb);
                return;
        }
        if (!OLDMEM_BASE && MACHINE_IS_VM)
                diag10_range(PFN_DOWN(crash_base), PFN_DOWN(crash_size));
        crashk_res.start = crash_base;
        crashk_res.end = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
        reserve_kdump_bootmem(crash_base, crash_size, CHUNK_CRASHK);
        pr_info("Reserving %lluMB of memory at %lluMB "
                "for crashkernel (System RAM: %luMB)\n",
                crash_size >> 20, crash_base >> 20, memory_end >> 20);
        os_info_crashkernel_add(crash_base, crash_size);
#endif
}

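/*
 * Initialize the bootmem allocator, register the usable RAM chunks with
 * memblock and reserve the kernel image, bootmem bitmap, crashkernel and
 * initrd areas.
 */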
static void __init setup_memory(void)
{
        unsigned long bootmap_size;
        unsigned long start_pfn, end_pfn;
        int i;

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(&_end));
        end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
        /*
         * Move the initrd in case the bitmap of the bootmem allocator
         * would overwrite it.
         */

        if (INITRD_START && INITRD_SIZE) {
                unsigned long bmap_size;
                unsigned long start;

                bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
                bmap_size = PFN_PHYS(bmap_size);

                if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
                        start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

#ifdef CONFIG_CRASH_DUMP
                        if (OLDMEM_BASE) {
                                /* Move initrd behind kdump oldmem */
                                if (start + INITRD_SIZE > OLDMEM_BASE &&
                                    start < OLDMEM_BASE + OLDMEM_SIZE)
                                        start = OLDMEM_BASE + OLDMEM_SIZE;
                        }
#endif
                        if (start + INITRD_SIZE > memory_end) {
                                pr_err("initrd extends beyond end of "
                                       "memory (0x%08lx > 0x%08lx) "
                                       "disabling initrd\n",
                                       start + INITRD_SIZE, memory_end);
                                INITRD_START = INITRD_SIZE = 0;
                        } else {
                                pr_info("Moving initrd (0x%08lx -> "
                                        "0x%08lx, size: %ld)\n",
                                        INITRD_START, start, INITRD_SIZE);
                                memmove((void *) start, (void *) INITRD_START,
                                        INITRD_SIZE);
                                INITRD_START = start;
                        }
                }
        }
#endif

        /*
         * Initialize the boot-time allocator
         */
        bootmap_size = init_bootmem(start_pfn, end_pfn);

        /*
         * Register RAM areas with the bootmem allocator.
         */

        for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
                unsigned long start_chunk, end_chunk, pfn;

                if (memory_chunk[i].type != CHUNK_READ_WRITE &&
                    memory_chunk[i].type != CHUNK_CRASHK)
                        continue;
                start_chunk = PFN_DOWN(memory_chunk[i].addr);
                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
                end_chunk = min(end_chunk, end_pfn);
                if (start_chunk >= end_chunk)
                        continue;
                memblock_add_node(PFN_PHYS(start_chunk),
                                  PFN_PHYS(end_chunk - start_chunk), 0);
                pfn = max(start_chunk, start_pfn);
                storage_key_init_range(PFN_PHYS(pfn), PFN_PHYS(end_chunk));
        }

        psw_set_key(PAGE_DEFAULT_KEY);

        free_bootmem_with_active_regions(0, max_pfn);

        /*
         * Reserve memory used for lowcore/command line/kernel image.
         */
        reserve_bootmem(0, (unsigned long)_ehead, BOOTMEM_DEFAULT);
        reserve_bootmem((unsigned long)_stext,
                        PFN_PHYS(start_pfn) - (unsigned long)_stext,
                        BOOTMEM_DEFAULT);
        /*
         * Reserve the bootmem bitmap itself as well. We do this in two
         * steps (first step was init_bootmem()) because this catches
         * the (very unlikely) case of us accidentally initializing the
         * bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size,
                        BOOTMEM_DEFAULT);

#ifdef CONFIG_CRASH_DUMP
        if (crashk_res.start)
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1,
                                BOOTMEM_DEFAULT);
        if (is_kdump_kernel())
                reserve_bootmem(elfcorehdr_addr - OLDMEM_BASE,
                                PAGE_ALIGN(elfcorehdr_size), BOOTMEM_DEFAULT);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (INITRD_START && INITRD_SIZE) {
                if (INITRD_START + INITRD_SIZE <= memory_end) {
                        reserve_bootmem(INITRD_START, INITRD_SIZE,
                                        BOOTMEM_DEFAULT);
                        initrd_start = INITRD_START;
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        pr_err("initrd extends beyond end of "
                               "memory (0x%08lx > 0x%08lx) "
                               "disabling initrd\n",
                               initrd_start + INITRD_SIZE, memory_end);
                        initrd_start = initrd_end = 0;
                }
        }
#endif
}

/*
 * Setup hardware capabilities.
 */
static void __init setup_hwcaps(void)
{
        static const int stfl_bits[6] = { 0, 2, 7, 17, 19, 21 };
        struct cpuid cpu_id;
        int i;

        /*
         * The store-facility-list bits, as defined in the Principles
         * of Operation, are numbered with bit 1UL<<31 as number 0 and
         * bit 1UL<<0 as number 31.
         *   Bit 0: instructions named N3, "backported" to esa-mode
         *   Bit 2: z/Architecture mode is active
         *   Bit 7: the store-facility-list-extended facility is installed
         *   Bit 17: the message-security assist is installed
         *   Bit 19: the long-displacement facility is installed
         *   Bit 21: the extended-immediate facility is installed
         *   Bit 22: extended-translation facility 3 is installed
         *   Bit 30: extended-translation facility 3 enhancement facility
         * These get translated to:
         *   HWCAP_S390_ESAN3 bit 0, HWCAP_S390_ZARCH bit 1,
         *   HWCAP_S390_STFLE bit 2, HWCAP_S390_MSA bit 3,
         *   HWCAP_S390_LDISP bit 4, HWCAP_S390_EIMM bit 5 and
         *   HWCAP_S390_ETF3EH bit 8 (22 && 30).
         */
        for (i = 0; i < 6; i++)
                if (test_facility(stfl_bits[i]))
                        elf_hwcap |= 1UL << i;

        if (test_facility(22) && test_facility(30))
                elf_hwcap |= HWCAP_S390_ETF3EH;

        /*
         * Check for additional facilities with store-facility-list-extended.
         * stfle stores doublewords (8 byte) with bit 1ULL<<63 as bit 0
         * and 1ULL<<0 as bit 63. Bits 0-31 contain the same information
         * as stored by stfl, bits 32-xxx contain additional facilities.
         * How many facility words are stored depends on the number of
         * doublewords passed to the instruction. The additional facilities
         * are:
         *   Bit 42: decimal floating point facility is installed
         *   Bit 44: perform floating point operation facility is installed
         * translated to:
         *   HWCAP_S390_DFP bit 6 (42 && 44).
         */
        if ((elf_hwcap & (1UL << 2)) && test_facility(42) && test_facility(44))
                elf_hwcap |= HWCAP_S390_DFP;

        /*
         * Huge page support HWCAP_S390_HPAGE is bit 7.
         */
        if (MACHINE_HAS_HPAGE)
                elf_hwcap |= HWCAP_S390_HPAGE;

#if defined(CONFIG_64BIT)
        /*
         * 64-bit register support for 31-bit processes
         * HWCAP_S390_HIGH_GPRS is bit 9.
         */
        elf_hwcap |= HWCAP_S390_HIGH_GPRS;

        /*
         * Transactional execution support HWCAP_S390_TE is bit 10.
         */
        if (test_facility(50) && test_facility(73))
                elf_hwcap |= HWCAP_S390_TE;
#endif

        get_cpu_id(&cpu_id);
        switch (cpu_id.machine) {
        case 0x9672:
#if !defined(CONFIG_64BIT)
        default:        /* Use "g5" as default for 31 bit kernels. */
#endif
                strcpy(elf_platform, "g5");
                break;
        case 0x2064:
        case 0x2066:
#if defined(CONFIG_64BIT)
        default:        /* Use "z900" as default for 64 bit kernels. */
#endif
                strcpy(elf_platform, "z900");
                break;
        case 0x2084:
        case 0x2086:
                strcpy(elf_platform, "z990");
                break;
        case 0x2094:
        case 0x2096:
                strcpy(elf_platform, "z9-109");
                break;
        case 0x2097:
        case 0x2098:
                strcpy(elf_platform, "z10");
                break;
        case 0x2817:
        case 0x2818:
                strcpy(elf_platform, "z196");
                break;
        case 0x2827:
                strcpy(elf_platform, "zEC12");
                break;
        }
}

/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init setup_arch(char **cmdline_p)
{
        /*
         * print what head.S has found out about the machine
         */
#ifndef CONFIG_64BIT
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 31-bit mode\n");
        else if (MACHINE_IS_LPAR)
                pr_info("Linux is running natively in 31-bit mode\n");
        if (MACHINE_HAS_IEEE)
                pr_info("The hardware system has IEEE compatible "
                        "floating point units\n");
        else
                pr_info("The hardware system has no IEEE compatible "
                        "floating point units\n");
#else /* CONFIG_64BIT */
        if (MACHINE_IS_VM)
                pr_info("Linux is running as a z/VM "
                        "guest operating system in 64-bit mode\n");
        else if (MACHINE_IS_KVM)
                pr_info("Linux is running under KVM in 64-bit mode\n");
        else if (MACHINE_IS_LPAR)
                pr_info("Linux is running natively in 64-bit mode\n");
#endif /* CONFIG_64BIT */

        /* Have one command line that is parsed and saved in /proc/cmdline */
        /* boot_command_line has already been set up in early.c */
        *cmdline_p = boot_command_line;

        ROOT_DEV = Root_RAM0;

        init_mm.start_code = PAGE_OFFSET;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        if (MACHINE_HAS_MVCOS)
                memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
        else
                memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

        parse_early_param();

        os_info_init();
        setup_ipl();
        setup_memory_end();
        setup_addressing_mode();
        reserve_oldmem();
        reserve_crashkernel();
        setup_memory();
        setup_resources();
        setup_vmcoreinfo();
        setup_lowcore();

        cpu_init();
        s390_init_cpu_topology();

        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
         */
        setup_hwcaps();

        /*
         * Create kernel page tables and switch to virtual addressing.
         */
        paging_init();

        /* Setup default console */
        conmode_default();
        set_preferred_console();

        /* Setup zfcpdump support */
        setup_zfcpdump(console_devno);
}