/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
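
/*
 * Illustration (editor's sketch, not part of the original file): the
 * logical/physical split means generic code always passes a logical cpu
 * number and only the sigp wrappers translate it. Assuming the sigp()
 * inline wrapper from <asm/sigp.h> of this era, these two calls would
 * be equivalent:
 *
 *	sigp(cpu, sigp_sense);				(logical number)
 *	raw_sigp(cpu_logical_map(cpu), sigp_sense);	(physical address)
 */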

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cputime.h>
#include <asm/vdso.h>
#include <asm/cpu.h>
#include "entry.h"

/* logical cpu to cpu address */
unsigned short __cpu_logical_map[NR_CPUS];

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
static int smp_cpu_state[NR_CPUS];

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, int);

static int raw_cpu_stopped(int cpu)
{
	u32 status;

	switch (raw_sigp_ps(&status, 0, cpu, sigp_sense)) {
	case sigp_status_stored:
		/* Check for stopped and check stop state */
		if (status & 0x50)
			return 1;
		break;
	default:
		break;
	}
	return 0;
}

static inline int cpu_stopped(int cpu)
{
	return raw_cpu_stopped(cpu_logical_map(cpu));
}

/*
 * Ensure that PSW restart is done on an online CPU
 */
void smp_restart_with_online_cpu(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (stap() == __cpu_logical_map[cpu]) {
			/* We are online: Enable DAT again and return */
			__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
			return;
		}
	}
	/* We are not online: Do PSW restart on an online CPU */
	while (sigp(cpu, sigp_restart) == sigp_busy)
		cpu_relax();
	/* And stop ourself */
	while (raw_sigp(stap(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}

void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
{
	struct _lowcore *lc, *current_lc;
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long sp;

	if (smp_processor_id() == 0)
		func(data);
	__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE |
			PSW_MASK_EA | PSW_MASK_BA);
	/* Disable lowcore protection */
	__ctl_clear_bit(0, 28);
	current_lc = lowcore_ptr[smp_processor_id()];
	lc = lowcore_ptr[0];
	if (!lc)
		lc = current_lc;
	lc->restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) smp_restart_cpu;
	if (!cpu_online(0))
		smp_switch_to_cpu(func, data, 0, stap(), __cpu_logical_map[0]);
	while (sigp(0, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	sp = lc->panic_stack;
	sp -= sizeof(struct pt_regs);
	regs = (struct pt_regs *) sp;
	memcpy(&regs->gprs, &current_lc->gpregs_save_area, sizeof(regs->gprs));
	regs->psw = current_lc->psw_save_area;
	sp -= STACK_FRAME_OVERHEAD;
	sf = (struct stack_frame *) sp;
	sf->back_chain = 0;
	smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
}

void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = sigp(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!cpu_stopped(cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(unsigned int ext_int_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;

	if ((ext_int_code & 0xffff) == 0x1202)
		kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, int sig)
{
	int order;

	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (1) {
		order = smp_vcpu_scheduled(cpu) ?
			sigp_external_call : sigp_emergency_signal;
		if (sigp(cpu, order) != sigp_busy)
			break;
		udelay(10);
	}
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}
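
/*
 * Illustration (editor's sketch, not part of the original file): generic
 * code such as smp_call_function_single() lands in the hooks above, so a
 * cross-call is just a bit set in the target's lowcore plus a sigp kick.
 * With hypothetical names some_func/some_arg:
 *
 *	smp_call_function_single(2, some_func, some_arg, 1);
 *
 * raises an emergency-signal or external-call interrupt on cpu 2, which
 * is then dispatched by do_ext_call_interrupt() above.
 */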

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1UL << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1UL << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
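
/*
 * Illustration (editor's sketch, not part of the original file): a
 * caller that wants, say, bit 17 of control register 0 set on every cpu
 * would do
 *
 *	smp_ctl_set_bit(0, 17);
 *
 * and later undo it with smp_ctl_clear_bit(0, 17). The or/and masks in
 * ec_creg_mask_parms leave all other bits untouched; bit 17 here is
 * purely illustrative.
 */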

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP && !OLDMEM_BASE)
		return;
	if (is_kdump_kernel())
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	while (raw_sigp(phy_cpu, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy_real(zfcpdump_save_areas[cpu],
		    (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
		    sizeof(struct save_area));
}

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP */

static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	for (cpu_id = 0; cpu_id <= MAX_CPU_ADDRESS; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
		if (!cpu_stopped(logical_cpu))
			continue;
		set_cpu_present(logical_cpu, true);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
	return 0;
}

static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = cpumask_first(&avail);
	if (logical_cpu >= nr_cpu_ids)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(logical_cpu, true);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = cpumask_next(logical_cpu, &avail);
		if (logical_cpu >= nr_cpu_ids)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
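
/*
 * Illustration (editor's note, not part of the original file): because
 * the present mask is a subset of the possible mask here, the
 * cpumask_xor() above yields exactly the possible-but-not-yet-present
 * cpus, i.e. the logical numbers still free for newly detected cpus.
 * For example, with possible = {0,1,2,3} and present = {0,1}, avail
 * becomes {2,3}.
 */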

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = __cpu_logical_map[0];
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
#ifdef CONFIG_CRASH_DUMP
	if (OLDMEM_BASE && !is_kdump_kernel()) {
		struct save_area *save_area;

		save_area = kmalloc(sizeof(*save_area), GFP_KERNEL);
		if (!save_area)
			panic("could not allocate memory for save area\n");
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 0x200, 0);
		zfcpdump_save_areas[0] = save_area;
	}
#endif
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= MAX_CPU_ADDRESS; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			if (!raw_cpu_stopped(cpu))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		if (!raw_cpu_stopped(cpu_addr)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	cpu_init();
	preempt_disable();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	__ctl_clear_bit(0, 28); /* Disable lowcore protection */
	S390_lowcore.restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	S390_lowcore.restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) psw_restart_int_handler;
	__ctl_set_bit(0, 28); /* Enable lowcore protection */
	/*
	 * Wait until the cpu which brought this one up marked it
	 * active before enabling interrupts.
	 */
	while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
		cpu_relax();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit smp_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle;

	c_idle = container_of(work, struct create_idle, work);
	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->restart_psw.mask =
		PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA | PSW_MASK_BA;
	lowcore->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (user_mode != HOME_SPACE_MODE)
		lowcore->restart_psw.mask |= PSW_ASC_HOME;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#else
	if (vdso_alloc_per_cpu(cpu, lowcore))
		goto out;
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	return -ENOMEM;
}

static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;

	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#else
	vdso_free_per_cpu(cpu, lowcore);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, LC_ORDER);
	lowcore_ptr[cpu] = NULL;
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct _lowcore *cpu_lowcore;
	struct create_idle c_idle;
	struct task_struct *idle;
	struct stack_frame *sf;
	u32 lowcore;
	int ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	idle = current_set[cpu];
	if (!idle) {
		c_idle.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done);
		INIT_WORK_ONSTACK(&c_idle.work, smp_fork_idle);
		c_idle.cpu = cpu;
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
		if (IS_ERR(c_idle.idle))
			return PTR_ERR(c_idle.idle);
		idle = c_idle.idle;
		current_set[cpu] = c_idle.idle;
	}
	init_idle(idle, cpu);
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;
	do {
		ccode = sigp(cpu, sigp_initial_cpu_reset);
		if (ccode == sigp_busy)
			udelay(10);
		if (ccode == sigp_not_operational)
			goto err_out;
	} while (ccode == sigp_busy);

	lowcore = (u32)(unsigned long)lowcore_ptr[cpu];
	while (sigp_p(lowcore, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);

	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	atomic_inc(&init_mm.context.attach_count);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->machine_flags = S390_lowcore.machine_flags;
	cpu_lowcore->ftrace_func = S390_lowcore.ftrace_func;
	memcpy(cpu_lowcore->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
	eieio();

	while (sigp(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;

err_out:
	smp_free_lowcore(cpu);
	return -EIO;
}

static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < pcpus && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
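
/*
 * Illustration (editor's note, not part of the original file): booting
 * with the kernel parameter
 *
 *	possible_cpus=4
 *
 * limits the possible mask to cpus 0-3, which caps how many standby
 * cpus can later be configured and rescanned at runtime.
 */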

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 11 |
				1 << 10 | 1 << 9 | 1 << 6 | 1 << 5 |
				1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!cpu_stopped(cpu))
		cpu_relax();
	while (sigp_p(0, cpu, sigp_set_prefix) == sigp_busy)
		udelay(10);
	smp_free_lowcore(cpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
		cpu_relax();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
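
/*
 * Illustration (editor's sketch, not part of the original file): the
 * __cpu_disable()/__cpu_die()/cpu_die() triple above backs the generic
 * cpu hotplug interface, driven from user space via, e.g.:
 *
 *	echo 0 > /sys/devices/system/cpu/cpu2/online
 *	echo 1 > /sys/devices/system/cpu/cpu2/online
 */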

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");

	/* Reallocate current lowcore, but keep its contents. */
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	BUG_ON(!lowcore || !panic_stack || !async_stack);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
#ifdef CONFIG_64BIT
	if (vdso_alloc_per_cpu(smp_processor_id(), &S390_lowcore))
		BUG();
#endif
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	set_cpu_present(0, true);
	set_cpu_online(0, true);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
	__cpu_logical_map[0] = stap();
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
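
/*
 * Illustration (editor's sketch, not part of the original file): from
 * user space, a standby cpu is configured and later deconfigured via
 * sysfs, e.g. for cpu 2:
 *
 *	echo 1 > /sys/devices/system/cpu/cpu2/configure
 *	echo 0 > /sys/devices/system/cpu/cpu2/configure
 *
 * The store handler above rejects online cpus and cpu 0.
 */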
891 #endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_count = idle->idle_count;
	if (idle->idle_enter)
		idle_count++;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long now, idle_time, idle_enter;
	unsigned int sequence;

	idle = &per_cpu(s390_idle, dev->id);
	now = get_clock();
repeat:
	sequence = idle->sequence;
	smp_rmb();
	if (sequence & 1)
		goto repeat;
	idle_time = idle->idle_time;
	idle_enter = idle->idle_enter;
	if (idle_enter != 0ULL && idle_enter < now)
		idle_time += now - idle_enter;
	smp_rmb();
	if (idle->sequence != sequence)
		goto repeat;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
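
/*
 * Illustration (editor's note, not part of the original file): idle
 * time is accounted in TOD clock units, where bit 51 ticks once per
 * microsecond, so the ">> 12" above converts the value to microseconds.
 * From user space:
 *
 *	cat /sys/devices/system/cpu/cpu0/idle_time_us
 */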

static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	cpumask_copy(&newcpus, cpu_present_mask);
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpumask_andnot(&newcpus, cpu_present_mask, &newcpus);
	for_each_cpu(cpu, &newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			set_cpu_present(cpu, false);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpumask_empty(&newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
1088 #endif /* CONFIG_HOTPLUG_CPU */
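
/*
 * Illustration (editor's sketch, not part of the original file): the
 * rescan attribute is a class attribute, so any write to it triggers a
 * probe for newly available cpus, e.g.:
 *
 *	echo 1 > /sys/devices/system/cpu/rescan
 */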

static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);