[SPARC64]: Eliminate all usage of hard-coded trap globals.
[firefly-linux-kernel-4.4.55.git] / arch/sparc64/kernel/smp.c
index b137fd63f5e12691a87c24113d8581f1e8daf45c..8c245859d2126166bc530fc8eba75741253c8f4d 100644
@@ -38,8 +38,8 @@
 #include <asm/timer.h>
 #include <asm/starfire.h>
 #include <asm/tlb.h>
+#include <asm/sections.h>
 
-extern int linux_num_cpus;
 extern void calibrate_delay(void);
 
 /* Please don't make this stuff initdata!!!  --DaveM */
@@ -88,10 +88,6 @@ void __init smp_store_cpu_info(int id)
        cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                     "clock-frequency", 0);
 
-       cpu_data(id).pgcache_size               = 0;
-       cpu_data(id).pte_cache[0]               = NULL;
-       cpu_data(id).pte_cache[1]               = NULL;
-       cpu_data(id).pgd_cache                  = NULL;
        cpu_data(id).idle_volume                = 1;
 
        cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
@@ -122,25 +118,15 @@ static volatile unsigned long callin_flag = 0;
 
 extern void inherit_locked_prom_mappings(int save_p);
 
-static inline void cpu_setup_percpu_base(unsigned long cpu_id)
-{
-       __asm__ __volatile__("mov       %0, %%g5\n\t"
-                            "stxa      %0, [%1] %2\n\t"
-                            "membar    #Sync"
-                            : /* no outputs */
-                            : "r" (__per_cpu_offset(cpu_id)),
-                              "r" (TSB_REG), "i" (ASI_IMMU));
-}
-
 void __init smp_callin(void)
 {
        int cpuid = hard_smp_processor_id();
 
        inherit_locked_prom_mappings(0);
 
-       __flush_tlb_all();
+       __local_per_cpu_offset = __per_cpu_offset(cpuid);
 
-       cpu_setup_percpu_base(cpuid);
+       __flush_tlb_all();
 
        smp_setup_percpu_timer();
 
@@ -168,6 +154,9 @@ void __init smp_callin(void)
                rmb();
 
        cpu_set(cpuid, cpu_online_map);
+
+       /* idle thread is expected to have preempt disabled */
+       preempt_disable();
 }
 
 void cpu_panic(void)
@@ -333,7 +322,7 @@ static int __devinit smp_boot_one_cpu(unsigned int cpu)
 
        p = fork_idle(cpu);
        callin_flag = 0;
-       cpu_new_thread = p->thread_info;
+       cpu_new_thread = task_thread_info(p);
        cpu_set(cpu, cpu_callout_map);
 
        cpu_find_by_mid(cpu, &cpu_node);
@@ -660,8 +649,6 @@ void smp_call_function_client(int irq, struct pt_regs *regs)
 extern unsigned long xcall_flush_tlb_mm;
 extern unsigned long xcall_flush_tlb_pending;
 extern unsigned long xcall_flush_tlb_kernel_range;
-extern unsigned long xcall_flush_tlb_all_spitfire;
-extern unsigned long xcall_flush_tlb_all_cheetah;
 extern unsigned long xcall_report_regs;
 extern unsigned long xcall_receive_signal;
 
@@ -792,15 +779,6 @@ void smp_report_regs(void)
        smp_cross_call(&xcall_report_regs, 0, 0, 0);
 }
 
-void smp_flush_tlb_all(void)
-{
-       if (tlb_type == spitfire)
-               smp_cross_call(&xcall_flush_tlb_all_spitfire, 0, 0, 0);
-       else
-               smp_cross_call(&xcall_flush_tlb_all_cheetah, 0, 0, 0);
-       __flush_tlb_all();
-}
-
 /* We know that the window frames of the user have been flushed
  * to the stack before we get here because all callers of us
  * are flush_tlb_*() routines, and these run after flush_cache_*()
@@ -839,43 +817,29 @@ void smp_flush_tlb_all(void)
  *    questionable (in theory the big win for threads is the massive sharing of
  *    address space state across processors).
  */
+
+/* This currently is only used by the hugetlb arch pre-fault
+ * hook on UltraSPARC-III+ and later when changing the pagesize
+ * bits of the context register for an address space.
+ */
 void smp_flush_tlb_mm(struct mm_struct *mm)
 {
-        /*
-         * This code is called from two places, dup_mmap and exit_mmap. In the
-         * former case, we really need a flush. In the later case, the callers
-         * are single threaded exec_mmap (really need a flush), multithreaded
-         * exec_mmap case (do not need to flush, since the caller gets a new
-         * context via activate_mm), and all other callers of mmput() whence
-         * the flush can be optimized since the associated threads are dead and
-         * the mm is being torn down (__exit_mm and other mmput callers) or the
-         * owning thread is dissociating itself from the mm. The
-         * (atomic_read(&mm->mm_users) == 0) check ensures real work is done
-         * for single thread exec and dup_mmap cases. An alternate check might
-         * have been (current->mm != mm).
-         *                                              Kanoj Sarcar
-         */
-        if (atomic_read(&mm->mm_users) == 0)
-                return;
-
-       {
-               u32 ctx = CTX_HWBITS(mm->context);
-               int cpu = get_cpu();
+       u32 ctx = CTX_HWBITS(mm->context);
+       int cpu = get_cpu();
 
-               if (atomic_read(&mm->mm_users) == 1) {
-                       mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-                       goto local_flush_and_out;
-               }
+       if (atomic_read(&mm->mm_users) == 1) {
+               mm->cpu_vm_mask = cpumask_of_cpu(cpu);
+               goto local_flush_and_out;
+       }
 
-               smp_cross_call_masked(&xcall_flush_tlb_mm,
-                                     ctx, 0, 0,
-                                     mm->cpu_vm_mask);
+       smp_cross_call_masked(&xcall_flush_tlb_mm,
+                             ctx, 0, 0,
+                             mm->cpu_vm_mask);
 
-       local_flush_and_out:
-               __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
+local_flush_and_out:
+       __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
 
-               put_cpu();
-       }
+       put_cpu();
 }
 
 void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
@@ -883,34 +847,13 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
        u32 ctx = CTX_HWBITS(mm->context);
        int cpu = get_cpu();
 
-       if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
+       if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
                mm->cpu_vm_mask = cpumask_of_cpu(cpu);
-               goto local_flush_and_out;
-       } else {
-               /* This optimization is not valid.  Normally
-                * we will be holding the page_table_lock, but
-                * there is an exception which is copy_page_range()
-                * when forking.  The lock is held during the individual
-                * page table updates in the parent, but not at the
-                * top level, which is where we are invoked.
-                */
-               if (0) {
-                       cpumask_t this_cpu_mask = cpumask_of_cpu(cpu);
-
-                       /* By virtue of running under the mm->page_table_lock,
-                        * and mmu_context.h:switch_mm doing the same, the
-                        * following operation is safe.
-                        */
-                       if (cpus_equal(mm->cpu_vm_mask, this_cpu_mask))
-                               goto local_flush_and_out;
-               }
-       }
-
-       smp_cross_call_masked(&xcall_flush_tlb_pending,
-                             ctx, nr, (unsigned long) vaddrs,
-                             mm->cpu_vm_mask);
+       else
+               smp_cross_call_masked(&xcall_flush_tlb_pending,
+                                     ctx, nr, (unsigned long) vaddrs,
+                                     mm->cpu_vm_mask);
 
-local_flush_and_out:
        __flush_tlb_pending(ctx, nr, vaddrs);
 
        put_cpu();
@@ -1112,18 +1055,12 @@ int setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+/* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       int instance, mid;
-
-       instance = 0;
-       while (!cpu_find_by_instance(instance, NULL, &mid)) {
-               if (mid < max_cpus)
-                       cpu_set(mid, phys_cpu_present_map);
-               instance++;
-       }
-
        if (num_possible_cpus() > max_cpus) {
+               int instance, mid;
+
                instance = 0;
                while (!cpu_find_by_instance(instance, NULL, &mid)) {
                        if (mid != boot_cpu_id) {
@@ -1138,14 +1075,33 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
        smp_store_cpu_info(boot_cpu_id);
 }
 
+/* Set this up early so that things like the scheduler can init
+ * properly.  We use the same cpu mask for both the present and
+ * possible cpu map.
+ */
+void __init smp_setup_cpu_possible_map(void)
+{
+       int instance, mid;
+
+       instance = 0;
+       while (!cpu_find_by_instance(instance, NULL, &mid)) {
+               if (mid < NR_CPUS)
+                       cpu_set(mid, phys_cpu_present_map);
+               instance++;
+       }
+}
+
 void __devinit smp_prepare_boot_cpu(void)
 {
-       if (hard_smp_processor_id() >= NR_CPUS) {
+       int cpu = hard_smp_processor_id();
+
+       if (cpu >= NR_CPUS) {
                prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
                prom_halt();
        }
 
-       current_thread_info()->cpu = hard_smp_processor_id();
+       current_thread_info()->cpu = cpu;
+       __local_per_cpu_offset = __per_cpu_offset(cpu);
 
        cpu_set(smp_processor_id(), cpu_online_map);
        cpu_set(smp_processor_id(), phys_cpu_present_map);
@@ -1184,20 +1140,9 @@ void __init smp_cpus_done(unsigned int max_cpus)
               (bogosum/(5000/HZ))%100);
 }
 
-/* This needn't do anything as we do not sleep the cpu
- * inside of the idler task, so an interrupt is not needed
- * to get a clean fast response.
- *
- * XXX Reverify this assumption... -DaveM
- *
- * Addendum: We do want it to do something for the signal
- *           delivery case, we detect that by just seeing
- *           if we are trying to send this to an idler or not.
- */
 void smp_send_reschedule(int cpu)
 {
-       if (cpu_data(cpu).idle_volume == 0)
-               smp_receive_signal(cpu);
+       smp_receive_signal(cpu);
 }
 
 /* This is a nop because we capture all other cpus
@@ -1217,12 +1162,9 @@ void __init setup_per_cpu_areas(void)
 {
        unsigned long goal, size, i;
        char *ptr;
-       /* Created by linker magic */
-       extern char __per_cpu_start[], __per_cpu_end[];
 
        /* Copy section for each CPU (we discard the original) */
-       goal = ALIGN(__per_cpu_end - __per_cpu_start, PAGE_SIZE);
-
+       goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
 #ifdef CONFIG_MODULES
        if (goal < PERCPU_ENOUGH_ROOM)
                goal = PERCPU_ENOUGH_ROOM;
@@ -1231,31 +1173,10 @@ void __init setup_per_cpu_areas(void)
        for (size = 1UL; size < goal; size <<= 1UL)
                __per_cpu_shift++;
 
-       /* Make sure the resulting __per_cpu_base value
-        * will fit in the 43-bit sign extended IMMU
-        * TSB register.
-        */
-       ptr = __alloc_bootmem(size * NR_CPUS, PAGE_SIZE,
-                             (unsigned long) __per_cpu_start);
+       ptr = alloc_bootmem(size * NR_CPUS);
 
        __per_cpu_base = ptr - __per_cpu_start;
 
-       if ((__per_cpu_shift < PAGE_SHIFT) ||
-           (__per_cpu_base & ~PAGE_MASK) ||
-           (__per_cpu_base != (((long) __per_cpu_base << 20) >> 20))) {
-               prom_printf("PER_CPU: Invalid layout, "
-                           "ptr[%p] shift[%lx] base[%lx]\n",
-                           ptr, __per_cpu_shift, __per_cpu_base);
-               prom_halt();
-       }
-
        for (i = 0; i < NR_CPUS; i++, ptr += size)
                memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-       /* Finally, load in the boot cpu's base value.
-        * We abuse the IMMU TSB register for trap handler
-        * entry and exit loading of %g5.  That is why it
-        * has to be page aligned.
-        */
-       cpu_setup_percpu_base(hard_smp_processor_id());
 }
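
For reference, below is a minimal, self-contained userspace sketch (not kernel code; the names, the 137-byte template, and the NR_CPUS value are illustrative stand-ins) of the per-cpu layout that the reworked setup_per_cpu_areas() above builds: the per-cpu section is rounded up to a power-of-two size, replicated once per possible CPU, and each CPU's copy is then reached as base + (cpu << shift), which is roughly what __per_cpu_offset() expands to and what this patch now caches in __local_per_cpu_offset instead of abusing the IMMU TSB register.

/* percpu_sketch.c: illustrative stand-alone model of the per-cpu
 * area layout built by the reworked setup_per_cpu_areas().  All
 * names and sizes here are stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the linker-provided __per_cpu_start..__per_cpu_end
 * section that the kernel copies once per CPU.
 */
static char percpu_template[137] = "per-cpu template data";

static unsigned long percpu_shift;	/* models __per_cpu_shift */
static char *percpu_area;		/* start of the replicated copies */

/* Models __per_cpu_offset(cpu) = __per_cpu_base + (cpu << __per_cpu_shift),
 * simplified here to an offset into the replicated block.
 */
#define percpu_offset(cpu)	((unsigned long)(cpu) << percpu_shift)

int main(void)
{
	unsigned long goal = sizeof(percpu_template);
	unsigned long size;
	int cpu;

	/* Round the template size up to a power of two so that
	 * "cpu << shift" lands on each CPU's private copy.
	 */
	for (size = 1UL; size < goal; size <<= 1UL)
		percpu_shift++;

	percpu_area = malloc(size * NR_CPUS);
	if (!percpu_area)
		return 1;

	/* One private copy of the template per possible CPU. */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		memcpy(percpu_area + percpu_offset(cpu),
		       percpu_template, sizeof(percpu_template));

	/* After the patch, each CPU caches its own offset in
	 * __local_per_cpu_offset (set in smp_callin() and
	 * smp_prepare_boot_cpu()) rather than hiding it in the
	 * IMMU TSB register via cpu_setup_percpu_base().
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: offset %#lx -> \"%s\"\n",
		       cpu, percpu_offset(cpu),
		       percpu_area + percpu_offset(cpu));

	free(percpu_area);
	return 0;
}

Because the offset now lives in an ordinary per-cpu variable instead of the IMMU TSB register, the removed constraints above (page-aligned base, fitting the 43-bit sign-extended register, the prom_halt() sanity check) are no longer needed, which is why the allocation drops back to a plain alloc_bootmem() with SMP_CACHE_BYTES alignment.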