Merge branch 'for-3.9' of git://linux-nfs.org/~bfields/linux
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 84f4cbf652e58b3acfea846ad1cad9ddbee72d95..5f73f7018f502a6f5833781bdaed1aa6ce9a3453 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
                smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-       if (smp_ops.smp_prepare_cpus)
-               smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-       if (smp_ops.smp_secondary_init)
-               smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        if (smp_ops.smp_boot_secondary)
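
The platform_*() wrappers deleted above (and platform_cpu_die() in a later hunk) were one-line guards around optional per-platform hooks; after this patch their callers test smp_ops directly. For reference, those hooks are supplied by the platform through struct smp_operations (arch/arm/include/asm/smp.h in this era). A minimal sketch of such a registration, with hypothetical foo_*() helpers:

#include <linux/init.h>
#include <linux/smp.h>

static void __init foo_smp_prepare_cpus(unsigned int max_cpus)
{
	/* e.g. power up the secondary cores, program the holding pen */
}

static void __cpuinit foo_secondary_init(unsigned int cpu)
{
	/* runs on each newly booted secondary CPU */
}

static int __cpuinit foo_boot_secondary(unsigned int cpu,
					struct task_struct *idle)
{
	/* release CPU 'cpu' from reset; return 0 on success */
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void foo_cpu_die(unsigned int cpu)
{
	/* clean/disable this core's caches, then park it until rebooted */
	for (;;)
		asm volatile("wfi");
}
#endif

struct smp_operations foo_smp_ops __initdata = {
	.smp_prepare_cpus	= foo_smp_prepare_cpus,
	.smp_secondary_init	= foo_secondary_init,
	.smp_boot_secondary	= foo_boot_secondary,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_die		= foo_cpu_die,
#endif
};

A machine would then point its machine descriptor's .smp field at this, e.g. .smp = smp_ops(foo_smp_ops).
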
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
        return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-       if (smp_ops.cpu_die)
-               smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
        if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
         * actual CPU shutdown procedure is at least platform (if not
         * CPU) specific.
         */
-       platform_cpu_die(cpu);
+       if (smp_ops.cpu_die)
+               smp_ops.cpu_die(cpu);
 
        /*
         * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
        /*
         * Give the platform a chance to do its own initialisation.
         */
-       platform_secondary_init(cpu);
+       if (smp_ops.smp_secondary_init)
+               smp_ops.smp_secondary_init(cpu);
 
        notify_cpu_starting(cpu);
 
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                /*
                 * Initialise the present map, which describes the set of CPUs
                 * actually populated at the present time. A platform should
-                * re-initialize the map in platform_smp_prepare_cpus() if
-                * present != possible (e.g. physical hotplug).
+                * re-initialize the map in the platform's smp_prepare_cpus()
+                * if present != possible (e.g. physical hotplug).
                 */
                init_cpu_present(cpu_possible_mask);
 
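
To make the reworded comment above concrete: on a platform where present != possible (physically absent or hotpluggable cores), the smp_prepare_cpus() hook invoked in the next hunk can rebuild the map itself. A sketch, where foo_count_populated_cpus() is an invented placeholder:

static void __init foo_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu, npresent = foo_count_populated_cpus();
	struct cpumask present;

	cpumask_clear(&present);
	for (cpu = 0; cpu < npresent; cpu++)
		cpumask_set_cpu(cpu, &present);

	init_cpu_present(&present);	/* replace the possible-map default */
}
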
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 * Initialise the SCU if there is more than one CPU
                 * and let them know where to start.
                 */
-               platform_smp_prepare_cpus(max_cpus);
+               if (smp_ops.smp_prepare_cpus)
+                       smp_ops.smp_prepare_cpus(max_cpus);
        }
 }
 
@@ -416,7 +401,8 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int);
 
 void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
 {
-       smp_cross_call = fn;
+       if (!smp_cross_call)
+               smp_cross_call = fn;
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
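
With the new guard, set_smp_cross_call() is first-registration-wins: once the platform has installed its IPI trigger, a later call is ignored rather than silently replacing it. Registration itself is unchanged; a GIC-based platform of this era would do roughly the following from its smp_init_cpus() hook (foo_*() names hypothetical; gic_raise_softirq() is the real GIC helper, declared in <asm/hardware/gic.h> before the irqchip consolidation moved it):

static void __init foo_smp_init_cpus(void)
{
	unsigned int i, ncores = foo_get_core_count();

	for (i = 0; i < ncores; i++)
		set_cpu_possible(i, true);

	set_smp_cross_call(gic_raise_softirq);
}
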
@@ -475,14 +461,8 @@ u64 smp_irq_stat_cpu(unsigned int cpu)
  */
 static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 
-static void ipi_timer(void)
-{
-       struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       evt->event_handler(evt);
-}
-
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
-static void smp_timer_broadcast(const struct cpumask *mask)
+void tick_broadcast(const struct cpumask *mask)
 {
        smp_cross_call(mask, IPI_TIMER);
 }
@@ -530,7 +510,6 @@ static void __cpuinit percpu_timer_setup(void)
        struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
 
        evt->cpumask = cpumask_of(cpu);
-       evt->broadcast = smp_timer_broadcast;
 
        if (!lt_ops || lt_ops->setup(evt))
                broadcast_timer_setup(evt);
@@ -596,11 +575,13 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
        case IPI_WAKEUP:
                break;
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        case IPI_TIMER:
                irq_enter();
-               ipi_timer();
+               tick_receive_broadcast();
                irq_exit();
                break;
+#endif
 
        case IPI_RESCHEDULE:
                scheduler_ipi();
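
For context on the IPI_TIMER hunk: the deleted ipi_timer() fetched this CPU's clockevent and called its handler by hand. tick_receive_broadcast() is the generic replacement added for 3.9 in kernel/time/tick-broadcast.c, which does essentially the same thing for this CPU's tick device, roughly (paraphrased, not verbatim kernel source):

int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;
	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}

Its counterpart is the smp_timer_broadcast to tick_broadcast rename above: with CONFIG_GENERIC_CLOCKEVENTS_BROADCAST the core installs the arch-provided tick_broadcast() as the default ->broadcast method, which is why percpu_timer_setup() no longer assigns evt->broadcast by hand.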