KVM: use a more sensible error number when debugfs directory creation fails
[firefly-linux-kernel-4.4.55.git] / kernel / rcutree.c
index d8534308fd052f9a9446929f170a4ceb9d6e30b1..35380019f0fc101df423dab03d3b418ec2291eac 100644 (file)
@@ -799,6 +799,16 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
                rdp->offline_fqs++;
                return 1;
        }
+
+       /*
+        * There is a possibility that a CPU in adaptive-ticks state
+        * might run in the kernel with the scheduling-clock tick disabled
+        * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
+        * force the CPU to restart the scheduling-clock tick if this
+        * CPU is in this state.
+        */
+       rcu_kick_nohz_cpu(rdp->cpu);
+
        return 0;
 }
 
@@ -1441,9 +1451,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
 #ifdef CONFIG_PROVE_RCU_DELAY
-               if ((prandom_u32() % (rcu_num_nodes * 8)) == 0 &&
+               if ((prandom_u32() % (rcu_num_nodes + 1)) == 0 &&
                    system_state == SYSTEM_RUNNING)
-                       schedule_timeout_uninterruptible(2);
+                       udelay(200);
 #endif /* #ifdef CONFIG_PROVE_RCU_DELAY */
                cond_resched();
        }
@@ -1603,6 +1613,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
        }
 }
 
+static void rsp_wakeup(struct irq_work *work)
+{
+       struct rcu_state *rsp = container_of(work, struct rcu_state, wakeup_work);
+
+       /* Wake up rcu_gp_kthread() to start the grace period. */
+       wake_up(&rsp->gp_wq);
+}
+
 /*
  * Start a new RCU grace period if warranted, re-initializing the hierarchy
  * in preparation for detecting the next grace period.  The caller must hold
@@ -1627,8 +1645,12 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
        }
        rsp->gp_flags = RCU_GP_FLAG_INIT;
 
-       /* Wake up rcu_gp_kthread() to start the grace period. */
-       wake_up(&rsp->gp_wq);
+       /*
+        * We can't do wakeups while holding the rnp->lock, as that
+        * could cause possible deadlocks with the rq->lock. Defer
+        * the wakeup to interrupt context.
+        */
+       irq_work_queue(&rsp->wakeup_work);
 }
 
 /*
@@ -1820,7 +1842,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                          struct rcu_node *rnp, struct rcu_data *rdp)
 {
        /* No-CBs CPUs do not have orphanable callbacks. */
-       if (is_nocb_cpu(rdp->cpu))
+       if (rcu_is_nocb_cpu(rdp->cpu))
                return;
 
        /*
@@ -2892,10 +2914,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
         * corresponding CPU's preceding callbacks have been invoked.
         */
        for_each_possible_cpu(cpu) {
-               if (!cpu_online(cpu) && !is_nocb_cpu(cpu))
+               if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
                        continue;
                rdp = per_cpu_ptr(rsp->rda, cpu);
-               if (is_nocb_cpu(cpu)) {
+               if (rcu_is_nocb_cpu(cpu)) {
                        _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
                                           rsp->n_barrier_done);
                        atomic_inc(&rsp->barrier_cpu_count);
@@ -3225,6 +3247,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        rsp->rda = rda;
        init_waitqueue_head(&rsp->gp_wq);
+       init_irq_work(&rsp->wakeup_work, rsp_wakeup);
        rnp = rsp->level[rcu_num_lvls - 1];
        for_each_possible_cpu(i) {
                while (i > rnp->grphi)