From: Paul E. McKenney
Date: Tue, 16 Sep 2014 17:08:34 +0000 (-0700)
Subject: Merge branches 'doc.2014.09.07a', 'fixes.2014.09.10a', 'nocb-nohz.2014.09.16b' and...
X-Git-Tag: firefly_0821_release~176^2~3065^2~1^2~16
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=e98d06dd6cd791b5138b0fc6c14a9c0b4d1f2e72;p=firefly-linux-kernel-4.4.55.git

Merge branches 'doc.2014.09.07a', 'fixes.2014.09.10a', 'nocb-nohz.2014.09.16b' and 'torture.2014.09.07a' into HEAD

doc.2014.09.07a: Documentation updates.
fixes.2014.09.10a: Miscellaneous fixes.
nocb-nohz.2014.09.16b: No-CBs CPUs and NO_HZ_FULL updates.
torture.2014.09.07a: Torture-test updates.
---

e98d06dd6cd791b5138b0fc6c14a9c0b4d1f2e72
diff --cc kernel/rcu/tree.h
index 6a86eb7bac45,e33562f2a655,a9a226d2e80a,6a86eb7bac45..ffedcb9d42dc
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@@@@ -589,10 -594,10 -590,14 -589,10 +595,14 @@@@@@ static bool __call_rcu_nocb(struct rcu_
    static bool rcu_nocb_adopt_orphan_cbs(struct rcu_state *rsp,
    					  struct rcu_data *rdp,
    					  unsigned long flags);
- --static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
+ ++static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
    static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
    static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
-- -static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
++ +static void rcu_spawn_all_nocb_kthreads(int cpu);
++ +static void __init rcu_spawn_nocb_kthreads(void);
++ +#ifdef CONFIG_RCU_NOCB_CPU
++ +static void __init rcu_organize_nocb_kthreads(struct rcu_state *rsp);
++ +#endif /* #ifdef CONFIG_RCU_NOCB_CPU */
    static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
    static bool init_nocb_callback_list(struct rcu_data *rdp);
    static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
diff --cc kernel/rcu/tree_plugin.h
index a7997e272564,bbb0a0cd091b,c554accfc5f5,a7997e272564..59318ea32bc8
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@@@@ -2444,13 -2453,16 -2424,78 -2444,13 +2433,81 @@@@@@ static int rcu_nocb_need_deferred_wakeu
    /* Do a deferred wakeup of rcu_nocb_kthread(). */
    static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
    {
+ ++	int ndw;
+ ++
    	if (!rcu_nocb_need_deferred_wakeup(rdp))
    		return;
- --	ACCESS_ONCE(rdp->nocb_defer_wakeup) = false;
- --	wake_nocb_leader(rdp, false);
- --	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWakeEmpty"));
+ ++	ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
+ ++	ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+ ++	wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
+ ++	trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 + +}
 + +
++ +void __init rcu_init_nohz(void)
++ +{
++ +	int cpu;
++ +	bool need_rcu_nocb_mask = true;
++ +	struct rcu_state *rsp;
++ +
++ +#ifdef CONFIG_RCU_NOCB_CPU_NONE
++ +	need_rcu_nocb_mask = false;
++ +#endif /* #ifndef CONFIG_RCU_NOCB_CPU_NONE */
++ +
++ +#if defined(CONFIG_NO_HZ_FULL)
++ +	if (tick_nohz_full_running && cpumask_weight(tick_nohz_full_mask))
++ +		need_rcu_nocb_mask = true;
++ +#endif /* #if defined(CONFIG_NO_HZ_FULL) */
++ +
++ +	if (!have_rcu_nocb_mask && need_rcu_nocb_mask) {
++ +		if (!zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL)) {
++ +			pr_info("rcu_nocb_mask allocation failed, callback offloading disabled.\n");
++ +			return;
++ +		}
++ +		have_rcu_nocb_mask = true;
++ +	}
++ +	if (!have_rcu_nocb_mask)
++ +		return;
++ +
++ +#ifdef CONFIG_RCU_NOCB_CPU_ZERO
++ +	pr_info("\tOffload RCU callbacks from CPU 0\n");
++ +	cpumask_set_cpu(0, rcu_nocb_mask);
++ +#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ZERO */
++ +#ifdef CONFIG_RCU_NOCB_CPU_ALL
++ +	pr_info("\tOffload RCU callbacks from all CPUs\n");
++ +	cpumask_copy(rcu_nocb_mask, cpu_possible_mask);
++ +#endif /* #ifdef CONFIG_RCU_NOCB_CPU_ALL */
++ +#if defined(CONFIG_NO_HZ_FULL)
++ +	if (tick_nohz_full_running)
++ +		cpumask_or(rcu_nocb_mask, rcu_nocb_mask, tick_nohz_full_mask);
++ +#endif /* #if defined(CONFIG_NO_HZ_FULL) */
++ +
++ +	if (!cpumask_subset(rcu_nocb_mask, cpu_possible_mask)) {
++ +		pr_info("\tNote: kernel parameter 'rcu_nocbs=' contains nonexistent CPUs.\n");
++ +		cpumask_and(rcu_nocb_mask, cpu_possible_mask,
++ +			    rcu_nocb_mask);
++ +	}
++ +	cpulist_scnprintf(nocb_buf, sizeof(nocb_buf), rcu_nocb_mask);
++ +	pr_info("\tOffload RCU callbacks from CPUs: %s.\n", nocb_buf);
++ +	if (rcu_nocb_poll)
++ +		pr_info("\tPoll for callbacks from no-CBs CPUs.\n");
++ +
++ +	for_each_rcu_flavor(rsp) {
++ +		for_each_cpu(cpu, rcu_nocb_mask) {
++ +			struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
++ +
++ +			/*
++ +			 * If there are early callbacks, they will need
++ +			 * to be moved to the nocb lists.
++ +			 */
++ +			WARN_ON_ONCE(rdp->nxttail[RCU_NEXT_TAIL] !=
++ +				     &rdp->nxtlist &&
++ +				     rdp->nxttail[RCU_NEXT_TAIL] != NULL);
++ +			init_nocb_callback_list(rdp);
++ +		}
++ +		rcu_organize_nocb_kthreads(rsp);
++ +	}
 + +}
 + +
    /* Initialize per-rcu_data variables for no-CBs CPUs. */
    static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
    {