nohz: New APIs to re-evaluate the tick on full dynticks CPUs
author	Frederic Weisbecker <fweisbec@gmail.com>
Wed, 17 Apr 2013 22:15:40 +0000 (00:15 +0200)
committer	Frederic Weisbecker <fweisbec@gmail.com>
Thu, 18 Apr 2013 16:53:34 +0000 (18:53 +0200)
Provide two new helpers to notify the full dynticks CPUs about internal
system changes that may require them to re-evaluate the state of their
tick. Practical examples include posix cpu timers, the perf tick and
the sched clock tick.

For now the notifying handler, invoked through IPIs, is a stub that
will be filled in once the tick stop/restart infrastructure lands.
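
As a rough illustration of the intended use (a sketch only: the two
caller functions below are hypothetical and not part of this patch), a
subsystem would call tick_nohz_full_kick() after arming a per-CPU
dependency on the tick, and tick_nohz_full_kick_all() after a
system-wide change:

#include <linux/tick.h>

/*
 * Hypothetical caller: this CPU just armed some state (e.g. a posix
 * cpu timer) that needs the tick to be serviced again.
 */
static void example_arm_local_tick_dependency(void)
{
	/* ... update the per-CPU state the tick must now handle ... */

	/*
	 * If the current CPU is a full dynticks CPU, queue the irq_work
	 * that re-evaluates its need for the tick (a stub until the
	 * stop/restart infrastructure lands).
	 */
	tick_nohz_full_kick();
}

/*
 * Hypothetical caller: a global change that every full dynticks CPU
 * must take into account.
 */
static void example_global_tick_dependency(void)
{
	/* ... update the global state ... */

	/* IPI every CPU in the nohz_full mask to re-evaluate its tick. */
	tick_nohz_full_kick_all();
}

Note that the include/linux/tick.h hunk below also adds no-op stubs for
the !CONFIG_NO_HZ_FULL case, so such callers do not need to wrap these
calls in #ifdefs.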

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Gilad Ben Yossef <gilad@benyossef.com>
Cc: Hakan Akkan <hakanakkan@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Kevin Hilman <khilman@linaro.org>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
include/linux/tick.h
kernel/time/Kconfig
kernel/time/tick-sched.c

diff --git a/include/linux/tick.h b/include/linux/tick.h
index b4e3b0c9639e7520aaf8e9a9b4e6e39f9722bf33..c2dcfb18f65b824579556d589a9dd9994081fa2c 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -159,8 +159,12 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
 
 #ifdef CONFIG_NO_HZ_FULL
 extern int tick_nohz_full_cpu(int cpu);
+extern void tick_nohz_full_kick(void);
+extern void tick_nohz_full_kick_all(void);
 #else
 static inline int tick_nohz_full_cpu(int cpu) { return 0; }
+static inline void tick_nohz_full_kick(void) { }
+static inline void tick_nohz_full_kick_all(void) { }
 #endif
 
 
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 358d601a4fec8881f3a4089f4fc051710b36c363..fbb4c7eb92a0de40816bbcac6a119f9cf80269f9 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -111,6 +111,7 @@ config NO_HZ_FULL
        select RCU_USER_QS
        select RCU_NOCB_CPU
        select CONTEXT_TRACKING_FORCE
+       select IRQ_WORK
        help
         Adaptively try to shutdown the tick whenever possible, even when
         the CPU is running tasks. Typically this requires running a single
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 369b5769fc9722e5e751c45cd3941dadf6a7cb91..2bcad5b904d88e92df1f12982da2fdb6bfd865a9 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -147,6 +147,57 @@ static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
 static cpumask_var_t nohz_full_mask;
 bool have_nohz_full_mask;
 
+/*
+ * Re-evaluate the need for the tick on the current CPU
+ * and restart it if necessary.
+ */
+static void tick_nohz_full_check(void)
+{
+       /*
+        * STUB for now, will be filled with the full tick stop/restart
+        * infrastructure patches
+        */
+}
+
+static void nohz_full_kick_work_func(struct irq_work *work)
+{
+       tick_nohz_full_check();
+}
+
+static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
+       .func = nohz_full_kick_work_func,
+};
+
+/*
+ * Kick the current CPU if it's full dynticks in order to force it to
+ * re-evaluate its dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick(void)
+{
+       if (tick_nohz_full_cpu(smp_processor_id()))
+               irq_work_queue(&__get_cpu_var(nohz_full_kick_work));
+}
+
+static void nohz_full_kick_ipi(void *info)
+{
+       tick_nohz_full_check();
+}
+
+/*
+ * Kick all full dynticks CPUs in order to force these to re-evaluate
+ * their dependency on the tick and restart it if necessary.
+ */
+void tick_nohz_full_kick_all(void)
+{
+       if (!have_nohz_full_mask)
+               return;
+
+       preempt_disable();
+       smp_call_function_many(nohz_full_mask,
+                              nohz_full_kick_ipi, NULL, false);
+       preempt_enable();
+}
+
 int tick_nohz_full_cpu(int cpu)
 {
        if (!have_nohz_full_mask)