From: Linus Torvalds Date: Tue, 23 Jun 2015 01:57:44 +0000 (-0700) Subject: Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel... X-Git-Tag: firefly_0821_release~176^2~1622 X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=43224b96af3154cedd7220f7b90094905f07ac78;p=firefly-linux-kernel-4.4.55.git Merge branch 'timers-core-for-linus' of git://git./linux/kernel/git/tip/tip Pull timer updates from Thomas Gleixner: "A rather largish update for everything time and timer related: - Cache footprint optimizations for both hrtimers and timer wheel - Lower the NOHZ impact on systems which have NOHZ or timer migration disabled at runtime. - Optimize run time overhead of hrtimer interrupt by making the clock offset updates smarter - hrtimer cleanups and removal of restrictions to tackle some problems in sched/perf - Some more leap second tweaks - Another round of changes addressing the 2038 problem - First step to change the internals of clock event devices by introducing the necessary infrastructure - Allow constant folding for usecs/msecs_to_jiffies() - The usual pile of clockevent/clocksource driver updates The hrtimer changes contain updates to sched, perf and x86 as they depend on them plus changes all over the tree to cleanup API changes and redundant code, which got copied all over the place. 
The y2038 changes touch s390 to remove the last non 2038 safe code related to boot/persistent clock" * 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits) clocksource: Increase dependencies of timer-stm32 to limit build wreckage timer: Minimize nohz off overhead timer: Reduce timer migration overhead if disabled timer: Stats: Simplify the flags handling timer: Replace timer base by a cpu index timer: Use hlist for the timer wheel hash buckets timer: Remove FIFO "guarantee" timers: Sanitize catchup_timer_jiffies() usage hrtimer: Allow hrtimer::function() to free the timer seqcount: Introduce raw_write_seqcount_barrier() seqcount: Rename write_seqcount_barrier() hrtimer: Fix hrtimer_is_queued() hole hrtimer: Remove HRTIMER_STATE_MIGRATE selftest: Timers: Avoid signal deadlock in leap-a-day timekeeping: Copy the shadow-timekeeper over the real timekeeper last clockevents: Check state instead of mode in suspend/resume path selftests: timers: Add leap-second timer edge testing to leap-a-day.c ntp: Do leapsecond adjustment in adjtimex read path time: Prevent early expiry of hrtimers[CLOCK_REALTIME] at the leap second edge ntp: Introduce and use SECS_PER_DAY macro instead of 86400 ... 
--- 43224b96af3154cedd7220f7b90094905f07ac78 diff --cc include/linux/rcupdate.h index 03a899aabd17,0627a447c589..33a056bb886f --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@@ -1099,13 -1155,13 +1101,13 @@@ static inline notrace void rcu_read_unl #define kfree_rcu(ptr, rcu_head) \ __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) -#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) +#ifdef CONFIG_TINY_RCU - static inline int rcu_needs_cpu(unsigned long *delta_jiffies) + static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt) { - *delta_jiffies = ULONG_MAX; + *nextevt = KTIME_MAX; return 0; } -#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */ +#endif /* #ifdef CONFIG_TINY_RCU */ #if defined(CONFIG_RCU_NOCB_CPU_ALL) static inline bool rcu_is_nocb_cpu(int cpu) { return true; } diff --cc include/linux/rcutree.h index 3fa4a43ab415,db2e31beaae7..456879143f89 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@@ -31,7 -31,9 +31,7 @@@ #define __LINUX_RCUTREE_H void rcu_note_context_switch(void); - int rcu_needs_cpu(unsigned long *delta_jiffies); -#ifndef CONFIG_RCU_NOCB_CPU_ALL + int rcu_needs_cpu(u64 basem, u64 *nextevt); -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ void rcu_cpu_stall_reset(void); /* diff --cc kernel/rcu/tree_plugin.h index 32664347091a,d72fa24f2312..013485fb2b06 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@@ -1375,12 -1367,13 +1375,12 @@@ static void rcu_prepare_kthreads(int cp * Because we not have RCU_FAST_NO_HZ, just check whether this CPU needs * any flavor of RCU. */ - int rcu_needs_cpu(unsigned long *delta_jiffies) -#ifndef CONFIG_RCU_NOCB_CPU_ALL + int rcu_needs_cpu(u64 basemono, u64 *nextevt) { - *delta_jiffies = ULONG_MAX; + *nextevt = KTIME_MAX; - return rcu_cpu_has_callbacks(NULL); + return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) + ? 
0 : rcu_cpu_has_callbacks(NULL); } -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up @@@ -1487,15 -1478,12 +1485,16 @@@ static bool __maybe_unused rcu_try_adva * * The caller must have disabled interrupts. */ - int rcu_needs_cpu(unsigned long *dj) -#ifndef CONFIG_RCU_NOCB_CPU_ALL + int rcu_needs_cpu(u64 basemono, u64 *nextevt) { struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks); + unsigned long dj; + if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) { - *dj = ULONG_MAX; ++ *nextevt = KTIME_MAX; + return 0; + } + /* Snapshot to detect later posting of non-lazy callback. */ rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted; @@@ -1515,13 -1503,15 +1514,14 @@@ /* Request timer delay depending on laziness, and round. */ if (!rdtp->all_lazy) { - *dj = round_up(rcu_idle_gp_delay + jiffies, + dj = round_up(rcu_idle_gp_delay + jiffies, rcu_idle_gp_delay) - jiffies; } else { - *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; + dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies; } + *nextevt = basemono + dj * TICK_NSEC; return 0; } -#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */ /* * Prepare a CPU for idle from an RCU perspective. The first major task