2 * linux/kernel/time/timekeeping.c
4 * Kernel timekeeping code and accessor functions
6 * This code was moved from linux/kernel/timer.c.
7 * Please see that file for copyright and history logs.
11 #include <linux/timekeeper_internal.h>
12 #include <linux/module.h>
13 #include <linux/interrupt.h>
14 #include <linux/percpu.h>
15 #include <linux/init.h>
17 #include <linux/sched.h>
18 #include <linux/syscore_ops.h>
19 #include <linux/clocksource.h>
20 #include <linux/jiffies.h>
21 #include <linux/time.h>
22 #include <linux/tick.h>
23 #include <linux/stop_machine.h>
24 #include <linux/pvclock_gtod.h>
25 #include <linux/compiler.h>
27 #include "tick-internal.h"
28 #include "ntp_internal.h"
29 #include "timekeeping_internal.h"
31 #define TK_CLEAR_NTP (1 << 0)
32 #define TK_MIRROR (1 << 1)
33 #define TK_CLOCK_WAS_SET (1 << 2)
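/*
 * Action flags passed to timekeeping_update():
 *  TK_CLEAR_NTP     - the clock was stepped, so clear NTP state
 *  TK_MIRROR        - copy the updated timekeeper into shadow_timekeeper
 *  TK_CLOCK_WAS_SET - tell listeners (pvclock notifiers, hrtimers) that
 *                     the clock was set
 */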
35 static struct timekeeper timekeeper;
36 static DEFINE_RAW_SPINLOCK(timekeeper_lock);
37 static seqcount_t timekeeper_seq;
38 static struct timekeeper shadow_timekeeper;
40 /* Flag indicating whether timekeeping is suspended */
41 int __read_mostly timekeeping_suspended;
43 /* Flag indicating whether this platform has a persistent clock */
44 bool __read_mostly persistent_clock_exist = false;
46 static inline void tk_normalize_xtime(struct timekeeper *tk)
48 while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
49 tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
54 static inline struct timespec64 tk_xtime(struct timekeeper *tk)
58 ts.tv_sec = tk->xtime_sec;
59 ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
63 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
65 tk->xtime_sec = ts->tv_sec;
66 tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
69 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
71 tk->xtime_sec += ts->tv_sec;
72 tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
73 tk_normalize_xtime(tk);
76 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
78 struct timespec64 tmp;
81 * Verify consistency of: offset_real = -wall_to_monotonic
82 * before modifying anything
84 set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
85 -tk->wall_to_monotonic.tv_nsec);
86 WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64);
87 tk->wall_to_monotonic = wtm;
88 set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
89 tk->offs_real = timespec64_to_ktime(tmp);
90 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
93 static void tk_set_sleep_time(struct timekeeper *tk, struct timespec64 t)
95 /* Verify consistency before modifying */
96 WARN_ON_ONCE(tk->offs_boot.tv64 != timespec64_to_ktime(tk->total_sleep_time).tv64);
98 tk->total_sleep_time = t;
99 tk->offs_boot = timespec64_to_ktime(t);
103 * tk_setup_internals - Set up internals to use clocksource clock.
105 * @tk: The target timekeeper to setup.
106 * @clock: Pointer to clocksource.
108 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
109 * pair and interval request.
111 * Unless you're the timekeeping code, you should not be using this!
113 static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
116 u64 tmp, ntpinterval;
117 struct clocksource *old_clock;
119 old_clock = tk->clock;
121 tk->cycle_last = clock->cycle_last = clock->read(clock);
123 /* Do the ns -> cycle conversion first, using original mult */
124 tmp = NTP_INTERVAL_LENGTH;
125 tmp <<= clock->shift;
127 tmp += clock->mult/2;
128 do_div(tmp, clock->mult);
132 interval = (cycle_t) tmp;
133 tk->cycle_interval = interval;
135 /* Go back from cycles -> shifted ns */
136 tk->xtime_interval = (u64) interval * clock->mult;
137 tk->xtime_remainder = ntpinterval - tk->xtime_interval;
139 ((u64) interval * clock->mult) >> clock->shift;
141 /* if changing clocks, convert xtime_nsec shift units */
143 int shift_change = clock->shift - old_clock->shift;
144 if (shift_change < 0)
145 tk->xtime_nsec >>= -shift_change;
147 tk->xtime_nsec <<= shift_change;
149 tk->shift = clock->shift;
152 tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
155 * The timekeeper keeps its own mult values for the currently
156 * active clocksource. These values will be adjusted via NTP
157 * to counteract clock drifting.
159 tk->mult = clock->mult;
162 /* Timekeeper helper functions. */
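/*
 * Reminder of the conversion these helpers rely on: a clocksource cycle
 * delta becomes nanoseconds via
 *	ns = (cycle_delta * mult) >> shift;
 * NTP steering tweaks tk->mult rather than clock->mult, so the
 * clocksource's own calibration is never touched.
 */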
164 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
165 static u32 default_arch_gettimeoffset(void) { return 0; }
166 u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset;
168 static inline u32 arch_gettimeoffset(void) { return 0; }
171 static inline s64 timekeeping_get_ns(struct timekeeper *tk)
173 cycle_t cycle_now, cycle_delta;
174 struct clocksource *clock;
177 /* read clocksource: */
179 cycle_now = clock->read(clock);
181 /* calculate the delta since the last update_wall_time: */
182 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
184 nsec = cycle_delta * tk->mult + tk->xtime_nsec;
187 /* If arch requires, add in arch_gettimeoffset() */
188 return nsec + arch_gettimeoffset();
191 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
193 cycle_t cycle_now, cycle_delta;
194 struct clocksource *clock;
197 /* read clocksource: */
199 cycle_now = clock->read(clock);
201 /* calculate the delta since the last update_wall_time: */
202 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
204 /* convert delta to nanoseconds. */
205 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
207 /* If arch requires, add in arch_gettimeoffset() */
208 return nsec + arch_gettimeoffset();
211 #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
213 static inline void update_vsyscall(struct timekeeper *tk)
218 update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
221 static inline void old_vsyscall_fixup(struct timekeeper *tk)
226 * Store only full nanoseconds into xtime_nsec after rounding
227 * it up and add the remainder to the error difference.
228 * XXX - This is necessary to avoid small 1ns inconsistencies caused
229 * by truncating the remainder in vsyscalls. However, it causes
230 * additional work to be done in timekeeping_adjust(). Once
231 * the vsyscall implementations are converted to use xtime_nsec
232 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
233 * users are removed, this can be killed.
235 remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
236 tk->xtime_nsec -= remainder;
237 tk->xtime_nsec += 1ULL << tk->shift;
238 tk->ntp_error += remainder << tk->ntp_error_shift;
239 tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift;
242 #define old_vsyscall_fixup(tk)
245 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
247 static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
249 raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
253 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
255 int pvclock_gtod_register_notifier(struct notifier_block *nb)
257 struct timekeeper *tk = &timekeeper;
261 raw_spin_lock_irqsave(&timekeeper_lock, flags);
262 ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
263 update_pvclock_gtod(tk, true);
264 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
268 EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
271 * pvclock_gtod_unregister_notifier - unregister a pvclock
272 * timedata update listener
274 int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
279 raw_spin_lock_irqsave(&timekeeper_lock, flags);
280 ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
281 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
285 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
287 /* must hold timekeeper_lock */
288 static void timekeeping_update(struct timekeeper *tk, unsigned int action)
290 if (action & TK_CLEAR_NTP) {
295 update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
297 if (action & TK_MIRROR)
298 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
302 * timekeeping_forward_now - update clock to the current time
304 * Forward the current clock to update its state since the last call to
305 * update_wall_time(). This is useful before significant clock changes,
306 * as it avoids having to deal with this time offset explicitly.
308 static void timekeeping_forward_now(struct timekeeper *tk)
310 cycle_t cycle_now, cycle_delta;
311 struct clocksource *clock;
315 cycle_now = clock->read(clock);
316 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
317 tk->cycle_last = clock->cycle_last = cycle_now;
319 tk->xtime_nsec += cycle_delta * tk->mult;
321 /* If arch requires, add in arch_gettimeoffset() */
322 tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
324 tk_normalize_xtime(tk);
326 nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
327 timespec64_add_ns(&tk->raw_time, nsec);
331 * __getnstimeofday64 - Returns the time of day in a timespec64.
332 * @ts: pointer to the timespec to be set
334 * Updates the time of day in the timespec.
335 * Returns 0 on success, or -ve when suspended (timespec will be undefined).
337 int __getnstimeofday64(struct timespec64 *ts)
339 struct timekeeper *tk = &timekeeper;
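/*
 * Lockless read: sample the timekeeper inside the seqcount loop and
 * retry if a writer (update_wall_time() etc.) modified it meanwhile.
 * All readers below follow the same pattern.
 */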
344 seq = read_seqcount_begin(&timekeeper_seq);
346 ts->tv_sec = tk->xtime_sec;
347 nsecs = timekeeping_get_ns(tk);
349 } while (read_seqcount_retry(&timekeeper_seq, seq));
352 timespec64_add_ns(ts, nsecs);
355 * Do not bail out early, in case there were callers still using
356 * the value, even in the face of the WARN_ON.
358 if (unlikely(timekeeping_suspended))
362 EXPORT_SYMBOL(__getnstimeofday64);
365 * getnstimeofday64 - Returns the time of day in a timespec64.
366 * @ts: pointer to the timespec to be set
368 * Returns the time of day in a timespec (WARN if suspended).
370 void getnstimeofday64(struct timespec64 *ts)
372 WARN_ON(__getnstimeofday64(ts));
374 EXPORT_SYMBOL(getnstimeofday64);
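/*
 * Illustrative use (not part of this file): a caller that needs the
 * current wall time simply does
 *
 *	struct timespec64 ts;
 *	getnstimeofday64(&ts);
 *
 * and gets a consistent snapshot even if an update races with the read.
 */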
376 ktime_t ktime_get(void)
378 struct timekeeper *tk = &timekeeper;
382 WARN_ON(timekeeping_suspended);
385 seq = read_seqcount_begin(&timekeeper_seq);
386 secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
387 nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
389 } while (read_seqcount_retry(&timekeeper_seq, seq));
391 return ktime_set(secs, nsecs);
393 EXPORT_SYMBOL_GPL(ktime_get);
396 * ktime_get_ts64 - get the monotonic clock in timespec64 format
397 * @ts: pointer to timespec variable
399 * The function calculates the monotonic clock from the realtime
400 * clock and the wall_to_monotonic offset and stores the result
401 * in normalized timespec format in the variable pointed to by @ts.
403 void ktime_get_ts64(struct timespec64 *ts)
405 struct timekeeper *tk = &timekeeper;
406 struct timespec64 tomono;
410 WARN_ON(timekeeping_suspended);
413 seq = read_seqcount_begin(&timekeeper_seq);
414 ts->tv_sec = tk->xtime_sec;
415 nsec = timekeeping_get_ns(tk);
416 tomono = tk->wall_to_monotonic;
418 } while (read_seqcount_retry(&timekeeper_seq, seq));
420 ts->tv_sec += tomono.tv_sec;
422 timespec64_add_ns(ts, nsec + tomono.tv_nsec);
424 EXPORT_SYMBOL_GPL(ktime_get_ts64);
428 * timekeeping_clocktai - Returns the TAI time of day in a timespec
429 * @ts: pointer to the timespec to be set
431 * Returns the time of day in a timespec.
433 void timekeeping_clocktai(struct timespec *ts)
435 struct timekeeper *tk = &timekeeper;
436 struct timespec64 ts64;
440 WARN_ON(timekeeping_suspended);
443 seq = read_seqcount_begin(&timekeeper_seq);
445 ts64.tv_sec = tk->xtime_sec + tk->tai_offset;
446 nsecs = timekeeping_get_ns(tk);
448 } while (read_seqcount_retry(&timekeeper_seq, seq));
451 timespec64_add_ns(&ts64, nsecs);
452 *ts = timespec64_to_timespec(ts64);
455 EXPORT_SYMBOL(timekeeping_clocktai);
459 * ktime_get_clocktai - Returns the TAI time of day in a ktime
461 * Returns the time of day in a ktime.
463 ktime_t ktime_get_clocktai(void)
467 timekeeping_clocktai(&ts);
468 return timespec_to_ktime(ts);
470 EXPORT_SYMBOL(ktime_get_clocktai);
472 #ifdef CONFIG_NTP_PPS
475 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
476 * @ts_raw: pointer to the timespec to be set to raw monotonic time
477 * @ts_real: pointer to the timespec to be set to the time of day
479 * This function reads both the time of day and raw monotonic time at the
480 * same time atomically and stores the resulting timestamps in timespec
483 void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
485 struct timekeeper *tk = &timekeeper;
487 s64 nsecs_raw, nsecs_real;
489 WARN_ON_ONCE(timekeeping_suspended);
492 seq = read_seqcount_begin(&timekeeper_seq);
494 *ts_raw = timespec64_to_timespec(tk->raw_time);
495 ts_real->tv_sec = tk->xtime_sec;
496 ts_real->tv_nsec = 0;
498 nsecs_raw = timekeeping_get_ns_raw(tk);
499 nsecs_real = timekeeping_get_ns(tk);
501 } while (read_seqcount_retry(&timekeeper_seq, seq));
503 timespec_add_ns(ts_raw, nsecs_raw);
504 timespec_add_ns(ts_real, nsecs_real);
506 EXPORT_SYMBOL(getnstime_raw_and_real);
508 #endif /* CONFIG_NTP_PPS */
511 * do_gettimeofday - Returns the time of day in a timeval
512 * @tv: pointer to the timeval to be set
514 * NOTE: Users should be converted to using getnstimeofday()
516 void do_gettimeofday(struct timeval *tv)
518 struct timespec64 now;
520 getnstimeofday64(&now);
521 tv->tv_sec = now.tv_sec;
522 tv->tv_usec = now.tv_nsec/1000;
524 EXPORT_SYMBOL(do_gettimeofday);
527 * do_settimeofday - Sets the time of day
528 * @tv: pointer to the timespec variable containing the new time
530 * Sets the time of day to the new time, updates NTP and notifies hrtimers
532 int do_settimeofday(const struct timespec *tv)
534 struct timekeeper *tk = &timekeeper;
535 struct timespec64 ts_delta, xt, tmp;
538 if (!timespec_valid_strict(tv))
541 raw_spin_lock_irqsave(&timekeeper_lock, flags);
542 write_seqcount_begin(&timekeeper_seq);
544 timekeeping_forward_now(tk);
547 ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
548 ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;
550 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
552 tmp = timespec_to_timespec64(*tv);
553 tk_set_xtime(tk, &tmp);
555 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
557 write_seqcount_end(&timekeeper_seq);
558 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
560 /* signal hrtimers about time change */
565 EXPORT_SYMBOL(do_settimeofday);
568 * timekeeping_inject_offset - Adds or subtracts from the current time.
569 * @ts: pointer to the timespec variable containing the offset
571 * Adds or subtracts an offset value from the current time.
573 int timekeeping_inject_offset(struct timespec *ts)
575 struct timekeeper *tk = &timekeeper;
577 struct timespec64 ts64, tmp;
580 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
583 ts64 = timespec_to_timespec64(*ts);
585 raw_spin_lock_irqsave(&timekeeper_lock, flags);
586 write_seqcount_begin(&timekeeper_seq);
588 timekeeping_forward_now(tk);
590 /* Make sure the proposed value is valid */
591 tmp = timespec64_add(tk_xtime(tk), ts64);
592 if (!timespec64_valid_strict(&tmp)) {
597 tk_xtime_add(tk, &ts64);
598 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64));
600 error: /* even if we error out, we forwarded the time, so call update */
601 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
603 write_seqcount_end(&timekeeper_seq);
604 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
606 /* signal hrtimers about time change */
611 EXPORT_SYMBOL(timekeeping_inject_offset);
615 * timekeeping_get_tai_offset - Returns current TAI offset from UTC
618 s32 timekeeping_get_tai_offset(void)
620 struct timekeeper *tk = &timekeeper;
625 seq = read_seqcount_begin(&timekeeper_seq);
626 ret = tk->tai_offset;
627 } while (read_seqcount_retry(&timekeeper_seq, seq));
633 * __timekeeping_set_tai_offset - Lock free worker function
636 static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
638 tk->tai_offset = tai_offset;
639 tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
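/*
 * offs_tai is maintained as offs_real + tai_offset so that CLOCK_TAI
 * can be derived from CLOCK_MONOTONIC with a single ktime_add() in the
 * hrtimer paths.
 */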
643 * timekeeping_set_tai_offset - Sets the current TAI offset from UTC
646 void timekeeping_set_tai_offset(s32 tai_offset)
648 struct timekeeper *tk = &timekeeper;
651 raw_spin_lock_irqsave(&timekeeper_lock, flags);
652 write_seqcount_begin(&timekeeper_seq);
653 __timekeeping_set_tai_offset(tk, tai_offset);
654 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
655 write_seqcount_end(&timekeeper_seq);
656 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
661 * change_clocksource - Swaps clocksources if a new one is available
663 * Accumulates current time interval and initializes new clocksource
665 static int change_clocksource(void *data)
667 struct timekeeper *tk = &timekeeper;
668 struct clocksource *new, *old;
671 new = (struct clocksource *) data;
673 raw_spin_lock_irqsave(&timekeeper_lock, flags);
674 write_seqcount_begin(&timekeeper_seq);
676 timekeeping_forward_now(tk);
678 * If the clocksource is in a module, get a module reference. Succeeds
679 * for built-in code (owner == NULL) as well.
681 if (try_module_get(new->owner)) {
682 if (!new->enable || new->enable(new) == 0) {
684 tk_setup_internals(tk, new);
687 module_put(old->owner);
689 module_put(new->owner);
692 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
694 write_seqcount_end(&timekeeper_seq);
695 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
701 * timekeeping_notify - Install a new clock source
702 * @clock: pointer to the clock source
704 * This function is called from clocksource.c after a new, better clock
705 * source has been registered. The caller holds the clocksource_mutex.
707 int timekeeping_notify(struct clocksource *clock)
709 struct timekeeper *tk = &timekeeper;
711 if (tk->clock == clock)
713 stop_machine(change_clocksource, clock, NULL);
715 return tk->clock == clock ? 0 : -1;
719 * ktime_get_real - get the real (wall-) time in ktime_t format
721 * returns the time in ktime_t format
723 ktime_t ktime_get_real(void)
725 struct timespec64 now;
727 getnstimeofday64(&now);
729 return timespec64_to_ktime(now);
731 EXPORT_SYMBOL_GPL(ktime_get_real);
734 * getrawmonotonic - Returns the raw monotonic time in a timespec
735 * @ts: pointer to the timespec to be set
737 * Returns the raw monotonic time (completely un-modified by ntp)
739 void getrawmonotonic(struct timespec *ts)
741 struct timekeeper *tk = &timekeeper;
742 struct timespec64 ts64;
747 seq = read_seqcount_begin(&timekeeper_seq);
748 nsecs = timekeeping_get_ns_raw(tk);
751 } while (read_seqcount_retry(&timekeeper_seq, seq));
753 timespec64_add_ns(&ts64, nsecs);
754 *ts = timespec64_to_timespec(ts64);
756 EXPORT_SYMBOL(getrawmonotonic);
759 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
761 int timekeeping_valid_for_hres(void)
763 struct timekeeper *tk = &timekeeper;
768 seq = read_seqcount_begin(&timekeeper_seq);
770 ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
772 } while (read_seqcount_retry(&timekeeper_seq, seq));
778 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
780 u64 timekeeping_max_deferment(void)
782 struct timekeeper *tk = &timekeeper;
787 seq = read_seqcount_begin(&timekeeper_seq);
789 ret = tk->clock->max_idle_ns;
791 } while (read_seqcount_retry(&timekeeper_seq, seq));
797 * read_persistent_clock - Return time from the persistent clock.
799 * Weak dummy function for arches that do not yet support it.
800 * Reads the time from the battery backed persistent clock.
801 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
803 * XXX - Do be sure to remove it once all arches implement it.
805 void __weak read_persistent_clock(struct timespec *ts)
812 * read_boot_clock - Return time of the system start.
814 * Weak dummy function for arches that do not yet support it.
815 * Function to read the exact time the system has been started.
816 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
818 * XXX - Do be sure to remove it once all arches implement it.
820 void __weak read_boot_clock(struct timespec *ts)
827 * timekeeping_init - Initializes the clocksource and common timekeeping values
829 void __init timekeeping_init(void)
831 struct timekeeper *tk = &timekeeper;
832 struct clocksource *clock;
834 struct timespec64 now, boot, tmp;
837 read_persistent_clock(&ts);
838 now = timespec_to_timespec64(ts);
839 if (!timespec64_valid_strict(&now)) {
840 pr_warn("WARNING: Persistent clock returned invalid value!\n"
841 " Check your CMOS/BIOS settings.\n");
844 } else if (now.tv_sec || now.tv_nsec)
845 persistent_clock_exist = true;
847 read_boot_clock(&ts);
848 boot = timespec_to_timespec64(ts);
849 if (!timespec64_valid_strict(&boot)) {
850 pr_warn("WARNING: Boot clock returned invalid value!\n"
851 " Check your CMOS/BIOS settings.\n");
856 raw_spin_lock_irqsave(&timekeeper_lock, flags);
857 write_seqcount_begin(&timekeeper_seq);
860 clock = clocksource_default_clock();
862 clock->enable(clock);
863 tk_setup_internals(tk, clock);
865 tk_set_xtime(tk, &now);
866 tk->raw_time.tv_sec = 0;
867 tk->raw_time.tv_nsec = 0;
868 if (boot.tv_sec == 0 && boot.tv_nsec == 0)
871 set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec);
872 tk_set_wall_to_mono(tk, tmp);
876 tk_set_sleep_time(tk, tmp);
878 memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper));
880 write_seqcount_end(&timekeeper_seq);
881 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
884 /* time in seconds when suspend began */
885 static struct timespec64 timekeeping_suspend_time;
888 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
889 * @delta: pointer to a timespec delta value
891 * Takes a timespec offset measuring a suspend interval and properly
892 * adds the sleep offset to the timekeeping variables.
894 static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
895 struct timespec64 *delta)
897 if (!timespec64_valid_strict(delta)) {
898 printk_deferred(KERN_WARNING
899 "__timekeeping_inject_sleeptime: Invalid "
900 "sleep delta value!\n");
903 tk_xtime_add(tk, delta);
904 tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
905 tk_set_sleep_time(tk, timespec64_add(tk->total_sleep_time, *delta));
906 tk_debug_account_sleep_time(delta);
910 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
911 * @delta: pointer to a timespec delta value
913 * This hook is for architectures that cannot support read_persistent_clock
914 * because their RTC/persistent clock is only accessible when irqs are enabled.
916 * This function should only be called by rtc_resume(), and allows
917 * a suspend offset to be injected into the timekeeping values.
919 void timekeeping_inject_sleeptime(struct timespec *delta)
921 struct timekeeper *tk = &timekeeper;
922 struct timespec64 tmp;
926 * Make sure we don't set the clock twice, as timekeeping_resume()
929 if (has_persistent_clock())
932 raw_spin_lock_irqsave(&timekeeper_lock, flags);
933 write_seqcount_begin(&timekeeper_seq);
935 timekeeping_forward_now(tk);
937 tmp = timespec_to_timespec64(*delta);
938 __timekeeping_inject_sleeptime(tk, &tmp);
940 timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
942 write_seqcount_end(&timekeeper_seq);
943 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
945 /* signal hrtimers about time change */
950 * timekeeping_resume - Resumes the generic timekeeping subsystem.
952 * This is for the generic clocksource timekeeping.
953 * xtime/wall_to_monotonic/jiffies/etc are
954 * still managed by arch specific suspend/resume code.
956 static void timekeeping_resume(void)
958 struct timekeeper *tk = &timekeeper;
959 struct clocksource *clock = tk->clock;
961 struct timespec64 ts_new, ts_delta;
963 cycle_t cycle_now, cycle_delta;
964 bool suspendtime_found = false;
966 read_persistent_clock(&tmp);
967 ts_new = timespec_to_timespec64(tmp);
969 clockevents_resume();
970 clocksource_resume();
972 raw_spin_lock_irqsave(&timekeeper_lock, flags);
973 write_seqcount_begin(&timekeeper_seq);
976 * After system resumes, we need to calculate the suspended time and
977 * compensate the OS time for it. There are 3 sources that could be
978 * used: Nonstop clocksource during suspend, persistent clock and rtc
981 * One specific platform may have 1 or 2 or all of them, and the
982 * preference will be:
983 * suspend-nonstop clocksource -> persistent clock -> rtc
984 * The less preferred source will only be tried if there is no better
985 * usable source. The rtc part is handled separately in rtc core code.
987 cycle_now = clock->read(clock);
988 if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
989 cycle_now > clock->cycle_last) {
990 u64 num, max = ULLONG_MAX;
991 u32 mult = clock->mult;
992 u32 shift = clock->shift;
995 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
998 * "cycle_delta * mutl" may cause 64 bits overflow, if the
999 * suspended time is too long. In that case we need do the
1000 * 64 bits math carefully
1003 if (cycle_delta > max) {
1004 num = div64_u64(cycle_delta, max);
1005 nsec = (((u64) max * mult) >> shift) * num;
1006 cycle_delta -= num * max;
1008 nsec += ((u64) cycle_delta * mult) >> shift;
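/*
 * The chunked math above converts the delta in pieces so that no
 * single "cycles * mult" product overflows 64 bits, which could
 * otherwise happen after a very long suspend.
 */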
1010 ts_delta = ns_to_timespec64(nsec);
1011 suspendtime_found = true;
1012 } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) {
1013 ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time);
1014 suspendtime_found = true;
1017 if (suspendtime_found)
1018 __timekeeping_inject_sleeptime(tk, &ts_delta);
1020 /* Re-base the last cycle value */
1021 tk->cycle_last = clock->cycle_last = cycle_now;
1023 timekeeping_suspended = 0;
1024 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1025 write_seqcount_end(&timekeeper_seq);
1026 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1028 touch_softlockup_watchdog();
1030 clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);
1032 /* Resume hrtimers */
1036 static int timekeeping_suspend(void)
1038 struct timekeeper *tk = &timekeeper;
1039 unsigned long flags;
1040 struct timespec64 delta, delta_delta;
1041 static struct timespec64 old_delta;
1042 struct timespec tmp;
1044 read_persistent_clock(&tmp);
1045 timekeeping_suspend_time = timespec_to_timespec64(tmp);
1048 * On some systems the persistent clock cannot be detected at
1049 * timekeeping_init by its return value, so if we see a valid
1050 * value returned, update the persistent_clock_exist flag.
1052 if (timekeeping_suspend_time.tv_sec || timekeeping_suspend_time.tv_nsec)
1053 persistent_clock_exist = true;
1055 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1056 write_seqcount_begin(&timekeeper_seq);
1057 timekeeping_forward_now(tk);
1058 timekeeping_suspended = 1;
1061 * To avoid drift caused by repeated suspend/resumes,
1062 * which each can add ~1 second drift error,
1063 * try to compensate so the difference in system time
1064 * and persistent_clock time stays close to constant.
1066 delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
1067 delta_delta = timespec64_sub(delta, old_delta);
1068 if (abs(delta_delta.tv_sec) >= 2) {
1070 * if delta_delta is too large, assume time correction
1071 * has occurred and set old_delta to the current delta.
1075 /* Otherwise adjust the saved suspend time to compensate */
1076 timekeeping_suspend_time =
1077 timespec64_add(timekeeping_suspend_time, delta_delta);
1080 timekeeping_update(tk, TK_MIRROR);
1081 write_seqcount_end(&timekeeper_seq);
1082 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1084 clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
1085 clocksource_suspend();
1086 clockevents_suspend();
1091 /* sysfs resume/suspend bits for timekeeping */
1092 static struct syscore_ops timekeeping_syscore_ops = {
1093 .resume = timekeeping_resume,
1094 .suspend = timekeeping_suspend,
1097 static int __init timekeeping_init_ops(void)
1099 register_syscore_ops(&timekeeping_syscore_ops);
1103 device_initcall(timekeeping_init_ops);
1106 * If the error is already larger, we look ahead even further
1107 * to compensate for late or lost adjustments.
1109 static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
1110 s64 error, s64 *interval,
1114 u32 look_ahead, adj;
1118 * Use the current error value to determine how much to look ahead.
1119 * The larger the error the slower we adjust for it to avoid problems
1120 * with losing too many ticks, otherwise we would overadjust and
1121 * produce an even larger error. The smaller the adjustment the
1122 * faster we try to adjust for it, as lost ticks can do less harm
1123 * here. This is tuned so that an error of about 1 msec is adjusted
1124 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
1126 error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
1127 error2 = abs(error2);
1128 for (look_ahead = 0; error2 > 0; look_ahead++)
1132 * Now calculate the error in (1 << look_ahead) ticks, but first
1133 * remove the single look ahead already included in the error.
1135 tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
1136 tick_error -= tk->xtime_interval >> 1;
1137 error = ((error - tick_error) >> look_ahead) + tick_error;
1139 /* Finally calculate the adjustment shift value. */
1144 *interval = -*interval;
1148 for (adj = 0; error > i; adj++)
1157 * Adjust the multiplier to reduce the error value.
1158 * This is optimized for the most common adjustments of -1, 0 and 1;
1159 * for other values we can do a bit more work.
1161 static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
1163 s64 error, interval = tk->cycle_interval;
1167 * The point of this is to check if the error is greater than half
1170 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
1172 * Note we subtract one in the shift, so that error is really error*2.
1173 * This "saves" dividing(shifting) interval twice, but keeps the
1174 * (error > interval) comparison as still measuring if error is
1175 * larger than half an interval.
1177 * Note: It does not "save" on aggravation when reading the code.
1179 error = tk->ntp_error >> (tk->ntp_error_shift - 1);
1180 if (error > interval) {
1182 * We now divide error by 4 (via shift), which checks if
1183 * the error is greater than twice the interval.
1184 * If it is greater, we need a bigadjust; if it's smaller,
1185 * we can adjust by 1.
1188 if (likely(error <= interval))
1191 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1193 if (error < -interval) {
1194 /* See comment above, this is just switched for the negative */
1196 if (likely(error >= -interval)) {
1198 interval = -interval;
1201 adj = timekeeping_bigadjust(tk, error, &interval, &offset);
1208 if (unlikely(tk->clock->maxadj &&
1209 (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
1210 printk_deferred_once(KERN_WARNING
1211 "Adjusting %s more than 11%% (%ld vs %ld)\n",
1212 tk->clock->name, (long)tk->mult + adj,
1213 (long)tk->clock->mult + tk->clock->maxadj);
1216 * So the following can be confusing.
1218 * To keep things simple, lets assume adj == 1 for now.
1220 * When adj != 1, remember that the interval and offset values
1221 * have been appropriately scaled so the math is the same.
1223 * The basic idea here is that we're increasing the multiplier
1224 * by one, this causes the xtime_interval to be incremented by
1225 * one cycle_interval. This is because:
1226 * xtime_interval = cycle_interval * mult
1227 * So if mult is being incremented by one:
1228 * xtime_interval = cycle_interval * (mult + 1)
1230 * xtime_interval = (cycle_interval * mult) + cycle_interval
1231 * Which can be shortened to:
1232 * xtime_interval += cycle_interval
1234 * So offset stores the non-accumulated cycles. Thus the current
1235 * time (in shifted nanoseconds) is:
1236 * now = (offset * adj) + xtime_nsec
1237 * Now, even though we're adjusting the clock frequency, we have
1238 * to keep time consistent. In other words, we can't jump back
1239 * in time, and we also want to avoid jumping forward in time.
1241 * So given the same offset value, we need the time to be the same
1242 * both before and after the freq adjustment.
1243 * now = (offset * adj_1) + xtime_nsec_1
1244 * now = (offset * adj_2) + xtime_nsec_2
1246 * (offset * adj_1) + xtime_nsec_1 =
1247 * (offset * adj_2) + xtime_nsec_2
1251 * (offset * adj_1) + xtime_nsec_1 =
1252 * (offset * (adj_1+1)) + xtime_nsec_2
1253 * (offset * adj_1) + xtime_nsec_1 =
1254 * (offset * adj_1) + offset + xtime_nsec_2
1255 * Canceling the sides:
1256 * xtime_nsec_1 = offset + xtime_nsec_2
1258 * xtime_nsec_2 = xtime_nsec_1 - offset
1259 * Which simplifies to:
1260 * xtime_nsec -= offset
1262 * XXX - TODO: Doc ntp_error calculation.
1265 tk->xtime_interval += interval;
1266 tk->xtime_nsec -= offset;
1267 tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
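/*
 * Net effect of the adjustment: the multiplier is nudged by adj while
 * xtime_nsec absorbs the matching offset, so the value read as
 *	now = xtime_nsec + (offset * mult)
 * does not jump across the frequency change, exactly as derived above.
 */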
1271 * It may be possible that when we entered this function, xtime_nsec
1272 * was very small. Further, if we're slightly speeding the clocksource
1273 * in the code above, it's possible the required corrective factor to
1274 * xtime_nsec could cause it to underflow.
1276 * Now, since we already accumulated the second, we cannot simply roll
1277 * the accumulated second back, since the NTP subsystem has been
1278 * notified via second_overflow. So instead we push xtime_nsec forward
1279 * by the amount we underflowed, and add that amount into the error.
1281 * We'll correct this error next time through this function, when
1282 * xtime_nsec is not as small.
1284 if (unlikely((s64)tk->xtime_nsec < 0)) {
1285 s64 neg = -(s64)tk->xtime_nsec;
1287 tk->ntp_error += neg << tk->ntp_error_shift;
1293 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
1295 * Helper function that accumulates the nsecs greater than a second
1296 * from the xtime_nsec field to the xtime_sec field.
1297 * It also calls into the NTP code to handle leapsecond processing.
1300 static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
1302 u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;
1303 unsigned int clock_set = 0;
1305 while (tk->xtime_nsec >= nsecps) {
1308 tk->xtime_nsec -= nsecps;
1311 /* Figure out if its a leap sec and apply if needed */
1312 leap = second_overflow(tk->xtime_sec);
1313 if (unlikely(leap)) {
1314 struct timespec64 ts;
1316 tk->xtime_sec += leap;
1320 tk_set_wall_to_mono(tk,
1321 timespec64_sub(tk->wall_to_monotonic, ts));
1323 __timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
1325 clock_set = TK_CLOCK_WAS_SET;
1332 * logarithmic_accumulation - shifted accumulation of cycles
1334 * This function accumulates a shifted interval of cycles into
1335 * a shifted interval of nanoseconds. Allows for O(log) accumulation
1338 * Returns the unconsumed cycles.
1340 static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
1342 unsigned int *clock_set)
1344 cycle_t interval = tk->cycle_interval << shift;
1347 /* If the offset is smaller than a shifted interval, do nothing */
1348 if (offset < interval)
1351 /* Accumulate one shifted interval */
1353 tk->cycle_last += interval;
1355 tk->xtime_nsec += tk->xtime_interval << shift;
1356 *clock_set |= accumulate_nsecs_to_secs(tk);
1358 /* Accumulate raw time */
1359 raw_nsecs = (u64)tk->raw_interval << shift;
1360 raw_nsecs += tk->raw_time.tv_nsec;
1361 if (raw_nsecs >= NSEC_PER_SEC) {
1362 u64 raw_secs = raw_nsecs;
1363 raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
1364 tk->raw_time.tv_sec += raw_secs;
1366 tk->raw_time.tv_nsec = raw_nsecs;
1368 /* Accumulate error between NTP and clock interval */
1369 tk->ntp_error += ntp_tick_length() << shift;
1370 tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
1371 (tk->ntp_error_shift + shift);
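/*
 * Example: with shift == 3 a single call consumes 8 cycle_intervals at
 * once; update_wall_time() keeps shrinking the shift until the
 * remaining offset is below one cycle_interval.
 */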
1377 * update_wall_time - Uses the current clocksource to increment the wall time
1380 void update_wall_time(void)
1382 struct clocksource *clock;
1383 struct timekeeper *real_tk = &timekeeper;
1384 struct timekeeper *tk = &shadow_timekeeper;
1386 int shift = 0, maxshift;
1387 unsigned int clock_set = 0;
1388 unsigned long flags;
1390 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1392 /* Make sure we're fully resumed: */
1393 if (unlikely(timekeeping_suspended))
1396 clock = real_tk->clock;
1398 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
1399 offset = real_tk->cycle_interval;
1401 offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
1404 /* Check if there's really nothing to do */
1405 if (offset < real_tk->cycle_interval)
1409 * With NO_HZ we may have to accumulate many cycle_intervals
1410 * (think "ticks") worth of time at once. To do this efficiently,
1411 * we calculate the largest doubling multiple of cycle_intervals
1412 * that is smaller than the offset. We then accumulate that
1413 * chunk in one go, and then try to consume the next smaller
1416 shift = ilog2(offset) - ilog2(tk->cycle_interval);
1417 shift = max(0, shift);
1418 /* Bound shift to one less than what overflows tick_length */
1419 maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
1420 shift = min(shift, maxshift);
1421 while (offset >= tk->cycle_interval) {
1422 offset = logarithmic_accumulation(tk, offset, shift,
1424 if (offset < tk->cycle_interval<<shift)
1428 /* correct the clock when NTP error is too big */
1429 timekeeping_adjust(tk, offset);
1432 * XXX This can be killed once everyone converts
1433 * to the new update_vsyscall.
1435 old_vsyscall_fixup(tk);
1438 * Finally, make sure that after the rounding
1439 * xtime_nsec isn't larger than NSEC_PER_SEC
1441 clock_set |= accumulate_nsecs_to_secs(tk);
1443 write_seqcount_begin(&timekeeper_seq);
1444 /* Update clock->cycle_last with the new value */
1445 clock->cycle_last = tk->cycle_last;
1447 * Update the real timekeeper.
1449 * We could avoid this memcpy by switching pointers, but that
1450 * requires changes to all other timekeeper usage sites as
1451 * well, i.e. move the timekeeper pointer getter into the
1452 * spinlocked/seqcount protected sections. And we trade this
1453 * memcpy under the timekeeper_seq against one before we start
1456 memcpy(real_tk, tk, sizeof(*tk));
1457 timekeeping_update(real_tk, clock_set);
1458 write_seqcount_end(&timekeeper_seq);
1460 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1462 /* Have to call the _delayed version, since we are in irq context */
1463 clock_was_set_delayed();
1467 * getboottime - Return the real time of system boot.
1468 * @ts: pointer to the timespec to be set
1470 * Returns the wall-time of boot in a timespec.
1472 * This is based on the wall_to_monotonic offset and the total suspend
1473 * time. Calls to settimeofday will affect the value returned (which
1474 * basically means that however wrong your real time clock is at boot time,
1475 * you get the right time here).
1477 void getboottime(struct timespec *ts)
1479 struct timekeeper *tk = &timekeeper;
1480 struct timespec boottime = {
1481 .tv_sec = tk->wall_to_monotonic.tv_sec +
1482 tk->total_sleep_time.tv_sec,
1483 .tv_nsec = tk->wall_to_monotonic.tv_nsec +
1484 tk->total_sleep_time.tv_nsec
1487 set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
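/*
 * wall_to_monotonic + total_sleep_time is the negated wall-clock value
 * of the boot instant, so flipping its sign above recovers the boot
 * time in CLOCK_REALTIME terms.
 */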
1489 EXPORT_SYMBOL_GPL(getboottime);
1492 * get_monotonic_boottime - Returns monotonic time since boot
1493 * @ts: pointer to the timespec to be set
1495 * Returns the monotonic time since boot in a timespec.
1497 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
1498 * includes the time spent in suspend.
1500 void get_monotonic_boottime(struct timespec *ts)
1502 struct timekeeper *tk = &timekeeper;
1503 struct timespec64 tomono, sleep, ret;
1507 WARN_ON(timekeeping_suspended);
1510 seq = read_seqcount_begin(&timekeeper_seq);
1511 ret.tv_sec = tk->xtime_sec;
1512 nsec = timekeeping_get_ns(tk);
1513 tomono = tk->wall_to_monotonic;
1514 sleep = tk->total_sleep_time;
1516 } while (read_seqcount_retry(&timekeeper_seq, seq));
1518 ret.tv_sec += tomono.tv_sec + sleep.tv_sec;
1520 timespec64_add_ns(&ret, nsec + tomono.tv_nsec + sleep.tv_nsec);
1521 *ts = timespec64_to_timespec(ret);
1523 EXPORT_SYMBOL_GPL(get_monotonic_boottime);
1526 * ktime_get_boottime - Returns monotonic time since boot in a ktime
1528 * Returns the monotonic time since boot in a ktime
1530 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
1531 * includes the time spent in suspend.
1533 ktime_t ktime_get_boottime(void)
1537 get_monotonic_boottime(&ts);
1538 return timespec_to_ktime(ts);
1540 EXPORT_SYMBOL_GPL(ktime_get_boottime);
1543 * monotonic_to_bootbased - Convert the monotonic time to boot based.
1544 * @ts: pointer to the timespec to be converted
1546 void monotonic_to_bootbased(struct timespec *ts)
1548 struct timekeeper *tk = &timekeeper;
1549 struct timespec64 ts64;
1551 ts64 = timespec_to_timespec64(*ts);
1552 ts64 = timespec64_add(ts64, tk->total_sleep_time);
1553 *ts = timespec64_to_timespec(ts64);
1555 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
1557 unsigned long get_seconds(void)
1559 struct timekeeper *tk = &timekeeper;
1561 return tk->xtime_sec;
1563 EXPORT_SYMBOL(get_seconds);
1565 struct timespec __current_kernel_time(void)
1567 struct timekeeper *tk = &timekeeper;
1569 return timespec64_to_timespec(tk_xtime(tk));
1572 struct timespec current_kernel_time(void)
1574 struct timekeeper *tk = &timekeeper;
1575 struct timespec64 now;
1579 seq = read_seqcount_begin(&timekeeper_seq);
1582 } while (read_seqcount_retry(&timekeeper_seq, seq));
1584 return timespec64_to_timespec(now);
1586 EXPORT_SYMBOL(current_kernel_time);
1588 struct timespec get_monotonic_coarse(void)
1590 struct timekeeper *tk = &timekeeper;
1591 struct timespec64 now, mono;
1595 seq = read_seqcount_begin(&timekeeper_seq);
1598 mono = tk->wall_to_monotonic;
1599 } while (read_seqcount_retry(&timekeeper_seq, seq));
1601 set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec,
1602 now.tv_nsec + mono.tv_nsec);
1604 return timespec64_to_timespec(now);
1608 * Must hold jiffies_lock
1610 void do_timer(unsigned long ticks)
1612 jiffies_64 += ticks;
1613 calc_global_load(ticks);
1617 * ktime_get_update_offsets_tick - hrtimer helper
1618 * @offs_real: pointer to storage for monotonic -> realtime offset
1619 * @offs_boot: pointer to storage for monotonic -> boottime offset
1620 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1622 * Returns monotonic time at last tick and various offsets
1624 ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
1627 struct timekeeper *tk = &timekeeper;
1628 struct timespec64 ts;
1633 seq = read_seqcount_begin(&timekeeper_seq);
1636 *offs_real = tk->offs_real;
1637 *offs_boot = tk->offs_boot;
1638 *offs_tai = tk->offs_tai;
1639 } while (read_seqcount_retry(&timekeeper_seq, seq));
1641 now = ktime_set(ts.tv_sec, ts.tv_nsec);
1642 now = ktime_sub(now, *offs_real);
1646 #ifdef CONFIG_HIGH_RES_TIMERS
1648 * ktime_get_update_offsets_now - hrtimer helper
1649 * @offs_real: pointer to storage for monotonic -> realtime offset
1650 * @offs_boot: pointer to storage for monotonic -> boottime offset
1651 * @offs_tai: pointer to storage for monotonic -> clock tai offset
1653 * Returns current monotonic time and updates the offsets
1654 * Called from hrtimer_interrupt() or retrigger_next_event()
1656 ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
1659 struct timekeeper *tk = &timekeeper;
1665 seq = read_seqcount_begin(&timekeeper_seq);
1667 secs = tk->xtime_sec;
1668 nsecs = timekeeping_get_ns(tk);
1670 *offs_real = tk->offs_real;
1671 *offs_boot = tk->offs_boot;
1672 *offs_tai = tk->offs_tai;
1673 } while (read_seqcount_retry(&timekeeper_seq, seq));
1675 now = ktime_add_ns(ktime_set(secs, 0), nsecs);
1676 now = ktime_sub(now, *offs_real);
1682 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
1684 ktime_t ktime_get_monotonic_offset(void)
1686 struct timekeeper *tk = &timekeeper;
1688 struct timespec64 wtom;
1691 seq = read_seqcount_begin(&timekeeper_seq);
1692 wtom = tk->wall_to_monotonic;
1693 } while (read_seqcount_retry(&timekeeper_seq, seq));
1695 return timespec64_to_ktime(wtom);
1697 EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
1700 * do_adjtimex() - Accessor function to NTP __do_adjtimex function
1702 int do_adjtimex(struct timex *txc)
1704 struct timekeeper *tk = &timekeeper;
1705 unsigned long flags;
1706 struct timespec64 ts;
1710 /* Validate the data before disabling interrupts */
1711 ret = ntp_validate_timex(txc);
1715 if (txc->modes & ADJ_SETOFFSET) {
1716 struct timespec delta;
1717 delta.tv_sec = txc->time.tv_sec;
1718 delta.tv_nsec = txc->time.tv_usec;
1719 if (!(txc->modes & ADJ_NANO))
1720 delta.tv_nsec *= 1000;
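/*
 * ADJ_SETOFFSET offsets arrive in microseconds unless the caller also
 * set ADJ_NANO, hence the conversion to nanoseconds above.
 */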
1721 ret = timekeeping_inject_offset(&delta);
1726 getnstimeofday64(&ts);
1728 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1729 write_seqcount_begin(&timekeeper_seq);
1731 orig_tai = tai = tk->tai_offset;
1732 ret = __do_adjtimex(txc, &ts, &tai);
1734 if (tai != orig_tai) {
1735 __timekeeping_set_tai_offset(tk, tai);
1736 timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
1738 write_seqcount_end(&timekeeper_seq);
1739 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1741 if (tai != orig_tai)
1744 ntp_notify_cmos_timer();
1749 #ifdef CONFIG_NTP_PPS
1751 * hardpps() - Accessor function to NTP __hardpps function
1753 void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
1755 unsigned long flags;
1757 raw_spin_lock_irqsave(&timekeeper_lock, flags);
1758 write_seqcount_begin(&timekeeper_seq);
1760 __hardpps(phase_ts, raw_ts);
1762 write_seqcount_end(&timekeeper_seq);
1763 raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
1765 EXPORT_SYMBOL(hardpps);
1769 * xtime_update() - advances the timekeeping infrastructure
1770 * @ticks: number of ticks that have elapsed since the last call.
1772 * Must be called with interrupts disabled.
1774 void xtime_update(unsigned long ticks)
1776 write_seqlock(&jiffies_lock);
1778 write_sequnlock(&jiffies_lock);