/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 * sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */
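/*
 * One way to do that check (illustrative only; the exact object path
 * depends on the tree and build):
 *
 *	readelf -r arch/x86/vdso/vdso.so.dbg
 *
 * should not report any unresolved relocations.
 */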
/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/hpet.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <linux/math64.h>
#include <linux/time.h>
#define gtod (&VVAR(vsyscall_gtod_data))
extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);
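/*
 * These are the entry points exported by the vDSO; the weak aliases at
 * the bottom of this file provide the unprefixed symbol names.
 */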
#ifdef CONFIG_HPET_TIMER
static inline u32 read_hpet_counter(const volatile void *addr)
{
        return *(const volatile u32 *) (addr + HPET_COUNTER);
}
#endif
#ifndef BUILD_VDSO32

#include <linux/kernel.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/pvclock.h>
static notrace cycle_t vread_hpet(void)
{
        return read_hpet_counter((const void *)fix_to_virt(VSYSCALL_HPET));
}
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;
        asm("syscall" : "=a" (ret) :
            "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
        return ret;
}
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
        long ret;

        asm("syscall" : "=a" (ret) :
            "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
        return ret;
}
#ifdef CONFIG_PARAVIRT_CLOCK

static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
{
        const struct pvclock_vsyscall_time_info *pvti_base;
        int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
        int offset = cpu % (PAGE_SIZE/PVTI_SIZE);

        BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);

        pvti_base = (struct pvclock_vsyscall_time_info *)
                    __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);

        return &pvti_base[offset];
}
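/*
 * Worked example (illustrative; assumes 4 KiB pages and a 64-byte
 * pvclock_vsyscall_time_info): PAGE_SIZE/PVTI_SIZE is then 64 entries
 * per fixmapped page, so cpu 70 yields idx = 1, offset = 6, i.e. the
 * seventh entry of the second pvti page.
 */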
static notrace cycle_t vread_pvclock(int *mode)
{
        const struct pvclock_vsyscall_time_info *pvti;
        cycle_t ret;
        u64 last;
        u32 version;
        u8 flags;
        unsigned cpu, cpu1;

        /*
         * Note: hypervisor must guarantee that:
         * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
         * 2. that per-CPU pvclock time info is updated if the
         *    underlying CPU changes.
         * 3. that version is increased whenever underlying CPU
         *    changes.
         */
        do {
                cpu = __getcpu() & VGETCPU_CPU_MASK;
                /* TODO: We can put vcpu id into higher bits of pvti.version.
                 * This will save a couple of cycles by getting rid of
                 * __getcpu() calls (Gleb).
                 */

                pvti = get_pvti(cpu);

                version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);

                /*
                 * Test we're still on the cpu as well as the version.
                 * We could have been migrated just after the first
                 * vgetcpu but before fetching the version, so we
                 * wouldn't notice a version change.
                 */
                cpu1 = __getcpu() & VGETCPU_CPU_MASK;
        } while (unlikely(cpu != cpu1 ||
                          (pvti->pvti.version & 1) ||
                          pvti->pvti.version != version));
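        /*
         * The loop above retries like a seqlock reader: an odd
         * pvti->pvti.version means the hypervisor is mid-update, and a
         * version change since __pvclock_read_cycles() means the data
         * (or the vCPU underneath us) changed while we were reading.
         */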
        if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
                *mode = VCLOCK_NONE;

        /* refer to tsc.c read_tsc() comment for rationale */
        last = gtod->cycle_last;

        if (likely(ret >= last))
                return ret;
        return last;
}
#endif
#else

extern u8 hpet_page
        __attribute__((visibility("hidden")));

#ifdef CONFIG_HPET_TIMER
static notrace cycle_t vread_hpet(void)
{
        return read_hpet_counter((const void *)(&hpet_page));
}
#endif
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
        long ret;

        asm(
                "mov %%ebx, %%edx \n"
                "mov %2, %%ebx \n"
                "call VDSO32_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret)
                : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
                : "memory", "edx");
        return ret;
}
notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
        long ret;

        asm(
                "mov %%ebx, %%edx \n"
                "mov %2, %%ebx \n"
                "call VDSO32_vsyscall \n"
                "mov %%edx, %%ebx \n"
                : "=a" (ret)
                : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
                : "memory", "edx");
        return ret;
}
#ifdef CONFIG_PARAVIRT_CLOCK

static notrace cycle_t vread_pvclock(int *mode)
{
        *mode = VCLOCK_NONE;
        return 0;
}
#endif

#endif
notrace static cycle_t vread_tsc(void)
{
        cycle_t ret;
        u64 last;

        /*
         * Empirically, a fence (of type that depends on the CPU)
         * before rdtsc is enough to ensure that rdtsc is ordered
         * with respect to loads.  The various CPU manuals are unclear
         * as to whether rdtsc can be reordered with later loads,
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
        ret = (cycle_t)__native_read_tsc();

        last = gtod->cycle_last;
        if (likely(ret >= last))
                return ret;
        /*
         * GCC likes to generate cmov here, but this branch is extremely
         * predictable (it's just a function of time and the likely is
         * very likely) and there's a data dependence, so force GCC
         * to generate a branch instead.  I don't barrier() because
         * we don't actually need a barrier, and if this function
         * ever gets inlined it will generate worse code.
         */
        asm volatile ("");
        return last;
}
notrace static inline u64 vgetsns(int *mode)
{
        u64 v;
        cycles_t cycles;

        if (gtod->vclock_mode == VCLOCK_TSC)
                cycles = vread_tsc();
#ifdef CONFIG_HPET_TIMER
        else if (gtod->vclock_mode == VCLOCK_HPET)
                cycles = vread_hpet();
#endif
#ifdef CONFIG_PARAVIRT_CLOCK
        else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
                cycles = vread_pvclock(mode);
#endif
        else
                return 0;
        v = (cycles - gtod->cycle_last) & gtod->mask;
        return v * gtod->mult;
}
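/*
 * Illustrative numbers only (not taken from a real gtod snapshot):
 * with mult = 5592405 and shift = 23 (a ~1.5 GHz counter), a delta of
 * 3000 cycles becomes (3000 * 5592405) >> 23 ~= 2000 ns.  The >> shift
 * step is done by the do_realtime()/do_monotonic() callers below.
 */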
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
        unsigned long seq;
        u64 ns;
        int mode;

        do {
                seq = gtod_read_begin(gtod);
                mode = gtod->vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->wall_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;
        return mode;
}
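/*
 * __iter_div_u64_rem() divides by repeated subtraction, which is fine
 * here because the accumulated nanoseconds rarely amount to more than
 * a couple of seconds, and it avoids dragging a 64-bit division helper
 * into the vDSO on 32-bit builds.
 */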
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
        unsigned long seq;
        u64 ns;
        int mode;

        do {
                seq = gtod_read_begin(gtod);
                mode = gtod->vclock_mode;
                ts->tv_sec = gtod->monotonic_time_sec;
                ns = gtod->monotonic_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;
        return mode;
}
notrace static void do_realtime_coarse(struct timespec *ts)
{
        unsigned long seq;
        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = gtod->wall_time_coarse_sec;
                ts->tv_nsec = gtod->wall_time_coarse_nsec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
}
notrace static void do_monotonic_coarse(struct timespec *ts)
{
        unsigned long seq;
        do {
                seq = gtod_read_begin(gtod);
                ts->tv_sec = gtod->monotonic_time_coarse_sec;
                ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
        } while (unlikely(gtod_read_retry(gtod, seq)));
}
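/*
 * The coarse variants never read a hardware counter: they only copy
 * the values published at the last timekeeping update, so they are
 * cheaper but only accurate to the last tick.
 */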
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
        switch (clock) {
        case CLOCK_REALTIME:
                if (do_realtime(ts) == VCLOCK_NONE)
                        goto fallback;
                break;
        case CLOCK_MONOTONIC:
                if (do_monotonic(ts) == VCLOCK_NONE)
                        goto fallback;
                break;
        case CLOCK_REALTIME_COARSE:
                do_realtime_coarse(ts);
                break;
        case CLOCK_MONOTONIC_COARSE:
                do_monotonic_coarse(ts);
                break;
        default:
                goto fallback;
        }

        return 0;
fallback:
        return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
        __attribute__((weak, alias("__vdso_clock_gettime")));
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
        if (likely(tv != NULL)) {
                if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
                        return vdso_fallback_gtod(tv, tz);
                tv->tv_usec /= 1000;
        }
        if (unlikely(tz != NULL)) {
                tz->tz_minuteswest = gtod->tz_minuteswest;
                tz->tz_dsttime = gtod->tz_dsttime;
        }

        return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
        __attribute__((weak, alias("__vdso_gettimeofday")));
/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
        /* This is atomic on x86 so we don't need any locks. */
        time_t result = ACCESS_ONCE(gtod->wall_time_sec);

        if (t)
                *t = result;
        return result;
}
int time(time_t *t)
        __attribute__((weak, alias("__vdso_time")));