UPSTREAM: arm64: Add support for CLOCK_MONOTONIC_RAW in clock_gettime() vDSO
authorKevin Brodsky <kevin.brodsky@arm.com>
Tue, 12 Jul 2016 10:24:00 +0000 (11:24 +0100)
committerAmit Pundir <amit.pundir@linaro.org>
Mon, 10 Apr 2017 07:55:02 +0000 (13:25 +0530)
(cherry picked from commit 49eea433b326a0ac5c7c941a011b2c65990bd19b)

So far the arm64 clock_gettime() vDSO implementation only supported
the following clocks, falling back to the syscall for the others:
- CLOCK_REALTIME{,_COARSE}
- CLOCK_MONOTONIC{,_COARSE}

This patch adds support for the CLOCK_MONOTONIC_RAW clock, taking
advantage of the recent refactoring of the vDSO time functions. Like
the non-_COARSE clocks, this only works when the "arch_sys_counter"
clocksource is in use (allowing us to read the current time from the
virtual counter register), otherwise we also have to fall back to the
syscall.

Most of the data is shared with CLOCK_MONOTONIC, and the algorithm is
similar. The reference implementation in kernel/time/timekeeping.c
shows that:
- CLOCK_MONOTONIC = tk->wall_to_monotonic + tk->xtime_sec +
  timekeeping_get_ns(&tk->tkr_mono)
- CLOCK_MONOTONIC_RAW = tk->raw_time + timekeeping_get_ns(&tk->tkr_raw)
- tkr_mono and tkr_raw are identical (in particular, same
  clocksource), except these members:
  * mult (only mono's multiplier is NTP-adjusted)
  * xtime_nsec (always 0 for raw)

Therefore, tk->raw_time and tkr_raw->mult are now also stored in the
vDSO data page.

Cc: Ali Saidi <ali.saidi@arm.com>
Signed-off-by: Kevin Brodsky <kevin.brodsky@arm.com>
Reviewed-by: Dave Martin <dave.martin@arm.com>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Bug: 20045882
Bug: 19198045
Change-Id: I854adcc1192757a1ac40662e85d466ca709d0682

arch/arm64/include/asm/vdso_datapage.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/vdso.c
arch/arm64/kernel/vdso/gettimeofday.S

index de66199673d7ae23678c72bd898953af869a4b87..2b9a63771eda8c81b12ec4279a9e279543739ecc 100644 (file)
@@ -22,6 +22,8 @@
 
 struct vdso_data {
        __u64 cs_cycle_last;    /* Timebase at clocksource init */
+       __u64 raw_time_sec;     /* Raw time */
+       __u64 raw_time_nsec;
        __u64 xtime_clock_sec;  /* Kernel time */
        __u64 xtime_clock_nsec;
        __u64 xtime_coarse_sec; /* Coarse time */
@@ -29,8 +31,10 @@ struct vdso_data {
        __u64 wtm_clock_sec;    /* Wall to monotonic time */
        __u64 wtm_clock_nsec;
        __u32 tb_seq_count;     /* Timebase sequence counter */
-       __u32 cs_mult;          /* Clocksource multiplier */
-       __u32 cs_shift;         /* Clocksource shift */
+       /* cs_* members must be adjacent and in this order (ldp accesses) */
+       __u32 cs_mono_mult;     /* NTP-adjusted clocksource multiplier */
+       __u32 cs_shift;         /* Clocksource shift (mono = raw) */
+       __u32 cs_raw_mult;      /* Raw clocksource multiplier */
        __u32 tz_minuteswest;   /* Whacky timezone stuff */
        __u32 tz_dsttime;
        __u32 use_syscall;
index c9ea87198789771cc5d3662d43a54fa0aedc3514..350c0e99fc6bcb1a6706efd46d1cdb022e9f0339 100644 (file)
@@ -92,6 +92,7 @@ int main(void)
   BLANK();
   DEFINE(CLOCK_REALTIME,       CLOCK_REALTIME);
   DEFINE(CLOCK_MONOTONIC,      CLOCK_MONOTONIC);
+  DEFINE(CLOCK_MONOTONIC_RAW,  CLOCK_MONOTONIC_RAW);
   DEFINE(CLOCK_REALTIME_RES,   MONOTONIC_RES_NSEC);
   DEFINE(CLOCK_REALTIME_COARSE,        CLOCK_REALTIME_COARSE);
   DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
@@ -99,6 +100,8 @@ int main(void)
   DEFINE(NSEC_PER_SEC,         NSEC_PER_SEC);
   BLANK();
   DEFINE(VDSO_CS_CYCLE_LAST,   offsetof(struct vdso_data, cs_cycle_last));
+  DEFINE(VDSO_RAW_TIME_SEC,    offsetof(struct vdso_data, raw_time_sec));
+  DEFINE(VDSO_RAW_TIME_NSEC,   offsetof(struct vdso_data, raw_time_nsec));
   DEFINE(VDSO_XTIME_CLK_SEC,   offsetof(struct vdso_data, xtime_clock_sec));
   DEFINE(VDSO_XTIME_CLK_NSEC,  offsetof(struct vdso_data, xtime_clock_nsec));
   DEFINE(VDSO_XTIME_CRS_SEC,   offsetof(struct vdso_data, xtime_coarse_sec));
@@ -106,7 +109,8 @@ int main(void)
   DEFINE(VDSO_WTM_CLK_SEC,     offsetof(struct vdso_data, wtm_clock_sec));
   DEFINE(VDSO_WTM_CLK_NSEC,    offsetof(struct vdso_data, wtm_clock_nsec));
   DEFINE(VDSO_TB_SEQ_COUNT,    offsetof(struct vdso_data, tb_seq_count));
-  DEFINE(VDSO_CS_MULT,         offsetof(struct vdso_data, cs_mult));
+  DEFINE(VDSO_CS_MONO_MULT,    offsetof(struct vdso_data, cs_mono_mult));
+  DEFINE(VDSO_CS_RAW_MULT,     offsetof(struct vdso_data, cs_raw_mult));
   DEFINE(VDSO_CS_SHIFT,                offsetof(struct vdso_data, cs_shift));
   DEFINE(VDSO_TZ_MINWEST,      offsetof(struct vdso_data, tz_minuteswest));
   DEFINE(VDSO_TZ_DSTTIME,      offsetof(struct vdso_data, tz_dsttime));
index 97bc68f4c689f28eac7188f5e0b792b5293c37da..54f7b327fd1833ef00fd2d5ec18f5a9841ba1865 100644 (file)
@@ -212,10 +212,16 @@ void update_vsyscall(struct timekeeper *tk)
        vdso_data->wtm_clock_nsec               = tk->wall_to_monotonic.tv_nsec;
 
        if (!use_syscall) {
+               /* tkr_mono.cycle_last == tkr_raw.cycle_last */
                vdso_data->cs_cycle_last        = tk->tkr_mono.cycle_last;
+               vdso_data->raw_time_sec         = tk->raw_time.tv_sec;
+               vdso_data->raw_time_nsec        = tk->raw_time.tv_nsec;
                vdso_data->xtime_clock_sec      = tk->xtime_sec;
                vdso_data->xtime_clock_nsec     = tk->tkr_mono.xtime_nsec;
-               vdso_data->cs_mult              = tk->tkr_mono.mult;
+               /* tkr_raw.xtime_nsec == 0 */
+               vdso_data->cs_mono_mult         = tk->tkr_mono.mult;
+               vdso_data->cs_raw_mult          = tk->tkr_raw.mult;
+               /* tkr_mono.shift == tkr_raw.shift */
                vdso_data->cs_shift             = tk->tkr_mono.shift;
        }
 
index c06919ee29e1b0c25dbd055195a7def20ec1a160..e00b4671bd7c4af5516b95da00409c7296df1963 100644 (file)
@@ -87,6 +87,15 @@ x_tmp                .req    x8
        msub    \res_nsec, x_tmp, \nsec_to_sec, \res_nsec
        .endm
 
+       /*
+        * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
+        * used for CLOCK_MONOTONIC_RAW.
+        */
+       .macro  get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
+       udiv    \res_sec, \clock_nsec, \nsec_to_sec
+       msub    \res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
+       .endm
+
        /* sec and nsec are modified in place. */
        .macro add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
        /* Add timespec. */
@@ -135,7 +144,8 @@ ENTRY(__kernel_gettimeofday)
 1:     seqcnt_acquire
        syscall_check fail=4f
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
-       ldp     w11, w12, [vdso_data, #VDSO_CS_MULT]
+       /* w11 = cs_mono_mult, w12 = cs_shift */
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        seqcnt_check fail=1b
 
@@ -172,20 +182,20 @@ ENDPROC(__kernel_gettimeofday)
 /* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
 ENTRY(__kernel_clock_gettime)
        .cfi_startproc
-       cmp     w0, #JUMPSLOT_MAX
-       b.hi    syscall
+       cmp     w0, #JUMPSLOT_MAX
+       b.hi    syscall
        adr     vdso_data, _vdso_data
-       adr     x_tmp, jumptable
-       add     x_tmp, x_tmp, w0, uxtw #2
-       br      x_tmp
+       adr     x_tmp, jumptable
+       add     x_tmp, x_tmp, w0, uxtw #2
+       br      x_tmp
 
        ALIGN
 jumptable:
        jump_slot jumptable, CLOCK_REALTIME, realtime
        jump_slot jumptable, CLOCK_MONOTONIC, monotonic
-       b       syscall
-       b       syscall
-       b       syscall
+       b       syscall
+       b       syscall
+       jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
        jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
        jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse
 
@@ -198,7 +208,8 @@ realtime:
        seqcnt_acquire
        syscall_check fail=syscall
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
-       ldp     w11, w12, [vdso_data, #VDSO_CS_MULT]
+       /* w11 = cs_mono_mult, w12 = cs_shift */
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        seqcnt_check fail=realtime
 
@@ -216,7 +227,8 @@ monotonic:
        seqcnt_acquire
        syscall_check fail=syscall
        ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
-       ldp     w11, w12, [vdso_data, #VDSO_CS_MULT]
+       /* w11 = cs_mono_mult, w12 = cs_shift */
+       ldp     w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
        ldp     x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
        ldp     x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
        seqcnt_check fail=monotonic
@@ -233,6 +245,28 @@ monotonic:
        add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
        clock_gettime_return, shift=1
 
+       ALIGN
+monotonic_raw:
+       seqcnt_acquire
+       syscall_check fail=syscall
+       ldr     x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+       /* w11 = cs_raw_mult, w12 = cs_shift */
+       ldp     w12, w11, [vdso_data, #VDSO_CS_SHIFT]
+       ldp     x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
+       seqcnt_check fail=monotonic_raw
+
+       /* All computations are done with left-shifted nsecs. */
+       lsl     x14, x14, x12
+       get_nsec_per_sec res=x9
+       lsl     x9, x9, x12
+
+       get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
+       get_ts_clock_raw res_sec=x10, res_nsec=x11, \
+               clock_nsec=x15, nsec_to_sec=x9
+
+       add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
+       clock_gettime_return, shift=1
+
        ALIGN
 realtime_coarse:
        seqcnt_acquire
@@ -265,6 +299,7 @@ ENTRY(__kernel_clock_getres)
        .cfi_startproc
        cmp     w0, #CLOCK_REALTIME
        ccmp    w0, #CLOCK_MONOTONIC, #0x4, ne
+       ccmp    w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
        b.ne    1f
 
        ldr     x2, 5f