staging/lustre: use 64-bit time for pl_recalc
Author:     Arnd Bergmann <arnd@arndb.de>
AuthorDate: Sun, 27 Sep 2015 20:45:17 +0000 (16:45 -0400)
Commit:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CommitDate: Tue, 29 Sep 2015 02:03:36 +0000 (04:03 +0200)
The ldlm pool calculates the elapsed time by comparing the previous and
current get_seconds() values, which is unsafe on 32-bit machines once
the signed 32-bit time_t overflows in January 2038.
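
A minimal userspace sketch of the failure mode (illustrative only:
int32_t stands in for time_t on a 32-bit kernel, int64_t for time64_t,
and the sampled value is hypothetical):

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
          /* seconds since the epoch, just past 2038-01-19 03:14:07 UTC */
          uint64_t now = 0x80000005ull;

          int32_t t32 = (int32_t)now;     /* 32-bit time_t: wraps negative */
          int64_t t64 = (int64_t)now;     /* time64_t: still correct */

          printf("32-bit time_t: %d\n", t32);     /* ~1901, bogus */
          printf("time64_t:      %lld\n", (long long)t64);
          return 0;
  }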

This changes the code to use time64_t and ktime_get_real_seconds(),
keeping 'real' rather than 'monotonic' time because the values show up
in debug prints.
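
A sketch of the resulting pattern (struct example_pool and
example_needs_recalc() are illustrative stand-ins for the ldlm pool
code below; time64_t and ktime_get_real_seconds() are the real kernel
interfaces):

  #include <linux/time64.h>       /* time64_t */
  #include <linux/timekeeping.h>  /* ktime_get_real_seconds() */

  struct example_pool {
          time64_t recalc_time;   /* wall-clock time of the last recalc */
          time64_t recalc_period; /* seconds between recalcs */
  };

  static int example_needs_recalc(struct example_pool *p)
  {
          /* 64-bit wall-clock seconds minus a 64-bit timestamp:
           * no overflow in 2038 on either 32-bit or 64-bit kernels */
          time64_t elapsed = ktime_get_real_seconds() - p->recalc_time;

          return elapsed >= p->recalc_period;
  }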

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 drivers/staging/lustre/lustre/include/lustre_dlm.h
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c

diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 796a997ec94c9e766e196cd68c953721c9967227..1ac08e1c055914d022724635d33d1b02e080a460 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -256,9 +256,9 @@ struct ldlm_pool {
         *  server_slv * lock_volume_factor. */
        atomic_t                pl_lock_volume_factor;
        /** Time when last SLV from server was obtained. */
-       time_t                  pl_recalc_time;
+       time64_t                pl_recalc_time;
        /** Recalculation period for pool. */
-       time_t                  pl_recalc_period;
+       time64_t                pl_recalc_period;
        /** Recalculation and shrink operations. */
        const struct ldlm_pool_ops      *pl_ops;
        /** Number of planned locks for next period. */
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index c234acb85f10063cef7aa0a8c9b8f71c8519b8b2..1c9d67fbeb27c97d8f60b437568df917ac61c73d 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -330,14 +330,14 @@ static void ldlm_srv_pool_push_slv(struct ldlm_pool *pl)
  */
 static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
 {
-       time_t recalc_interval_sec;
+       time64_t recalc_interval_sec;
 
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;
 
        spin_lock(&pl->pl_lock);
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
@@ -358,7 +358,7 @@ static int ldlm_srv_pool_recalc(struct ldlm_pool *pl)
         */
        ldlm_pool_recalc_grant_plan(pl);
 
-       pl->pl_recalc_time = get_seconds();
+       pl->pl_recalc_time = ktime_get_real_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
@@ -467,10 +467,10 @@ static void ldlm_cli_pool_pop_slv(struct ldlm_pool *pl)
  */
 static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
 {
-       time_t recalc_interval_sec;
+       time64_t recalc_interval_sec;
        int ret;
 
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period)
                return 0;
 
@@ -478,7 +478,7 @@ static int ldlm_cli_pool_recalc(struct ldlm_pool *pl)
        /*
         * Check if we need to recalc lists now.
         */
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_real_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec < pl->pl_recalc_period) {
                spin_unlock(&pl->pl_lock);
                return 0;
@@ -513,7 +513,7 @@ out:
         * Time of LRU resizing might be longer than period,
         * so update after LRU resizing rather than before it.
         */
-       pl->pl_recalc_time = get_seconds();
+       pl->pl_recalc_time = ktime_get_real_seconds();
        lprocfs_counter_add(pl->pl_stats, LDLM_POOL_TIMING_STAT,
                            recalc_interval_sec);
        spin_unlock(&pl->pl_lock);
@@ -571,10 +571,10 @@ static const struct ldlm_pool_ops ldlm_cli_pool_ops = {
  */
 int ldlm_pool_recalc(struct ldlm_pool *pl)
 {
-       time_t recalc_interval_sec;
+       u32 recalc_interval_sec;
        int count;
 
-       recalc_interval_sec = get_seconds() - pl->pl_recalc_time;
+       recalc_interval_sec = ktime_get_seconds() - pl->pl_recalc_time;
        if (recalc_interval_sec <= 0)
                goto recalc;
 
@@ -599,14 +599,14 @@ int ldlm_pool_recalc(struct ldlm_pool *pl)
                lprocfs_counter_add(pl->pl_stats, LDLM_POOL_RECALC_STAT,
                                    count);
        }
-       recalc_interval_sec = pl->pl_recalc_time - get_seconds() +
+       recalc_interval_sec = pl->pl_recalc_time - ktime_get_seconds() +
                              pl->pl_recalc_period;
        if (recalc_interval_sec <= 0) {
                /* Prevent too frequent recalculation. */
-               CDEBUG(D_DLMTRACE, "Negative interval(%ld), "
-                      "too short period(%ld)",
+               CDEBUG(D_DLMTRACE,
+                      "Negative interval(%d), too short period(%lld)",
                       recalc_interval_sec,
-                      pl->pl_recalc_period);
+                      (s64)pl->pl_recalc_period);
                recalc_interval_sec = 1;
        }
 
@@ -893,7 +893,7 @@ int ldlm_pool_init(struct ldlm_pool *pl, struct ldlm_namespace *ns,
 
        spin_lock_init(&pl->pl_lock);
        atomic_set(&pl->pl_granted, 0);
-       pl->pl_recalc_time = get_seconds();
+       pl->pl_recalc_time = ktime_get_seconds();
        atomic_set(&pl->pl_lock_volume_factor, 1);
 
        atomic_set(&pl->pl_grant_rate, 0);