drm/i915: Finish enabling rps before use by sysfs or debugfs
author Tom O'Rourke <Tom.O'Rourke@intel.com>
Mon, 16 Sep 2013 21:56:43 +0000 (14:56 -0700)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 10 Oct 2013 12:23:39 +0000 (14:23 +0200)
Enabling rps (turbo setup) was put on a work queue because it may
take quite a while.  This change flushes that work to ensure the rps
values are initialized before they are used by sysfs or debugfs.
Specifically, rps.delayed_resume_work is flushed before using
rps.hw_max, rps.max_delay, rps.min_delay, or rps.cur_delay.

This change fixes a problem in sysfs where show functions report
incorrect values because they read uninitialized rps fields, and
store functions incorrectly reject valid input because their range
checks use uninitialized limits.  This change also addresses similar
use-before-initialization problems in debugfs.
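
For reference, a minimal sketch of the flush-before-read pattern this
patch relies on (simplified, not the actual i915 code; struct rps_state
stands in for dev_priv->rps and read_max_delay() is a hypothetical
helper):

/*
 * Minimal sketch: the rps limits are filled in by a delayed work
 * item, so any reader flushes that work before trusting the values.
 */
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct rps_state {			/* stand-in for dev_priv->rps */
	struct delayed_work delayed_resume_work;
	struct mutex hw_lock;
	u8 cur_delay, min_delay, max_delay, hw_max;
};

static void delayed_resume_fn(struct work_struct *work)
{
	struct rps_state *rps = container_of(to_delayed_work(work),
					     struct rps_state,
					     delayed_resume_work);

	mutex_lock(&rps->hw_lock);
	/* ... read hardware and initialize hw_max/max_delay/min_delay ... */
	mutex_unlock(&rps->hw_lock);
}

static u8 read_max_delay(struct rps_state *rps)
{
	u8 max;

	/* Run the init work now if it is still pending, and wait for it. */
	flush_delayed_work(&rps->delayed_resume_work);

	mutex_lock(&rps->hw_lock);
	max = rps->max_delay;		/* now guaranteed initialized */
	mutex_unlock(&rps->hw_lock);
	return max;
}

flush_delayed_work() kicks off a still-pending timer and waits for the
work to finish executing, which is why calling it from the sysfs and
debugfs paths is safe even before the delayed resume has fired.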

Signed-off-by: Tom O'Rourke <Tom.O'Rourke@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_sysfs.c

index 5fd6a5db6eb560103a2f97439a82dc53cc821264..a569597125db91275e91e71b968a80a9450664c1 100644
@@ -847,6 +847,8 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        if (IS_GEN5(dev)) {
                u16 rgvswctl = I915_READ16(MEMSWCTL);
                u16 rgvstat = I915_READ16(MEMSTAT_ILK);
@@ -1325,6 +1327,8 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
                return 0;
        }
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -1940,6 +1944,8 @@ i915_max_freq_get(void *data, u64 *val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -1964,6 +1970,8 @@ i915_max_freq_set(void *data, u64 val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        DRM_DEBUG_DRIVER("Manually setting max freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
@@ -2002,6 +2010,8 @@ i915_min_freq_get(void *data, u64 *val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
        if (ret)
                return ret;
@@ -2026,6 +2036,8 @@ i915_min_freq_set(void *data, u64 val)
        if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        DRM_DEBUG_DRIVER("Manually setting min freq to %llu\n", val);
 
        ret = mutex_lock_interruptible(&dev_priv->rps.hw_lock);
index 8003886361b80fc835e7f45c96f6d5c906d70bb2..9ff1e4d969091fcc0984dc6a8cb6a8050dca35de 100644
@@ -251,6 +251,8 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev)) {
                u32 freq;
@@ -283,6 +285,8 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.max_delay);
@@ -307,6 +311,8 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev_priv->dev)) {
@@ -355,6 +361,8 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv->dev))
                ret = vlv_gpu_freq(dev_priv->mem_freq, dev_priv->rps.min_delay);
@@ -379,6 +387,8 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
        if (ret)
                return ret;
 
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
        mutex_lock(&dev_priv->rps.hw_lock);
 
        if (IS_VALLEYVIEW(dev)) {