VLV_WM_LEVEL_PM2,
VLV_WM_LEVEL_PM5,
VLV_WM_LEVEL_DDR_DVFS,
- CHV_WM_NUM_LEVELS,
- VLV_WM_NUM_LEVELS = 1,
};
/* all latencies in usec */
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
+ dev_priv->wm.max_level = VLV_WM_LEVEL_PM2;
+
if (IS_CHERRYVIEW(dev_priv)) {
dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM5] = 12;
dev_priv->wm.pri_latency[VLV_WM_LEVEL_DDR_DVFS] = 33;
+
+ dev_priv->wm.max_level = VLV_WM_LEVEL_DDR_DVFS;
}
}
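With these values, VLV ends up with wm.max_level = VLV_WM_LEVEL_PM2 (0) and CHV with VLV_WM_LEVEL_DDR_DVFS (2), so the num_levels derived below as max_level + 1 works out to 1 and 3 respectively, matching the counts that the removed VLV_WM_NUM_LEVELS and CHV_WM_NUM_LEVELS constants encoded.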
memset(wm_state, 0, sizeof(*wm_state));
wm_state->cxsr = crtc->pipe != PIPE_C && crtc->wm.cxsr_allowed;
- if (IS_CHERRYVIEW(dev))
- wm_state->num_levels = CHV_WM_NUM_LEVELS;
- else
- wm_state->num_levels = VLV_WM_NUM_LEVELS;
+ wm_state->num_levels = to_i915(dev)->wm.max_level + 1;
wm_state->num_active_planes = 0;
}
/* clear any (partially) filled invalid levels */
- for (level = wm_state->num_levels; level < CHV_WM_NUM_LEVELS; level++) {
+ for (level = wm_state->num_levels; level < to_i915(dev)->wm.max_level + 1; level++) {
memset(&wm_state->wm[level], 0, sizeof(wm_state->wm[level]));
memset(&wm_state->sr[level], 0, sizeof(wm_state->sr[level]));
}
struct intel_crtc *crtc;
int num_active_crtcs = 0;
- if (IS_CHERRYVIEW(dev))
- wm->level = VLV_WM_LEVEL_DDR_DVFS;
- else
- wm->level = VLV_WM_LEVEL_PM2;
+ wm->level = to_i915(dev)->wm.max_level;
wm->cxsr = true;
for_each_intel_crtc(dev, crtc) {
if (crtc) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 12000;
- const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(crtc)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w;
if (HAS_FW_BLC(dev) && enabled) {
/* self-refresh has much higher latency */
static const int sr_latency_ns = 6000;
- const struct drm_display_mode *adjusted_mode =
- &to_intel_crtc(enabled)->config->base.adjusted_mode;
+ const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode;
int clock = adjusted_mode->crtc_clock;
int htotal = adjusted_mode->crtc_htotal;
int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w;
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
+ struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
u32 linetime, ips_linetime;
if (!intel_crtc->active)
/* The WMs are computed based on how long it takes to fill a single
 * row at the given clock rate, multiplied by 8.
 */
- linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
- mode->crtc_clock);
- ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
+ linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ adjusted_mode->crtc_clock);
+ ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
dev_priv->cdclk_freq);
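/*
 * Worked example with hypothetical numbers (not from this patch): for
 * crtc_htotal = 2200 and crtc_clock = 148500 (kHz), one row takes
 * 2200 / 148500 kHz ~= 14.8 us, so linetime = DIV_ROUND_CLOSEST(2200 *
 * 1000 * 8, 148500) = 119, i.e. the row fill time in 1/8 us units.
 */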
return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
if (val & DSP_MAXFIFO_PM5_ENABLE)
wm->level = VLV_WM_LEVEL_PM5;
+ /*
+ * If DDR DVFS is disabled in the BIOS, Punit
+ * will never ack the request. So if that happens
+ * assume we don't have to enable/disable DDR DVFS
+ * dynamically. To test that just set the REQ_ACK
+ * bit to poke the Punit, but don't change the
+ * HIGH/LOW bits so that we don't actually change
+ * the current state.
+ */
val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
- if ((val & FORCE_DDR_HIGH_FREQ) == 0)
- wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ val |= FORCE_DDR_FREQ_REQ_ACK;
+ vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val);
+
+ if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) &
+ FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) {
+ DRM_DEBUG_KMS("Punit not acking DDR DVFS request, "
+ "assuming DDR DVFS is disabled\n");
+ dev_priv->wm.max_level = VLV_WM_LEVEL_PM5;
+ } else {
+ val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2);
+ if ((val & FORCE_DDR_HIGH_FREQ) == 0)
+ wm->level = VLV_WM_LEVEL_DDR_DVFS;
+ }
mutex_unlock(&dev_priv->rps.hw_lock);
}
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ vstart = (I915_READ(PXVFREQ(fstart)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
ironlake_set_drps(dev, fstart);
- dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
- I915_READ(0x112e0);
+ dev_priv->ips.last_count1 = I915_READ(DMIEC) +
+ I915_READ(DDREC) + I915_READ(CSIEC);
dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
- dev_priv->ips.last_count2 = I915_READ(0x112f4);
+ dev_priv->ips.last_count2 = I915_READ(GFXEC);
dev_priv->ips.last_time2 = ktime_get_raw_ns();
spin_unlock_irq(&mchdev_lock);
I915_WRITE(GEN6_RC_CONTROL, 0);
/* 2b: Program RC6 thresholds.*/
- I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+
+ /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
+ if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
+ (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
+ else
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
for_each_ring(ring, dev_priv, unused)
I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+
+ if (HAS_GUC_UCODE(dev))
+ I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
+
I915_WRITE(GEN6_RC_SLEEP, 0);
I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
"on" : "off");
- I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
- GEN6_RC_CTL_EI_MODE(1) |
- rc6_mask);
+
+ if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) ||
+ (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0))
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN7_RC_CTL_TO_MODE |
+ rc6_mask);
+ else
+ I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ rc6_mask);
/*
* 3b: Enable Coarse Power Gating only when RC6 is enabled.
- * WaDisableRenderPowerGating:skl,bxt - Render PG need to be disabled with RC6.
+ * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
*/
- I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
- GEN9_MEDIA_PG_ENABLE : 0);
-
+ if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
+ ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+ I915_WRITE(GEN9_PG_ENABLE, 0);
+ else
+ I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+ (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
assert_spin_locked(&mchdev_lock);
- pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
+ pxvid = I915_READ(PXVFREQ(dev_priv->rps.cur_freq));
pxvid = (pxvid >> 24) & 0x7f;
ext_v = pvid_to_extvid(dev_priv, pxvid);
I915_WRITE(CSIEW2, 0x04000004);
for (i = 0; i < 5; i++)
- I915_WRITE(PEW + (i * 4), 0);
+ I915_WRITE(PEW(i), 0);
for (i = 0; i < 3; i++)
- I915_WRITE(DEW + (i * 4), 0);
+ I915_WRITE(DEW(i), 0);
/* Program P-state weights to account for frequency power adjustment */
for (i = 0; i < 16; i++) {
- u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
+ u32 pxvidfreq = I915_READ(PXVFREQ(i));
unsigned long freq = intel_pxfreq(pxvidfreq);
unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
for (i = 0; i < 4; i++) {
u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
(pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
- I915_WRITE(PXW + (i * 4), val);
+ I915_WRITE(PXW(i), val);
}
/* Adjust magic regs to magic values (more experimental results) */
I915_WRITE(EG7, 0);
for (i = 0; i < 8; i++)
- I915_WRITE(PXWL + (i * 4), 0);
+ I915_WRITE(PXWL(i), 0);
/* Enable PMON + select events */
I915_WRITE(ECR, 0x80000019);