/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPUs, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * the voltage consumed by the GPU in different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6,
 * and RC6pp is the deepest RC6. Their support by hardware varies according
 * to the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
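
/*
 * Illustrative example (not from any platform table): a mask of
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE) would permit RC6 and deep RC6
 * while keeping the GPU out of the deepest RC6pp state, trading a bit
 * of power for lower wake-up latency.
 */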
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing the power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc parameter.
 */
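
/*
 * For instance, booting with i915.enable_fbc=1 should force FBC on
 * (where supported) and i915.enable_fbc=0 force it off; the default of
 * -1 defers to the per-chip default handled in intel_update_fbc()
 * below.
 */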
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}

static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this param on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
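
/*
 * A note on the write sequence above, assuming ECOSKPD follows the usual
 * masked-register convention: a low bit only latches a new value while
 * its companion bit (the same bit shifted by GEN6_BLITTER_LOCK_SHIFT)
 * is set in the same write. Hence the dance: raise the mask bit, latch
 * GEN6_BLITTER_FBC_NOTIFY, then drop the mask bit again so unrelated
 * writes elsewhere cannot clobber it.
 */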
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}

static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}

static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}

void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time.  We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one.  It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}

static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
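
/*
 * Example lookup, assuming the struct cxsr_latency field order
 * (is_desktop, is_ddr3, fsb_freq, mem_freq, then the four latencies):
 * a desktop part with DDR2-667 on an 800 MHz FSB matches
 * {1, 0, 800, 667, 3354, 33354, 3807, 33807}, i.e. a display
 * self-refresh latency of 3354 ns.
 */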
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}

static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value.  It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A",
		      size);

	return size;
}
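
/*
 * Illustrative DSPARB split for i9xx_get_fifo_size(): if the low 7 bits
 * read 0x30 (48) and the CSTART field reads 0x60 (96), plane A owns the
 * first 48 FIFO entries and plane B the next 96 - 48 = 48. The register
 * value here is made up for the example.
 */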
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again).  Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size.  When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point.  If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
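
/*
 * Worked example with illustrative numbers: a 108000 kHz pixel clock at
 * 4 bytes per pixel with the 5000 ns latency above drains
 * (108000 / 1000) * 4 * 5000 / 1000 = 2160 bytes while a fetch is
 * outstanding; with 64-byte cachelines that is DIV_ROUND_UP(2160, 64) =
 * 34 entries, so a 96-entry FIFO with a guard of 2 programs a watermark
 * of 96 - (34 + 2) = 60.
 */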
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
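
/*
 * The "small buffer" method above sizes the plane watermark by bytes
 * drained during the latency window: with an illustrative 100000 kHz
 * clock, 4 bytes per pixel and 5000 ns latency that is
 * ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes, i.e. 32 cachelines,
 * plus the guard. The "large buffer" method used for the cursor counts
 * whole lines instead (line_count * width * pixel_size).
 */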
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
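
/*
 * Worked example with illustrative numbers: at a 100000 kHz pixel clock
 * and 4 bytes per pixel, entries = (100000 / 1000) * 4 = 400 > 256, so
 * the 32x precision multiplier is chosen and
 * plane_dl = (64 * 32 * 4) / ((100000 / 1000) * 4) = 8192 / 400 = 20.
 */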
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
							either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
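
/*
 * is_power_of_2() is true exactly when a single bit is set, so a mask
 * of only pipe A (0x1) or only pipe B (0x2) qualifies, while 0x0 (no
 * pipes) and 0x3 (both pipes) do not.
 */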
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct intel_framebuffer *fb;

		fb = to_intel_framebuffer(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (fb->obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
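
/*
 * Illustrative pch_pfit case: a 1920x1080 pipe scaled down to a
 * 1280x720 panel fitter window keeps pipe_w/pipe_h at 1920x1080 (both
 * exceed the pfit dimensions), so the pixel rate is scaled by
 * (1920 * 1080) / (1280 * 720) = 2.25 to account for the downscaling.
 */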
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
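
/*
 * Worked example with illustrative numbers: pixel_rate = 148500 kHz,
 * 4 bytes per pixel and latency = 7 (0.7 us) give
 * 148500 * 4 * 7 = 4158000, and DIV_ROUND_UP_ULL(4158000, 640000) + 2 =
 * 7 + 2 = 9 cachelines' worth of watermark.
 */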
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;
	return ret;
}

static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}

/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}

static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
					 int level, bool is_sprite)
{
	if (INTEL_INFO(dev)->gen >= 8)
		/* BDW primary/sprite plane watermarks */
		return level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		return level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		return level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		return level == 0 ? 63 : 255;
}

static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
					  int level)
{
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
}

/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	return ilk_cursor_wm_reg_max(dev, level);
}
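
/*
 * Example of the 5/6 split above, assuming an IVB-sized 768 entry FIFO
 * with sprites enabled at an LP level: INTEL_DDB_PART_5_6 gives the
 * sprite 768 * 5 / 6 = 640 entries and the primary 768 / 6 = 128, while
 * the default 1:1 split gives each plane 768 / 2 = 384.
 */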
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}

static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
					int level,
					struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_reg_max(dev, level, false);
	max->spr = ilk_plane_wm_reg_max(dev, level, true);
	max->cur = ilk_cursor_wm_reg_max(dev, level);
	max->fbc = ilk_fbc_wm_reg_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
2011 static uint32_t
2012 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2014 struct drm_i915_private *dev_priv = dev->dev_private;
2015 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2016 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2017 u32 linetime, ips_linetime;
2019 if (!intel_crtc_active(crtc))
2020 return 0;
2022 /* The watermarks are computed based on how long it takes to fill a
2023 * single row at the given clock rate, multiplied by 8.
2024 */
2025 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2026 mode->crtc_clock);
2027 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2028 intel_ddi_get_cdclk_freq(dev_priv));
2030 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2031 PIPE_WM_LINETIME_TIME(linetime);
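/*
 * For example, with an illustrative 1080p timing of htotal = 2200
 * and a 148500 kHz pixel clock, linetime works out to
 * DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500) = 119, i.e. the
 * 14.8 us line time expressed in 1/8 us units.
 */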
2034 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2038 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2039 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2041 wm[0] = (sskpd >> 56) & 0xFF;
2042 if (wm[0] == 0)
2043 wm[0] = sskpd & 0xF;
2044 wm[1] = (sskpd >> 4) & 0xFF;
2045 wm[2] = (sskpd >> 12) & 0xFF;
2046 wm[3] = (sskpd >> 20) & 0x1FF;
2047 wm[4] = (sskpd >> 32) & 0x1FF;
2048 } else if (INTEL_INFO(dev)->gen >= 6) {
2049 uint32_t sskpd = I915_READ(MCH_SSKPD);
2051 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2052 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2053 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2054 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2055 } else if (INTEL_INFO(dev)->gen >= 5) {
2056 uint32_t mltr = I915_READ(MLTR_ILK);
2058 /* ILK primary LP0 latency is 700 ns */
2059 wm[0] = 7;
2060 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2061 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2065 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2067 /* ILK sprite LP0 latency is 1300 ns */
2068 if (INTEL_INFO(dev)->gen == 5)
2069 wm[0] = 13;
2072 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2074 /* ILK cursor LP0 latency is 1300 ns */
2075 if (INTEL_INFO(dev)->gen == 5)
2076 wm[0] = 13;
2078 /* WaDoubleCursorLP3Latency:ivb */
2079 if (IS_IVYBRIDGE(dev))
2080 wm[3] *= 2;
2083 int ilk_wm_max_level(const struct drm_device *dev)
2085 /* how many WM levels are we expecting */
2086 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2087 return 4;
2088 else if (INTEL_INFO(dev)->gen >= 6)
2089 return 3;
2090 else
2091 return 2;
2094 static void intel_print_wm_latency(struct drm_device *dev,
2095 const char *name,
2096 const uint16_t wm[5])
2098 int level, max_level = ilk_wm_max_level(dev);
2100 for (level = 0; level <= max_level; level++) {
2101 unsigned int latency = wm[level];
2103 if (latency == 0) {
2104 DRM_ERROR("%s WM%d latency not provided\n",
2105 name, level);
2106 continue;
2107 }
2109 /* WM1+ latency values in 0.5us units */
2110 if (level > 0)
2111 latency *= 5;
2113 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2114 name, level, wm[level],
2115 latency / 10, latency % 10);
2119 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2120 uint16_t wm[5], uint16_t min)
2122 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2124 if (wm[0] >= min)
2125 return false;
2127 wm[0] = max(wm[0], min);
2128 for (level = 1; level <= max_level; level++)
2129 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2131 return true;
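/*
 * E.g. a min of 12 (as used by the SNB quirk below) bumps WM0 to at
 * least 1.2 us in its 0.1 us units, while WM1+ are bumped to
 * DIV_ROUND_UP(12, 5) = 3, i.e. 1.5 us in their coarser 0.5 us units.
 */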
2134 static void snb_wm_latency_quirk(struct drm_device *dev)
2136 struct drm_i915_private *dev_priv = dev->dev_private;
2137 bool changed;
2140 * The BIOS provided WM memory latency values are often
2141 * inadequate for high resolution displays. Adjust them.
2143 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2144 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2145 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2147 if (!changed)
2148 return;
2150 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2151 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2152 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2153 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2156 static void ilk_setup_wm_latency(struct drm_device *dev)
2158 struct drm_i915_private *dev_priv = dev->dev_private;
2160 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2162 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2163 sizeof(dev_priv->wm.pri_latency));
2164 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2165 sizeof(dev_priv->wm.pri_latency));
2167 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2168 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2170 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2171 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2172 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2174 if (IS_GEN6(dev))
2175 snb_wm_latency_quirk(dev);
2178 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2179 struct ilk_pipe_wm_parameters *p)
2181 struct drm_device *dev = crtc->dev;
2182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2183 enum pipe pipe = intel_crtc->pipe;
2184 struct drm_plane *plane;
2186 if (!intel_crtc_active(crtc))
2187 return;
2189 p->active = true;
2190 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2191 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2192 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2193 p->cur.bytes_per_pixel = 4;
2194 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2195 p->cur.horiz_pixels = intel_crtc->cursor_width;
2196 /* TODO: for now, assume primary and cursor planes are always enabled. */
2197 p->pri.enabled = true;
2198 p->cur.enabled = true;
2200 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2201 struct intel_plane *intel_plane = to_intel_plane(plane);
2203 if (intel_plane->pipe == pipe) {
2204 p->spr = intel_plane->wm;
2205 break;
2210 static void ilk_compute_wm_config(struct drm_device *dev,
2211 struct intel_wm_config *config)
2213 struct intel_crtc *intel_crtc;
2215 /* Compute the currently _active_ config */
2216 for_each_intel_crtc(dev, intel_crtc) {
2217 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2219 if (!wm->pipe_enabled)
2220 continue;
2222 config->sprites_enabled |= wm->sprites_enabled;
2223 config->sprites_scaled |= wm->sprites_scaled;
2224 config->num_pipes_active++;
2228 /* Compute new watermarks for the pipe */
2229 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2230 const struct ilk_pipe_wm_parameters *params,
2231 struct intel_pipe_wm *pipe_wm)
2233 struct drm_device *dev = crtc->dev;
2234 const struct drm_i915_private *dev_priv = dev->dev_private;
2235 int level, max_level = ilk_wm_max_level(dev);
2236 /* LP0 watermark maximums depend on this pipe alone */
2237 struct intel_wm_config config = {
2238 .num_pipes_active = 1,
2239 .sprites_enabled = params->spr.enabled,
2240 .sprites_scaled = params->spr.scaled,
2241 };
2242 struct ilk_wm_maximums max;
2244 pipe_wm->pipe_enabled = params->active;
2245 pipe_wm->sprites_enabled = params->spr.enabled;
2246 pipe_wm->sprites_scaled = params->spr.scaled;
2248 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2249 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2250 max_level = 1;
2252 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2253 if (params->spr.scaled)
2254 max_level = 0;
2256 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2258 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2259 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2261 /* LP0 watermarks always use 1/2 DDB partitioning */
2262 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2264 /* At least LP0 must be valid */
2265 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2266 return false;
2268 ilk_compute_wm_reg_maximums(dev, 1, &max);
2270 for (level = 1; level <= max_level; level++) {
2271 struct intel_wm_level wm = {};
2273 ilk_compute_wm_level(dev_priv, level, params, &wm);
2276 * Disable any watermark level that exceeds the
2277 * register maximums since such watermarks are
2280 if (!ilk_validate_wm_level(level, &max, &wm))
2281 break;
2283 pipe_wm->wm[level] = wm;
2284 }
2286 return true;
2290 * Merge the watermarks from all active pipes for a specific level.
2292 static void ilk_merge_wm_level(struct drm_device *dev,
2293 int level,
2294 struct intel_wm_level *ret_wm)
2296 const struct intel_crtc *intel_crtc;
2298 ret_wm->enable = true;
2300 for_each_intel_crtc(dev, intel_crtc) {
2301 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2302 const struct intel_wm_level *wm = &active->wm[level];
2304 if (!active->pipe_enabled)
2305 continue;
2308 * The watermark values may have been used in the past,
2309 * so we must maintain them in the registers for some
2310 * time even if the level is now disabled.
2311 */
2312 if (!wm->enable)
2313 ret_wm->enable = false;
2315 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2316 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2317 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2318 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2323 * Merge all low power watermarks for all active pipes.
2325 static void ilk_wm_merge(struct drm_device *dev,
2326 const struct intel_wm_config *config,
2327 const struct ilk_wm_maximums *max,
2328 struct intel_pipe_wm *merged)
2330 int level, max_level = ilk_wm_max_level(dev);
2331 int last_enabled_level = max_level;
2333 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2334 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2335 config->num_pipes_active > 1)
2336 return;
2338 /* ILK: FBC WM must be disabled always */
2339 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2341 /* merge each WM1+ level */
2342 for (level = 1; level <= max_level; level++) {
2343 struct intel_wm_level *wm = &merged->wm[level];
2345 ilk_merge_wm_level(dev, level, wm);
2347 if (level > last_enabled_level)
2348 wm->enable = false;
2349 else if (!ilk_validate_wm_level(level, max, wm))
2350 /* make sure all following levels get disabled */
2351 last_enabled_level = level - 1;
2354 * The spec says it is preferred to disable
2355 * FBC WMs instead of disabling a WM level.
2357 if (wm->fbc_val > max->fbc) {
2358 if (wm->enable)
2359 merged->fbc_wm_enabled = false;
2360 wm->fbc_val = 0;
2361 }
2364 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2366 * FIXME this is racy. FBC might get enabled later.
2367 * What we should check here is whether FBC can be
2368 * enabled sometime later.
2370 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2371 for (level = 2; level <= max_level; level++) {
2372 struct intel_wm_level *wm = &merged->wm[level];
2374 wm->enable = false;
2379 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2381 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2382 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
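/*
 * E.g. when wm[4] is enabled (a 5-level platform such as HSW with all
 * levels valid), LP1/LP2/LP3 program levels 1/3/4; otherwise they
 * program levels 1/2/3.
 */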
2385 /* The value we need to program into the WM_LPx latency field */
2386 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2388 struct drm_i915_private *dev_priv = dev->dev_private;
2390 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2391 return 2 * level;
2392 else
2393 return dev_priv->wm.pri_latency[level];
2396 static void ilk_compute_wm_results(struct drm_device *dev,
2397 const struct intel_pipe_wm *merged,
2398 enum intel_ddb_partitioning partitioning,
2399 struct ilk_wm_values *results)
2401 struct intel_crtc *intel_crtc;
2402 int level, wm_lp;
2404 results->enable_fbc_wm = merged->fbc_wm_enabled;
2405 results->partitioning = partitioning;
2407 /* LP1+ register values */
2408 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2409 const struct intel_wm_level *r;
2411 level = ilk_wm_lp_to_level(wm_lp, merged);
2413 r = &merged->wm[level];
2416 * Maintain the watermark values even if the level is
2417 * disabled. Doing otherwise could cause underruns.
2419 results->wm_lp[wm_lp - 1] =
2420 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2421 (r->pri_val << WM1_LP_SR_SHIFT) |
2422 r->cur_val;
2424 if (r->enable)
2425 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2427 if (INTEL_INFO(dev)->gen >= 8)
2428 results->wm_lp[wm_lp - 1] |=
2429 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2430 else
2431 results->wm_lp[wm_lp - 1] |=
2432 r->fbc_val << WM1_LP_FBC_SHIFT;
2435 * Always set WM1S_LP_EN when spr_val != 0, even if the
2436 * level is disabled. Doing otherwise could cause underruns.
2438 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2439 WARN_ON(wm_lp != 1);
2440 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2441 } else
2442 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2445 /* LP0 register values */
2446 for_each_intel_crtc(dev, intel_crtc) {
2447 enum pipe pipe = intel_crtc->pipe;
2448 const struct intel_wm_level *r =
2449 &intel_crtc->wm.active.wm[0];
2451 if (WARN_ON(!r->enable))
2452 continue;
2454 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2456 results->wm_pipe[pipe] =
2457 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2458 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2459 r->cur_val;
2463 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2464 * case both are at the same level. Prefer r1 in case they're the same. */
2465 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2466 struct intel_pipe_wm *r1,
2467 struct intel_pipe_wm *r2)
2469 int level, max_level = ilk_wm_max_level(dev);
2470 int level1 = 0, level2 = 0;
2472 for (level = 1; level <= max_level; level++) {
2473 if (r1->wm[level].enable)
2474 level1 = level;
2475 if (r2->wm[level].enable)
2476 level2 = level;
2479 if (level1 == level2) {
2480 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2481 return r2;
2482 else
2483 return r1;
2484 } else if (level1 > level2) {
2485 return r1;
2486 } else {
2487 return r2;
2488 }
2491 /* dirty bits used to track which watermarks need changes */
2492 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2493 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2494 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2495 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2496 #define WM_DIRTY_FBC (1 << 24)
2497 #define WM_DIRTY_DDB (1 << 25)
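/*
 * E.g. a change to pipe A's WM0 value yields
 * WM_DIRTY_PIPE(PIPE_A) | WM_DIRTY_LP_ALL (LP1+ watermarks must be
 * disabled across the update), while a change only to the WM2
 * register value marks WM_DIRTY_LP(2) and WM_DIRTY_LP(3) dirty.
 */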
2499 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2500 const struct ilk_wm_values *old,
2501 const struct ilk_wm_values *new)
2503 unsigned int dirty = 0;
2507 for_each_pipe(pipe) {
2508 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2509 dirty |= WM_DIRTY_LINETIME(pipe);
2510 /* Must disable LP1+ watermarks too */
2511 dirty |= WM_DIRTY_LP_ALL;
2514 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2515 dirty |= WM_DIRTY_PIPE(pipe);
2516 /* Must disable LP1+ watermarks too */
2517 dirty |= WM_DIRTY_LP_ALL;
2521 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2522 dirty |= WM_DIRTY_FBC;
2523 /* Must disable LP1+ watermarks too */
2524 dirty |= WM_DIRTY_LP_ALL;
2527 if (old->partitioning != new->partitioning) {
2528 dirty |= WM_DIRTY_DDB;
2529 /* Must disable LP1+ watermarks too */
2530 dirty |= WM_DIRTY_LP_ALL;
2533 /* LP1+ watermarks already deemed dirty, no need to continue */
2534 if (dirty & WM_DIRTY_LP_ALL)
2535 return dirty;
2537 /* Find the lowest numbered LP1+ watermark in need of an update... */
2538 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2539 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2540 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2541 break;
2544 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2545 for (; wm_lp <= 3; wm_lp++)
2546 dirty |= WM_DIRTY_LP(wm_lp);
2548 return dirty;
2551 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2552 unsigned int dirty)
2554 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2555 bool changed = false;
2557 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2558 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2559 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2560 changed = true;
2561 }
2562 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2563 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2564 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2565 changed = true;
2566 }
2567 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2568 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2569 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2570 changed = true;
2571 }
2574 * Don't touch WM1S_LP_EN here.
2575 * Doing so could cause underruns.
2576 */
2578 return changed;
2582 * The spec says we shouldn't write when we don't need to, because every
2583 * write causes WMs to be re-evaluated, expending some power.
2585 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2586 struct ilk_wm_values *results)
2588 struct drm_device *dev = dev_priv->dev;
2589 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2590 unsigned int dirty;
2591 uint32_t val;
2593 dirty = ilk_compute_wm_dirty(dev, previous, results);
2595 if (!dirty)
2596 return;
2597 _ilk_disable_lp_wm(dev_priv, dirty);
2599 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2600 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2601 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2602 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2603 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2604 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2606 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2607 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2608 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2609 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2610 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2611 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2613 if (dirty & WM_DIRTY_DDB) {
2614 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2615 val = I915_READ(WM_MISC);
2616 if (results->partitioning == INTEL_DDB_PART_1_2)
2617 val &= ~WM_MISC_DATA_PARTITION_5_6;
2618 else
2619 val |= WM_MISC_DATA_PARTITION_5_6;
2620 I915_WRITE(WM_MISC, val);
2621 } else {
2622 val = I915_READ(DISP_ARB_CTL2);
2623 if (results->partitioning == INTEL_DDB_PART_1_2)
2624 val &= ~DISP_DATA_PARTITION_5_6;
2625 else
2626 val |= DISP_DATA_PARTITION_5_6;
2627 I915_WRITE(DISP_ARB_CTL2, val);
2631 if (dirty & WM_DIRTY_FBC) {
2632 val = I915_READ(DISP_ARB_CTL);
2633 if (results->enable_fbc_wm)
2634 val &= ~DISP_FBC_WM_DIS;
2635 else
2636 val |= DISP_FBC_WM_DIS;
2637 I915_WRITE(DISP_ARB_CTL, val);
2640 if (dirty & WM_DIRTY_LP(1) &&
2641 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2642 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2644 if (INTEL_INFO(dev)->gen >= 7) {
2645 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2646 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2647 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2648 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2651 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2652 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2653 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2654 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2655 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2656 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2658 dev_priv->wm.hw = *results;
2661 static bool ilk_disable_lp_wm(struct drm_device *dev)
2663 struct drm_i915_private *dev_priv = dev->dev_private;
2665 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2668 static void ilk_update_wm(struct drm_crtc *crtc)
2670 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2671 struct drm_device *dev = crtc->dev;
2672 struct drm_i915_private *dev_priv = dev->dev_private;
2673 struct ilk_wm_maximums max;
2674 struct ilk_pipe_wm_parameters params = {};
2675 struct ilk_wm_values results = {};
2676 enum intel_ddb_partitioning partitioning;
2677 struct intel_pipe_wm pipe_wm = {};
2678 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2679 struct intel_wm_config config = {};
2681 ilk_compute_wm_parameters(crtc, ¶ms);
2683 intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm);
2685 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2686 return;
2688 intel_crtc->wm.active = pipe_wm;
2690 ilk_compute_wm_config(dev, &config);
2692 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2693 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2695 /* 5/6 split only in single pipe config on IVB+ */
2696 if (INTEL_INFO(dev)->gen >= 7 &&
2697 config.num_pipes_active == 1 && config.sprites_enabled) {
2698 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2699 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2701 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2702 } else {
2703 best_lp_wm = &lp_wm_1_2;
2704 }
2706 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2707 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2709 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2711 ilk_write_wm_values(dev_priv, &results);
2714 static void ilk_update_sprite_wm(struct drm_plane *plane,
2715 struct drm_crtc *crtc,
2716 uint32_t sprite_width, int pixel_size,
2717 bool enabled, bool scaled)
2719 struct drm_device *dev = plane->dev;
2720 struct intel_plane *intel_plane = to_intel_plane(plane);
2722 intel_plane->wm.enabled = enabled;
2723 intel_plane->wm.scaled = scaled;
2724 intel_plane->wm.horiz_pixels = sprite_width;
2725 intel_plane->wm.bytes_per_pixel = pixel_size;
2728 * IVB workaround: must disable low power watermarks for at least
2729 * one frame before enabling scaling. LP watermarks can be re-enabled
2730 * when scaling is disabled.
2732 * WaCxSRDisabledForSpriteScaling:ivb
2734 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2735 intel_wait_for_vblank(dev, intel_plane->pipe);
2737 ilk_update_wm(crtc);
2740 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2742 struct drm_device *dev = crtc->dev;
2743 struct drm_i915_private *dev_priv = dev->dev_private;
2744 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2745 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2746 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2747 enum pipe pipe = intel_crtc->pipe;
2748 static const unsigned int wm0_pipe_reg[] = {
2749 [PIPE_A] = WM0_PIPEA_ILK,
2750 [PIPE_B] = WM0_PIPEB_ILK,
2751 [PIPE_C] = WM0_PIPEC_IVB,
2754 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2755 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2756 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2758 active->pipe_enabled = intel_crtc_active(crtc);
2760 if (active->pipe_enabled) {
2761 u32 tmp = hw->wm_pipe[pipe];
2764 * For active pipes LP0 watermark is marked as
2765 * enabled, and LP1+ watermarks as disabled since
2766 * we can't really reverse compute them in case
2767 * multiple pipes are active.
2769 active->wm[0].enable = true;
2770 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2771 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2772 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2773 active->linetime = hw->wm_linetime[pipe];
2775 int level, max_level = ilk_wm_max_level(dev);
2778 * For inactive pipes, all watermark levels
2779 * should be marked as enabled but zeroed,
2780 * which is what we'd compute them to.
2782 for (level = 0; level <= max_level; level++)
2783 active->wm[level].enable = true;
2787 void ilk_wm_get_hw_state(struct drm_device *dev)
2789 struct drm_i915_private *dev_priv = dev->dev_private;
2790 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2791 struct drm_crtc *crtc;
2793 for_each_crtc(dev, crtc)
2794 ilk_pipe_wm_get_hw_state(crtc);
2796 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2797 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2798 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2800 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2801 if (INTEL_INFO(dev)->gen >= 7) {
2802 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2803 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2806 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2807 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2808 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2809 else if (IS_IVYBRIDGE(dev))
2810 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2811 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2813 hw->enable_fbc_wm =
2814 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2818 * intel_update_watermarks - update FIFO watermark values based on current modes
2820 * Calculate watermark values for the various WM regs based on current mode
2821 * and plane configuration.
2823 * There are several cases to deal with here:
2824 * - normal (i.e. non-self-refresh)
2825 * - self-refresh (SR) mode
2826 * - lines are large relative to FIFO size (buffer can hold up to 2)
2827 * - lines are small relative to FIFO size (buffer can hold more than 2
2828 * lines), so need to account for TLB latency
2830 * The normal calculation is:
2831 * watermark = dotclock * bytes per pixel * latency
2832 * where latency is platform & configuration dependent (we assume pessimal
2833 * values).
2835 * The SR calculation is:
2836 * watermark = (trunc(latency/line time)+1) * surface width *
2837 * bytes per pixel
2838 * where
2839 * line time = htotal / dotclock
2840 * surface width = hdisplay for normal plane and 64 for cursor
2841 * and latency is assumed to be high, as above.
2843 * The final value programmed to the register should always be rounded up,
2844 * and include an extra 2 entries to account for clock crossings.
2846 * We don't use the sprite, so we can ignore that. And on Crestline we have
2847 * to set the non-SR watermarks to 8.
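/*
 * Worked example of the normal calculation (illustrative numbers,
 * assuming 64-byte FIFO entries): a 135 MHz dotclock at 4 bytes per
 * pixel with 2 us of latency needs 135000000 * 4 * 0.000002 = 1080
 * bytes, i.e. 17 FIFO entries, programmed as 17 + 2 = 19 after the
 * clock-crossing allowance.
 */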
2849 void intel_update_watermarks(struct drm_crtc *crtc)
2851 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2853 if (dev_priv->display.update_wm)
2854 dev_priv->display.update_wm(crtc);
2857 void intel_update_sprite_watermarks(struct drm_plane *plane,
2858 struct drm_crtc *crtc,
2859 uint32_t sprite_width, int pixel_size,
2860 bool enabled, bool scaled)
2862 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2864 if (dev_priv->display.update_sprite_wm)
2865 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2866 pixel_size, enabled, scaled);
2869 static struct drm_i915_gem_object *
2870 intel_alloc_context_page(struct drm_device *dev)
2872 struct drm_i915_gem_object *ctx;
2875 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2877 ctx = i915_gem_alloc_object(dev, 4096);
2878 if (ctx == NULL) {
2879 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2880 return NULL;
2881 }
2883 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2884 if (ret) {
2885 DRM_ERROR("failed to pin power context: %d\n", ret);
2886 goto err_unref;
2887 }
2889 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2890 if (ret) {
2891 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2892 goto err_unpin;
2893 }
2895 return ctx;
2897 err_unpin:
2898 i915_gem_object_ggtt_unpin(ctx);
2899 err_unref:
2900 drm_gem_object_unreference(&ctx->base);
2901 return NULL;
2905 * Lock protecting IPS related data structures
2907 DEFINE_SPINLOCK(mchdev_lock);
2909 /* Global for IPS driver to get at the current i915 device. Protected by
2910 * mchdev_lock. */
2911 static struct drm_i915_private *i915_mch_dev;
2913 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2915 struct drm_i915_private *dev_priv = dev->dev_private;
2916 u16 rgvswctl;
2918 assert_spin_locked(&mchdev_lock);
2920 rgvswctl = I915_READ16(MEMSWCTL);
2921 if (rgvswctl & MEMCTL_CMD_STS) {
2922 DRM_DEBUG("gpu busy, RCS change rejected\n");
2923 return false; /* still busy with another command */
2926 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2927 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2928 I915_WRITE16(MEMSWCTL, rgvswctl);
2929 POSTING_READ16(MEMSWCTL);
2931 rgvswctl |= MEMCTL_CMD_STS;
2932 I915_WRITE16(MEMSWCTL, rgvswctl);
2934 return true;
2937 static void ironlake_enable_drps(struct drm_device *dev)
2939 struct drm_i915_private *dev_priv = dev->dev_private;
2940 u32 rgvmodectl = I915_READ(MEMMODECTL);
2941 u8 fmax, fmin, fstart, vstart;
2943 spin_lock_irq(&mchdev_lock);
2945 /* Enable temp reporting */
2946 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2947 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2949 /* 100ms RC evaluation intervals */
2950 I915_WRITE(RCUPEI, 100000);
2951 I915_WRITE(RCDNEI, 100000);
2953 /* Set max/min thresholds to 90ms and 80ms respectively */
2954 I915_WRITE(RCBMAXAVG, 90000);
2955 I915_WRITE(RCBMINAVG, 80000);
2957 I915_WRITE(MEMIHYST, 1);
2959 /* Set up min, max, and cur for interrupt handling */
2960 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2961 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2962 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2963 MEMMODE_FSTART_SHIFT;
2965 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2966 PXVFREQ_PX_SHIFT;
2968 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2969 dev_priv->ips.fstart = fstart;
2971 dev_priv->ips.max_delay = fstart;
2972 dev_priv->ips.min_delay = fmin;
2973 dev_priv->ips.cur_delay = fstart;
2975 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2976 fmax, fmin, fstart);
2978 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2981 * Interrupts will be enabled in ironlake_irq_postinstall
2984 I915_WRITE(VIDSTART, vstart);
2985 POSTING_READ(VIDSTART);
2987 rgvmodectl |= MEMMODE_SWMODE_EN;
2988 I915_WRITE(MEMMODECTL, rgvmodectl);
2990 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2991 DRM_ERROR("stuck trying to change perf mode\n");
2993 mdelay(1);
2994 ironlake_set_drps(dev, fstart);
2996 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2997 I915_READ(0x112e0);
2998 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2999 dev_priv->ips.last_count2 = I915_READ(0x112f4);
3000 getrawmonotonic(&dev_priv->ips.last_time2);
3002 spin_unlock_irq(&mchdev_lock);
3005 static void ironlake_disable_drps(struct drm_device *dev)
3007 struct drm_i915_private *dev_priv = dev->dev_private;
3008 u16 rgvswctl;
3010 spin_lock_irq(&mchdev_lock);
3012 rgvswctl = I915_READ16(MEMSWCTL);
3014 /* Ack interrupts, disable EFC interrupt */
3015 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
3016 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
3017 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
3018 I915_WRITE(DEIIR, DE_PCU_EVENT);
3019 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
3021 /* Go back to the starting frequency */
3022 ironlake_set_drps(dev, dev_priv->ips.fstart);
3024 rgvswctl |= MEMCTL_CMD_STS;
3025 I915_WRITE(MEMSWCTL, rgvswctl);
3026 mdelay(1);
3028 spin_unlock_irq(&mchdev_lock);
3031 /* There's a funny hw issue where the hw returns all 0 when reading from
3032 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
3033 * ourselves, instead of doing a rmw cycle (which might result in us clearing
3034 * all limits and the gpu stuck at whatever frequency it is at atm).
3036 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
3038 u32 limits;
3040 /* Only set the down limit when we've reached the lowest level to avoid
3041 * getting more interrupts, otherwise leave this clear. This prevents a
3042 * race in the hw when coming out of rc6: There's a tiny window where
3043 * the hw runs at the minimal clock before selecting the desired
3044 * frequency, if the down threshold expires in that window we will not
3045 * receive a down interrupt. */
3046 limits = dev_priv->rps.max_freq_softlimit << 24;
3047 if (val <= dev_priv->rps.min_freq_softlimit)
3048 limits |= dev_priv->rps.min_freq_softlimit << 16;
3050 return limits;
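/*
 * E.g. with softlimits of 22 (1100 MHz) and 11 (550 MHz) in 50 MHz
 * units, a request for val = 11 yields 22 << 24 | 11 << 16 =
 * 0x160b0000; any higher val leaves the down limit clear.
 */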
3053 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
3055 int new_power;
3057 new_power = dev_priv->rps.power;
3058 switch (dev_priv->rps.power) {
3059 case LOW_POWER:
3060 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
3061 new_power = BETWEEN;
3062 break;
3064 case BETWEEN:
3065 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
3066 new_power = LOW_POWER;
3067 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
3068 new_power = HIGH_POWER;
3069 break;
3071 case HIGH_POWER:
3072 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
3073 new_power = BETWEEN;
3074 break;
3075 }
3076 /* Max/min bins are special */
3077 if (val == dev_priv->rps.min_freq_softlimit)
3078 new_power = LOW_POWER;
3079 if (val == dev_priv->rps.max_freq_softlimit)
3080 new_power = HIGH_POWER;
3081 if (new_power == dev_priv->rps.power)
3082 return;
3084 /* Note the units here are not exactly 1us, but 1280ns. */
3085 switch (new_power) {
3086 case LOW_POWER:
3087 /* Upclock if more than 95% busy over 16ms */
3088 I915_WRITE(GEN6_RP_UP_EI, 12500);
3089 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
3091 /* Downclock if less than 85% busy over 32ms */
3092 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3093 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
3095 I915_WRITE(GEN6_RP_CONTROL,
3096 GEN6_RP_MEDIA_TURBO |
3097 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3098 GEN6_RP_MEDIA_IS_GFX |
3099 GEN6_RP_ENABLE |
3100 GEN6_RP_UP_BUSY_AVG |
3101 GEN6_RP_DOWN_IDLE_AVG);
3102 break;
3104 case BETWEEN:
3105 /* Upclock if more than 90% busy over 13ms */
3106 I915_WRITE(GEN6_RP_UP_EI, 10250);
3107 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
3109 /* Downclock if less than 75% busy over 32ms */
3110 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3111 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
3113 I915_WRITE(GEN6_RP_CONTROL,
3114 GEN6_RP_MEDIA_TURBO |
3115 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3116 GEN6_RP_MEDIA_IS_GFX |
3117 GEN6_RP_ENABLE |
3118 GEN6_RP_UP_BUSY_AVG |
3119 GEN6_RP_DOWN_IDLE_AVG);
3120 break;
3122 case HIGH_POWER:
3123 /* Upclock if more than 85% busy over 10ms */
3124 I915_WRITE(GEN6_RP_UP_EI, 8000);
3125 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
3127 /* Downclock if less than 60% busy over 32ms */
3128 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
3129 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
3131 I915_WRITE(GEN6_RP_CONTROL,
3132 GEN6_RP_MEDIA_TURBO |
3133 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3134 GEN6_RP_MEDIA_IS_GFX |
3135 GEN6_RP_ENABLE |
3136 GEN6_RP_UP_BUSY_AVG |
3137 GEN6_RP_DOWN_IDLE_AVG);
3138 break;
3139 }
3141 dev_priv->rps.power = new_power;
3142 dev_priv->rps.last_adj = 0;
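/*
 * The EI arithmetic above, spelled out: an up EI of 12500 is
 * 12500 * 1280 ns = 16 ms, and an up threshold of 11800 is
 * 11800 / 12500 = ~94% of that interval, matching the ">95% busy
 * over 16ms" rule of thumb for LOW_POWER.
 */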
3145 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3147 u32 mask = 0;
3149 if (val > dev_priv->rps.min_freq_softlimit)
3150 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3151 if (val < dev_priv->rps.max_freq_softlimit)
3152 mask |= GEN6_PM_RP_UP_THRESHOLD;
3154 /* IVB and SNB hard hang on a looping batchbuffer
3155 * if GEN6_PM_UP_EI_EXPIRED is masked.
3156 */
3157 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3158 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3160 if (IS_GEN8(dev_priv->dev))
3161 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
3163 return ~mask;
3166 /* gen6_set_rps is called to update the frequency request, but should also be
3167 * called when the range (min_delay and max_delay) is modified so that we can
3168 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3169 void gen6_set_rps(struct drm_device *dev, u8 val)
3171 struct drm_i915_private *dev_priv = dev->dev_private;
3173 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3174 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3175 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3177 /* min/max delay may still have been modified so be sure to
3178 * write the limits value.
3180 if (val != dev_priv->rps.cur_freq) {
3181 gen6_set_rps_thresholds(dev_priv, val);
3183 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3184 I915_WRITE(GEN6_RPNSWREQ,
3185 HSW_FREQUENCY(val));
3186 else
3187 I915_WRITE(GEN6_RPNSWREQ,
3188 GEN6_FREQUENCY(val) |
3189 GEN6_OFFSET(0) |
3190 GEN6_AGGRESSIVE_TURBO);
3193 /* Make sure we continue to get interrupts
3194 * until we hit the minimum or maximum frequencies.
3196 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3197 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3199 POSTING_READ(GEN6_RPNSWREQ);
3201 dev_priv->rps.cur_freq = val;
3202 trace_intel_gpu_freq_change(val * 50);
3205 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3207 * * If Gfx is Idle, then
3208 * 1. Mask Turbo interrupts
3209 * 2. Bring up Gfx clock
3210 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3211 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
3212 * 5. Unmask Turbo interrupts
3214 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3217 * When we are idle, drop to the minimum voltage state.
3218 */
3220 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3221 return;
3223 /* Mask turbo interrupts so that they do not come in between */
3224 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3226 vlv_force_gfx_clock(dev_priv, true);
3228 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3230 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3231 dev_priv->rps.min_freq_softlimit);
3233 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3234 & GENFREQSTATUS) == 0, 5))
3235 DRM_ERROR("timed out waiting for Punit\n");
3237 vlv_force_gfx_clock(dev_priv, false);
3239 I915_WRITE(GEN6_PMINTRMSK,
3240 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3243 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3245 struct drm_device *dev = dev_priv->dev;
3247 mutex_lock(&dev_priv->rps.hw_lock);
3248 if (dev_priv->rps.enabled) {
3249 if (IS_VALLEYVIEW(dev))
3250 vlv_set_rps_idle(dev_priv);
3252 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3253 dev_priv->rps.last_adj = 0;
3255 mutex_unlock(&dev_priv->rps.hw_lock);
3258 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3260 struct drm_device *dev = dev_priv->dev;
3262 mutex_lock(&dev_priv->rps.hw_lock);
3263 if (dev_priv->rps.enabled) {
3264 if (IS_VALLEYVIEW(dev))
3265 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3267 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3268 dev_priv->rps.last_adj = 0;
3270 mutex_unlock(&dev_priv->rps.hw_lock);
3273 void valleyview_set_rps(struct drm_device *dev, u8 val)
3275 struct drm_i915_private *dev_priv = dev->dev_private;
3277 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3278 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3279 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3281 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3282 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3283 dev_priv->rps.cur_freq,
3284 vlv_gpu_freq(dev_priv, val), val);
3286 if (val != dev_priv->rps.cur_freq)
3287 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3289 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3291 dev_priv->rps.cur_freq = val;
3292 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3295 static void gen8_disable_rps_interrupts(struct drm_device *dev)
3297 struct drm_i915_private *dev_priv = dev->dev_private;
3299 I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
3300 I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
3301 ~dev_priv->pm_rps_events);
3302 /* Complete PM interrupt masking here doesn't race with the rps work
3303 * item again unmasking PM interrupts because that is using a different
3304 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
3305 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
3306 * gen8_enable_rps will clean up. */
3308 spin_lock_irq(&dev_priv->irq_lock);
3309 dev_priv->rps.pm_iir = 0;
3310 spin_unlock_irq(&dev_priv->irq_lock);
3312 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3315 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3317 struct drm_i915_private *dev_priv = dev->dev_private;
3319 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3320 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3321 ~dev_priv->pm_rps_events);
3322 /* Complete PM interrupt masking here doesn't race with the rps work
3323 * item again unmasking PM interrupts because that is using a different
3324 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3325 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3327 spin_lock_irq(&dev_priv->irq_lock);
3328 dev_priv->rps.pm_iir = 0;
3329 spin_unlock_irq(&dev_priv->irq_lock);
3331 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3334 static void gen6_disable_rps(struct drm_device *dev)
3336 struct drm_i915_private *dev_priv = dev->dev_private;
3338 I915_WRITE(GEN6_RC_CONTROL, 0);
3339 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3341 if (IS_BROADWELL(dev))
3342 gen8_disable_rps_interrupts(dev);
3344 gen6_disable_rps_interrupts(dev);
3347 static void cherryview_disable_rps(struct drm_device *dev)
3349 struct drm_i915_private *dev_priv = dev->dev_private;
3351 I915_WRITE(GEN6_RC_CONTROL, 0);
3354 static void valleyview_disable_rps(struct drm_device *dev)
3356 struct drm_i915_private *dev_priv = dev->dev_private;
3358 I915_WRITE(GEN6_RC_CONTROL, 0);
3360 gen6_disable_rps_interrupts(dev);
3363 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3365 if (IS_VALLEYVIEW(dev)) {
3366 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
3367 mode = GEN6_RC_CTL_RC6_ENABLE;
3368 else
3369 mode = 0;
3370 }
3371 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3372 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3373 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3374 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3377 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
3379 /* No RC6 before Ironlake */
3380 if (INTEL_INFO(dev)->gen < 5)
3381 return 0;
3383 /* RC6 is only available on Ironlake mobile, not on desktop */
3384 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
3385 return 0;
3387 /* Respect the kernel parameter if it is set */
3388 if (enable_rc6 >= 0) {
3391 if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
3392 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
3393 INTEL_RC6pp_ENABLE;
3394 else
3395 mask = INTEL_RC6_ENABLE;
3397 if ((enable_rc6 & mask) != enable_rc6)
3398 DRM_INFO("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
3399 enable_rc6 & mask, enable_rc6, mask);
3401 return enable_rc6 & mask;
3404 /* Disable RC6 on Ironlake */
3405 if (INTEL_INFO(dev)->gen == 5)
3406 return 0;
3408 if (IS_IVYBRIDGE(dev))
3409 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3411 return INTEL_RC6_ENABLE;
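/*
 * E.g. booting with i915.enable_rc6=7 requests RC6 | RC6p | RC6pp;
 * on SNB/IVB the full mask is kept, while on platforms where only
 * plain RC6 is allowed the option is trimmed to 1 and the adjustment
 * is logged via the DRM_INFO above.
 */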
3414 int intel_enable_rc6(const struct drm_device *dev)
3416 return i915.enable_rc6;
3419 static void gen8_enable_rps_interrupts(struct drm_device *dev)
3421 struct drm_i915_private *dev_priv = dev->dev_private;
3423 spin_lock_irq(&dev_priv->irq_lock);
3424 WARN_ON(dev_priv->rps.pm_iir);
3425 bdw_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3426 I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
3427 spin_unlock_irq(&dev_priv->irq_lock);
3430 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3432 struct drm_i915_private *dev_priv = dev->dev_private;
3434 spin_lock_irq(&dev_priv->irq_lock);
3435 WARN_ON(dev_priv->rps.pm_iir);
3436 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3437 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3438 spin_unlock_irq(&dev_priv->irq_lock);
3441 static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
3443 /* All of these values are in units of 50MHz */
3444 dev_priv->rps.cur_freq = 0;
3445 /* static values from HW: RP0 (max) > RPe > RP1 > RPn (min_freq) */
3446 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3447 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3448 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3449 /* XXX: only BYT has a special efficient freq */
3450 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3451 /* hw_max = RP0 until we check for overclocking */
3452 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3454 /* Preserve min/max settings in case of re-init */
3455 if (dev_priv->rps.max_freq_softlimit == 0)
3456 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3458 if (dev_priv->rps.min_freq_softlimit == 0)
3459 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
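/*
 * E.g. an illustrative rp_state_cap of 0x00070b16 decodes to
 * RP0 = 0x16 (22 -> 1100 MHz), RP1 = 0x0b (11 -> 550 MHz) and
 * RPn = 0x07 (7 -> 350 MHz) in the 50 MHz units noted above.
 */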
3462 static void gen8_enable_rps(struct drm_device *dev)
3464 struct drm_i915_private *dev_priv = dev->dev_private;
3465 struct intel_engine_cs *ring;
3466 uint32_t rc6_mask = 0, rp_state_cap;
3467 int unused;
3469 /* 1a: Software RC state - RC0 */
3470 I915_WRITE(GEN6_RC_STATE, 0);
3472 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3473 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3474 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3476 /* 2a: Disable RC states. */
3477 I915_WRITE(GEN6_RC_CONTROL, 0);
3479 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3480 parse_rp_state_cap(dev_priv, rp_state_cap);
3482 /* 2b: Program RC6 thresholds.*/
3483 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3484 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3485 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3486 for_each_ring(ring, dev_priv, unused)
3487 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3488 I915_WRITE(GEN6_RC_SLEEP, 0);
3489 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3491 /* 3: Enable RC6 */
3492 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3493 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3494 intel_print_rc6_info(dev, rc6_mask);
3495 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3496 GEN6_RC_CTL_EI_MODE(1) |
3497 rc6_mask);
3499 /* 4 Program defaults and thresholds for RPS*/
3500 I915_WRITE(GEN6_RPNSWREQ,
3501 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3502 I915_WRITE(GEN6_RC_VIDEO_FREQ,
3503 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
3504 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3505 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3507 /* Docs recommend 900MHz, and 300 MHz respectively */
3508 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3509 dev_priv->rps.max_freq_softlimit << 24 |
3510 dev_priv->rps.min_freq_softlimit << 16);
3512 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3513 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3514 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3515 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3517 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3519 /* 5: Enable RPS */
3520 I915_WRITE(GEN6_RP_CONTROL,
3521 GEN6_RP_MEDIA_TURBO |
3522 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3523 GEN6_RP_MEDIA_IS_GFX |
3524 GEN6_RP_ENABLE |
3525 GEN6_RP_UP_BUSY_AVG |
3526 GEN6_RP_DOWN_IDLE_AVG);
3528 /* 6: Ring frequency + overclocking (our driver does this later) */
3530 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3532 gen8_enable_rps_interrupts(dev);
3534 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3537 static void gen6_enable_rps(struct drm_device *dev)
3539 struct drm_i915_private *dev_priv = dev->dev_private;
3540 struct intel_engine_cs *ring;
3541 u32 rp_state_cap;
3542 u32 gt_perf_status;
3543 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3544 u32 gtfifodbg;
3545 int rc6_mode;
3546 int i, ret;
3548 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3550 /* Here begins a magic sequence of register writes to enable
3551 * auto-downclocking.
3553 * Perhaps there might be some value in exposing these to
3554 * userspace...
3555 */
3556 I915_WRITE(GEN6_RC_STATE, 0);
3558 /* Clear the DBG now so we don't confuse earlier errors */
3559 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3560 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3561 I915_WRITE(GTFIFODBG, gtfifodbg);
3564 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3566 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3567 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3569 parse_rp_state_cap(dev_priv, rp_state_cap);
3571 /* disable the counters and set deterministic thresholds */
3572 I915_WRITE(GEN6_RC_CONTROL, 0);
3574 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3575 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3576 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3577 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3578 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3580 for_each_ring(ring, dev_priv, i)
3581 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3583 I915_WRITE(GEN6_RC_SLEEP, 0);
3584 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3585 if (IS_IVYBRIDGE(dev))
3586 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3588 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3589 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3590 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3592 /* Check if we are enabling RC6 */
3593 rc6_mode = intel_enable_rc6(dev_priv->dev);
3594 if (rc6_mode & INTEL_RC6_ENABLE)
3595 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3597 /* We don't use those on Haswell */
3598 if (!IS_HASWELL(dev)) {
3599 if (rc6_mode & INTEL_RC6p_ENABLE)
3600 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3602 if (rc6_mode & INTEL_RC6pp_ENABLE)
3603 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3606 intel_print_rc6_info(dev, rc6_mask);
3608 I915_WRITE(GEN6_RC_CONTROL,
3609 rc6_mask |
3610 GEN6_RC_CTL_EI_MODE(1) |
3611 GEN6_RC_CTL_HW_ENABLE);
3613 /* Power down if completely idle for over 50ms */
3614 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3615 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3617 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3618 if (ret)
3619 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3621 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3622 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3623 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3624 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3625 (pcu_mbox & 0xff) * 50);
3626 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3629 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3630 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3632 gen6_enable_rps_interrupts(dev);
3634 rc6vids = 0;
3635 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3636 if (IS_GEN6(dev) && ret) {
3637 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3638 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3639 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3640 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3641 rc6vids &= 0xffff00;
3642 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3643 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3644 if (ret)
3645 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3648 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3651 static void __gen6_update_ring_freq(struct drm_device *dev)
3653 struct drm_i915_private *dev_priv = dev->dev_private;
3654 int min_freq = 15;
3655 unsigned int gpu_freq;
3656 unsigned int max_ia_freq, min_ring_freq;
3657 int scaling_factor = 180;
3658 struct cpufreq_policy *policy;
3660 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3662 policy = cpufreq_cpu_get(0);
3663 if (policy) {
3664 max_ia_freq = policy->cpuinfo.max_freq;
3665 cpufreq_cpu_put(policy);
3666 } else {
3667 /*
3668 * Default to measured freq if none found, PCU will ensure we
3669 * don't go over
3670 */
3671 max_ia_freq = tsc_khz;
3672 }
3674 /* Convert from kHz to MHz */
3675 max_ia_freq /= 1000;
3677 min_ring_freq = I915_READ(DCLK) & 0xf;
3678 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3679 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
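/*
 * E.g. a DCLK field of 3 corresponds to 3 * 266.6 MHz = ~800 MHz of
 * DDR clock, and mult_frac(3, 8, 3) = 8 rescales that into the units
 * the PCU expects for the ring-frequency floor.
 */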
3682 * For each potential GPU frequency, load a ring frequency we'd like
3683 * to use for memory access. We do this by specifying the IA frequency
3684 * the PCU should use as a reference to determine the ring frequency.
3686 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3687 gpu_freq--) {
3688 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3689 unsigned int ia_freq = 0, ring_freq = 0;
3691 if (INTEL_INFO(dev)->gen >= 8) {
3692 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3693 ring_freq = max(min_ring_freq, gpu_freq);
3694 } else if (IS_HASWELL(dev)) {
3695 ring_freq = mult_frac(gpu_freq, 5, 4);
3696 ring_freq = max(min_ring_freq, ring_freq);
3697 /* leave ia_freq as the default, chosen by cpufreq */
3699 /* On older processors, there is no separate ring
3700 * clock domain, so in order to boost the bandwidth
3701 * of the ring, we need to upclock the CPU (ia_freq).
3703 * For GPU frequencies less than 750MHz,
3704 * just use the lowest ring freq.
3706 if (gpu_freq < min_freq)
3707 ia_freq = 800;
3708 else
3709 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3710 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3713 sandybridge_pcode_write(dev_priv,
3714 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3715 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3716 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3717 gpu_freq);
3718 }
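/*
 * E.g. on SNB (illustrative numbers): with max_ia_freq = 3400 MHz and
 * a GPU frequency 4 bins below the soft max, ia_freq =
 * 3400 - (4 * 180) / 2 = 3040, rounded to 30 in 100 MHz units;
 * ring_freq is left at 0 since there is no separate ring domain.
 */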
3721 void gen6_update_ring_freq(struct drm_device *dev)
3723 struct drm_i915_private *dev_priv = dev->dev_private;
3725 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
3726 return;
3728 mutex_lock(&dev_priv->rps.hw_lock);
3729 __gen6_update_ring_freq(dev);
3730 mutex_unlock(&dev_priv->rps.hw_lock);
3733 int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
3735 u32 val, rp0;
3737 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3738 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
3740 return rp0;
3743 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3745 u32 val, rpe;
3747 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
3748 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
3750 return rpe;
3753 int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
3755 u32 val, rpn;
3757 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
3758 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
3760 return rpn;
3762 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3764 u32 val, rp0;
3766 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3768 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3769 /* Clamp to max */
3770 rp0 = min_t(u32, rp0, 0xea);
3772 return rp0;
3775 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3777 u32 val, rpe;
3779 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3780 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3781 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3782 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3784 return rpe;
3787 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3789 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3792 /* Check that the pctx buffer wasn't moved under us. */
3793 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3795 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3797 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3798 dev_priv->vlv_pctx->stolen->start);
3802 /* Check that the pcbr address is not empty. */
3803 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
3805 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3807 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
3810 static void cherryview_setup_pctx(struct drm_device *dev)
3812 struct drm_i915_private *dev_priv = dev->dev_private;
3813 unsigned long pctx_paddr, paddr;
3814 struct i915_gtt *gtt = &dev_priv->gtt;
3815 u32 pcbr;
3816 int pctx_size = 32*1024;
3818 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3820 pcbr = I915_READ(VLV_PCBR);
3821 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
3822 paddr = (dev_priv->mm.stolen_base +
3823 (gtt->stolen_size - pctx_size));
3825 pctx_paddr = (paddr & (~4095));
3826 I915_WRITE(VLV_PCBR, pctx_paddr);
3830 static void valleyview_setup_pctx(struct drm_device *dev)
3832 struct drm_i915_private *dev_priv = dev->dev_private;
3833 struct drm_i915_gem_object *pctx;
3834 unsigned long pctx_paddr;
3835 u32 pcbr;
3836 int pctx_size = 24*1024;
3838 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3840 pcbr = I915_READ(VLV_PCBR);
3841 if (pcbr) {
3842 /* BIOS set it up already, grab the pre-alloc'd space */
3843 int pcbr_offset;
3845 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3846 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3847 pcbr_offset,
3848 I915_GTT_OFFSET_NONE,
3849 pctx_size);
3850 goto out;
3851 }
3854 * From the Gunit register HAS:
3855 * The Gfx driver is expected to program this register and ensure
3856 * proper allocation within Gfx stolen memory. For example, this
3857 * register should be programmed such that the PCBR range does not
3858 * overlap with other ranges, such as the frame buffer, protected
3859 * memory, or any other relevant ranges.
3861 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3862 if (!pctx) {
3863 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3864 return;
3865 }
3867 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3868 I915_WRITE(VLV_PCBR, pctx_paddr);
3870 out:
3871 dev_priv->vlv_pctx = pctx;
3874 static void valleyview_cleanup_pctx(struct drm_device *dev)
3876 struct drm_i915_private *dev_priv = dev->dev_private;
3878 if (WARN_ON(!dev_priv->vlv_pctx))
3879 return;
3881 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3882 dev_priv->vlv_pctx = NULL;
3885 static void valleyview_init_gt_powersave(struct drm_device *dev)
3887 struct drm_i915_private *dev_priv = dev->dev_private;
3889 valleyview_setup_pctx(dev);
3891 mutex_lock(&dev_priv->rps.hw_lock);
3893 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3894 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3895 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3896 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3897 dev_priv->rps.max_freq);
3899 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3900 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3901 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3902 dev_priv->rps.efficient_freq);
3904 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3905 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3906 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3907 dev_priv->rps.min_freq);
3909 /* Preserve min/max settings in case of re-init */
3910 if (dev_priv->rps.max_freq_softlimit == 0)
3911 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3913 if (dev_priv->rps.min_freq_softlimit == 0)
3914 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3916 mutex_unlock(&dev_priv->rps.hw_lock);
3919 static void cherryview_init_gt_powersave(struct drm_device *dev)
3921 struct drm_i915_private *dev_priv = dev->dev_private;
3923 cherryview_setup_pctx(dev);
3925 mutex_lock(&dev_priv->rps.hw_lock);
3927 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
3928 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3929 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3930 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3931 dev_priv->rps.max_freq);
3933 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
3934 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3935 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3936 dev_priv->rps.efficient_freq);
3938 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
3939 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3940 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3941 dev_priv->rps.min_freq);
3943 /* Preserve min/max settings in case of re-init */
3944 if (dev_priv->rps.max_freq_softlimit == 0)
3945 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3947 if (dev_priv->rps.min_freq_softlimit == 0)
3948 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3950 mutex_unlock(&dev_priv->rps.hw_lock);
3953 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
3955 valleyview_cleanup_pctx(dev);
3958 static void cherryview_enable_rps(struct drm_device *dev)
3960 struct drm_i915_private *dev_priv = dev->dev_private;
3961 struct intel_engine_cs *ring;
3962 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
3963 int i;
3965 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3967 gtfifodbg = I915_READ(GTFIFODBG);
3968 if (gtfifodbg) {
3969 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3970 gtfifodbg);
3971 I915_WRITE(GTFIFODBG, gtfifodbg);
3972 }
3974 cherryview_check_pctx(dev_priv);
3976 /* 1a & 1b: Get forcewake during program sequence. Although the driver
3977 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3978 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3980 /* 2a: Program RC6 thresholds.*/
3981 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3982 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3983 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3985 for_each_ring(ring, dev_priv, i)
3986 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3987 I915_WRITE(GEN6_RC_SLEEP, 0);
3989 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3991 /* allows RC6 residency counter to work */
3992 I915_WRITE(VLV_COUNTER_CONTROL,
3993 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3994 VLV_MEDIA_RC6_COUNT_EN |
3995 VLV_RENDER_RC6_COUNT_EN));
3997 /* For now we assume BIOS is allocating and populating the PCBR */
3998 pcbr = I915_READ(VLV_PCBR);
4000 DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
4002 /* 3: Enable RC6 */
4003 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
4004 (pcbr >> VLV_PCBR_ADDR_SHIFT))
4005 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
4007 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4009 /* 4 Program defaults and thresholds for RPS*/
4010 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4011 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4012 I915_WRITE(GEN6_RP_UP_EI, 66000);
4013 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4015 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
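/*
 * Annotation (not driver code): assuming the up/down thresholds are
 * compared against busy time accumulated over the matching evaluation
 * interval, the values programmed above work out to roughly
 * 59400/66000 = 90% busy before requesting a higher frequency and
 * 245000/350000 = 70% busy (or less) before dropping to a lower one.
 */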
4017 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
4018 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
4019 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
4021 /* 5: Enable RPS */
4022 I915_WRITE(GEN6_RP_CONTROL,
4023 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4024 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
4025 GEN6_RP_ENABLE |
4026 GEN6_RP_UP_BUSY_AVG |
4027 GEN6_RP_DOWN_IDLE_AVG);
4029 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4031 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4032 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4034 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4035 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4036 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4037 dev_priv->rps.cur_freq);
4039 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4040 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4041 dev_priv->rps.efficient_freq);
4043 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4045 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4046 }
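/*
 * Annotation (not driver code): how the PUNIT_REG_GPU_FREQ_STS word is
 * decoded above. The field positions are inferred from the surrounding
 * code rather than from a spec; the helper names are illustrative only.
 */
static inline bool punit_sts_gpll_enabled(u32 val)
{
	return val & 0x10;		/* GPLL enable status bit */
}

static inline u8 punit_sts_cur_freq_code(u32 val)
{
	return (val >> 8) & 0xff;	/* code passed to vlv_gpu_freq() */
}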
4048 static void valleyview_enable_rps(struct drm_device *dev)
4050 struct drm_i915_private *dev_priv = dev->dev_private;
4051 struct intel_engine_cs *ring;
4052 u32 gtfifodbg, val, rc6_mode = 0;
4053 int i;
4055 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4057 valleyview_check_pctx(dev_priv);
4059 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4060 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
4061 gtfifodbg);
4062 I915_WRITE(GTFIFODBG, gtfifodbg);
4063 }
4065 /* If VLV, Forcewake all wells, else re-direct to regular path */
4066 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4068 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
4069 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
4070 I915_WRITE(GEN6_RP_UP_EI, 66000);
4071 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
4073 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4075 I915_WRITE(GEN6_RP_CONTROL,
4076 GEN6_RP_MEDIA_TURBO |
4077 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4078 GEN6_RP_MEDIA_IS_GFX |
4079 GEN6_RP_ENABLE |
4080 GEN6_RP_UP_BUSY_AVG |
4081 GEN6_RP_DOWN_IDLE_CONT);
4083 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
4084 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4085 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4087 for_each_ring(ring, dev_priv, i)
4088 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4090 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
4092 /* allows RC6 residency counter to work */
4093 I915_WRITE(VLV_COUNTER_CONTROL,
4094 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
4095 VLV_MEDIA_RC6_COUNT_EN |
4096 VLV_RENDER_RC6_COUNT_EN));
4097 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4098 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
4100 intel_print_rc6_info(dev, rc6_mode);
4102 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
4104 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4106 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
4107 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
4109 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
4110 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
4111 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
4112 dev_priv->rps.cur_freq);
4114 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
4115 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
4116 dev_priv->rps.efficient_freq);
4118 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
4120 gen6_enable_rps_interrupts(dev);
4122 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4125 void ironlake_teardown_rc6(struct drm_device *dev)
4127 struct drm_i915_private *dev_priv = dev->dev_private;
4129 if (dev_priv->ips.renderctx) {
4130 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
4131 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
4132 dev_priv->ips.renderctx = NULL;
4135 if (dev_priv->ips.pwrctx) {
4136 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
4137 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
4138 dev_priv->ips.pwrctx = NULL;
4142 static void ironlake_disable_rc6(struct drm_device *dev)
4144 struct drm_i915_private *dev_priv = dev->dev_private;
4146 if (I915_READ(PWRCTXA)) {
4147 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
4148 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
4149 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
4150 50);
4152 I915_WRITE(PWRCTXA, 0);
4153 POSTING_READ(PWRCTXA);
4155 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4156 POSTING_READ(RSTDBYCTL);
4160 static int ironlake_setup_rc6(struct drm_device *dev)
4162 struct drm_i915_private *dev_priv = dev->dev_private;
4164 if (dev_priv->ips.renderctx == NULL)
4165 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
4166 if (!dev_priv->ips.renderctx)
4167 return -ENOMEM;
4169 if (dev_priv->ips.pwrctx == NULL)
4170 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
4171 if (!dev_priv->ips.pwrctx) {
4172 ironlake_teardown_rc6(dev);
4173 return -ENOMEM;
4174 }
4176 return 0;
4177 }
4179 static void ironlake_enable_rc6(struct drm_device *dev)
4181 struct drm_i915_private *dev_priv = dev->dev_private;
4182 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
4183 bool was_interruptible;
4186 /* rc6 disabled by default due to repeated reports of hanging during
4187 * boot and resume.
4188 */
4189 if (!intel_enable_rc6(dev))
4192 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4194 ret = ironlake_setup_rc6(dev);
4195 if (ret)
4196 return;
4198 was_interruptible = dev_priv->mm.interruptible;
4199 dev_priv->mm.interruptible = false;
4202 * GPU can automatically power down the render unit if given a page
4203 * to save state.
4204 */
4205 ret = intel_ring_begin(ring, 6);
4206 if (ret) {
4207 ironlake_teardown_rc6(dev);
4208 dev_priv->mm.interruptible = was_interruptible;
4209 return;
4210 }
4212 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
4213 intel_ring_emit(ring, MI_SET_CONTEXT);
4214 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
4216 MI_SAVE_EXT_STATE_EN |
4217 MI_RESTORE_EXT_STATE_EN |
4218 MI_RESTORE_INHIBIT);
4219 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
4220 intel_ring_emit(ring, MI_NOOP);
4221 intel_ring_emit(ring, MI_FLUSH);
4222 intel_ring_advance(ring);
4225 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
4226 * does an implicit flush; combined with the MI_FLUSH above, it should be
4227 * safe to assume that renderctx is valid
4228 */
4229 ret = intel_ring_idle(ring);
4230 dev_priv->mm.interruptible = was_interruptible;
4231 if (ret) {
4232 DRM_ERROR("failed to enable ironlake power savings\n");
4233 ironlake_teardown_rc6(dev);
4234 return;
4235 }
4237 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
4238 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
4240 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
4243 static unsigned long intel_pxfreq(u32 vidfreq)
4244 {
4245 unsigned long freq;
4246 int div = (vidfreq & 0x3f0000) >> 16;
4247 int post = (vidfreq & 0x3000) >> 12;
4248 int pre = (vidfreq & 0x7);
4250 if (!pre)
4251 return 0;
4253 freq = ((div * 133333) / ((1<<post) * pre));
4255 return freq;
4256 }
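/*
 * Annotation (not driver code): a worked example of the formula above,
 * using the field layout implied by the masks (div = bits 21:16,
 * post = bits 13:12, pre = bits 2:0) and assuming the 133333 constant
 * is a 133.333 MHz reference expressed in kHz. vidfreq = 0x0f1002
 * gives div = 15, post = 1, pre = 2, so
 * freq = (15 * 133333) / ((1 << 1) * 2) = 499998 kHz, i.e. roughly a
 * 500 MHz P-state clock.
 */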
4258 static const struct cparams {
4259 u16 i;
4260 u16 t;
4261 u16 m;
4262 u16 c;
4263 } cparams[] = {
4264 { 1, 1333, 301, 28664 },
4265 { 1, 1066, 294, 24460 },
4266 { 1, 800, 294, 25192 },
4267 { 0, 1333, 276, 27605 },
4268 { 0, 1066, 276, 27605 },
4269 { 0, 800, 231, 23784 },
4270 };
4272 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
4274 u64 total_count, diff, ret;
4275 u32 count1, count2, count3, m = 0, c = 0;
4276 unsigned long now = jiffies_to_msecs(jiffies), diff1;
4277 int i;
4279 assert_spin_locked(&mchdev_lock);
4281 diff1 = now - dev_priv->ips.last_time1;
4283 /* Prevent division-by-zero if we are asking too fast.
4284 * Also, we don't get interesting results if we are polling
4285 * faster than once in 10ms, so just return the saved value
4286 * in that case.
4287 */
4288 if (diff1 <= 10)
4289 return dev_priv->ips.chipset_power;
4291 count1 = I915_READ(DMIEC);
4292 count2 = I915_READ(DDREC);
4293 count3 = I915_READ(CSIEC);
4295 total_count = count1 + count2 + count3;
4297 /* FIXME: handle per-counter overflow */
4298 if (total_count < dev_priv->ips.last_count1) {
4299 diff = ~0UL - dev_priv->ips.last_count1;
4300 diff += total_count;
4301 } else {
4302 diff = total_count - dev_priv->ips.last_count1;
4303 }
4305 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
4306 if (cparams[i].i == dev_priv->ips.c_m &&
4307 cparams[i].t == dev_priv->ips.r_t) {
4308 m = cparams[i].m;
4309 c = cparams[i].c;
4310 break;
4311 }
4312 }
4314 diff = div_u64(diff, diff1);
4315 ret = ((m * diff) + c);
4316 ret = div_u64(ret, 10);
4318 dev_priv->ips.last_count1 = total_count;
4319 dev_priv->ips.last_time1 = now;
4321 dev_priv->ips.chipset_power = ret;
4323 return ret;
4324 }
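/*
 * Annotation (not driver code): a compact sketch of the computation
 * above. The three energy counters are summed, differenced against the
 * previous sample (with wrap handling), converted to a rate per
 * millisecond and then run through the (m, c) slope/offset pair looked
 * up in cparams[] for the current memory configuration. All names here
 * are local stand-ins.
 */
static inline u64 ips_chipset_power(u64 count_delta, u32 ms, u32 m, u32 c)
{
	u64 rate = div_u64(count_delta, ms);	/* counts per millisecond */

	return div_u64(m * rate + c, 10);	/* final /10 as done above */
}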
4326 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
4328 struct drm_device *dev = dev_priv->dev;
4329 unsigned long val;
4331 if (INTEL_INFO(dev)->gen != 5)
4332 return 0;
4334 spin_lock_irq(&mchdev_lock);
4336 val = __i915_chipset_val(dev_priv);
4338 spin_unlock_irq(&mchdev_lock);
4340 return val;
4341 }
4343 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
4345 unsigned long m, x, b;
4346 u32 tsfs;
4348 tsfs = I915_READ(TSFS);
4350 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
4351 x = I915_READ8(TR1);
4353 b = tsfs & TSFS_INTR_MASK;
4355 return ((m * x) / 127) - b;
4356 }
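/*
 * Annotation (not driver code): a worked example of the formula above
 * with made-up register values. A slope m = 64 from TSFS, a raw
 * thermal reading x = 100 from TR1 and an intercept b = 5 give
 * (64 * 100) / 127 - 5 = 50 - 5 = 45; the /127 simply normalizes the
 * 7-bit slope field.
 */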
4358 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
4360 struct drm_device *dev = dev_priv->dev;
4361 static const struct v_table {
4362 u16 vd; /* in .1 mil */
4363 u16 vm; /* in .1 mil */
4494 if (INTEL_INFO(dev)->is_mobile)
4495 return v_table[pxvid].vm;
4496 else
4497 return v_table[pxvid].vd;
4500 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4502 struct timespec now, diff1;
4503 u64 diff;
4504 unsigned long diffms;
4505 u32 count;
4507 assert_spin_locked(&mchdev_lock);
4509 getrawmonotonic(&now);
4510 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4512 /* Don't divide by 0 */
4513 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4514 if (!diffms)
4515 return;
4517 count = I915_READ(GFXEC);
4519 if (count < dev_priv->ips.last_count2) {
4520 diff = ~0UL - dev_priv->ips.last_count2;
4521 diff += count;
4522 } else {
4523 diff = count - dev_priv->ips.last_count2;
4524 }
4526 dev_priv->ips.last_count2 = count;
4527 dev_priv->ips.last_time2 = now;
4529 /* More magic constants... */
4530 diff = diff * 1181;
4531 diff = div_u64(diff, diffms * 10);
4532 dev_priv->ips.gfx_power = diff;
4535 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4537 struct drm_device *dev = dev_priv->dev;
4539 if (INTEL_INFO(dev)->gen != 5)
4540 return;
4542 spin_lock_irq(&mchdev_lock);
4544 __i915_update_gfx_val(dev_priv);
4546 spin_unlock_irq(&mchdev_lock);
4549 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4551 unsigned long t, corr, state1, corr2, state2;
4552 u32 pxvid, ext_v;
4554 assert_spin_locked(&mchdev_lock);
4556 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4557 pxvid = (pxvid >> 24) & 0x7f;
4558 ext_v = pvid_to_extvid(dev_priv, pxvid);
4560 state1 = ext_v;
4562 t = i915_mch_val(dev_priv);
4564 /* Revel in the empirically derived constants */
4566 /* Correction factor in 1/100000 units */
4567 if (t > 80)
4568 corr = ((t * 2349) + 135940);
4569 else if (t >= 50)
4570 corr = ((t * 964) + 29317);
4571 else /* < 50 */
4572 corr = ((t * 301) + 1004);
4574 corr = corr * ((150142 * state1) / 10000 - 78642);
4575 corr /= 100000;
4576 corr2 = (corr * dev_priv->ips.corr);
4578 state2 = (corr2 * state1) / 10000;
4579 state2 /= 100; /* convert to mW */
4581 __i915_update_gfx_val(dev_priv);
4583 return dev_priv->ips.gfx_power + state2;
4584 }
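/*
 * Annotation (not driver code): tracing the correction chain above for
 * an assumed t = 60 and fuse trim f = dev_priv->ips.corr:
 *   corr   = 60 * 964 + 29317 = 87157           (1/100000 units)
 *   corr   = corr * ((150142 * state1) / 10000 - 78642) / 100000
 *   corr2  = corr * f
 *   state2 = (corr2 * state1) / 10000 / 100     (scaled down to mW)
 * The constants are empirical; the shape is a temperature-linear term,
 * a voltage-dependent term and a per-part fuse trim.
 */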
4586 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4588 struct drm_device *dev = dev_priv->dev;
4589 unsigned long val;
4591 if (INTEL_INFO(dev)->gen != 5)
4592 return 0;
4594 spin_lock_irq(&mchdev_lock);
4596 val = __i915_gfx_val(dev_priv);
4598 spin_unlock_irq(&mchdev_lock);
4600 return val;
4601 }
4604 * i915_read_mch_val - return value for IPS use
4606 * Calculate and return a value for the IPS driver to use when deciding whether
4607 * we have thermal and power headroom to increase CPU or GPU power budget.
4609 unsigned long i915_read_mch_val(void)
4611 struct drm_i915_private *dev_priv;
4612 unsigned long chipset_val, graphics_val, ret = 0;
4614 spin_lock_irq(&mchdev_lock);
4615 if (!i915_mch_dev)
4616 goto out_unlock;
4617 dev_priv = i915_mch_dev;
4619 chipset_val = __i915_chipset_val(dev_priv);
4620 graphics_val = __i915_gfx_val(dev_priv);
4622 ret = chipset_val + graphics_val;
4624 out_unlock:
4625 spin_unlock_irq(&mchdev_lock);
4627 return ret;
4628 }
4629 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4632 * i915_gpu_raise - raise GPU frequency limit
4634 * Raise the limit; IPS indicates we have thermal headroom.
4636 bool i915_gpu_raise(void)
4638 struct drm_i915_private *dev_priv;
4639 bool ret = true;
4641 spin_lock_irq(&mchdev_lock);
4642 if (!i915_mch_dev) {
4643 ret = false;
4644 goto out_unlock;
4645 }
4646 dev_priv = i915_mch_dev;
4648 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4649 dev_priv->ips.max_delay--;
4651 out_unlock:
4652 spin_unlock_irq(&mchdev_lock);
4654 return ret;
4655 }
4656 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4659 * i915_gpu_lower - lower GPU frequency limit
4661 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4662 * frequency maximum.
4664 bool i915_gpu_lower(void)
4666 struct drm_i915_private *dev_priv;
4667 bool ret = true;
4669 spin_lock_irq(&mchdev_lock);
4670 if (!i915_mch_dev) {
4671 ret = false;
4672 goto out_unlock;
4673 }
4674 dev_priv = i915_mch_dev;
4676 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4677 dev_priv->ips.max_delay++;
4679 out_unlock:
4680 spin_unlock_irq(&mchdev_lock);
4682 return ret;
4683 }
4684 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4687 * i915_gpu_busy - indicate GPU business to IPS
4689 * Tell the IPS driver whether or not the GPU is busy.
4691 bool i915_gpu_busy(void)
4693 struct drm_i915_private *dev_priv;
4694 struct intel_engine_cs *ring;
4695 bool ret = false;
4696 int i;
4698 spin_lock_irq(&mchdev_lock);
4699 if (!i915_mch_dev)
4700 goto out_unlock;
4701 dev_priv = i915_mch_dev;
4703 for_each_ring(ring, dev_priv, i)
4704 ret |= !list_empty(&ring->request_list);
4706 out_unlock:
4707 spin_unlock_irq(&mchdev_lock);
4709 return ret;
4710 }
4711 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4714 * i915_gpu_turbo_disable - disable graphics turbo
4716 * Disable graphics turbo by resetting the max frequency and setting the
4717 * current frequency to the default.
4719 bool i915_gpu_turbo_disable(void)
4721 struct drm_i915_private *dev_priv;
4722 bool ret = true;
4724 spin_lock_irq(&mchdev_lock);
4725 if (!i915_mch_dev) {
4726 ret = false;
4727 goto out_unlock;
4728 }
4729 dev_priv = i915_mch_dev;
4731 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4733 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4734 ret = false;
4736 out_unlock:
4737 spin_unlock_irq(&mchdev_lock);
4739 return ret;
4740 }
4741 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4744 * Tells the intel_ips driver that the i915 driver is now loaded, if
4745 * IPS got loaded first.
4747 * This awkward dance is so that neither module has to depend on the
4748 * other in order for IPS to do the appropriate communication of
4749 * GPU turbo limits to i915.
4751 static void
4752 ips_ping_for_i915_load(void)
4753 {
4754 void (*link)(void);
4756 link = symbol_get(ips_link_to_i915_driver);
4757 if (link) {
4758 link();
4759 symbol_put(ips_link_to_i915_driver);
4760 }
4761 }
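/*
 * Annotation (not driver code): symbol_get() resolves the symbol only
 * if the module exporting it is already loaded, and takes a reference
 * on that module; symbol_put() drops the reference. That is what lets
 * i915 and intel_ips discover each other in either load order without
 * a hard module dependency.
 */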
4763 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4765 /* We only register the i915 ips part with intel-ips once everything is
4766 * set up, to avoid intel-ips sneaking in and reading bogus values. */
4767 spin_lock_irq(&mchdev_lock);
4768 i915_mch_dev = dev_priv;
4769 spin_unlock_irq(&mchdev_lock);
4771 ips_ping_for_i915_load();
4774 void intel_gpu_ips_teardown(void)
4776 spin_lock_irq(&mchdev_lock);
4777 i915_mch_dev = NULL;
4778 spin_unlock_irq(&mchdev_lock);
4781 static void intel_init_emon(struct drm_device *dev)
4783 struct drm_i915_private *dev_priv = dev->dev_private;
4784 u32 lcfuse;
4785 u8 pxw[16];
4786 int i;
4788 /* Disable to program */
4789 I915_WRITE(ECR, 0);
4790 POSTING_READ(ECR);
4792 /* Program energy weights for various events */
4793 I915_WRITE(SDEW, 0x15040d00);
4794 I915_WRITE(CSIEW0, 0x007f0000);
4795 I915_WRITE(CSIEW1, 0x1e220004);
4796 I915_WRITE(CSIEW2, 0x04000004);
4798 for (i = 0; i < 5; i++)
4799 I915_WRITE(PEW + (i * 4), 0);
4800 for (i = 0; i < 3; i++)
4801 I915_WRITE(DEW + (i * 4), 0);
4803 /* Program P-state weights to account for frequency power adjustment */
4804 for (i = 0; i < 16; i++) {
4805 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4806 unsigned long freq = intel_pxfreq(pxvidfreq);
4807 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4808 PXVFREQ_PX_SHIFT;
4809 unsigned long val;
4811 val = vid * vid;
4812 val *= (freq / 1000);
4813 val *= 255;
4814 val /= (127*127*900);
4815 if (val > 0xff)
4816 DRM_ERROR("bad pxval: %ld\n", val);
4817 pxw[i] = val;
4819 /* Render standby states get 0 weight */
4820 pxw[14] = 0;
4821 pxw[15] = 0;
4823 for (i = 0; i < 4; i++) {
4824 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4825 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4826 I915_WRITE(PXW + (i * 4), val);
4829 /* Adjust magic regs to magic values (more experimental results) */
4830 I915_WRITE(OGW0, 0);
4831 I915_WRITE(OGW1, 0);
4832 I915_WRITE(EG0, 0x00007f00);
4833 I915_WRITE(EG1, 0x0000000e);
4834 I915_WRITE(EG2, 0x000e0000);
4835 I915_WRITE(EG3, 0x68000300);
4836 I915_WRITE(EG4, 0x42000000);
4837 I915_WRITE(EG5, 0x00140031);
4838 I915_WRITE(EG6, 0);
4839 I915_WRITE(EG7, 0);
4841 for (i = 0; i < 8; i++)
4842 I915_WRITE(PXWL + (i * 4), 0);
4844 /* Enable PMON + select events */
4845 I915_WRITE(ECR, 0x80000019);
4847 lcfuse = I915_READ(LCFUSE02);
4849 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4850 }
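/*
 * Annotation (not driver code): a worked example of the P-state weight
 * math in the loop above. For vid = 100 and freq = 500000 kHz:
 *   val = 100 * 100 * (500000 / 1000) * 255 / (127 * 127 * 900)
 *       = 10000 * 500 * 255 / 14516100 ~= 87,
 * which fits the 8-bit weight field packed four-per-register into PXW.
 */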
4852 void intel_init_gt_powersave(struct drm_device *dev)
4854 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
4856 if (IS_CHERRYVIEW(dev))
4857 cherryview_init_gt_powersave(dev);
4858 else if (IS_VALLEYVIEW(dev))
4859 valleyview_init_gt_powersave(dev);
4862 void intel_cleanup_gt_powersave(struct drm_device *dev)
4864 if (IS_CHERRYVIEW(dev))
4865 return;
4866 else if (IS_VALLEYVIEW(dev))
4867 valleyview_cleanup_gt_powersave(dev);
4871 * intel_suspend_gt_powersave - suspend PM work and helper threads
4874 * We don't want to disable RC6 or other features here, we just want
4875 * to make sure any work we've queued has finished and won't bother
4876 * us while we're suspended.
4878 void intel_suspend_gt_powersave(struct drm_device *dev)
4880 struct drm_i915_private *dev_priv = dev->dev_private;
4882 /* Interrupts should be disabled already to avoid re-arming. */
4883 WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
4885 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
4887 cancel_work_sync(&dev_priv->rps.work);
4890 void intel_disable_gt_powersave(struct drm_device *dev)
4892 struct drm_i915_private *dev_priv = dev->dev_private;
4894 /* Interrupts should be disabled already to avoid re-arming. */
4895 WARN_ON(dev->irq_enabled && !dev_priv->pm.irqs_disabled);
4897 if (IS_IRONLAKE_M(dev)) {
4898 ironlake_disable_drps(dev);
4899 ironlake_disable_rc6(dev);
4900 } else if (INTEL_INFO(dev)->gen >= 6) {
4901 intel_suspend_gt_powersave(dev);
4903 mutex_lock(&dev_priv->rps.hw_lock);
4904 if (IS_CHERRYVIEW(dev))
4905 cherryview_disable_rps(dev);
4906 else if (IS_VALLEYVIEW(dev))
4907 valleyview_disable_rps(dev);
4908 else
4909 gen6_disable_rps(dev);
4910 dev_priv->rps.enabled = false;
4911 mutex_unlock(&dev_priv->rps.hw_lock);
4915 static void intel_gen6_powersave_work(struct work_struct *work)
4917 struct drm_i915_private *dev_priv =
4918 container_of(work, struct drm_i915_private,
4919 rps.delayed_resume_work.work);
4920 struct drm_device *dev = dev_priv->dev;
4922 mutex_lock(&dev_priv->rps.hw_lock);
4924 if (IS_CHERRYVIEW(dev)) {
4925 cherryview_enable_rps(dev);
4926 } else if (IS_VALLEYVIEW(dev)) {
4927 valleyview_enable_rps(dev);
4928 } else if (IS_BROADWELL(dev)) {
4929 gen8_enable_rps(dev);
4930 __gen6_update_ring_freq(dev);
4931 } else {
4932 gen6_enable_rps(dev);
4933 __gen6_update_ring_freq(dev);
4935 dev_priv->rps.enabled = true;
4936 mutex_unlock(&dev_priv->rps.hw_lock);
4938 intel_runtime_pm_put(dev_priv);
4941 void intel_enable_gt_powersave(struct drm_device *dev)
4943 struct drm_i915_private *dev_priv = dev->dev_private;
4945 if (IS_IRONLAKE_M(dev)) {
4946 mutex_lock(&dev->struct_mutex);
4947 ironlake_enable_drps(dev);
4948 ironlake_enable_rc6(dev);
4949 intel_init_emon(dev);
4950 mutex_unlock(&dev->struct_mutex);
4951 } else if (INTEL_INFO(dev)->gen >= 6) {
4952 /*
4953 * PCU communication is slow and this doesn't need to be
4954 * done at any specific time, so do this out of our fast path
4955 * to make resume and init faster.
4957 * We depend on the HW RC6 power context save/restore
4958 * mechanism when entering D3 through runtime PM suspend. So
4959 * disable RPM until RPS/RC6 is properly setup. We can only
4960 * get here via the driver load/system resume/runtime resume
4961 * paths, so the _noresume version is enough (and in case of
4962 * runtime resume it's necessary).
4963 */
4964 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4965 round_jiffies_up_relative(HZ)))
4966 intel_runtime_pm_get_noresume(dev_priv);
4970 void intel_reset_gt_powersave(struct drm_device *dev)
4972 struct drm_i915_private *dev_priv = dev->dev_private;
4974 dev_priv->rps.enabled = false;
4975 intel_enable_gt_powersave(dev);
4978 static void ibx_init_clock_gating(struct drm_device *dev)
4980 struct drm_i915_private *dev_priv = dev->dev_private;
4983 * On Ibex Peak and Cougar Point, we need to disable clock
4984 * gating for the panel power sequencer or it will fail to
4985 * start up when no ports are active.
4987 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4990 static void g4x_disable_trickle_feed(struct drm_device *dev)
4992 struct drm_i915_private *dev_priv = dev->dev_private;
4995 for_each_pipe(pipe) {
4996 I915_WRITE(DSPCNTR(pipe),
4997 I915_READ(DSPCNTR(pipe)) |
4998 DISPPLANE_TRICKLE_FEED_DISABLE);
4999 intel_flush_primary_plane(dev_priv, pipe);
5003 static void ilk_init_lp_watermarks(struct drm_device *dev)
5005 struct drm_i915_private *dev_priv = dev->dev_private;
5007 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
5008 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
5009 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
5012 * Don't touch WM1S_LP_EN here.
5013 * Doing so could cause underruns.
5017 static void ironlake_init_clock_gating(struct drm_device *dev)
5019 struct drm_i915_private *dev_priv = dev->dev_private;
5020 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5024 * WaFbcDisableDpfcClockGating:ilk
5026 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
5027 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
5028 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
5030 I915_WRITE(PCH_3DCGDIS0,
5031 MARIUNIT_CLOCK_GATE_DISABLE |
5032 SVSMUNIT_CLOCK_GATE_DISABLE);
5033 I915_WRITE(PCH_3DCGDIS1,
5034 VFMUNIT_CLOCK_GATE_DISABLE);
5036 /*
5037 * According to the spec the following bits should be set in
5038 * order to enable memory self-refresh
5039 * The bit 22/21 of 0x42004
5040 * The bit 5 of 0x42020
5041 * The bit 15 of 0x45000
5042 */
5043 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5044 (I915_READ(ILK_DISPLAY_CHICKEN2) |
5045 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
5046 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
5047 I915_WRITE(DISP_ARB_CTL,
5048 (I915_READ(DISP_ARB_CTL) |
5051 ilk_init_lp_watermarks(dev);
5053 /*
5054 * Based on the document from hardware guys the following bits
5055 * should be set unconditionally in order to enable FBC.
5056 * The bit 22 of 0x42000
5057 * The bit 22 of 0x42004
5058 * The bit 7,8,9 of 0x42020.
5059 */
5060 if (IS_IRONLAKE_M(dev)) {
5061 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
5062 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5063 I915_READ(ILK_DISPLAY_CHICKEN1) |
5065 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5066 I915_READ(ILK_DISPLAY_CHICKEN2) |
5070 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5072 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5073 I915_READ(ILK_DISPLAY_CHICKEN2) |
5074 ILK_ELPIN_409_SELECT);
5075 I915_WRITE(_3D_CHICKEN2,
5076 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
5077 _3D_CHICKEN2_WM_READ_PIPELINED);
5079 /* WaDisableRenderCachePipelinedFlush:ilk */
5080 I915_WRITE(CACHE_MODE_0,
5081 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5083 /* WaDisable_RenderCache_OperationalFlush:ilk */
5084 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5086 g4x_disable_trickle_feed(dev);
5088 ibx_init_clock_gating(dev);
5091 static void cpt_init_clock_gating(struct drm_device *dev)
5093 struct drm_i915_private *dev_priv = dev->dev_private;
5098 * On Ibex Peak and Cougar Point, we need to disable clock
5099 * gating for the panel power sequencer or it will fail to
5100 * start up when no ports are active.
5102 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
5103 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
5104 PCH_CPUNIT_CLOCK_GATE_DISABLE);
5105 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
5106 DPLS_EDP_PPS_FIX_DIS);
5107 /* The below fixes the weird display corruption, a few pixels shifted
5108 * downward, on (only) LVDS of some HP laptops with IVY.
5109 */
5110 for_each_pipe(pipe) {
5111 val = I915_READ(TRANS_CHICKEN2(pipe));
5112 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
5113 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5114 if (dev_priv->vbt.fdi_rx_polarity_inverted)
5115 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
5116 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
5117 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
5118 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
5119 I915_WRITE(TRANS_CHICKEN2(pipe), val);
5121 /* WADP0ClockGatingDisable */
5122 for_each_pipe(pipe) {
5123 I915_WRITE(TRANS_CHICKEN1(pipe),
5124 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5128 static void gen6_check_mch_setup(struct drm_device *dev)
5130 struct drm_i915_private *dev_priv = dev->dev_private;
5133 tmp = I915_READ(MCH_SSKPD);
5134 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
5135 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
5136 DRM_INFO("This can cause pipe underruns and display issues.\n");
5137 DRM_INFO("Please upgrade your BIOS to fix this.\n");
5141 static void gen6_init_clock_gating(struct drm_device *dev)
5143 struct drm_i915_private *dev_priv = dev->dev_private;
5144 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
5146 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
5148 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5149 I915_READ(ILK_DISPLAY_CHICKEN2) |
5150 ILK_ELPIN_409_SELECT);
5152 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
5153 I915_WRITE(_3D_CHICKEN,
5154 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5156 /* WaSetupGtModeTdRowDispatch:snb */
5157 if (IS_SNB_GT1(dev))
5158 I915_WRITE(GEN6_GT_MODE,
5159 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5161 /* WaDisable_RenderCache_OperationalFlush:snb */
5162 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5164 /*
5165 * BSpec recommends 8x4 when MSAA is used,
5166 * however in practice 16x4 seems fastest.
5167 *
5168 * Note that PS/WM thread counts depend on the WIZ hashing
5169 * disable bit, which we don't touch here, but it's good
5170 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5171 */
5172 I915_WRITE(GEN6_GT_MODE,
5173 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5175 ilk_init_lp_watermarks(dev);
5177 I915_WRITE(CACHE_MODE_0,
5178 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
5180 I915_WRITE(GEN6_UCGCTL1,
5181 I915_READ(GEN6_UCGCTL1) |
5182 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
5183 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5185 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
5186 * gating disable must be set. Failure to set it results in
5187 * flickering pixels due to Z write ordering failures after
5188 * some amount of runtime in the Mesa "fire" demo, and Unigine
5189 * Sanctuary and Tropics, and apparently anything else with
5190 * alpha test or pixel discard.
5192 * According to the spec, bit 11 (RCCUNIT) must also be set,
5193 * but we didn't debug actual testcases to find it out.
5195 * WaDisableRCCUnitClockGating:snb
5196 * WaDisableRCPBUnitClockGating:snb
5198 I915_WRITE(GEN6_UCGCTL2,
5199 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
5200 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
5202 /* WaStripsFansDisableFastClipPerformanceFix:snb */
5203 I915_WRITE(_3D_CHICKEN3,
5204 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
5208 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
5209 * 3DSTATE_SF number of SF output attributes is more than 16."
5211 I915_WRITE(_3D_CHICKEN3,
5212 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
5215 * According to the spec the following bits should be
5216 * set in order to enable memory self-refresh and fbc:
5217 * The bit21 and bit22 of 0x42000
5218 * The bit21 and bit22 of 0x42004
5219 * The bit5 and bit7 of 0x42020
5220 * The bit14 of 0x70180
5221 * The bit14 of 0x71180
5223 * WaFbcAsynchFlipDisableFbcQueue:snb
5225 I915_WRITE(ILK_DISPLAY_CHICKEN1,
5226 I915_READ(ILK_DISPLAY_CHICKEN1) |
5227 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
5228 I915_WRITE(ILK_DISPLAY_CHICKEN2,
5229 I915_READ(ILK_DISPLAY_CHICKEN2) |
5230 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
5231 I915_WRITE(ILK_DSPCLK_GATE_D,
5232 I915_READ(ILK_DSPCLK_GATE_D) |
5233 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
5234 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
5236 g4x_disable_trickle_feed(dev);
5238 cpt_init_clock_gating(dev);
5240 gen6_check_mch_setup(dev);
5243 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
5245 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
5248 * WaVSThreadDispatchOverride:ivb,vlv
5250 * This actually overrides the dispatch
5251 * mode for all thread types.
5253 reg &= ~GEN7_FF_SCHED_MASK;
5254 reg |= GEN7_FF_TS_SCHED_HW;
5255 reg |= GEN7_FF_VS_SCHED_HW;
5256 reg |= GEN7_FF_DS_SCHED_HW;
5258 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
5261 static void lpt_init_clock_gating(struct drm_device *dev)
5263 struct drm_i915_private *dev_priv = dev->dev_private;
5266 * TODO: this bit should only be enabled when really needed, then
5267 * disabled when not needed anymore in order to save power.
5269 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
5270 I915_WRITE(SOUTH_DSPCLK_GATE_D,
5271 I915_READ(SOUTH_DSPCLK_GATE_D) |
5272 PCH_LP_PARTITION_LEVEL_DISABLE);
5274 /* WADPOClockGatingDisable:hsw */
5275 I915_WRITE(_TRANSA_CHICKEN1,
5276 I915_READ(_TRANSA_CHICKEN1) |
5277 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
5280 static void lpt_suspend_hw(struct drm_device *dev)
5282 struct drm_i915_private *dev_priv = dev->dev_private;
5284 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
5285 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
5287 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
5288 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
5292 static void gen8_init_clock_gating(struct drm_device *dev)
5294 struct drm_i915_private *dev_priv = dev->dev_private;
5297 I915_WRITE(WM3_LP_ILK, 0);
5298 I915_WRITE(WM2_LP_ILK, 0);
5299 I915_WRITE(WM1_LP_ILK, 0);
5301 /* FIXME(BDW): Check all the w/a, some might only apply to
5302 * pre-production hw. */
5304 /* WaDisablePartialInstShootdown:bdw */
5305 I915_WRITE(GEN8_ROW_CHICKEN,
5306 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5308 /* WaDisableThreadStallDopClockGating:bdw */
5309 /* FIXME: Unclear whether we really need this on production bdw. */
5310 I915_WRITE(GEN8_ROW_CHICKEN,
5311 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5314 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
5315 * pre-production hardware
5317 I915_WRITE(HALF_SLICE_CHICKEN3,
5318 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
5319 I915_WRITE(HALF_SLICE_CHICKEN3,
5320 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5321 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
5323 I915_WRITE(_3D_CHICKEN3,
5324 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
5326 I915_WRITE(COMMON_SLICE_CHICKEN2,
5327 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
5329 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5330 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
5332 /* WaDisableDopClockGating:bdw May not be needed for production */
5333 I915_WRITE(GEN7_ROW_CHICKEN2,
5334 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5336 /* WaSwitchSolVfFArbitrationPriority:bdw */
5337 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5339 /* WaPsrDPAMaskVBlankInSRD:bdw */
5340 I915_WRITE(CHICKEN_PAR1_1,
5341 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
5343 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
5344 for_each_pipe(pipe) {
5345 I915_WRITE(CHICKEN_PIPESL_1(pipe),
5346 I915_READ(CHICKEN_PIPESL_1(pipe)) |
5347 BDW_DPRS_MASK_VBLANK_SRD);
5350 /* Use Force Non-Coherent whenever executing a 3D context. This is a
5351 * workaround for a possible hang in the unlikely event a TLB
5352 * invalidation occurs during a PSD flush.
5353 */
5354 I915_WRITE(HDC_CHICKEN0,
5355 I915_READ(HDC_CHICKEN0) |
5356 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
5358 /* WaVSRefCountFullforceMissDisable:bdw */
5359 /* WaDSRefCountFullforceMissDisable:bdw */
5360 I915_WRITE(GEN7_FF_THREAD_MODE,
5361 I915_READ(GEN7_FF_THREAD_MODE) &
5362 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5365 * BSpec recommends 8x4 when MSAA is used,
5366 * however in practice 16x4 seems fastest.
5368 * Note that PS/WM thread counts depend on the WIZ hashing
5369 * disable bit, which we don't touch here, but it's good
5370 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5372 I915_WRITE(GEN7_GT_MODE,
5373 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5375 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5376 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5378 /* WaDisableSDEUnitClockGating:bdw */
5379 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5380 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5382 /* Wa4x4STCOptimizationDisable:bdw */
5383 I915_WRITE(CACHE_MODE_1,
5384 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
5387 static void haswell_init_clock_gating(struct drm_device *dev)
5389 struct drm_i915_private *dev_priv = dev->dev_private;
5391 ilk_init_lp_watermarks(dev);
5393 /* L3 caching of data atomics doesn't work -- disable it. */
5394 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
5395 I915_WRITE(HSW_ROW_CHICKEN3,
5396 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
5398 /* This is required by WaCatErrorRejectionIssue:hsw */
5399 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5400 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5401 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5403 /* WaVSRefCountFullforceMissDisable:hsw */
5404 I915_WRITE(GEN7_FF_THREAD_MODE,
5405 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
5407 /* WaDisable_RenderCache_OperationalFlush:hsw */
5408 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5410 /* enable HiZ Raw Stall Optimization */
5411 I915_WRITE(CACHE_MODE_0_GEN7,
5412 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5414 /* WaDisable4x2SubspanOptimization:hsw */
5415 I915_WRITE(CACHE_MODE_1,
5416 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5419 * BSpec recommends 8x4 when MSAA is used,
5420 * however in practice 16x4 seems fastest.
5422 * Note that PS/WM thread counts depend on the WIZ hashing
5423 * disable bit, which we don't touch here, but it's good
5424 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5426 I915_WRITE(GEN7_GT_MODE,
5427 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5429 /* WaSwitchSolVfFArbitrationPriority:hsw */
5430 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
5432 /* WaRsPkgCStateDisplayPMReq:hsw */
5433 I915_WRITE(CHICKEN_PAR1_1,
5434 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
5436 lpt_init_clock_gating(dev);
5439 static void ivybridge_init_clock_gating(struct drm_device *dev)
5441 struct drm_i915_private *dev_priv = dev->dev_private;
5444 ilk_init_lp_watermarks(dev);
5446 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
5448 /* WaDisableEarlyCull:ivb */
5449 I915_WRITE(_3D_CHICKEN3,
5450 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5452 /* WaDisableBackToBackFlipFix:ivb */
5453 I915_WRITE(IVB_CHICKEN3,
5454 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5455 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5457 /* WaDisablePSDDualDispatchEnable:ivb */
5458 if (IS_IVB_GT1(dev))
5459 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5460 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5462 /* WaDisable_RenderCache_OperationalFlush:ivb */
5463 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5465 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
5466 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
5467 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
5469 /* WaApplyL3ControlAndL3ChickenMode:ivb */
5470 I915_WRITE(GEN7_L3CNTLREG1,
5471 GEN7_WA_FOR_GEN7_L3_CONTROL);
5472 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
5473 GEN7_WA_L3_CHICKEN_MODE);
5474 if (IS_IVB_GT1(dev))
5475 I915_WRITE(GEN7_ROW_CHICKEN2,
5476 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5478 /* must write both registers */
5479 I915_WRITE(GEN7_ROW_CHICKEN2,
5480 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5481 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5482 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5485 /* WaForceL3Serialization:ivb */
5486 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5487 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5490 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5491 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5493 I915_WRITE(GEN6_UCGCTL2,
5494 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5496 /* This is required by WaCatErrorRejectionIssue:ivb */
5497 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5498 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5499 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5501 g4x_disable_trickle_feed(dev);
5503 gen7_setup_fixed_func_scheduler(dev_priv);
5505 if (0) { /* causes HiZ corruption on ivb:gt1 */
5506 /* enable HiZ Raw Stall Optimization */
5507 I915_WRITE(CACHE_MODE_0_GEN7,
5508 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5511 /* WaDisable4x2SubspanOptimization:ivb */
5512 I915_WRITE(CACHE_MODE_1,
5513 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5516 * BSpec recommends 8x4 when MSAA is used,
5517 * however in practice 16x4 seems fastest.
5519 * Note that PS/WM thread counts depend on the WIZ hashing
5520 * disable bit, which we don't touch here, but it's good
5521 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5523 I915_WRITE(GEN7_GT_MODE,
5524 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5526 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5527 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5528 snpcr |= GEN6_MBC_SNPCR_MED;
5529 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5531 if (!HAS_PCH_NOP(dev))
5532 cpt_init_clock_gating(dev);
5534 gen6_check_mch_setup(dev);
5537 static void valleyview_init_clock_gating(struct drm_device *dev)
5539 struct drm_i915_private *dev_priv = dev->dev_private;
5542 mutex_lock(&dev_priv->rps.hw_lock);
5543 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5544 mutex_unlock(&dev_priv->rps.hw_lock);
5545 switch ((val >> 6) & 3) {
5546 case 0:
5547 case 1:
5548 dev_priv->mem_freq = 800;
5549 break;
5550 case 2:
5551 dev_priv->mem_freq = 1066;
5552 break;
5553 case 3:
5554 dev_priv->mem_freq = 1333;
5555 break;
5556 }
5557 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5559 dev_priv->vlv_cdclk_freq = valleyview_cur_cdclk(dev_priv);
5560 DRM_DEBUG_DRIVER("Current CD clock rate: %d MHz",
5561 dev_priv->vlv_cdclk_freq);
5563 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5565 /* WaDisableEarlyCull:vlv */
5566 I915_WRITE(_3D_CHICKEN3,
5567 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5569 /* WaDisableBackToBackFlipFix:vlv */
5570 I915_WRITE(IVB_CHICKEN3,
5571 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5572 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5574 /* WaPsdDispatchEnable:vlv */
5575 /* WaDisablePSDDualDispatchEnable:vlv */
5576 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5577 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5578 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5580 /* WaDisable_RenderCache_OperationalFlush:vlv */
5581 I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5583 /* WaForceL3Serialization:vlv */
5584 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5585 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5587 /* WaDisableDopClockGating:vlv */
5588 I915_WRITE(GEN7_ROW_CHICKEN2,
5589 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5591 /* This is required by WaCatErrorRejectionIssue:vlv */
5592 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5593 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5594 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5596 gen7_setup_fixed_func_scheduler(dev_priv);
5599 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5600 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5602 I915_WRITE(GEN6_UCGCTL2,
5603 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5605 /* WaDisableL3Bank2xClockGate:vlv
5606 * Disabling L3 clock gating- MMIO 940c[25] = 1
5607 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
5608 I915_WRITE(GEN7_UCGCTL4,
5609 I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5611 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5614 * BSpec says this must be set, even though
5615 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5617 I915_WRITE(CACHE_MODE_1,
5618 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5621 * WaIncreaseL3CreditsForVLVB0:vlv
5622 * This is the hardware default actually.
5624 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5627 * WaDisableVLVClockGating_VBIIssue:vlv
5628 * Disable clock gating on the GCFG unit to prevent a delay
5629 * in the reporting of vblank events.
5631 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5634 static void cherryview_init_clock_gating(struct drm_device *dev)
5636 struct drm_i915_private *dev_priv = dev->dev_private;
5638 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5640 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5642 /* WaDisablePartialInstShootdown:chv */
5643 I915_WRITE(GEN8_ROW_CHICKEN,
5644 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
5646 /* WaDisableThreadStallDopClockGating:chv */
5647 I915_WRITE(GEN8_ROW_CHICKEN,
5648 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
5650 /* WaVSRefCountFullforceMissDisable:chv */
5651 /* WaDSRefCountFullforceMissDisable:chv */
5652 I915_WRITE(GEN7_FF_THREAD_MODE,
5653 I915_READ(GEN7_FF_THREAD_MODE) &
5654 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
5656 /* WaDisableSemaphoreAndSyncFlipWait:chv */
5657 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5658 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
5660 /* WaDisableCSUnitClockGating:chv */
5661 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5662 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
5664 /* WaDisableSDEUnitClockGating:chv */
5665 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
5666 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
5668 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */
5669 I915_WRITE(HALF_SLICE_CHICKEN3,
5670 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
5672 /* WaDisableGunitClockGating:chv (pre-production hw) */
5673 I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
5674 GINT_DIS);
5676 /* WaDisableFfDopClockGating:chv (pre-production hw) */
5677 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
5678 _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
5680 /* WaDisableDopClockGating:chv (pre-production hw) */
5681 I915_WRITE(GEN7_ROW_CHICKEN2,
5682 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5683 I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
5684 GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
5687 static void g4x_init_clock_gating(struct drm_device *dev)
5689 struct drm_i915_private *dev_priv = dev->dev_private;
5690 uint32_t dspclk_gate;
5692 I915_WRITE(RENCLK_GATE_D1, 0);
5693 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5694 GS_UNIT_CLOCK_GATE_DISABLE |
5695 CL_UNIT_CLOCK_GATE_DISABLE);
5696 I915_WRITE(RAMCLK_GATE_D, 0);
5697 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5698 OVRUNIT_CLOCK_GATE_DISABLE |
5699 OVCUNIT_CLOCK_GATE_DISABLE;
5701 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5702 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5704 /* WaDisableRenderCachePipelinedFlush */
5705 I915_WRITE(CACHE_MODE_0,
5706 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5708 /* WaDisable_RenderCache_OperationalFlush:g4x */
5709 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5711 g4x_disable_trickle_feed(dev);
5714 static void crestline_init_clock_gating(struct drm_device *dev)
5716 struct drm_i915_private *dev_priv = dev->dev_private;
5718 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5719 I915_WRITE(RENCLK_GATE_D2, 0);
5720 I915_WRITE(DSPCLK_GATE_D, 0);
5721 I915_WRITE(RAMCLK_GATE_D, 0);
5722 I915_WRITE16(DEUC, 0);
5723 I915_WRITE(MI_ARB_STATE,
5724 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5726 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5727 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5730 static void broadwater_init_clock_gating(struct drm_device *dev)
5732 struct drm_i915_private *dev_priv = dev->dev_private;
5734 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5735 I965_RCC_CLOCK_GATE_DISABLE |
5736 I965_RCPB_CLOCK_GATE_DISABLE |
5737 I965_ISC_CLOCK_GATE_DISABLE |
5738 I965_FBC_CLOCK_GATE_DISABLE);
5739 I915_WRITE(RENCLK_GATE_D2, 0);
5740 I915_WRITE(MI_ARB_STATE,
5741 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5743 /* WaDisable_RenderCache_OperationalFlush:gen4 */
5744 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5747 static void gen3_init_clock_gating(struct drm_device *dev)
5749 struct drm_i915_private *dev_priv = dev->dev_private;
5750 u32 dstate = I915_READ(D_STATE);
5752 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5753 DSTATE_DOT_CLOCK_GATING;
5754 I915_WRITE(D_STATE, dstate);
5756 if (IS_PINEVIEW(dev))
5757 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5759 /* IIR "flip pending" means done if this bit is set */
5760 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5762 /* interrupts should cause a wake up from C3 */
5763 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
5765 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5766 I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5769 static void i85x_init_clock_gating(struct drm_device *dev)
5771 struct drm_i915_private *dev_priv = dev->dev_private;
5773 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5775 /* interrupts should cause a wake up from C3 */
5776 I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
5777 _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
5780 static void i830_init_clock_gating(struct drm_device *dev)
5782 struct drm_i915_private *dev_priv = dev->dev_private;
5784 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5787 void intel_init_clock_gating(struct drm_device *dev)
5789 struct drm_i915_private *dev_priv = dev->dev_private;
5791 dev_priv->display.init_clock_gating(dev);
5794 void intel_suspend_hw(struct drm_device *dev)
5796 if (HAS_PCH_LPT(dev))
5797 lpt_suspend_hw(dev);
5800 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
5801 for (i = 0; \
5802 i < (power_domains)->power_well_count && \
5803 ((power_well) = &(power_domains)->power_wells[i]); \
5804 i++) \
5805 if ((power_well)->domains & (domain_mask))
5807 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
5808 for (i = (power_domains)->power_well_count - 1; \
5809 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
5810 i--) \
5811 if ((power_well)->domains & (domain_mask))
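/*
 * Annotation (not driver code): the iterators above walk the power-well
 * array forward (enable order) or backward (disable order), visiting
 * only wells whose domain mask intersects the requested one. A minimal
 * illustrative use, mirroring intel_display_power_get() below:
 */
static inline int __count_wells_for_domains(struct i915_power_domains *pd,
					    unsigned long domain_mask)
{
	struct i915_power_well *power_well;
	int i, n = 0;

	for_each_power_well(i, power_well, domain_mask, pd)
		n++;

	return n;
}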
5814 * We should only use the power well if we explicitly asked the hardware to
5815 * enable it, so check if it's enabled and also check if we've requested it to
5818 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5819 struct i915_power_well *power_well)
5821 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5822 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5825 bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5826 enum intel_display_power_domain domain)
5828 struct i915_power_domains *power_domains;
5829 struct i915_power_well *power_well;
5833 if (dev_priv->pm.suspended)
5836 power_domains = &dev_priv->power_domains;
5838 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5839 if (power_well->always_on)
5842 if (!power_well->count) {
5850 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5851 enum intel_display_power_domain domain)
5853 struct i915_power_domains *power_domains;
5854 struct i915_power_well *power_well;
5858 if (dev_priv->pm.suspended)
5861 power_domains = &dev_priv->power_domains;
5865 mutex_lock(&power_domains->lock);
5866 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5867 if (power_well->always_on)
5870 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5875 mutex_unlock(&power_domains->lock);
5881 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5882 * when not needed anymore. We have 4 registers that can request the power well
5883 * to be enabled, and it will only be disabled if none of the registers is
5884 * requesting it to be enabled.
5886 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5888 struct drm_device *dev = dev_priv->dev;
5889 unsigned long irqflags;
5892 * After we re-enable the power well, if we touch VGA register 0x3d5
5893 * we'll get unclaimed register interrupts. This stops after we write
5894 * anything to the VGA MSR register. The vgacon module uses this
5895 * register all the time, so if we unbind our driver and, as a
5896 * consequence, bind vgacon, we'll get stuck in an infinite loop at
5897 * console_unlock(). So here we touch the VGA MSR register, making
5898 * sure vgacon can keep working normally without triggering interrupts
5899 * and error messages.
5901 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5902 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5903 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5905 if (IS_BROADWELL(dev)) {
5906 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5907 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5908 dev_priv->de_irq_mask[PIPE_B]);
5909 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5910 ~dev_priv->de_irq_mask[PIPE_B] |
5912 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5913 dev_priv->de_irq_mask[PIPE_C]);
5914 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5915 ~dev_priv->de_irq_mask[PIPE_C] |
5917 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5918 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5922 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
5923 struct i915_power_well *power_well, bool enable)
5925 bool is_enabled, enable_requested;
5928 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5929 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5930 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5932 if (enable) {
5933 if (!enable_requested)
5934 I915_WRITE(HSW_PWR_WELL_DRIVER,
5935 HSW_PWR_WELL_ENABLE_REQUEST);
5937 if (!is_enabled) {
5938 DRM_DEBUG_KMS("Enabling power well\n");
5939 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5940 HSW_PWR_WELL_STATE_ENABLED), 20))
5941 DRM_ERROR("Timeout enabling power well\n");
5944 hsw_power_well_post_enable(dev_priv);
5945 } else {
5946 if (enable_requested) {
5947 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5948 POSTING_READ(HSW_PWR_WELL_DRIVER);
5949 DRM_DEBUG_KMS("Requesting to disable the power well\n");
5954 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
5955 struct i915_power_well *power_well)
5957 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
5960 * We're taking over the BIOS, so clear any requests made by it since
5961 * the driver is in charge now.
5963 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5964 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5967 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
5968 struct i915_power_well *power_well)
5970 hsw_set_power_well(dev_priv, power_well, true);
5973 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
5974 struct i915_power_well *power_well)
5976 hsw_set_power_well(dev_priv, power_well, false);
5979 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
5980 struct i915_power_well *power_well)
5984 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5985 struct i915_power_well *power_well)
5990 void __vlv_set_power_well(struct drm_i915_private *dev_priv,
5991 enum punit_power_well power_well_id, bool enable)
5993 struct drm_device *dev = dev_priv->dev;
5999 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6002 * Enable the CRI clock source so we can get at the
6003 * display and the reference clock for VGA
6004 * hotplug / manual detection.
6006 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6007 DPLL_REFA_CLK_ENABLE_VLV |
6008 DPLL_INTEGRATED_CRI_CLK_VLV);
6009 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6012 assert_pll_disabled(dev_priv, pipe);
6013 /* Assert common reset */
6014 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) &
6019 mask = PUNIT_PWRGT_MASK(power_well_id);
6020 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6021 PUNIT_PWRGT_PWR_GATE(power_well_id);
6023 mutex_lock(&dev_priv->rps.hw_lock);
6025 #define COND \
6026 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6028 if (COND)
6029 goto out;
6031 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6032 ctrl &= ~mask;
6033 ctrl |= state;
6034 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6036 if (wait_for(COND, 100))
6037 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
6038 state,
6039 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6041 #undef COND
6043 out:
6044 mutex_unlock(&dev_priv->rps.hw_lock);
6047 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6048 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6049 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6050 * b. The other bits such as sfr settings / modesel may all
6053 * This should only be done on init and resume from S3 with
6054 * both PLLs disabled, or we risk losing DPIO and PLL
6057 if (power_well_id == PUNIT_POWER_WELL_DPIO_CMN_BC && enable)
6058 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6061 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6062 struct i915_power_well *power_well, bool enable)
6064 enum punit_power_well power_well_id = power_well->data;
6066 __vlv_set_power_well(dev_priv, power_well_id, enable);
6069 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6070 struct i915_power_well *power_well)
6072 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6075 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6076 struct i915_power_well *power_well)
6078 vlv_set_power_well(dev_priv, power_well, true);
6081 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6082 struct i915_power_well *power_well)
6084 vlv_set_power_well(dev_priv, power_well, false);
6087 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6088 struct i915_power_well *power_well)
6090 int power_well_id = power_well->data;
6091 bool enabled = false;
6096 mask = PUNIT_PWRGT_MASK(power_well_id);
6097 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6099 mutex_lock(&dev_priv->rps.hw_lock);
6101 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6103 * We only ever set the power-on and power-gate states, anything
6104 * else is unexpected.
6106 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6107 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6112 * A transient state at this point would mean some unexpected party
6113 * is poking at the power controls too.
6115 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6116 WARN_ON(ctrl != state);
6118 mutex_unlock(&dev_priv->rps.hw_lock);
6123 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6124 struct i915_power_well *power_well)
6126 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6128 vlv_set_power_well(dev_priv, power_well, true);
6130 spin_lock_irq(&dev_priv->irq_lock);
6131 valleyview_enable_display_irqs(dev_priv);
6132 spin_unlock_irq(&dev_priv->irq_lock);
6135 * During driver initialization/resume we can avoid restoring the
6136 * part of the HW/SW state that will be inited anyway explicitly.
6138 if (dev_priv->power_domains.initializing)
6141 intel_hpd_init(dev_priv->dev);
6143 i915_redisable_vga_power_on(dev_priv->dev);
6146 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6147 struct i915_power_well *power_well)
6149 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6151 spin_lock_irq(&dev_priv->irq_lock);
6152 valleyview_disable_display_irqs(dev_priv);
6153 spin_unlock_irq(&dev_priv->irq_lock);
6155 vlv_set_power_well(dev_priv, power_well, false);
static void check_power_well_state(struct drm_i915_private *dev_priv,
				   struct i915_power_well *power_well)
{
	bool enabled = power_well->ops->is_enabled(dev_priv, power_well);

	if (power_well->always_on || !i915.disable_power_well) {
		if (!enabled)
			goto mismatch;

		return;
	}

	if (enabled != (power_well->count > 0))
		goto mismatch;

	return;

mismatch:
	WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
	     power_well->name, power_well->always_on, enabled,
	     power_well->count, i915.disable_power_well);
}
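/*
 * intel_display_power_get - grab a reference on a display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to reference
 *
 * Grabs a runtime PM reference and then bumps the refcount of every power
 * well feeding @domain, enabling wells whose count goes 0 -> 1. Every call
 * must eventually be balanced by intel_display_power_put() with the same
 * domain. Illustrative usage (hypothetical caller):
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
 *	... access display audio registers ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 */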
void intel_display_power_get(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	intel_runtime_pm_get(dev_priv);

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	for_each_power_well(i, power_well, BIT(domain), power_domains) {
		if (!power_well->count++) {
			DRM_DEBUG_KMS("enabling %s\n", power_well->name);
			power_well->ops->enable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	power_domains->domain_use_count[domain]++;

	mutex_unlock(&power_domains->lock);
}
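/*
 * intel_display_power_put - release a reference on a display power domain
 * @dev_priv: i915 device instance
 * @domain: power domain to release
 *
 * Drops the reference taken by intel_display_power_get(). Wells are walked
 * in reverse order and powered down once their count hits zero, but only
 * if the i915.disable_power_well module parameter allows it; the runtime
 * PM reference is dropped last so the device stays awake until the wells
 * have been dealt with.
 */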
void intel_display_power_put(struct drm_i915_private *dev_priv,
			     enum intel_display_power_domain domain)
{
	struct i915_power_domains *power_domains;
	struct i915_power_well *power_well;
	int i;

	power_domains = &dev_priv->power_domains;

	mutex_lock(&power_domains->lock);

	WARN_ON(!power_domains->domain_use_count[domain]);
	power_domains->domain_use_count[domain]--;

	for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
		WARN_ON(!power_well->count);

		if (!--power_well->count && i915.disable_power_well) {
			DRM_DEBUG_KMS("disabling %s\n", power_well->name);
			power_well->ops->disable(dev_priv, power_well);
		}

		check_power_well_state(dev_priv, power_well);
	}

	mutex_unlock(&power_domains->lock);

	intel_runtime_pm_put(dev_priv);
}
static struct i915_power_domains *hsw_pwr;
/* Display audio driver power well request */
void i915_request_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_request_power_well);
/* Display audio driver power well release */
void i915_release_power_well(void)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!hsw_pwr))
		return;

	dev_priv = container_of(hsw_pwr, struct drm_i915_private,
				power_domains);
	intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
}
EXPORT_SYMBOL_GPL(i915_release_power_well);
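/*
 * The two exports above are the hooks declared in <drm/i915_powerwell.h>
 * for the display audio (HDA) driver, which has no dev_priv of its own;
 * hsw_pwr is the back-pointer that makes them work. A consumer is expected
 * to bracket its register access symmetrically, e.g. (illustrative):
 *
 *	i915_request_power_well();
 *	... program the display audio codec ...
 *	i915_release_power_well();
 */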
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define HSW_ALWAYS_ON_POWER_DOMAINS (			\
	BIT(POWER_DOMAIN_PIPE_A) |			\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) |		\
	BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) |		\
	BIT(POWER_DOMAIN_PORT_CRT) |			\
	BIT(POWER_DOMAIN_INIT))
#define HSW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))

#define BDW_ALWAYS_ON_POWER_DOMAINS (			\
	HSW_ALWAYS_ON_POWER_DOMAINS |			\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
#define BDW_DISPLAY_POWER_DOMAINS (				\
	(POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) |	\
	BIT(POWER_DOMAIN_INIT))
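/*
 * Note the pattern above: the *_DISPLAY_POWER_DOMAINS masks are simply the
 * complement of the corresponding *_ALWAYS_ON_POWER_DOMAINS within
 * POWER_DOMAIN_MASK, i.e. every domain that actually goes down with the
 * single HSW/BDW display power well. POWER_DOMAIN_INIT is kept in both
 * masks so that the init code can reach every well through one domain.
 */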
#define VLV_ALWAYS_ON_POWER_DOMAINS	BIT(POWER_DOMAIN_INIT)
#define VLV_DISPLAY_POWER_DOMAINS	POWER_DOMAIN_MASK

#define VLV_DPIO_CMN_BC_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_PORT_CRT) |		\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) |	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))

#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS (	\
	BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) |	\
	BIT(POWER_DOMAIN_INIT))
static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
	.sync_hw = i9xx_always_on_power_well_noop,
	.enable = i9xx_always_on_power_well_noop,
	.disable = i9xx_always_on_power_well_noop,
	.is_enabled = i9xx_always_on_power_well_enabled,
};
static struct i915_power_well i9xx_always_on_power_well[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = POWER_DOMAIN_MASK,
		.ops = &i9xx_always_on_power_well_ops,
	},
};
static const struct i915_power_well_ops hsw_power_well_ops = {
	.sync_hw = hsw_power_well_sync_hw,
	.enable = hsw_power_well_enable,
	.disable = hsw_power_well_disable,
	.is_enabled = hsw_power_well_enabled,
};
static struct i915_power_well hsw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = HSW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = HSW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
static struct i915_power_well bdw_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = BDW_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = BDW_DISPLAY_POWER_DOMAINS,
		.ops = &hsw_power_well_ops,
	},
};
static const struct i915_power_well_ops vlv_display_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_display_power_well_enable,
	.disable = vlv_display_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
	.sync_hw = vlv_power_well_sync_hw,
	.enable = vlv_power_well_enable,
	.disable = vlv_power_well_disable,
	.is_enabled = vlv_power_well_enabled,
};
static struct i915_power_well vlv_power_wells[] = {
	{
		.name = "always-on",
		.always_on = 1,
		.domains = VLV_ALWAYS_ON_POWER_DOMAINS,
		.ops = &i9xx_always_on_power_well_ops,
	},
	{
		.name = "display",
		.domains = VLV_DISPLAY_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DISP2D,
		.ops = &vlv_display_power_well_ops,
	},
	{
		.name = "dpio-tx-b-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
	},
	{
		.name = "dpio-tx-b-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
	},
	{
		.name = "dpio-tx-c-01",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
	},
	{
		.name = "dpio-tx-c-23",
		.domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
			   VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
		.ops = &vlv_dpio_power_well_ops,
		.data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
	},
	{
		.name = "dpio-common",
		.domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
		.data = PUNIT_POWER_WELL_DPIO_CMN_BC,
		.ops = &vlv_dpio_power_well_ops,
	},
};
#define set_power_wells(power_domains, __power_wells) ({		\
	(power_domains)->power_wells = (__power_wells);			\
	(power_domains)->power_well_count = ARRAY_SIZE(__power_wells);	\
})
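/*
 * set_power_wells() has to stay a macro: ARRAY_SIZE() only works while the
 * argument still has its array type, which would decay to a plain pointer
 * if this were a function taking struct i915_power_well *.
 */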
int intel_power_domains_init(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	mutex_init(&power_domains->lock);

	/*
	 * The enabling order will be from lower to higher indexed wells,
	 * the disabling order is reversed.
	 */
	if (IS_HASWELL(dev_priv->dev)) {
		set_power_wells(power_domains, hsw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_BROADWELL(dev_priv->dev)) {
		set_power_wells(power_domains, bdw_power_wells);
		hsw_pwr = power_domains;
	} else if (IS_VALLEYVIEW(dev_priv->dev)) {
		set_power_wells(power_domains, vlv_power_wells);
	} else {
		set_power_wells(power_domains, i9xx_always_on_power_well);
	}

	return 0;
}
void intel_power_domains_remove(struct drm_i915_private *dev_priv)
{
	hsw_pwr = NULL;
}
static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;
	struct i915_power_well *power_well;
	int i;

	mutex_lock(&power_domains->lock);
	for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
		power_well->ops->sync_hw(dev_priv, power_well);
	mutex_unlock(&power_domains->lock);
}
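/*
 * intel_power_domains_init_hw() below is the boot/resume entry point: it
 * forces the init power domain on first and then lets every well sync its
 * software refcount with the actual hardware state. The initializing flag
 * tells well-specific code (e.g. the VLV display well) to skip the HPD/VGA
 * re-init that the caller will redo explicitly anyway.
 */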
void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_power_domains *power_domains = &dev_priv->power_domains;

	power_domains->initializing = true;
	/* For now, we need the power well to be always enabled. */
	intel_display_set_init_power(dev_priv, true);
	intel_power_domains_resume(dev_priv);
	power_domains->initializing = false;
}
void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}
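/*
 * The aux display runtime get/put helpers above are currently just thin
 * wrappers around the generic runtime PM refcount; keeping them as separate
 * entry points leaves room for AUX-specific handling later without having
 * to touch the callers.
 */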
void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_get_sync(device);
	WARN(dev_priv->pm.suspended, "Device still suspended.\n");
}
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
	pm_runtime_get_noresume(device);
}
void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_mark_last_busy(device);
	pm_runtime_put_autosuspend(device);
}
void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	pm_runtime_set_active(device);

	/*
	 * RPM depends on RC6 to save/restore the GT HW context, so make
	 * RC6 a requirement.
	 */
	if (!intel_enable_rc6(dev)) {
		DRM_INFO("RC6 disabled, disabling runtime PM support\n");
		return;
	}

	pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
	pm_runtime_mark_last_busy(device);
	pm_runtime_use_autosuspend(device);

	pm_runtime_put_autosuspend(device);
}
void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct device *device = &dev->pdev->dev;

	if (!HAS_RUNTIME_PM(dev))
		return;

	if (!intel_enable_rc6(dev))
		return;

	/* Make sure we're not suspended first. */
	pm_runtime_get_sync(device);
	pm_runtime_disable(device);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FBC(dev)) {
		if (INTEL_INFO(dev)->gen >= 7) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = gen7_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (INTEL_INFO(dev)->gen >= 5) {
			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
			dev_priv->display.enable_fbc = ironlake_enable_fbc;
			dev_priv->display.disable_fbc = ironlake_disable_fbc;
		} else if (IS_GM45(dev)) {
			dev_priv->display.fbc_enabled = g4x_fbc_enabled;
			dev_priv->display.enable_fbc = g4x_enable_fbc;
			dev_priv->display.disable_fbc = g4x_disable_fbc;
		} else {
			dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
			dev_priv->display.enable_fbc = i8xx_enable_fbc;
			dev_priv->display.disable_fbc = i8xx_disable_fbc;

			/* This value was pulled out of someone's hat */
			I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
		}
	}

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = gen8_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			pineview_disable_cxsr(dev);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
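/*
 * The two pcode helpers below implement the GEN6+ mailbox handshake with
 * the PCU firmware: the caller (holding rps.hw_lock) writes the payload to
 * GEN6_PCODE_DATA, kicks off the command by writing GEN6_PCODE_READY | mbox
 * to GEN6_PCODE_MAILBOX, and then polls for the firmware to clear the READY
 * bit. A mailbox that is still busy on entry, or a poll timeout, is
 * reported to the caller as -EAGAIN or -ETIMEDOUT respectively.
 */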
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		div = 10;
		break;
	case 1066:
		div = 12;
		break;
	case 1333:
		div = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
}
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul;

	/* 4 x czclk */
	switch (dev_priv->mem_freq) {
	case 800:
		mul = 10;
		break;
	case 1066:
		mul = 12;
		break;
	case 1333:
		mul = 16;
		break;
	default:
		return -1;
	}

	return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
}
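/*
 * Worked example of the mapping above (values assume the div/mul table
 * reconstructed in the switch statements): with mem_freq == 1066 the
 * divider is 12, so an opcode of 0xd1 (209) gives
 *
 *	DIV_ROUND_CLOSEST(1066 * (209 + 6 - 0xbd), 4 * 12)
 *	  = DIV_ROUND_CLOSEST(1066 * 26, 48) = 577 MHz
 *
 * and vlv_freq_opcode(dev_priv, 577) inverts it:
 *
 *	DIV_ROUND_CLOSEST(4 * 12 * 577, 1066) + 0xbd - 6 = 26 + 183 = 209
 */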
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
	dev_priv->pm.irqs_disabled = false;
}