/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
#include <linux/vgaarb.h>
#include <drm/i915_powerwell.h>
#include <linux/pm_runtime.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle and RC6 support is
 * enabled, and the GPU wakes up automatically as soon as a new workload
 * arises.
 *
 * There are different RC6 modes available in Intel GPUs, which differ in
 * the latency required to enter and leave RC6 and in the voltage consumed
 * by the GPU in each state.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require a higher latency to switch to and wake up from.
 */
#define INTEL_RC6_ENABLE		(1<<0)
#define INTEL_RC6p_ENABLE		(1<<1)
#define INTEL_RC6pp_ENABLE		(1<<2)
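/*
 * Illustrative combination (a sketch, not a platform-specific table): a part
 * that supports normal and deep RC6 but not RC6pp would be enabled with
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. bits 0 and 1 set.
 */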
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
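/*
 * Illustrative usage (matching the i915.enable_fbc checks further below):
 * booting with i915.enable_fbc=1 forces FBC on where supported,
 * i915.enable_fbc=0 disables it, and the default of -1 keeps the per-chip
 * default policy.
 */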
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. This parameter has
	 * no impact on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}

static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.fbc_enabled)
		return false;

	return dev_priv->display.fbc_enabled(dev);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}

static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	intel_fb = to_intel_framebuffer(fb);
	obj = intel_fb->obj;
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0 &&
	    INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, intel_fb->obj->base.size)) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb, int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
static void pineview_disable_cxsr(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* deactivate cxsr */
	I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
}

/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int latency_ns = 5000;
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}

static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_WM,
	PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	PINEVIEW_DISPLAY_FIFO, PINEVIEW_MAX_WM, PINEVIEW_DFT_HPLLOFF_WM,
	PINEVIEW_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_wm = {
	PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	PINEVIEW_CURSOR_FIFO, PINEVIEW_CURSOR_MAX_WM, PINEVIEW_CURSOR_DFT_WM,
	PINEVIEW_CURSOR_GUARD_WM, PINEVIEW_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_wm_info = {
	G4X_FIFO_SIZE, G4X_MAX_WM, G4X_MAX_WM, 2, G4X_FIFO_LINE_SIZE
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM,
	2, G4X_FIFO_LINE_SIZE
};
static const struct intel_watermark_params valleyview_wm_info = {
	VALLEYVIEW_FIFO_SIZE, VALLEYVIEW_MAX_WM, VALLEYVIEW_MAX_WM,
	2, G4X_FIFO_LINE_SIZE
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	I965_CURSOR_FIFO, VALLEYVIEW_CURSOR_MAX_WM, I965_CURSOR_DFT_WM,
	2, G4X_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	I965_CURSOR_FIFO, I965_CURSOR_MAX_WM, I965_CURSOR_DFT_WM,
	2, I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i945_wm_info = {
	I945_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i915_wm_info = {
	I915_FIFO_SIZE, I915_MAX_WM, 1, 2, I915_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i830_wm_info = {
	I855GM_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE
};
static const struct intel_watermark_params i845_wm_info = {
	I830_FIFO_SIZE, I915_MAX_WM, 1, 2, I830_FIFO_LINE_SIZE
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills
 * past the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;
	return wm_size;
}
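/*
 * Worked example for intel_calculate_wm() (illustrative numbers only):
 * with clock_in_khz = 100000 (100 MHz), pixel_size = 4 and the default
 * latency_ns = 5000, entries_required = (100000/1000) * 4 * 5000 / 1000
 * = 2000 bytes, or DIV_ROUND_UP(2000, 64) = 32 cachelines for a 64-byte
 * cacheline. With fifo_size = 96 and guard_size = 2 the watermark would
 * be 96 - (32 + 2) = 62, subject to the max_wm clamp above.
 */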
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		pineview_disable_cxsr(dev);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		/* activate cxsr */
		I915_WRITE(DSPFW3,
			   I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
		DRM_DEBUG_KMS("Self-refresh is enabled\n");
	} else {
		pineview_disable_cxsr(dev);
		DRM_DEBUG_KMS("Self-refresh is disabled\n");
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_device *dev,
				      int plane,
				      int *plane_prec_mult,
				      int *plane_dl,
				      int *cursor_prec_mult,
				      int *cursor_dl)
{
	struct drm_crtc *crtc;
	int clock, pixel_size;
	int entries;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc))
		return false;

	clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */

	entries = (clock / 1000) * pixel_size;
	*plane_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
						     pixel_size);

	entries = (clock / 1000) * 4;	/* BPP is always 4 for cursor */
	*cursor_prec_mult = (entries > 256) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
	*cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);

	return true;
}
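/*
 * Worked example (illustrative): at a 148500 kHz pixel clock with 4 bytes
 * per pixel, entries = 148 * 4 = 592 > 256, so the precision multiplier is
 * DRAIN_LATENCY_PRECISION_32 and plane_dl = (64 * 32 * 4) / 592 = 13.
 */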
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */

static void vlv_update_drain_latency(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_prec, planea_dl, planeb_prec, planeb_dl;
	int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
	int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
							either 16 or 32 */

	/* For plane A, Cursor A */
	if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
				      &cursor_prec_mult, &cursora_dl)) {
		cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
		planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;

		I915_WRITE(VLV_DDL1, cursora_prec |
				(cursora_dl << DDL_CURSORA_SHIFT) |
				planea_prec | planea_dl);
	}

	/* For plane B, Cursor B */
	if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
				      &cursor_prec_mult, &cursorb_dl)) {
		cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
		planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
			DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;

		I915_WRITE(VLV_DDL2, cursorb_prec |
				(cursorb_dl << DDL_CURSORB_SHIFT) |
				planeb_prec | planeb_dl);
	}
}
#define single_plane_enabled(mask) is_power_of_2(mask)
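/*
 * e.g. an enabled mask of (1 << PIPE_A) or (1 << PIPE_B) alone is a power
 * of two and counts as a single plane; zero or both bits set does not.
 */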
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;

	vlv_update_drain_latency(dev);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, latency_ns,
			    &valleyview_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
	} else {
		I915_WRITE(FW_BLC_SELF_VLV,
			   I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, latency_ns,
			    &g4x_cursor_wm_info, latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		I915_WRITE(FW_BLC_SELF,
			   I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   planea_wm);
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
	} else {
		/* Turn off self refresh if both pipes are enabled */
		if (IS_CRESTLINE(dev))
			I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
				   & ~FW_BLC_SELF_EN);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << 16) | (8 << 8) | (8 << 0));
	I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		enabled = crtc;
	} else
		planea_wm = fifo_size - wm_info->guard_size;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;
		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else
		planeb_wm = fifo_size - wm_info->guard_size;

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	if (IS_I945G(dev) || IS_I945GM(dev))
		I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
	else if (IS_I915GM(dev))
		I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_SELF_EN));

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (HAS_FW_BLC(dev)) {
		if (enabled) {
			if (IS_I945G(dev) || IS_I945GM(dev))
				I915_WRITE(FW_BLC_SELF,
					   FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
			else if (IS_I915GM(dev))
				I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_SELF_EN));
			DRM_DEBUG_KMS("memory self refresh enabled\n");
		} else
			DRM_DEBUG_KMS("memory self refresh disabled\n");
	}
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
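/*
 * Worked example (illustrative): pixel_rate = 100000 kHz, 4 bytes per
 * pixel and latency = 7 (0.7us) gives 100000 * 4 * 7 = 2800000, and
 * DIV_ROUND_UP_ULL(2800000, 640000) + 2 = 5 + 2 = 7.
 */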
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
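/*
 * Worked example (illustrative): latency = 20 (2us), pixel_rate = 100000
 * kHz and pipe_htotal = 2000 gives 20 * 100000 / (2000 * 10000) = 0 full
 * lines, so (0 + 1) * 1920 horizontal pixels * 4 bytes = 7680 bytes, and
 * DIV_ROUND_UP(7680, 64) + 2 = 120 + 2 = 122.
 */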
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
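/*
 * Worked example (illustrative): pri_val = 100, horiz_pixels = 1920 and
 * 4 bytes per pixel gives DIV_ROUND_UP(100 * 64, 1920 * 4) + 2 =
 * DIV_ROUND_UP(6400, 7680) + 2 = 1 + 2 = 3.
 */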
struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	uint32_t method1, method2;

	if (!params->active || !params->spr.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->spr.bytes_per_pixel,
				 mem_value);
	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->spr.horiz_pixels,
				 params->spr.bytes_per_pixel,
				 mem_value);
	return min(method1, method2);
}
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value)
{
	if (!params->active || !params->cur.enabled)
		return 0;

	return ilk_wm_method2(params->pixel_rate,
			      params->pipe_htotal,
			      params->cur.horiz_pixels,
			      params->cur.bytes_per_pixel,
			      mem_value);
}

/* Only for WM_LP. */
static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t pri_val)
{
	if (!params->active || !params->pri.enabled)
		return 0;

	return ilk_wm_fbc(pri_val,
			  params->pri.horiz_pixels,
			  params->pri.bytes_per_pixel);
}
static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen >= 8)
		return 3072;
	else if (INTEL_INFO(dev)->gen >= 7)
		return 768;
	else
		return 512;
}
/* Calculate the maximum primary/sprite plane watermark */
static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
				     int level,
				     const struct intel_wm_config *config,
				     enum intel_ddb_partitioning ddb_partitioning,
				     bool is_sprite)
{
	unsigned int fifo_size = ilk_display_fifo_size(dev);
	unsigned int max;

	/* if sprites aren't enabled, sprites get nothing */
	if (is_sprite && !config->sprites_enabled)
		return 0;

	/* HSW allows LP1+ watermarks even with multiple pipes */
	if (level == 0 || config->num_pipes_active > 1) {
		fifo_size /= INTEL_INFO(dev)->num_pipes;

		/*
		 * For some reason the non self refresh
		 * FIFO size is only half of the self
		 * refresh FIFO size on ILK/SNB.
		 */
		if (INTEL_INFO(dev)->gen <= 6)
			fifo_size /= 2;
	}

	if (config->sprites_enabled) {
		/* level 0 is always calculated with 1:1 split */
		if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
			if (is_sprite)
				fifo_size *= 5;
			fifo_size /= 6;
		} else {
			fifo_size /= 2;
		}
	}

	/* clamp to max that the registers can hold */
	if (INTEL_INFO(dev)->gen >= 8)
		max = level == 0 ? 255 : 2047;
	else if (INTEL_INFO(dev)->gen >= 7)
		/* IVB/HSW primary/sprite plane watermarks */
		max = level == 0 ? 127 : 1023;
	else if (!is_sprite)
		/* ILK/SNB primary plane watermarks */
		max = level == 0 ? 127 : 511;
	else
		/* ILK/SNB sprite plane watermarks */
		max = level == 0 ? 63 : 255;

	return min(fifo_size, max);
}
/* Calculate the maximum cursor plane watermark */
static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
				      int level,
				      const struct intel_wm_config *config)
{
	/* HSW LP1+ watermarks w/ multiple pipes */
	if (level > 0 && config->num_pipes_active > 1)
		return 64;

	/* otherwise just report max that registers can hold */
	if (INTEL_INFO(dev)->gen >= 7)
		return level == 0 ? 63 : 255;
	else
		return level == 0 ? 31 : 63;
}

/* Calculate the maximum FBC watermark */
static unsigned int ilk_fbc_wm_max(const struct drm_device *dev)
{
	/* max that registers can hold */
	if (INTEL_INFO(dev)->gen >= 8)
		return 31;
	else
		return 15;
}
static void ilk_compute_wm_maximums(const struct drm_device *dev,
				    int level,
				    const struct intel_wm_config *config,
				    enum intel_ddb_partitioning ddb_partitioning,
				    struct ilk_wm_maximums *max)
{
	max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
	max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
	max->cur = ilk_cursor_wm_max(dev, level, config);
	max->fbc = ilk_fbc_wm_max(dev);
}
static bool ilk_validate_wm_level(int level,
				  const struct ilk_wm_maximums *max,
				  struct intel_wm_level *result)
{
	bool ret;

	/* already determined to be invalid? */
	if (!result->enable)
		return false;

	result->enable = result->pri_val <= max->pri &&
			 result->spr_val <= max->spr &&
			 result->cur_val <= max->cur;

	ret = result->enable;

	/*
	 * HACK until we can pre-compute everything,
	 * and thus fail gracefully if LP0 watermarks
	 * are exceeded...
	 */
	if (level == 0 && !result->enable) {
		if (result->pri_val > max->pri)
			DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
				      level, result->pri_val, max->pri);
		if (result->spr_val > max->spr)
			DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
				      level, result->spr_val, max->spr);
		if (result->cur_val > max->cur)
			DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
				      level, result->cur_val, max->cur);

		result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
		result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
		result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
		result->enable = true;
	}

	return ret;
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
				 int level,
				 const struct ilk_pipe_wm_parameters *p,
				 struct intel_wm_level *result)
{
	uint16_t pri_latency = dev_priv->wm.pri_latency[level];
	uint16_t spr_latency = dev_priv->wm.spr_latency[level];
	uint16_t cur_latency = dev_priv->wm.cur_latency[level];

	/* WM1+ latency values stored in 0.5us units */
	if (level > 0) {
		pri_latency *= 5;
		spr_latency *= 5;
		cur_latency *= 5;
	}

	result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
	result->spr_val = ilk_compute_spr_wm(p, spr_latency);
	result->cur_val = ilk_compute_cur_wm(p, cur_latency);
	result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
	result->enable = true;
}
static uint32_t
hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	u32 linetime, ips_linetime;

	if (!intel_crtc_active(crtc))
		return 0;

	/* The watermarks are computed based on how long it takes to fill a
	 * single row at the given clock rate, multiplied by 8.
	 */
	linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
				     mode->crtc_clock);
	ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
					 intel_ddi_get_cdclk_freq(dev_priv));

	return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
	       PIPE_WM_LINETIME_TIME(linetime);
}
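/*
 * Worked example (illustrative): crtc_htotal = 2200 and crtc_clock =
 * 148500 kHz gives linetime = DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500)
 * = DIV_ROUND_CLOSEST(17600000, 148500) = 119, i.e. ~14.8us of line time
 * expressed in units of 1/8 us.
 */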
static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		uint64_t sskpd = I915_READ64(MCH_SSKPD);

		wm[0] = (sskpd >> 56) & 0xFF;
		if (wm[0] == 0)
			wm[0] = sskpd & 0xF;
		wm[1] = (sskpd >> 4) & 0xFF;
		wm[2] = (sskpd >> 12) & 0xFF;
		wm[3] = (sskpd >> 20) & 0x1FF;
		wm[4] = (sskpd >> 32) & 0x1FF;
	} else if (INTEL_INFO(dev)->gen >= 6) {
		uint32_t sskpd = I915_READ(MCH_SSKPD);

		wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
		wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
		wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
		wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
	} else if (INTEL_INFO(dev)->gen >= 5) {
		uint32_t mltr = I915_READ(MLTR_ILK);

		/* ILK primary LP0 latency is 700 ns */
		wm[0] = 7;
		wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
		wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
	}
}
static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK sprite LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;
}

static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
{
	/* ILK cursor LP0 latency is 1300 ns */
	if (INTEL_INFO(dev)->gen == 5)
		wm[0] = 13;

	/* WaDoubleCursorLP3Latency:ivb */
	if (IS_IVYBRIDGE(dev))
		wm[3] *= 2;
}

static int ilk_wm_max_level(const struct drm_device *dev)
{
	/* how many WM levels are we expecting */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		return 4;
	else if (INTEL_INFO(dev)->gen >= 6)
		return 3;
	else
		return 2;
}
2063 static void intel_print_wm_latency(struct drm_device *dev,
2065 const uint16_t wm[5])
2067 int level, max_level = ilk_wm_max_level(dev);
2069 for (level = 0; level <= max_level; level++) {
2070 unsigned int latency = wm[level];
2073 DRM_ERROR("%s WM%d latency not provided\n",
2078 /* WM1+ latency values in 0.5us units */
2082 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2083 name, level, wm[level],
2084 latency / 10, latency % 10);
2088 static void ilk_setup_wm_latency(struct drm_device *dev)
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2092 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2094 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2095 sizeof(dev_priv->wm.pri_latency));
2096 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2097 sizeof(dev_priv->wm.pri_latency));
2099 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2100 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2102 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2103 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2104 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2107 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2108 struct ilk_pipe_wm_parameters *p,
2109 struct intel_wm_config *config)
2111 struct drm_device *dev = crtc->dev;
2112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2113 enum pipe pipe = intel_crtc->pipe;
2114 struct drm_plane *plane;
2116 p->active = intel_crtc_active(crtc);
2118 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2119 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2120 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2121 p->cur.bytes_per_pixel = 4;
2122 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2123 p->cur.horiz_pixels = intel_crtc->cursor_width;
2124 /* TODO: for now, assume primary and cursor planes are always enabled. */
2125 p->pri.enabled = true;
2126 p->cur.enabled = true;
2129 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2130 config->num_pipes_active += intel_crtc_active(crtc);
2132 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2133 struct intel_plane *intel_plane = to_intel_plane(plane);
2135 if (intel_plane->pipe == pipe)
2136 p->spr = intel_plane->wm;
2138 config->sprites_enabled |= intel_plane->wm.enabled;
2139 config->sprites_scaled |= intel_plane->wm.scaled;
2143 /* Compute new watermarks for the pipe */
2144 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2145 const struct ilk_pipe_wm_parameters *params,
2146 struct intel_pipe_wm *pipe_wm)
2148 struct drm_device *dev = crtc->dev;
2149 const struct drm_i915_private *dev_priv = dev->dev_private;
2150 int level, max_level = ilk_wm_max_level(dev);
2151 /* LP0 watermark maximums depend on this pipe alone */
2152 struct intel_wm_config config = {
2153 .num_pipes_active = 1,
2154 .sprites_enabled = params->spr.enabled,
2155 .sprites_scaled = params->spr.scaled,
2157 struct ilk_wm_maximums max;
2159 /* LP0 watermarks always use 1/2 DDB partitioning */
2160 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2162 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2163 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2166 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2167 if (params->spr.scaled)
2170 for (level = 0; level <= max_level; level++)
2171 ilk_compute_wm_level(dev_priv, level, params,
2172 &pipe_wm->wm[level]);
2174 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2175 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2177 /* At least LP0 must be valid */
2178 return ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]);
2182 * Merge the watermarks from all active pipes for a specific level.
2184 static void ilk_merge_wm_level(struct drm_device *dev,
2186 struct intel_wm_level *ret_wm)
2188 const struct intel_crtc *intel_crtc;
2190 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2191 const struct intel_wm_level *wm =
2192 &intel_crtc->wm.active.wm[level];
2197 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2198 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2199 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2200 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2203 ret_wm->enable = true;
2207 * Merge all low power watermarks for all active pipes.
2209 static void ilk_wm_merge(struct drm_device *dev,
2210 const struct intel_wm_config *config,
2211 const struct ilk_wm_maximums *max,
2212 struct intel_pipe_wm *merged)
2214 int level, max_level = ilk_wm_max_level(dev);
2216 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2217 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2218 config->num_pipes_active > 1)
2221 /* ILK: FBC WM must be disabled always */
2222 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2224 /* merge each WM1+ level */
2225 for (level = 1; level <= max_level; level++) {
2226 struct intel_wm_level *wm = &merged->wm[level];
2228 ilk_merge_wm_level(dev, level, wm);
2230 if (!ilk_validate_wm_level(level, max, wm))
2234 * The spec says it is preferred to disable
2235 * FBC WMs instead of disabling a WM level.
2237 if (wm->fbc_val > max->fbc) {
2238 merged->fbc_wm_enabled = false;
2243 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2245 * FIXME this is racy. FBC might get enabled later.
2246 * What we should check here is whether FBC can be
2247 * enabled sometime later.
2249 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2250 for (level = 2; level <= max_level; level++) {
2251 struct intel_wm_level *wm = &merged->wm[level];
2258 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2260 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2261 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2264 /* The value we need to program into the WM_LPx latency field */
2265 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2267 struct drm_i915_private *dev_priv = dev->dev_private;
2269 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2272 return dev_priv->wm.pri_latency[level];
2275 static void ilk_compute_wm_results(struct drm_device *dev,
2276 const struct intel_pipe_wm *merged,
2277 enum intel_ddb_partitioning partitioning,
2278 struct ilk_wm_values *results)
2280 struct intel_crtc *intel_crtc;
2283 results->enable_fbc_wm = merged->fbc_wm_enabled;
2284 results->partitioning = partitioning;
2286 /* LP1+ register values */
2287 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2288 const struct intel_wm_level *r;
2290 level = ilk_wm_lp_to_level(wm_lp, merged);
2292 r = &merged->wm[level];
2296 results->wm_lp[wm_lp - 1] = WM3_LP_EN |
2297 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2298 (r->pri_val << WM1_LP_SR_SHIFT) |
2301 if (INTEL_INFO(dev)->gen >= 8)
2302 results->wm_lp[wm_lp - 1] |=
2303 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2305 results->wm_lp[wm_lp - 1] |=
2306 r->fbc_val << WM1_LP_FBC_SHIFT;
2308 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2309 WARN_ON(wm_lp != 1);
2310 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2312 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2315 /* LP0 register values */
2316 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) {
2317 enum pipe pipe = intel_crtc->pipe;
2318 const struct intel_wm_level *r =
2319 &intel_crtc->wm.active.wm[0];
2321 if (WARN_ON(!r->enable))
2324 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2326 results->wm_pipe[pipe] =
2327 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2328 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2333 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2334 * case both are at the same level. Prefer r1 in case they're the same. */
2335 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2336 struct intel_pipe_wm *r1,
2337 struct intel_pipe_wm *r2)
2339 int level, max_level = ilk_wm_max_level(dev);
2340 int level1 = 0, level2 = 0;
2342 for (level = 1; level <= max_level; level++) {
2343 if (r1->wm[level].enable)
2345 if (r2->wm[level].enable)
2349 if (level1 == level2) {
2350 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2354 } else if (level1 > level2) {
2361 /* dirty bits used to track which watermarks need changes */
2362 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2363 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2364 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2365 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2366 #define WM_DIRTY_FBC (1 << 24)
2367 #define WM_DIRTY_DDB (1 << 25)
2369 static unsigned int ilk_compute_wm_dirty(struct drm_device *dev,
2370 const struct ilk_wm_values *old,
2371 const struct ilk_wm_values *new)
2373 unsigned int dirty = 0;
2377 for_each_pipe(pipe) {
2378 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2379 dirty |= WM_DIRTY_LINETIME(pipe);
2380 /* Must disable LP1+ watermarks too */
2381 dirty |= WM_DIRTY_LP_ALL;
2384 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2385 dirty |= WM_DIRTY_PIPE(pipe);
2386 /* Must disable LP1+ watermarks too */
2387 dirty |= WM_DIRTY_LP_ALL;
2391 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2392 dirty |= WM_DIRTY_FBC;
2393 /* Must disable LP1+ watermarks too */
2394 dirty |= WM_DIRTY_LP_ALL;
2397 if (old->partitioning != new->partitioning) {
2398 dirty |= WM_DIRTY_DDB;
2399 /* Must disable LP1+ watermarks too */
2400 dirty |= WM_DIRTY_LP_ALL;
2403 /* LP1+ watermarks already deemed dirty, no need to continue */
2404 if (dirty & WM_DIRTY_LP_ALL)
2407 /* Find the lowest numbered LP1+ watermark in need of an update... */
2408 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2409 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2410 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2414 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2415 for (; wm_lp <= 3; wm_lp++)
2416 dirty |= WM_DIRTY_LP(wm_lp);
2421 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2424 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2425 bool changed = false;
2427 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2428 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2429 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2432 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2433 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2434 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2437 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2438 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2439 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2444 * Don't touch WM1S_LP_EN here.
2445 * Doing so could cause underruns.
2452 * The spec says we shouldn't write when we don't need, because every write
2453 * causes WMs to be re-evaluated, expending some power.
2455 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2456 struct ilk_wm_values *results)
2458 struct drm_device *dev = dev_priv->dev;
2459 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2463 dirty = ilk_compute_wm_dirty(dev, previous, results);
2467 _ilk_disable_lp_wm(dev_priv, dirty);
2469 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2470 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2471 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2472 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2473 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2474 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2476 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2477 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2478 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2479 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2480 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2481 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2483 if (dirty & WM_DIRTY_DDB) {
2484 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2485 val = I915_READ(WM_MISC);
2486 if (results->partitioning == INTEL_DDB_PART_1_2)
2487 val &= ~WM_MISC_DATA_PARTITION_5_6;
2489 val |= WM_MISC_DATA_PARTITION_5_6;
2490 I915_WRITE(WM_MISC, val);
2492 val = I915_READ(DISP_ARB_CTL2);
2493 if (results->partitioning == INTEL_DDB_PART_1_2)
2494 val &= ~DISP_DATA_PARTITION_5_6;
2496 val |= DISP_DATA_PARTITION_5_6;
2497 I915_WRITE(DISP_ARB_CTL2, val);
2501 if (dirty & WM_DIRTY_FBC) {
2502 val = I915_READ(DISP_ARB_CTL);
2503 if (results->enable_fbc_wm)
2504 val &= ~DISP_FBC_WM_DIS;
2506 val |= DISP_FBC_WM_DIS;
2507 I915_WRITE(DISP_ARB_CTL, val);
2510 if (dirty & WM_DIRTY_LP(1) &&
2511 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2512 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2514 if (INTEL_INFO(dev)->gen >= 7) {
2515 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2516 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2517 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2518 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2521 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2522 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2523 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2524 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2525 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2526 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2528 dev_priv->wm.hw = *results;
2531 static bool ilk_disable_lp_wm(struct drm_device *dev)
2533 struct drm_i915_private *dev_priv = dev->dev_private;
2535 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2538 static void ilk_update_wm(struct drm_crtc *crtc)
2540 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2541 struct drm_device *dev = crtc->dev;
2542 struct drm_i915_private *dev_priv = dev->dev_private;
2543 struct ilk_wm_maximums max;
2544 struct ilk_pipe_wm_parameters params = {};
2545 struct ilk_wm_values results = {};
2546 enum intel_ddb_partitioning partitioning;
2547 struct intel_pipe_wm pipe_wm = {};
2548 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
2549 struct intel_wm_config config = {};
2551 ilk_compute_wm_parameters(crtc, ¶ms, &config);
2553 intel_compute_pipe_wm(crtc, ¶ms, &pipe_wm);
2555 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
2558 intel_crtc->wm.active = pipe_wm;
2560 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
2561 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
2563 /* 5/6 split only in single pipe config on IVB+ */
2564 if (INTEL_INFO(dev)->gen >= 7 &&
2565 config.num_pipes_active == 1 && config.sprites_enabled) {
2566 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
2567 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
2569 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
2571 best_lp_wm = &lp_wm_1_2;
2574 partitioning = (best_lp_wm == &lp_wm_1_2) ?
2575 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
2577 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
2579 ilk_write_wm_values(dev_priv, &results);
2582 static void ilk_update_sprite_wm(struct drm_plane *plane,
2583 struct drm_crtc *crtc,
2584 uint32_t sprite_width, int pixel_size,
2585 bool enabled, bool scaled)
2587 struct drm_device *dev = plane->dev;
2588 struct intel_plane *intel_plane = to_intel_plane(plane);
2590 intel_plane->wm.enabled = enabled;
2591 intel_plane->wm.scaled = scaled;
2592 intel_plane->wm.horiz_pixels = sprite_width;
2593 intel_plane->wm.bytes_per_pixel = pixel_size;
2596 * IVB workaround: must disable low power watermarks for at least
2597 * one frame before enabling scaling. LP watermarks can be re-enabled
2598 * when scaling is disabled.
2600 * WaCxSRDisabledForSpriteScaling:ivb
2602 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
2603 intel_wait_for_vblank(dev, intel_plane->pipe);
2605 ilk_update_wm(crtc);
2608 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
2610 struct drm_device *dev = crtc->dev;
2611 struct drm_i915_private *dev_priv = dev->dev_private;
2612 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2613 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2614 struct intel_pipe_wm *active = &intel_crtc->wm.active;
2615 enum pipe pipe = intel_crtc->pipe;
2616 static const unsigned int wm0_pipe_reg[] = {
2617 [PIPE_A] = WM0_PIPEA_ILK,
2618 [PIPE_B] = WM0_PIPEB_ILK,
2619 [PIPE_C] = WM0_PIPEC_IVB,
2622 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
2623 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2624 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
2626 if (intel_crtc_active(crtc)) {
2627 u32 tmp = hw->wm_pipe[pipe];
2630 * For active pipes LP0 watermark is marked as
2631 * enabled, and LP1+ watermaks as disabled since
2632 * we can't really reverse compute them in case
2633 * multiple pipes are active.
2635 active->wm[0].enable = true;
2636 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
2637 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
2638 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
2639 active->linetime = hw->wm_linetime[pipe];
2641 int level, max_level = ilk_wm_max_level(dev);
2644 * For inactive pipes, all watermark levels
2645 * should be marked as enabled but zeroed,
2646 * which is what we'd compute them to.
2648 for (level = 0; level <= max_level; level++)
2649 active->wm[level].enable = true;
2653 void ilk_wm_get_hw_state(struct drm_device *dev)
2655 struct drm_i915_private *dev_priv = dev->dev_private;
2656 struct ilk_wm_values *hw = &dev_priv->wm.hw;
2657 struct drm_crtc *crtc;
2659 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
2660 ilk_pipe_wm_get_hw_state(crtc);
2662 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
2663 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
2664 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
2666 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
2667 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
2668 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
2670 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2671 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
2672 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2673 else if (IS_IVYBRIDGE(dev))
2674 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
2675 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
2678 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
2682 * intel_update_watermarks - update FIFO watermark values based on current modes
2684 * Calculate watermark values for the various WM regs based on current mode
2685 * and plane configuration.
2687 * There are several cases to deal with here:
2688 * - normal (i.e. non-self-refresh)
2689 * - self-refresh (SR) mode
2690 * - lines are large relative to FIFO size (buffer can hold up to 2)
2691 * - lines are small relative to FIFO size (buffer can hold more than 2
2692 * lines), so need to account for TLB latency
2694 * The normal calculation is:
2695 * watermark = dotclock * bytes per pixel * latency
2696 * where latency is platform & configuration dependent (we assume pessimal
2699 * The SR calculation is:
2700 * watermark = (trunc(latency/line time)+1) * surface width *
2703 * line time = htotal / dotclock
2704 * surface width = hdisplay for normal plane and 64 for cursor
2705 * and latency is assumed to be high, as above.
2707 * The final value programmed to the register should always be rounded up,
2708 * and include an extra 2 entries to account for clock crossings.
2710 * We don't use the sprite, so we can ignore that. And on Crestline we have
2711 * to set the non-SR watermarks to 8.
2713 void intel_update_watermarks(struct drm_crtc *crtc)
2715 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
2717 if (dev_priv->display.update_wm)
2718 dev_priv->display.update_wm(crtc);
2721 void intel_update_sprite_watermarks(struct drm_plane *plane,
2722 struct drm_crtc *crtc,
2723 uint32_t sprite_width, int pixel_size,
2724 bool enabled, bool scaled)
2726 struct drm_i915_private *dev_priv = plane->dev->dev_private;
2728 if (dev_priv->display.update_sprite_wm)
2729 dev_priv->display.update_sprite_wm(plane, crtc, sprite_width,
2730 pixel_size, enabled, scaled);
2733 static struct drm_i915_gem_object *
2734 intel_alloc_context_page(struct drm_device *dev)
2736 struct drm_i915_gem_object *ctx;
2739 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2741 ctx = i915_gem_alloc_object(dev, 4096);
2743 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2747 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
2749 DRM_ERROR("failed to pin power context: %d\n", ret);
2753 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2755 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2762 i915_gem_object_ggtt_unpin(ctx);
2764 drm_gem_object_unreference(&ctx->base);
2769 * Lock protecting IPS related data structures
2771 DEFINE_SPINLOCK(mchdev_lock);
2773 /* Global for IPS driver to get at the current i915 device. Protected by
2775 static struct drm_i915_private *i915_mch_dev;
2777 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2779 struct drm_i915_private *dev_priv = dev->dev_private;
2782 assert_spin_locked(&mchdev_lock);
2784 rgvswctl = I915_READ16(MEMSWCTL);
2785 if (rgvswctl & MEMCTL_CMD_STS) {
2786 DRM_DEBUG("gpu busy, RCS change rejected\n");
2787 return false; /* still busy with another command */
2790 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2791 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2792 I915_WRITE16(MEMSWCTL, rgvswctl);
2793 POSTING_READ16(MEMSWCTL);
2795 rgvswctl |= MEMCTL_CMD_STS;
2796 I915_WRITE16(MEMSWCTL, rgvswctl);
2801 static void ironlake_enable_drps(struct drm_device *dev)
2803 struct drm_i915_private *dev_priv = dev->dev_private;
2804 u32 rgvmodectl = I915_READ(MEMMODECTL);
2805 u8 fmax, fmin, fstart, vstart;
2807 spin_lock_irq(&mchdev_lock);
2809 /* Enable temp reporting */
2810 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2811 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2813 /* 100ms RC evaluation intervals */
2814 I915_WRITE(RCUPEI, 100000);
2815 I915_WRITE(RCDNEI, 100000);
2817 /* Set max/min thresholds to 90ms and 80ms respectively */
2818 I915_WRITE(RCBMAXAVG, 90000);
2819 I915_WRITE(RCBMINAVG, 80000);
2821 I915_WRITE(MEMIHYST, 1);
2823 /* Set up min, max, and cur for interrupt handling */
2824 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2825 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2826 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2827 MEMMODE_FSTART_SHIFT;
2829 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2832 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2833 dev_priv->ips.fstart = fstart;
2835 dev_priv->ips.max_delay = fstart;
2836 dev_priv->ips.min_delay = fmin;
2837 dev_priv->ips.cur_delay = fstart;
2839 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2840 fmax, fmin, fstart);
2842 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2845 * Interrupts will be enabled in ironlake_irq_postinstall
2848 I915_WRITE(VIDSTART, vstart);
2849 POSTING_READ(VIDSTART);
2851 rgvmodectl |= MEMMODE_SWMODE_EN;
2852 I915_WRITE(MEMMODECTL, rgvmodectl);
2854 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2855 DRM_ERROR("stuck trying to change perf mode\n");
2858 ironlake_set_drps(dev, fstart);
2860 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2862 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2863 dev_priv->ips.last_count2 = I915_READ(0x112f4);
2864 getrawmonotonic(&dev_priv->ips.last_time2);
2866 spin_unlock_irq(&mchdev_lock);
2869 static void ironlake_disable_drps(struct drm_device *dev)
2871 struct drm_i915_private *dev_priv = dev->dev_private;
2874 spin_lock_irq(&mchdev_lock);
2876 rgvswctl = I915_READ16(MEMSWCTL);
2878 /* Ack interrupts, disable EFC interrupt */
2879 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2880 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2881 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2882 I915_WRITE(DEIIR, DE_PCU_EVENT);
2883 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2885 /* Go back to the starting frequency */
2886 ironlake_set_drps(dev, dev_priv->ips.fstart);
2888 rgvswctl |= MEMCTL_CMD_STS;
2889 I915_WRITE(MEMSWCTL, rgvswctl);
2892 spin_unlock_irq(&mchdev_lock);
2895 /* There's a funny hw issue where the hw returns all 0 when reading from
2896 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2897 * ourselves, instead of doing a rmw cycle (which might result in us clearing
2898 * all limits and the gpu stuck at whatever frequency it is at atm).
2900 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
2904 /* Only set the down limit when we've reached the lowest level to avoid
2905 * getting more interrupts, otherwise leave this clear. This prevents a
2906 * race in the hw when coming out of rc6: There's a tiny window where
2907 * the hw runs at the minimal clock before selecting the desired
2908 * frequency, if the down threshold expires in that window we will not
2909 * receive a down interrupt. */
2910 limits = dev_priv->rps.max_freq_softlimit << 24;
2911 if (val <= dev_priv->rps.min_freq_softlimit)
2912 limits |= dev_priv->rps.min_freq_softlimit << 16;
2917 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
2921 new_power = dev_priv->rps.power;
2922 switch (dev_priv->rps.power) {
2924 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
2925 new_power = BETWEEN;
2929 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
2930 new_power = LOW_POWER;
2931 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
2932 new_power = HIGH_POWER;
2936 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
2937 new_power = BETWEEN;
2940 /* Max/min bins are special */
2941 if (val == dev_priv->rps.min_freq_softlimit)
2942 new_power = LOW_POWER;
2943 if (val == dev_priv->rps.max_freq_softlimit)
2944 new_power = HIGH_POWER;
2945 if (new_power == dev_priv->rps.power)
2948 /* Note the units here are not exactly 1us, but 1280ns. */
2949 switch (new_power) {
2951 /* Upclock if more than 95% busy over 16ms */
2952 I915_WRITE(GEN6_RP_UP_EI, 12500);
2953 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
2955 /* Downclock if less than 85% busy over 32ms */
2956 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
2957 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
2959 I915_WRITE(GEN6_RP_CONTROL,
2960 GEN6_RP_MEDIA_TURBO |
2961 GEN6_RP_MEDIA_HW_NORMAL_MODE |
2962 GEN6_RP_MEDIA_IS_GFX |
2964 GEN6_RP_UP_BUSY_AVG |
2965 GEN6_RP_DOWN_IDLE_AVG);
2969 /* Upclock if more than 90% busy over 13ms */
2970 I915_WRITE(GEN6_RP_UP_EI, 10250);
2971 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
2973 /* Downclock if less than 75% busy over 32ms */
2974 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
2975 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
2977 I915_WRITE(GEN6_RP_CONTROL,
2978 GEN6_RP_MEDIA_TURBO |
2979 GEN6_RP_MEDIA_HW_NORMAL_MODE |
2980 GEN6_RP_MEDIA_IS_GFX |
2982 GEN6_RP_UP_BUSY_AVG |
2983 GEN6_RP_DOWN_IDLE_AVG);
2987 /* Upclock if more than 85% busy over 10ms */
2988 I915_WRITE(GEN6_RP_UP_EI, 8000);
2989 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
2991 /* Downclock if less than 60% busy over 32ms */
2992 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
2993 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
2995 I915_WRITE(GEN6_RP_CONTROL,
2996 GEN6_RP_MEDIA_TURBO |
2997 GEN6_RP_MEDIA_HW_NORMAL_MODE |
2998 GEN6_RP_MEDIA_IS_GFX |
3000 GEN6_RP_UP_BUSY_AVG |
3001 GEN6_RP_DOWN_IDLE_AVG);
3005 dev_priv->rps.power = new_power;
3006 dev_priv->rps.last_adj = 0;
3009 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
3013 if (val > dev_priv->rps.min_freq_softlimit)
3014 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
3015 if (val < dev_priv->rps.max_freq_softlimit)
3016 mask |= GEN6_PM_RP_UP_THRESHOLD;
3018 /* IVB and SNB hard hangs on looping batchbuffer
3019 * if GEN6_PM_UP_EI_EXPIRED is masked.
3021 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
3022 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
3027 /* gen6_set_rps is called to update the frequency request, but should also be
3028 * called when the range (min_delay and max_delay) is modified so that we can
3029 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
3030 void gen6_set_rps(struct drm_device *dev, u8 val)
3032 struct drm_i915_private *dev_priv = dev->dev_private;
3034 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3035 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3036 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3038 /* min/max delay may still have been modified so be sure to
3039 * write the limits value.
3041 if (val != dev_priv->rps.cur_freq) {
3042 gen6_set_rps_thresholds(dev_priv, val);
3044 if (IS_HASWELL(dev))
3045 I915_WRITE(GEN6_RPNSWREQ,
3046 HSW_FREQUENCY(val));
3048 I915_WRITE(GEN6_RPNSWREQ,
3049 GEN6_FREQUENCY(val) |
3051 GEN6_AGGRESSIVE_TURBO);
3054 /* Make sure we continue to get interrupts
3055 * until we hit the minimum or maximum frequencies.
3057 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
3058 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3060 POSTING_READ(GEN6_RPNSWREQ);
3062 dev_priv->rps.cur_freq = val;
3063 trace_intel_gpu_freq_change(val * 50);
3066 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
3068 * * If Gfx is Idle, then
3069 * 1. Mask Turbo interrupts
3070 * 2. Bring up Gfx clock
3071 * 3. Change the freq to Rpn and wait till P-Unit updates freq
3072 * 4. Clear the Force GFX CLK ON bit so that Gfx can down
3073 * 5. Unmask Turbo interrupts
3075 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3078 * When we are idle. Drop to min voltage state.
3081 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
3084 /* Mask turbo interrupt so that they will not come in between */
3085 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3087 /* Bring up the Gfx clock */
3088 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3089 I915_READ(VLV_GTLC_SURVIVABILITY_REG) |
3090 VLV_GFX_CLK_FORCE_ON_BIT);
3092 if (wait_for(((VLV_GFX_CLK_STATUS_BIT &
3093 I915_READ(VLV_GTLC_SURVIVABILITY_REG)) != 0), 5)) {
3094 DRM_ERROR("GFX_CLK_ON request timed out\n");
3098 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
3100 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
3101 dev_priv->rps.min_freq_softlimit);
3103 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
3104 & GENFREQSTATUS) == 0, 5))
3105 DRM_ERROR("timed out waiting for Punit\n");
3107 /* Release the Gfx clock */
3108 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG,
3109 I915_READ(VLV_GTLC_SURVIVABILITY_REG) &
3110 ~VLV_GFX_CLK_FORCE_ON_BIT);
3112 I915_WRITE(GEN6_PMINTRMSK,
3113 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
3116 void gen6_rps_idle(struct drm_i915_private *dev_priv)
3118 struct drm_device *dev = dev_priv->dev;
3120 mutex_lock(&dev_priv->rps.hw_lock);
3121 if (dev_priv->rps.enabled) {
3122 if (IS_VALLEYVIEW(dev))
3123 vlv_set_rps_idle(dev_priv);
3125 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3126 dev_priv->rps.last_adj = 0;
3128 mutex_unlock(&dev_priv->rps.hw_lock);
3131 void gen6_rps_boost(struct drm_i915_private *dev_priv)
3133 struct drm_device *dev = dev_priv->dev;
3135 mutex_lock(&dev_priv->rps.hw_lock);
3136 if (dev_priv->rps.enabled) {
3137 if (IS_VALLEYVIEW(dev))
3138 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3140 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
3141 dev_priv->rps.last_adj = 0;
3143 mutex_unlock(&dev_priv->rps.hw_lock);
3146 void valleyview_set_rps(struct drm_device *dev, u8 val)
3148 struct drm_i915_private *dev_priv = dev->dev_private;
3150 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3151 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
3152 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
3154 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3155 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3156 dev_priv->rps.cur_freq,
3157 vlv_gpu_freq(dev_priv, val), val);
3159 if (val != dev_priv->rps.cur_freq)
3160 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3162 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3164 dev_priv->rps.cur_freq = val;
3165 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
3168 static void gen6_disable_rps_interrupts(struct drm_device *dev)
3170 struct drm_i915_private *dev_priv = dev->dev_private;
3172 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
3173 I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
3174 ~dev_priv->pm_rps_events);
3175 /* Complete PM interrupt masking here doesn't race with the rps work
3176 * item again unmasking PM interrupts because that is using a different
3177 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
3178 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
3180 spin_lock_irq(&dev_priv->irq_lock);
3181 dev_priv->rps.pm_iir = 0;
3182 spin_unlock_irq(&dev_priv->irq_lock);
3184 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3187 static void gen6_disable_rps(struct drm_device *dev)
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3191 I915_WRITE(GEN6_RC_CONTROL, 0);
3192 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
3194 gen6_disable_rps_interrupts(dev);
3197 static void valleyview_disable_rps(struct drm_device *dev)
3199 struct drm_i915_private *dev_priv = dev->dev_private;
3201 I915_WRITE(GEN6_RC_CONTROL, 0);
3203 gen6_disable_rps_interrupts(dev);
3206 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
3208 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
3209 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
3210 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
3211 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
3214 int intel_enable_rc6(const struct drm_device *dev)
3216 /* No RC6 before Ironlake */
3217 if (INTEL_INFO(dev)->gen < 5)
3220 /* Respect the kernel parameter if it is set */
3221 if (i915.enable_rc6 >= 0)
3222 return i915.enable_rc6;
3224 /* Disable RC6 on Ironlake */
3225 if (INTEL_INFO(dev)->gen == 5)
3228 if (IS_IVYBRIDGE(dev))
3229 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
3231 return INTEL_RC6_ENABLE;
3234 static void gen6_enable_rps_interrupts(struct drm_device *dev)
3236 struct drm_i915_private *dev_priv = dev->dev_private;
3238 spin_lock_irq(&dev_priv->irq_lock);
3239 WARN_ON(dev_priv->rps.pm_iir);
3240 snb_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
3241 I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
3242 spin_unlock_irq(&dev_priv->irq_lock);
3245 static void gen8_enable_rps(struct drm_device *dev)
3247 struct drm_i915_private *dev_priv = dev->dev_private;
3248 struct intel_ring_buffer *ring;
3249 uint32_t rc6_mask = 0, rp_state_cap;
3252 /* 1a: Software RC state - RC0 */
3253 I915_WRITE(GEN6_RC_STATE, 0);
3255 /* 1c & 1d: Get forcewake during program sequence. Although the driver
3256 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
3257 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3259 /* 2a: Disable RC states. */
3260 I915_WRITE(GEN6_RC_CONTROL, 0);
3262 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3264 /* 2b: Program RC6 thresholds.*/
3265 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
3266 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
3267 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
3268 for_each_ring(ring, dev_priv, unused)
3269 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3270 I915_WRITE(GEN6_RC_SLEEP, 0);
3271 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
3274 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3275 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
3276 intel_print_rc6_info(dev, rc6_mask);
3277 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
3278 GEN6_RC_CTL_EI_MODE(1) |
3281 /* 4 Program defaults and thresholds for RPS*/
3282 I915_WRITE(GEN6_RPNSWREQ, HSW_FREQUENCY(10)); /* Request 500 MHz */
3283 I915_WRITE(GEN6_RC_VIDEO_FREQ, HSW_FREQUENCY(12)); /* Request 600 MHz */
3284 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
3285 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
3287 /* Docs recommend 900MHz, and 300 MHz respectively */
3288 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
3289 dev_priv->rps.max_freq_softlimit << 24 |
3290 dev_priv->rps.min_freq_softlimit << 16);
3292 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
3293 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
3294 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
3295 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
3297 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3300 I915_WRITE(GEN6_RP_CONTROL,
3301 GEN6_RP_MEDIA_TURBO |
3302 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3303 GEN6_RP_MEDIA_IS_GFX |
3305 GEN6_RP_UP_BUSY_AVG |
3306 GEN6_RP_DOWN_IDLE_AVG);
3308 /* 6: Ring frequency + overclocking (our driver does this later */
3310 gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
3312 gen6_enable_rps_interrupts(dev);
3314 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3317 static void gen6_enable_rps(struct drm_device *dev)
3319 struct drm_i915_private *dev_priv = dev->dev_private;
3320 struct intel_ring_buffer *ring;
3323 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
3328 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3330 /* Here begins a magic sequence of register writes to enable
3331 * auto-downclocking.
3333 * Perhaps there might be some value in exposing these to
3336 I915_WRITE(GEN6_RC_STATE, 0);
3338 /* Clear the DBG now so we don't confuse earlier errors */
3339 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3340 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
3341 I915_WRITE(GTFIFODBG, gtfifodbg);
3344 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3346 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
3347 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
3349 /* All of these values are in units of 50MHz */
3350 dev_priv->rps.cur_freq = 0;
3351 /* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
3352 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
3353 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
3354 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
3355 /* XXX: only BYT has a special efficient freq */
3356 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
3357 /* hw_max = RP0 until we check for overclocking */
3358 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
3360 /* Preserve min/max settings in case of re-init */
3361 if (dev_priv->rps.max_freq_softlimit == 0)
3362 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3364 if (dev_priv->rps.min_freq_softlimit == 0)
3365 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3367 /* disable the counters and set deterministic thresholds */
3368 I915_WRITE(GEN6_RC_CONTROL, 0);
3370 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
3371 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
3372 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
3373 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3374 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3376 for_each_ring(ring, dev_priv, i)
3377 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3379 I915_WRITE(GEN6_RC_SLEEP, 0);
3380 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
3381 if (IS_IVYBRIDGE(dev))
3382 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
3384 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
3385 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
3386 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
3388 /* Check if we are enabling RC6 */
3389 rc6_mode = intel_enable_rc6(dev_priv->dev);
3390 if (rc6_mode & INTEL_RC6_ENABLE)
3391 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
3393 /* We don't use those on Haswell */
3394 if (!IS_HASWELL(dev)) {
3395 if (rc6_mode & INTEL_RC6p_ENABLE)
3396 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
3398 if (rc6_mode & INTEL_RC6pp_ENABLE)
3399 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
3402 intel_print_rc6_info(dev, rc6_mask);
3404 I915_WRITE(GEN6_RC_CONTROL,
3406 GEN6_RC_CTL_EI_MODE(1) |
3407 GEN6_RC_CTL_HW_ENABLE);
3409 /* Power down if completely idle for over 50ms */
3410 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
3411 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3413 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
3415 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
3417 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
3418 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
3419 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
3420 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
3421 (pcu_mbox & 0xff) * 50);
3422 dev_priv->rps.max_freq = pcu_mbox & 0xff;
3425 dev_priv->rps.power = HIGH_POWER; /* force a reset */
3426 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3428 gen6_enable_rps_interrupts(dev);
3431 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
3432 if (IS_GEN6(dev) && ret) {
3433 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
3434 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
3435 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
3436 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
3437 rc6vids &= 0xffff00;
3438 rc6vids |= GEN6_ENCODE_RC6_VID(450);
3439 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
3441 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
3444 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3447 void gen6_update_ring_freq(struct drm_device *dev)
3449 struct drm_i915_private *dev_priv = dev->dev_private;
3451 unsigned int gpu_freq;
3452 unsigned int max_ia_freq, min_ring_freq;
3453 int scaling_factor = 180;
3454 struct cpufreq_policy *policy;
3456 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3458 policy = cpufreq_cpu_get(0);
3460 max_ia_freq = policy->cpuinfo.max_freq;
3461 cpufreq_cpu_put(policy);
3464 * Default to measured freq if none found, PCU will ensure we
3467 max_ia_freq = tsc_khz;
3470 /* Convert from kHz to MHz */
3471 max_ia_freq /= 1000;
3473 min_ring_freq = I915_READ(DCLK) & 0xf;
3474 /* convert DDR frequency from units of 266.6MHz to bandwidth */
3475 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
3478 * For each potential GPU frequency, load a ring frequency we'd like
3479 * to use for memory access. We do this by specifying the IA frequency
3480 * the PCU should use as a reference to determine the ring frequency.
3482 for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
3484 int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
3485 unsigned int ia_freq = 0, ring_freq = 0;
3487 if (INTEL_INFO(dev)->gen >= 8) {
3488 /* max(2 * GT, DDR). NB: GT is 50MHz units */
3489 ring_freq = max(min_ring_freq, gpu_freq);
3490 } else if (IS_HASWELL(dev)) {
3491 ring_freq = mult_frac(gpu_freq, 5, 4);
3492 ring_freq = max(min_ring_freq, ring_freq);
3493 /* leave ia_freq as the default, chosen by cpufreq */
3495 /* On older processors, there is no separate ring
3496 * clock domain, so in order to boost the bandwidth
3497 * of the ring, we need to upclock the CPU (ia_freq).
3499 * For GPU frequencies less than 750MHz,
3500 * just use the lowest ring freq.
3502 if (gpu_freq < min_freq)
3505 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
3506 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
3509 sandybridge_pcode_write(dev_priv,
3510 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
3511 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
3512 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
3517 int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
3521 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
3523 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
3525 rp0 = min_t(u32, rp0, 0xea);
3530 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
3534 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
3535 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
3536 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
3537 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
3542 int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
3544 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
3547 /* Check that the pctx buffer wasn't move under us. */
3548 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
3550 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
3552 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
3553 dev_priv->vlv_pctx->stolen->start);
3556 static void valleyview_setup_pctx(struct drm_device *dev)
3558 struct drm_i915_private *dev_priv = dev->dev_private;
3559 struct drm_i915_gem_object *pctx;
3560 unsigned long pctx_paddr;
3562 int pctx_size = 24*1024;
3564 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3566 pcbr = I915_READ(VLV_PCBR);
3568 /* BIOS set it up already, grab the pre-alloc'd space */
3571 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
3572 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
3574 I915_GTT_OFFSET_NONE,
3580 * From the Gunit register HAS:
3581 * The Gfx driver is expected to program this register and ensure
3582 * proper allocation within Gfx stolen memory. For example, this
3583 * register should be programmed such than the PCBR range does not
3584 * overlap with other ranges, such as the frame buffer, protected
3585 * memory, or any other relevant ranges.
3587 pctx = i915_gem_object_create_stolen(dev, pctx_size);
3589 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
3593 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
3594 I915_WRITE(VLV_PCBR, pctx_paddr);
3597 dev_priv->vlv_pctx = pctx;
3600 static void valleyview_cleanup_pctx(struct drm_device *dev)
3602 struct drm_i915_private *dev_priv = dev->dev_private;
3604 if (WARN_ON(!dev_priv->vlv_pctx))
3607 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
3608 dev_priv->vlv_pctx = NULL;
3611 static void valleyview_enable_rps(struct drm_device *dev)
3613 struct drm_i915_private *dev_priv = dev->dev_private;
3614 struct intel_ring_buffer *ring;
3615 u32 gtfifodbg, val, rc6_mode = 0;
3618 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
3620 valleyview_check_pctx(dev_priv);
3622 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
3623 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
3625 I915_WRITE(GTFIFODBG, gtfifodbg);
3628 /* If VLV, Forcewake all wells, else re-direct to regular path */
3629 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
3631 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
3632 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
3633 I915_WRITE(GEN6_RP_UP_EI, 66000);
3634 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
3636 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
3638 I915_WRITE(GEN6_RP_CONTROL,
3639 GEN6_RP_MEDIA_TURBO |
3640 GEN6_RP_MEDIA_HW_NORMAL_MODE |
3641 GEN6_RP_MEDIA_IS_GFX |
3643 GEN6_RP_UP_BUSY_AVG |
3644 GEN6_RP_DOWN_IDLE_CONT);
3646 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
3647 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
3648 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
3650 for_each_ring(ring, dev_priv, i)
3651 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
3653 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
3655 /* allows RC6 residency counter to work */
3656 I915_WRITE(VLV_COUNTER_CONTROL,
3657 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
3658 VLV_MEDIA_RC6_COUNT_EN |
3659 VLV_RENDER_RC6_COUNT_EN));
3660 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
3661 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
3663 intel_print_rc6_info(dev, rc6_mode);
3665 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
3667 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
3669 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
3670 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
3672 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
3673 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
3674 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3675 dev_priv->rps.cur_freq);
3677 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
3678 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
3679 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
3680 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
3681 dev_priv->rps.max_freq);
3683 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
3684 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
3685 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3686 dev_priv->rps.efficient_freq);
3688 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
3689 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
3690 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
3691 dev_priv->rps.min_freq);
3693 /* Preserve min/max settings in case of re-init */
3694 if (dev_priv->rps.max_freq_softlimit == 0)
3695 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
3697 if (dev_priv->rps.min_freq_softlimit == 0)
3698 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
3700 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
3701 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
3702 dev_priv->rps.efficient_freq);
3704 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
3706 gen6_enable_rps_interrupts(dev);
3708 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
3711 void ironlake_teardown_rc6(struct drm_device *dev)
3713 struct drm_i915_private *dev_priv = dev->dev_private;
3715 if (dev_priv->ips.renderctx) {
3716 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
3717 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
3718 dev_priv->ips.renderctx = NULL;
3721 if (dev_priv->ips.pwrctx) {
3722 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
3723 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
3724 dev_priv->ips.pwrctx = NULL;
3728 static void ironlake_disable_rc6(struct drm_device *dev)
3730 struct drm_i915_private *dev_priv = dev->dev_private;
3732 if (I915_READ(PWRCTXA)) {
3733 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
3734 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
3735 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
3738 I915_WRITE(PWRCTXA, 0);
3739 POSTING_READ(PWRCTXA);
3741 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3742 POSTING_READ(RSTDBYCTL);
3746 static int ironlake_setup_rc6(struct drm_device *dev)
3748 struct drm_i915_private *dev_priv = dev->dev_private;
3750 if (dev_priv->ips.renderctx == NULL)
3751 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
3752 if (!dev_priv->ips.renderctx)
3755 if (dev_priv->ips.pwrctx == NULL)
3756 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
3757 if (!dev_priv->ips.pwrctx) {
3758 ironlake_teardown_rc6(dev);
3765 static void ironlake_enable_rc6(struct drm_device *dev)
3767 struct drm_i915_private *dev_priv = dev->dev_private;
3768 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
3769 bool was_interruptible;
3772 /* rc6 disabled by default due to repeated reports of hanging during
3775 if (!intel_enable_rc6(dev))
3778 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
3780 ret = ironlake_setup_rc6(dev);
3784 was_interruptible = dev_priv->mm.interruptible;
3785 dev_priv->mm.interruptible = false;
3788 * GPU can automatically power down the render unit if given a page
3791 ret = intel_ring_begin(ring, 6);
3793 ironlake_teardown_rc6(dev);
3794 dev_priv->mm.interruptible = was_interruptible;
3798 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
3799 intel_ring_emit(ring, MI_SET_CONTEXT);
3800 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
3802 MI_SAVE_EXT_STATE_EN |
3803 MI_RESTORE_EXT_STATE_EN |
3804 MI_RESTORE_INHIBIT);
3805 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
3806 intel_ring_emit(ring, MI_NOOP);
3807 intel_ring_emit(ring, MI_FLUSH);
3808 intel_ring_advance(ring);
3811 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
3812 * does an implicit flush, combined with MI_FLUSH above, it should be
3813 * safe to assume that renderctx is valid
3815 ret = intel_ring_idle(ring);
3816 dev_priv->mm.interruptible = was_interruptible;
3818 DRM_ERROR("failed to enable ironlake power savings\n");
3819 ironlake_teardown_rc6(dev);
3823 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
3824 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
3826 intel_print_rc6_info(dev, INTEL_RC6_ENABLE);
3829 static unsigned long intel_pxfreq(u32 vidfreq)
3832 int div = (vidfreq & 0x3f0000) >> 16;
3833 int post = (vidfreq & 0x3000) >> 12;
3834 int pre = (vidfreq & 0x7);
3839 freq = ((div * 133333) / ((1<<post) * pre));
3844 static const struct cparams {
3850 { 1, 1333, 301, 28664 },
3851 { 1, 1066, 294, 24460 },
3852 { 1, 800, 294, 25192 },
3853 { 0, 1333, 276, 27605 },
3854 { 0, 1066, 276, 27605 },
3855 { 0, 800, 231, 23784 },
3858 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
3860 u64 total_count, diff, ret;
3861 u32 count1, count2, count3, m = 0, c = 0;
3862 unsigned long now = jiffies_to_msecs(jiffies), diff1;
3865 assert_spin_locked(&mchdev_lock);
3867 diff1 = now - dev_priv->ips.last_time1;
3869 /* Prevent division-by-zero if we are asking too fast.
3870 * Also, we don't get interesting results if we are polling
3871 * faster than once in 10ms, so just return the saved value
3875 return dev_priv->ips.chipset_power;
3877 count1 = I915_READ(DMIEC);
3878 count2 = I915_READ(DDREC);
3879 count3 = I915_READ(CSIEC);
3881 total_count = count1 + count2 + count3;
3883 /* FIXME: handle per-counter overflow */
3884 if (total_count < dev_priv->ips.last_count1) {
3885 diff = ~0UL - dev_priv->ips.last_count1;
3886 diff += total_count;
3888 diff = total_count - dev_priv->ips.last_count1;
3891 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
3892 if (cparams[i].i == dev_priv->ips.c_m &&
3893 cparams[i].t == dev_priv->ips.r_t) {
3900 diff = div_u64(diff, diff1);
3901 ret = ((m * diff) + c);
3902 ret = div_u64(ret, 10);
3904 dev_priv->ips.last_count1 = total_count;
3905 dev_priv->ips.last_time1 = now;
3907 dev_priv->ips.chipset_power = ret;
3912 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
3914 struct drm_device *dev = dev_priv->dev;
3917 if (INTEL_INFO(dev)->gen != 5)
3920 spin_lock_irq(&mchdev_lock);
3922 val = __i915_chipset_val(dev_priv);
3924 spin_unlock_irq(&mchdev_lock);
3929 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
3931 unsigned long m, x, b;
3934 tsfs = I915_READ(TSFS);
3936 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
3937 x = I915_READ8(TR1);
3939 b = tsfs & TSFS_INTR_MASK;
3941 return ((m * x) / 127) - b;
3944 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
3946 struct drm_device *dev = dev_priv->dev;
3947 static const struct v_table {
3948 u16 vd; /* in .1 mil */
3949 u16 vm; /* in .1 mil */
4080 if (INTEL_INFO(dev)->is_mobile)
4081 return v_table[pxvid].vm;
4083 return v_table[pxvid].vd;
4086 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
4088 struct timespec now, diff1;
4090 unsigned long diffms;
4093 assert_spin_locked(&mchdev_lock);
4095 getrawmonotonic(&now);
4096 diff1 = timespec_sub(now, dev_priv->ips.last_time2);
4098 /* Don't divide by 0 */
4099 diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
4103 count = I915_READ(GFXEC);
4105 if (count < dev_priv->ips.last_count2) {
4106 diff = ~0UL - dev_priv->ips.last_count2;
4109 diff = count - dev_priv->ips.last_count2;
4112 dev_priv->ips.last_count2 = count;
4113 dev_priv->ips.last_time2 = now;
4115 /* More magic constants... */
4117 diff = div_u64(diff, diffms * 10);
4118 dev_priv->ips.gfx_power = diff;
4121 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
4123 struct drm_device *dev = dev_priv->dev;
4125 if (INTEL_INFO(dev)->gen != 5)
4128 spin_lock_irq(&mchdev_lock);
4130 __i915_update_gfx_val(dev_priv);
4132 spin_unlock_irq(&mchdev_lock);
4135 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
4137 unsigned long t, corr, state1, corr2, state2;
4140 assert_spin_locked(&mchdev_lock);
4142 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
4143 pxvid = (pxvid >> 24) & 0x7f;
4144 ext_v = pvid_to_extvid(dev_priv, pxvid);
4148 t = i915_mch_val(dev_priv);
4150 /* Revel in the empirically derived constants */
4152 /* Correction factor in 1/100000 units */
4154 corr = ((t * 2349) + 135940);
4156 corr = ((t * 964) + 29317);
4158 corr = ((t * 301) + 1004);
4160 corr = corr * ((150142 * state1) / 10000 - 78642);
4162 corr2 = (corr * dev_priv->ips.corr);
4164 state2 = (corr2 * state1) / 10000;
4165 state2 /= 100; /* convert to mW */
4167 __i915_update_gfx_val(dev_priv);
4169 return dev_priv->ips.gfx_power + state2;
4172 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
4174 struct drm_device *dev = dev_priv->dev;
4177 if (INTEL_INFO(dev)->gen != 5)
4180 spin_lock_irq(&mchdev_lock);
4182 val = __i915_gfx_val(dev_priv);
4184 spin_unlock_irq(&mchdev_lock);
4190 * i915_read_mch_val - return value for IPS use
4192 * Calculate and return a value for the IPS driver to use when deciding whether
4193 * we have thermal and power headroom to increase CPU or GPU power budget.
4195 unsigned long i915_read_mch_val(void)
4197 struct drm_i915_private *dev_priv;
4198 unsigned long chipset_val, graphics_val, ret = 0;
4200 spin_lock_irq(&mchdev_lock);
4203 dev_priv = i915_mch_dev;
4205 chipset_val = __i915_chipset_val(dev_priv);
4206 graphics_val = __i915_gfx_val(dev_priv);
4208 ret = chipset_val + graphics_val;
4211 spin_unlock_irq(&mchdev_lock);
4215 EXPORT_SYMBOL_GPL(i915_read_mch_val);
4218 * i915_gpu_raise - raise GPU frequency limit
4220 * Raise the limit; IPS indicates we have thermal headroom.
4222 bool i915_gpu_raise(void)
4224 struct drm_i915_private *dev_priv;
4227 spin_lock_irq(&mchdev_lock);
4228 if (!i915_mch_dev) {
4232 dev_priv = i915_mch_dev;
4234 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
4235 dev_priv->ips.max_delay--;
4238 spin_unlock_irq(&mchdev_lock);
4242 EXPORT_SYMBOL_GPL(i915_gpu_raise);
4245 * i915_gpu_lower - lower GPU frequency limit
4247 * IPS indicates we're close to a thermal limit, so throttle back the GPU
4248 * frequency maximum.
4250 bool i915_gpu_lower(void)
4252 struct drm_i915_private *dev_priv;
4255 spin_lock_irq(&mchdev_lock);
4256 if (!i915_mch_dev) {
4260 dev_priv = i915_mch_dev;
4262 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
4263 dev_priv->ips.max_delay++;
4266 spin_unlock_irq(&mchdev_lock);
4270 EXPORT_SYMBOL_GPL(i915_gpu_lower);
4273 * i915_gpu_busy - indicate GPU business to IPS
4275 * Tell the IPS driver whether or not the GPU is busy.
4277 bool i915_gpu_busy(void)
4279 struct drm_i915_private *dev_priv;
4280 struct intel_ring_buffer *ring;
4284 spin_lock_irq(&mchdev_lock);
4287 dev_priv = i915_mch_dev;
4289 for_each_ring(ring, dev_priv, i)
4290 ret |= !list_empty(&ring->request_list);
4293 spin_unlock_irq(&mchdev_lock);
4297 EXPORT_SYMBOL_GPL(i915_gpu_busy);
4300 * i915_gpu_turbo_disable - disable graphics turbo
4302 * Disable graphics turbo by resetting the max frequency and setting the
4303 * current frequency to the default.
4305 bool i915_gpu_turbo_disable(void)
4307 struct drm_i915_private *dev_priv;
4310 spin_lock_irq(&mchdev_lock);
4311 if (!i915_mch_dev) {
4315 dev_priv = i915_mch_dev;
4317 dev_priv->ips.max_delay = dev_priv->ips.fstart;
4319 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
4323 spin_unlock_irq(&mchdev_lock);
4327 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
4330 * Tells the intel_ips driver that the i915 driver is now loaded, if
4331 * IPS got loaded first.
4333 * This awkward dance is so that neither module has to depend on the
4334 * other in order for IPS to do the appropriate communication of
4335 * GPU turbo limits to i915.
4338 ips_ping_for_i915_load(void)
4342 link = symbol_get(ips_link_to_i915_driver);
4345 symbol_put(ips_link_to_i915_driver);
4349 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
4351 /* We only register the i915 ips part with intel-ips once everything is
4352 * set up, to avoid intel-ips sneaking in and reading bogus values. */
4353 spin_lock_irq(&mchdev_lock);
4354 i915_mch_dev = dev_priv;
4355 spin_unlock_irq(&mchdev_lock);
4357 ips_ping_for_i915_load();
4360 void intel_gpu_ips_teardown(void)
4362 spin_lock_irq(&mchdev_lock);
4363 i915_mch_dev = NULL;
4364 spin_unlock_irq(&mchdev_lock);
4367 static void intel_init_emon(struct drm_device *dev)
4369 struct drm_i915_private *dev_priv = dev->dev_private;
4374 /* Disable to program */
4378 /* Program energy weights for various events */
4379 I915_WRITE(SDEW, 0x15040d00);
4380 I915_WRITE(CSIEW0, 0x007f0000);
4381 I915_WRITE(CSIEW1, 0x1e220004);
4382 I915_WRITE(CSIEW2, 0x04000004);
4384 for (i = 0; i < 5; i++)
4385 I915_WRITE(PEW + (i * 4), 0);
4386 for (i = 0; i < 3; i++)
4387 I915_WRITE(DEW + (i * 4), 0);
4389 /* Program P-state weights to account for frequency power adjustment */
4390 for (i = 0; i < 16; i++) {
4391 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
4392 unsigned long freq = intel_pxfreq(pxvidfreq);
4393 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
4398 val *= (freq / 1000);
4400 val /= (127*127*900);
4402 DRM_ERROR("bad pxval: %ld\n", val);
4405 /* Render standby states get 0 weight */
4409 for (i = 0; i < 4; i++) {
4410 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
4411 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
4412 I915_WRITE(PXW + (i * 4), val);
4415 /* Adjust magic regs to magic values (more experimental results) */
4416 I915_WRITE(OGW0, 0);
4417 I915_WRITE(OGW1, 0);
4418 I915_WRITE(EG0, 0x00007f00);
4419 I915_WRITE(EG1, 0x0000000e);
4420 I915_WRITE(EG2, 0x000e0000);
4421 I915_WRITE(EG3, 0x68000300);
4422 I915_WRITE(EG4, 0x42000000);
4423 I915_WRITE(EG5, 0x00140031);
4427 for (i = 0; i < 8; i++)
4428 I915_WRITE(PXWL + (i * 4), 0);
4430 /* Enable PMON + select events */
4431 I915_WRITE(ECR, 0x80000019);
4433 lcfuse = I915_READ(LCFUSE02);
4435 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
4438 void intel_init_gt_powersave(struct drm_device *dev)
4440 if (IS_VALLEYVIEW(dev))
4441 valleyview_setup_pctx(dev);
4444 void intel_cleanup_gt_powersave(struct drm_device *dev)
4446 if (IS_VALLEYVIEW(dev))
4447 valleyview_cleanup_pctx(dev);
4450 void intel_disable_gt_powersave(struct drm_device *dev)
4452 struct drm_i915_private *dev_priv = dev->dev_private;
4454 /* Interrupts should be disabled already to avoid re-arming. */
4455 WARN_ON(dev->irq_enabled);
4457 if (IS_IRONLAKE_M(dev)) {
4458 ironlake_disable_drps(dev);
4459 ironlake_disable_rc6(dev);
4460 } else if (INTEL_INFO(dev)->gen >= 6) {
4461 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
4462 cancel_work_sync(&dev_priv->rps.work);
4463 mutex_lock(&dev_priv->rps.hw_lock);
4464 if (IS_VALLEYVIEW(dev))
4465 valleyview_disable_rps(dev);
4467 gen6_disable_rps(dev);
4468 dev_priv->rps.enabled = false;
4469 mutex_unlock(&dev_priv->rps.hw_lock);
4473 static void intel_gen6_powersave_work(struct work_struct *work)
4475 struct drm_i915_private *dev_priv =
4476 container_of(work, struct drm_i915_private,
4477 rps.delayed_resume_work.work);
4478 struct drm_device *dev = dev_priv->dev;
4480 mutex_lock(&dev_priv->rps.hw_lock);
4482 if (IS_VALLEYVIEW(dev)) {
4483 valleyview_enable_rps(dev);
4484 } else if (IS_BROADWELL(dev)) {
4485 gen8_enable_rps(dev);
4486 gen6_update_ring_freq(dev);
4488 gen6_enable_rps(dev);
4489 gen6_update_ring_freq(dev);
4491 dev_priv->rps.enabled = true;
4492 mutex_unlock(&dev_priv->rps.hw_lock);
4495 void intel_enable_gt_powersave(struct drm_device *dev)
4497 struct drm_i915_private *dev_priv = dev->dev_private;
4499 if (IS_IRONLAKE_M(dev)) {
4500 ironlake_enable_drps(dev);
4501 ironlake_enable_rc6(dev);
4502 intel_init_emon(dev);
4503 } else if (IS_GEN6(dev) || IS_GEN7(dev)) {
4505 * PCU communication is slow and this doesn't need to be
4506 * done at any specific time, so do this out of our fast path
4507 * to make resume and init faster.
4509 schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
4510 round_jiffies_up_relative(HZ));
4514 static void ibx_init_clock_gating(struct drm_device *dev)
4516 struct drm_i915_private *dev_priv = dev->dev_private;
4519 * On Ibex Peak and Cougar Point, we need to disable clock
4520 * gating for the panel power sequencer or it will fail to
4521 * start up when no ports are active.
4523 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
4526 static void g4x_disable_trickle_feed(struct drm_device *dev)
4528 struct drm_i915_private *dev_priv = dev->dev_private;
4531 for_each_pipe(pipe) {
4532 I915_WRITE(DSPCNTR(pipe),
4533 I915_READ(DSPCNTR(pipe)) |
4534 DISPPLANE_TRICKLE_FEED_DISABLE);
4535 intel_flush_primary_plane(dev_priv, pipe);
4539 static void ilk_init_lp_watermarks(struct drm_device *dev)
4541 struct drm_i915_private *dev_priv = dev->dev_private;
4543 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
4544 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
4545 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
4548 * Don't touch WM1S_LP_EN here.
4549 * Doing so could cause underruns.
4553 static void ironlake_init_clock_gating(struct drm_device *dev)
4555 struct drm_i915_private *dev_priv = dev->dev_private;
4556 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4560 * WaFbcDisableDpfcClockGating:ilk
4562 dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
4563 ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
4564 ILK_DPFDUNIT_CLOCK_GATE_ENABLE;
4566 I915_WRITE(PCH_3DCGDIS0,
4567 MARIUNIT_CLOCK_GATE_DISABLE |
4568 SVSMUNIT_CLOCK_GATE_DISABLE);
4569 I915_WRITE(PCH_3DCGDIS1,
4570 VFMUNIT_CLOCK_GATE_DISABLE);
4573 * According to the spec the following bits should be set in
4574 * order to enable memory self-refresh
4575 * The bit 22/21 of 0x42004
4576 * The bit 5 of 0x42020
4577 * The bit 15 of 0x45000
4579 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4580 (I915_READ(ILK_DISPLAY_CHICKEN2) |
4581 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
4582 dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
4583 I915_WRITE(DISP_ARB_CTL,
4584 (I915_READ(DISP_ARB_CTL) |
4587 ilk_init_lp_watermarks(dev);
4590 * Based on the document from hardware guys the following bits
4591 * should be set unconditionally in order to enable FBC.
4592 * The bit 22 of 0x42000
4593 * The bit 22 of 0x42004
4594 * The bit 7,8,9 of 0x42020.
4596 if (IS_IRONLAKE_M(dev)) {
4597 /* WaFbcAsynchFlipDisableFbcQueue:ilk */
4598 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4599 I915_READ(ILK_DISPLAY_CHICKEN1) |
4601 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4602 I915_READ(ILK_DISPLAY_CHICKEN2) |
4606 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4608 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4609 I915_READ(ILK_DISPLAY_CHICKEN2) |
4610 ILK_ELPIN_409_SELECT);
4611 I915_WRITE(_3D_CHICKEN2,
4612 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
4613 _3D_CHICKEN2_WM_READ_PIPELINED);
4615 /* WaDisableRenderCachePipelinedFlush:ilk */
4616 I915_WRITE(CACHE_MODE_0,
4617 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
4619 g4x_disable_trickle_feed(dev);
4621 ibx_init_clock_gating(dev);
4624 static void cpt_init_clock_gating(struct drm_device *dev)
4626 struct drm_i915_private *dev_priv = dev->dev_private;
4631 * On Ibex Peak and Cougar Point, we need to disable clock
4632 * gating for the panel power sequencer or it will fail to
4633 * start up when no ports are active.
4635 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
4636 PCH_DPLUNIT_CLOCK_GATE_DISABLE |
4637 PCH_CPUNIT_CLOCK_GATE_DISABLE);
4638 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
4639 DPLS_EDP_PPS_FIX_DIS);
4640 /* The below fixes the weird display corruption, a few pixels shifted
4641 * downward, on (only) LVDS of some HP laptops with IVY.
4643 for_each_pipe(pipe) {
4644 val = I915_READ(TRANS_CHICKEN2(pipe));
4645 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
4646 val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4647 if (dev_priv->vbt.fdi_rx_polarity_inverted)
4648 val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
4649 val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
4650 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
4651 val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
4652 I915_WRITE(TRANS_CHICKEN2(pipe), val);
4654 /* WADP0ClockGatingDisable */
4655 for_each_pipe(pipe) {
4656 I915_WRITE(TRANS_CHICKEN1(pipe),
4657 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4661 static void gen6_check_mch_setup(struct drm_device *dev)
4663 struct drm_i915_private *dev_priv = dev->dev_private;
4666 tmp = I915_READ(MCH_SSKPD);
4667 if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) {
4668 DRM_INFO("Wrong MCH_SSKPD value: 0x%08x\n", tmp);
4669 DRM_INFO("This can cause pipe underruns and display issues.\n");
4670 DRM_INFO("Please upgrade your BIOS to fix this.\n");
4674 static void gen6_init_clock_gating(struct drm_device *dev)
4676 struct drm_i915_private *dev_priv = dev->dev_private;
4677 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
4679 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
4681 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4682 I915_READ(ILK_DISPLAY_CHICKEN2) |
4683 ILK_ELPIN_409_SELECT);
4685 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
4686 I915_WRITE(_3D_CHICKEN,
4687 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
4689 /* WaSetupGtModeTdRowDispatch:snb */
4690 if (IS_SNB_GT1(dev))
4691 I915_WRITE(GEN6_GT_MODE,
4692 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
4695 * BSpec recoomends 8x4 when MSAA is used,
4696 * however in practice 16x4 seems fastest.
4698 * Note that PS/WM thread counts depend on the WIZ hashing
4699 * disable bit, which we don't touch here, but it's good
4700 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4702 I915_WRITE(GEN6_GT_MODE,
4703 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4705 ilk_init_lp_watermarks(dev);
4707 I915_WRITE(CACHE_MODE_0,
4708 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
4710 I915_WRITE(GEN6_UCGCTL1,
4711 I915_READ(GEN6_UCGCTL1) |
4712 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
4713 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
4715 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
4716 * gating disable must be set. Failure to set it results in
4717 * flickering pixels due to Z write ordering failures after
4718 * some amount of runtime in the Mesa "fire" demo, and Unigine
4719 * Sanctuary and Tropics, and apparently anything else with
4720 * alpha test or pixel discard.
4722 * According to the spec, bit 11 (RCCUNIT) must also be set,
4723 * but we didn't debug actual testcases to find it out.
4725 * WaDisableRCCUnitClockGating:snb
4726 * WaDisableRCPBUnitClockGating:snb
4728 I915_WRITE(GEN6_UCGCTL2,
4729 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
4730 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
4732 /* WaStripsFansDisableFastClipPerformanceFix:snb */
4733 I915_WRITE(_3D_CHICKEN3,
4734 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));
4738 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
4739 * 3DSTATE_SF number of SF output attributes is more than 16."
4741 I915_WRITE(_3D_CHICKEN3,
4742 _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));
4745 * According to the spec the following bits should be
4746 * set in order to enable memory self-refresh and fbc:
4747 * The bit21 and bit22 of 0x42000
4748 * The bit21 and bit22 of 0x42004
4749 * The bit5 and bit7 of 0x42020
4750 * The bit14 of 0x70180
4751 * The bit14 of 0x71180
4753 * WaFbcAsynchFlipDisableFbcQueue:snb
4755 I915_WRITE(ILK_DISPLAY_CHICKEN1,
4756 I915_READ(ILK_DISPLAY_CHICKEN1) |
4757 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
4758 I915_WRITE(ILK_DISPLAY_CHICKEN2,
4759 I915_READ(ILK_DISPLAY_CHICKEN2) |
4760 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
4761 I915_WRITE(ILK_DSPCLK_GATE_D,
4762 I915_READ(ILK_DSPCLK_GATE_D) |
4763 ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
4764 ILK_DPFDUNIT_CLOCK_GATE_ENABLE);
4766 g4x_disable_trickle_feed(dev);
4768 cpt_init_clock_gating(dev);
4770 gen6_check_mch_setup(dev);
4773 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
4775 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
4778 * WaVSThreadDispatchOverride:ivb,vlv
4780 * This actually overrides the dispatch
4781 * mode for all thread types.
4783 reg &= ~GEN7_FF_SCHED_MASK;
4784 reg |= GEN7_FF_TS_SCHED_HW;
4785 reg |= GEN7_FF_VS_SCHED_HW;
4786 reg |= GEN7_FF_DS_SCHED_HW;
4788 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
4791 static void lpt_init_clock_gating(struct drm_device *dev)
4793 struct drm_i915_private *dev_priv = dev->dev_private;
4796 * TODO: this bit should only be enabled when really needed, then
4797 * disabled when not needed anymore in order to save power.
4799 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
4800 I915_WRITE(SOUTH_DSPCLK_GATE_D,
4801 I915_READ(SOUTH_DSPCLK_GATE_D) |
4802 PCH_LP_PARTITION_LEVEL_DISABLE);
4804 /* WADPOClockGatingDisable:hsw */
4805 I915_WRITE(_TRANSA_CHICKEN1,
4806 I915_READ(_TRANSA_CHICKEN1) |
4807 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
4810 static void lpt_suspend_hw(struct drm_device *dev)
4812 struct drm_i915_private *dev_priv = dev->dev_private;
4814 if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
4815 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
4817 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
4818 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
4822 static void gen8_init_clock_gating(struct drm_device *dev)
4824 struct drm_i915_private *dev_priv = dev->dev_private;
4827 I915_WRITE(WM3_LP_ILK, 0);
4828 I915_WRITE(WM2_LP_ILK, 0);
4829 I915_WRITE(WM1_LP_ILK, 0);
4831 /* FIXME(BDW): Check all the w/a, some might only apply to
4832 * pre-production hw. */
4834 /* WaDisablePartialInstShootdown:bdw */
4835 I915_WRITE(GEN8_ROW_CHICKEN,
4836 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE));
4838 /* WaDisableThreadStallDopClockGating:bdw */
4839 /* FIXME: Unclear whether we really need this on production bdw. */
4840 I915_WRITE(GEN8_ROW_CHICKEN,
4841 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
4844 * This GEN8_CENTROID_PIXEL_OPT_DIS W/A is only needed for
4845 * pre-production hardware
4847 I915_WRITE(HALF_SLICE_CHICKEN3,
4848 _MASKED_BIT_ENABLE(GEN8_CENTROID_PIXEL_OPT_DIS));
4849 I915_WRITE(HALF_SLICE_CHICKEN3,
4850 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
4851 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
4853 I915_WRITE(_3D_CHICKEN3,
4854 _3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2));
4856 I915_WRITE(COMMON_SLICE_CHICKEN2,
4857 _MASKED_BIT_ENABLE(GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE));
4859 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4860 _MASKED_BIT_ENABLE(GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE));
4862 /* WaSwitchSolVfFArbitrationPriority:bdw */
4863 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4865 /* WaPsrDPAMaskVBlankInSRD:bdw */
4866 I915_WRITE(CHICKEN_PAR1_1,
4867 I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);
4869 /* WaPsrDPRSUnmaskVBlankInSRD:bdw */
4870 for_each_pipe(pipe) {
4871 I915_WRITE(CHICKEN_PIPESL_1(pipe),
4872 I915_READ(CHICKEN_PIPESL_1(pipe)) |
4873 BDW_DPRS_MASK_VBLANK_SRD);
4876 /* Use Force Non-Coherent whenever executing a 3D context. This is a
4877 * workaround for for a possible hang in the unlikely event a TLB
4878 * invalidation occurs during a PSD flush.
4880 I915_WRITE(HDC_CHICKEN0,
4881 I915_READ(HDC_CHICKEN0) |
4882 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT));
4884 /* WaVSRefCountFullforceMissDisable:bdw */
4885 /* WaDSRefCountFullforceMissDisable:bdw */
4886 I915_WRITE(GEN7_FF_THREAD_MODE,
4887 I915_READ(GEN7_FF_THREAD_MODE) &
4888 ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));
4891 * BSpec recommends 8x4 when MSAA is used,
4892 * however in practice 16x4 seems fastest.
4894 * Note that PS/WM thread counts depend on the WIZ hashing
4895 * disable bit, which we don't touch here, but it's good
4896 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4898 I915_WRITE(GEN7_GT_MODE,
4899 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4901 I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
4902 _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
4904 /* WaDisableSDEUnitClockGating:bdw */
4905 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
4906 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
4908 /* Wa4x4STCOptimizationDisable:bdw */
4909 I915_WRITE(CACHE_MODE_1,
4910 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
4913 static void haswell_init_clock_gating(struct drm_device *dev)
4915 struct drm_i915_private *dev_priv = dev->dev_private;
4917 ilk_init_lp_watermarks(dev);
4919 /* L3 caching of data atomics doesn't work -- disable it. */
4920 I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
4921 I915_WRITE(HSW_ROW_CHICKEN3,
4922 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
4924 /* This is required by WaCatErrorRejectionIssue:hsw */
4925 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
4926 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
4927 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
4929 /* WaVSRefCountFullforceMissDisable:hsw */
4930 I915_WRITE(GEN7_FF_THREAD_MODE,
4931 I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);
4933 /* enable HiZ Raw Stall Optimization */
4934 I915_WRITE(CACHE_MODE_0_GEN7,
4935 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
4937 /* WaDisable4x2SubspanOptimization:hsw */
4938 I915_WRITE(CACHE_MODE_1,
4939 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
4942 * BSpec recommends 8x4 when MSAA is used,
4943 * however in practice 16x4 seems fastest.
4945 * Note that PS/WM thread counts depend on the WIZ hashing
4946 * disable bit, which we don't touch here, but it's good
4947 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
4949 I915_WRITE(GEN7_GT_MODE,
4950 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
4952 /* WaSwitchSolVfFArbitrationPriority:hsw */
4953 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
4955 /* WaRsPkgCStateDisplayPMReq:hsw */
4956 I915_WRITE(CHICKEN_PAR1_1,
4957 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
4959 lpt_init_clock_gating(dev);
4962 static void ivybridge_init_clock_gating(struct drm_device *dev)
4964 struct drm_i915_private *dev_priv = dev->dev_private;
4967 ilk_init_lp_watermarks(dev);
4969 I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);
4971 /* WaDisableEarlyCull:ivb */
4972 I915_WRITE(_3D_CHICKEN3,
4973 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
4975 /* WaDisableBackToBackFlipFix:ivb */
4976 I915_WRITE(IVB_CHICKEN3,
4977 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
4978 CHICKEN3_DGMG_DONE_FIX_DISABLE);
4980 /* WaDisablePSDDualDispatchEnable:ivb */
4981 if (IS_IVB_GT1(dev))
4982 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
4983 _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
4985 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
4986 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
4987 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
4989 /* WaApplyL3ControlAndL3ChickenMode:ivb */
4990 I915_WRITE(GEN7_L3CNTLREG1,
4991 GEN7_WA_FOR_GEN7_L3_CONTROL);
4992 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
4993 GEN7_WA_L3_CHICKEN_MODE);
4994 if (IS_IVB_GT1(dev))
4995 I915_WRITE(GEN7_ROW_CHICKEN2,
4996 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
4998 /* must write both registers */
4999 I915_WRITE(GEN7_ROW_CHICKEN2,
5000 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5001 I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
5002 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5005 /* WaForceL3Serialization:ivb */
5006 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5007 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5010 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5011 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
5013 I915_WRITE(GEN6_UCGCTL2,
5014 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5016 /* This is required by WaCatErrorRejectionIssue:ivb */
5017 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5018 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5019 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5021 g4x_disable_trickle_feed(dev);
5023 gen7_setup_fixed_func_scheduler(dev_priv);
5025 if (0) { /* causes HiZ corruption on ivb:gt1 */
5026 /* enable HiZ Raw Stall Optimization */
5027 I915_WRITE(CACHE_MODE_0_GEN7,
5028 _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
5031 /* WaDisable4x2SubspanOptimization:ivb */
5032 I915_WRITE(CACHE_MODE_1,
5033 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5036 * BSpec recommends 8x4 when MSAA is used,
5037 * however in practice 16x4 seems fastest.
5039 * Note that PS/WM thread counts depend on the WIZ hashing
5040 * disable bit, which we don't touch here, but it's good
5041 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
5043 I915_WRITE(GEN7_GT_MODE,
5044 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
5046 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5047 snpcr &= ~GEN6_MBC_SNPCR_MASK;
5048 snpcr |= GEN6_MBC_SNPCR_MED;
5049 I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
5051 if (!HAS_PCH_NOP(dev))
5052 cpt_init_clock_gating(dev);
5054 gen6_check_mch_setup(dev);
5057 static void valleyview_init_clock_gating(struct drm_device *dev)
5059 struct drm_i915_private *dev_priv = dev->dev_private;
5062 mutex_lock(&dev_priv->rps.hw_lock);
5063 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5064 mutex_unlock(&dev_priv->rps.hw_lock);
5065 switch ((val >> 6) & 3) {
5068 dev_priv->mem_freq = 800;
5071 dev_priv->mem_freq = 1066;
5074 dev_priv->mem_freq = 1333;
5077 DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
5079 I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
5081 /* WaDisableEarlyCull:vlv */
5082 I915_WRITE(_3D_CHICKEN3,
5083 _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
5085 /* WaDisableBackToBackFlipFix:vlv */
5086 I915_WRITE(IVB_CHICKEN3,
5087 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
5088 CHICKEN3_DGMG_DONE_FIX_DISABLE);
5090 /* WaPsdDispatchEnable:vlv */
5091 /* WaDisablePSDDualDispatchEnable:vlv */
5092 I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
5093 _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
5094 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));
5096 /* WaForceL3Serialization:vlv */
5097 I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
5098 ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
5100 /* WaDisableDopClockGating:vlv */
5101 I915_WRITE(GEN7_ROW_CHICKEN2,
5102 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
5104 /* This is required by WaCatErrorRejectionIssue:vlv */
5105 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
5106 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
5107 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
5109 gen7_setup_fixed_func_scheduler(dev_priv);
5112 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
5113 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
5115 I915_WRITE(GEN6_UCGCTL2,
5116 GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
5118 /* WaDisableL3Bank2xClockGate:vlv */
5119 I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
5121 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
5124 * BSpec says this must be set, even though
5125 * WaDisable4x2SubspanOptimization isn't listed for VLV.
5127 I915_WRITE(CACHE_MODE_1,
5128 _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
5131 * WaIncreaseL3CreditsForVLVB0:vlv
5132 * This is the hardware default actually.
5134 I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
5137 * WaDisableVLVClockGating_VBIIssue:vlv
5138 * Disable clock gating on th GCFG unit to prevent a delay
5139 * in the reporting of vblank events.
5141 I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
5144 static void g4x_init_clock_gating(struct drm_device *dev)
5146 struct drm_i915_private *dev_priv = dev->dev_private;
5147 uint32_t dspclk_gate;
5149 I915_WRITE(RENCLK_GATE_D1, 0);
5150 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
5151 GS_UNIT_CLOCK_GATE_DISABLE |
5152 CL_UNIT_CLOCK_GATE_DISABLE);
5153 I915_WRITE(RAMCLK_GATE_D, 0);
5154 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
5155 OVRUNIT_CLOCK_GATE_DISABLE |
5156 OVCUNIT_CLOCK_GATE_DISABLE;
5158 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
5159 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
5161 /* WaDisableRenderCachePipelinedFlush */
5162 I915_WRITE(CACHE_MODE_0,
5163 _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));
5165 g4x_disable_trickle_feed(dev);
5168 static void crestline_init_clock_gating(struct drm_device *dev)
5170 struct drm_i915_private *dev_priv = dev->dev_private;
5172 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
5173 I915_WRITE(RENCLK_GATE_D2, 0);
5174 I915_WRITE(DSPCLK_GATE_D, 0);
5175 I915_WRITE(RAMCLK_GATE_D, 0);
5176 I915_WRITE16(DEUC, 0);
5177 I915_WRITE(MI_ARB_STATE,
5178 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5181 static void broadwater_init_clock_gating(struct drm_device *dev)
5183 struct drm_i915_private *dev_priv = dev->dev_private;
5185 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
5186 I965_RCC_CLOCK_GATE_DISABLE |
5187 I965_RCPB_CLOCK_GATE_DISABLE |
5188 I965_ISC_CLOCK_GATE_DISABLE |
5189 I965_FBC_CLOCK_GATE_DISABLE);
5190 I915_WRITE(RENCLK_GATE_D2, 0);
5191 I915_WRITE(MI_ARB_STATE,
5192 _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
5195 static void gen3_init_clock_gating(struct drm_device *dev)
5197 struct drm_i915_private *dev_priv = dev->dev_private;
5198 u32 dstate = I915_READ(D_STATE);
5200 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
5201 DSTATE_DOT_CLOCK_GATING;
5202 I915_WRITE(D_STATE, dstate);
5204 if (IS_PINEVIEW(dev))
5205 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
5207 /* IIR "flip pending" means done if this bit is set */
5208 I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
5211 static void i85x_init_clock_gating(struct drm_device *dev)
5213 struct drm_i915_private *dev_priv = dev->dev_private;
5215 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
5218 static void i830_init_clock_gating(struct drm_device *dev)
5220 struct drm_i915_private *dev_priv = dev->dev_private;
5222 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
5225 void intel_init_clock_gating(struct drm_device *dev)
5227 struct drm_i915_private *dev_priv = dev->dev_private;
5229 dev_priv->display.init_clock_gating(dev);
5232 void intel_suspend_hw(struct drm_device *dev)
5234 if (HAS_PCH_LPT(dev))
5235 lpt_suspend_hw(dev);
5238 #define for_each_power_well(i, power_well, domain_mask, power_domains) \
5240 i < (power_domains)->power_well_count && \
5241 ((power_well) = &(power_domains)->power_wells[i]); \
5243 if ((power_well)->domains & (domain_mask))
5245 #define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
5246 for (i = (power_domains)->power_well_count - 1; \
5247 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
5249 if ((power_well)->domains & (domain_mask))
5252 * We should only use the power well if we explicitly asked the hardware to
5253 * enable it, so check if it's enabled and also check if we've requested it to
5256 static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5257 struct i915_power_well *power_well)
5259 return I915_READ(HSW_PWR_WELL_DRIVER) ==
5260 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5263 bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5264 enum intel_display_power_domain domain)
5266 struct i915_power_domains *power_domains;
5268 power_domains = &dev_priv->power_domains;
5270 return power_domains->domain_use_count[domain];
5273 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5274 enum intel_display_power_domain domain)
5276 struct i915_power_domains *power_domains;
5277 struct i915_power_well *power_well;
5281 if (dev_priv->pm.suspended)
5284 power_domains = &dev_priv->power_domains;
5288 mutex_lock(&power_domains->lock);
5289 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5290 if (power_well->always_on)
5293 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5298 mutex_unlock(&power_domains->lock);
5304 * Starting with Haswell, we have a "Power Down Well" that can be turned off
5305 * when not needed anymore. We have 4 registers that can request the power well
5306 * to be enabled, and it will only be disabled if none of the registers is
5307 * requesting it to be enabled.
5309 static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
5311 struct drm_device *dev = dev_priv->dev;
5312 unsigned long irqflags;
5315 * After we re-enable the power well, if we touch VGA register 0x3d5
5316 * we'll get unclaimed register interrupts. This stops after we write
5317 * anything to the VGA MSR register. The vgacon module uses this
5318 * register all the time, so if we unbind our driver and, as a
5319 * consequence, bind vgacon, we'll get stuck in an infinite loop at
5320 * console_unlock(). So make here we touch the VGA MSR register, making
5321 * sure vgacon can keep working normally without triggering interrupts
5322 * and error messages.
5324 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
5325 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
5326 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
5328 if (IS_BROADWELL(dev)) {
5329 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
5330 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
5331 dev_priv->de_irq_mask[PIPE_B]);
5332 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
5333 ~dev_priv->de_irq_mask[PIPE_B] |
5335 I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
5336 dev_priv->de_irq_mask[PIPE_C]);
5337 I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
5338 ~dev_priv->de_irq_mask[PIPE_C] |
5340 POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
5341 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
5345 static void reset_vblank_counter(struct drm_device *dev, enum pipe pipe)
5347 assert_spin_locked(&dev->vbl_lock);
5349 dev->vblank[pipe].last = 0;
5352 static void hsw_power_well_post_disable(struct drm_i915_private *dev_priv)
5354 struct drm_device *dev = dev_priv->dev;
5356 unsigned long irqflags;
5359 * After this, the registers on the pipes that are part of the power
5360 * well will become zero, so we have to adjust our counters according to
5363 * FIXME: Should we do this in general in drm_vblank_post_modeset?
5365 spin_lock_irqsave(&dev->vbl_lock, irqflags);
5368 reset_vblank_counter(dev, pipe);
5369 spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
5372 static void hsw_set_power_well(struct drm_i915_private *dev_priv,
5373 struct i915_power_well *power_well, bool enable)
5375 bool is_enabled, enable_requested;
5378 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
5379 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
5380 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
5383 if (!enable_requested)
5384 I915_WRITE(HSW_PWR_WELL_DRIVER,
5385 HSW_PWR_WELL_ENABLE_REQUEST);
5388 DRM_DEBUG_KMS("Enabling power well\n");
5389 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
5390 HSW_PWR_WELL_STATE_ENABLED), 20))
5391 DRM_ERROR("Timeout enabling power well\n");
5394 hsw_power_well_post_enable(dev_priv);
5396 if (enable_requested) {
5397 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
5398 POSTING_READ(HSW_PWR_WELL_DRIVER);
5399 DRM_DEBUG_KMS("Requesting to disable the power well\n");
5401 hsw_power_well_post_disable(dev_priv);
5406 static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
5407 struct i915_power_well *power_well)
5409 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
5412 * We're taking over the BIOS, so clear any requests made by it since
5413 * the driver is in charge now.
5415 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
5416 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
5419 static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
5420 struct i915_power_well *power_well)
5422 hsw_set_power_well(dev_priv, power_well, true);
5425 static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
5426 struct i915_power_well *power_well)
5428 hsw_set_power_well(dev_priv, power_well, false);
5431 static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
5432 struct i915_power_well *power_well)
5436 static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
5437 struct i915_power_well *power_well)
5442 static void vlv_set_power_well(struct drm_i915_private *dev_priv,
5443 struct i915_power_well *power_well, bool enable)
5445 enum punit_power_well power_well_id = power_well->data;
5450 mask = PUNIT_PWRGT_MASK(power_well_id);
5451 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
5452 PUNIT_PWRGT_PWR_GATE(power_well_id);
5454 mutex_lock(&dev_priv->rps.hw_lock);
5457 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
5462 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
5465 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
5467 if (wait_for(COND, 100))
5468 DRM_ERROR("timout setting power well state %08x (%08x)\n",
5470 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
5475 mutex_unlock(&dev_priv->rps.hw_lock);
5478 static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
5479 struct i915_power_well *power_well)
5481 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
5484 static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
5485 struct i915_power_well *power_well)
5487 vlv_set_power_well(dev_priv, power_well, true);
5490 static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
5491 struct i915_power_well *power_well)
5493 vlv_set_power_well(dev_priv, power_well, false);
5496 static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
5497 struct i915_power_well *power_well)
5499 int power_well_id = power_well->data;
5500 bool enabled = false;
5505 mask = PUNIT_PWRGT_MASK(power_well_id);
5506 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
5508 mutex_lock(&dev_priv->rps.hw_lock);
5510 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
5512 * We only ever set the power-on and power-gate states, anything
5513 * else is unexpected.
5515 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
5516 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
5521 * A transient state at this point would mean some unexpected party
5522 * is poking at the power controls too.
5524 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
5525 WARN_ON(ctrl != state);
5527 mutex_unlock(&dev_priv->rps.hw_lock);
5532 static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
5533 struct i915_power_well *power_well)
5535 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5537 vlv_set_power_well(dev_priv, power_well, true);
5539 spin_lock_irq(&dev_priv->irq_lock);
5540 valleyview_enable_display_irqs(dev_priv);
5541 spin_unlock_irq(&dev_priv->irq_lock);
5544 * During driver initialization we need to defer enabling hotplug
5545 * processing until fbdev is set up.
5547 if (dev_priv->enable_hotplug_processing)
5548 intel_hpd_init(dev_priv->dev);
5550 i915_redisable_vga_power_on(dev_priv->dev);
5553 static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
5554 struct i915_power_well *power_well)
5556 struct drm_device *dev = dev_priv->dev;
5559 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
5561 spin_lock_irq(&dev_priv->irq_lock);
5563 __intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
5565 valleyview_disable_display_irqs(dev_priv);
5566 spin_unlock_irq(&dev_priv->irq_lock);
5568 spin_lock_irq(&dev->vbl_lock);
5570 reset_vblank_counter(dev, pipe);
5571 spin_unlock_irq(&dev->vbl_lock);
5573 vlv_set_power_well(dev_priv, power_well, false);
5576 static void check_power_well_state(struct drm_i915_private *dev_priv,
5577 struct i915_power_well *power_well)
5579 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
5581 if (power_well->always_on || !i915.disable_power_well) {
5588 if (enabled != (power_well->count > 0))
5594 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
5595 power_well->name, power_well->always_on, enabled,
5596 power_well->count, i915.disable_power_well);
5599 void intel_display_power_get(struct drm_i915_private *dev_priv,
5600 enum intel_display_power_domain domain)
5602 struct i915_power_domains *power_domains;
5603 struct i915_power_well *power_well;
5606 intel_runtime_pm_get(dev_priv);
5608 power_domains = &dev_priv->power_domains;
5610 mutex_lock(&power_domains->lock);
5612 for_each_power_well(i, power_well, BIT(domain), power_domains) {
5613 if (!power_well->count++) {
5614 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5615 power_well->ops->enable(dev_priv, power_well);
5618 check_power_well_state(dev_priv, power_well);
5621 power_domains->domain_use_count[domain]++;
5623 mutex_unlock(&power_domains->lock);
5626 void intel_display_power_put(struct drm_i915_private *dev_priv,
5627 enum intel_display_power_domain domain)
5629 struct i915_power_domains *power_domains;
5630 struct i915_power_well *power_well;
5633 power_domains = &dev_priv->power_domains;
5635 mutex_lock(&power_domains->lock);
5637 WARN_ON(!power_domains->domain_use_count[domain]);
5638 power_domains->domain_use_count[domain]--;
5640 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5641 WARN_ON(!power_well->count);
5643 if (!--power_well->count && i915.disable_power_well) {
5644 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
5645 power_well->ops->disable(dev_priv, power_well);
5648 check_power_well_state(dev_priv, power_well);
5651 mutex_unlock(&power_domains->lock);
5653 intel_runtime_pm_put(dev_priv);
5656 static struct i915_power_domains *hsw_pwr;
5658 /* Display audio driver power well request */
5659 void i915_request_power_well(void)
5661 struct drm_i915_private *dev_priv;
5663 if (WARN_ON(!hsw_pwr))
5666 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5668 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
5670 EXPORT_SYMBOL_GPL(i915_request_power_well);
5672 /* Display audio driver power well release */
5673 void i915_release_power_well(void)
5675 struct drm_i915_private *dev_priv;
5677 if (WARN_ON(!hsw_pwr))
5680 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
5682 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
5684 EXPORT_SYMBOL_GPL(i915_release_power_well);
5686 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
5688 #define HSW_ALWAYS_ON_POWER_DOMAINS ( \
5689 BIT(POWER_DOMAIN_PIPE_A) | \
5690 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
5691 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
5692 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
5693 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5694 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5695 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5696 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5697 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
5698 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
5699 BIT(POWER_DOMAIN_PORT_CRT) | \
5700 BIT(POWER_DOMAIN_INIT))
5701 #define HSW_DISPLAY_POWER_DOMAINS ( \
5702 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
5703 BIT(POWER_DOMAIN_INIT))
5705 #define BDW_ALWAYS_ON_POWER_DOMAINS ( \
5706 HSW_ALWAYS_ON_POWER_DOMAINS | \
5707 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
5708 #define BDW_DISPLAY_POWER_DOMAINS ( \
5709 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
5710 BIT(POWER_DOMAIN_INIT))
5712 #define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
5713 #define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
5715 #define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
5716 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5717 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5718 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5719 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5720 BIT(POWER_DOMAIN_PORT_CRT) | \
5721 BIT(POWER_DOMAIN_INIT))
5723 #define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
5724 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
5725 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5726 BIT(POWER_DOMAIN_INIT))
5728 #define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
5729 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
5730 BIT(POWER_DOMAIN_INIT))
5732 #define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
5733 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
5734 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5735 BIT(POWER_DOMAIN_INIT))
5737 #define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
5738 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
5739 BIT(POWER_DOMAIN_INIT))
5741 static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
5742 .sync_hw = i9xx_always_on_power_well_noop,
5743 .enable = i9xx_always_on_power_well_noop,
5744 .disable = i9xx_always_on_power_well_noop,
5745 .is_enabled = i9xx_always_on_power_well_enabled,
5748 static struct i915_power_well i9xx_always_on_power_well[] = {
5750 .name = "always-on",
5752 .domains = POWER_DOMAIN_MASK,
5753 .ops = &i9xx_always_on_power_well_ops,
5757 static const struct i915_power_well_ops hsw_power_well_ops = {
5758 .sync_hw = hsw_power_well_sync_hw,
5759 .enable = hsw_power_well_enable,
5760 .disable = hsw_power_well_disable,
5761 .is_enabled = hsw_power_well_enabled,
5764 static struct i915_power_well hsw_power_wells[] = {
5766 .name = "always-on",
5768 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
5769 .ops = &i9xx_always_on_power_well_ops,
5773 .domains = HSW_DISPLAY_POWER_DOMAINS,
5774 .ops = &hsw_power_well_ops,
5778 static struct i915_power_well bdw_power_wells[] = {
5780 .name = "always-on",
5782 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
5783 .ops = &i9xx_always_on_power_well_ops,
5787 .domains = BDW_DISPLAY_POWER_DOMAINS,
5788 .ops = &hsw_power_well_ops,
5792 static const struct i915_power_well_ops vlv_display_power_well_ops = {
5793 .sync_hw = vlv_power_well_sync_hw,
5794 .enable = vlv_display_power_well_enable,
5795 .disable = vlv_display_power_well_disable,
5796 .is_enabled = vlv_power_well_enabled,
5799 static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
5800 .sync_hw = vlv_power_well_sync_hw,
5801 .enable = vlv_power_well_enable,
5802 .disable = vlv_power_well_disable,
5803 .is_enabled = vlv_power_well_enabled,
5806 static struct i915_power_well vlv_power_wells[] = {
5808 .name = "always-on",
5810 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
5811 .ops = &i9xx_always_on_power_well_ops,
5815 .domains = VLV_DISPLAY_POWER_DOMAINS,
5816 .data = PUNIT_POWER_WELL_DISP2D,
5817 .ops = &vlv_display_power_well_ops,
5820 .name = "dpio-common",
5821 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
5822 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
5823 .ops = &vlv_dpio_power_well_ops,
5826 .name = "dpio-tx-b-01",
5827 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5828 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5829 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5830 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5831 .ops = &vlv_dpio_power_well_ops,
5832 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
5835 .name = "dpio-tx-b-23",
5836 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5837 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5838 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5839 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5840 .ops = &vlv_dpio_power_well_ops,
5841 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
5844 .name = "dpio-tx-c-01",
5845 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5846 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5847 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5848 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5849 .ops = &vlv_dpio_power_well_ops,
5850 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
5853 .name = "dpio-tx-c-23",
5854 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
5855 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
5856 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
5857 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
5858 .ops = &vlv_dpio_power_well_ops,
5859 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
5863 #define set_power_wells(power_domains, __power_wells) ({ \
5864 (power_domains)->power_wells = (__power_wells); \
5865 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
5868 int intel_power_domains_init(struct drm_i915_private *dev_priv)
5870 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5872 mutex_init(&power_domains->lock);
5875 * The enabling order will be from lower to higher indexed wells,
5876 * the disabling order is reversed.
5878 if (IS_HASWELL(dev_priv->dev)) {
5879 set_power_wells(power_domains, hsw_power_wells);
5880 hsw_pwr = power_domains;
5881 } else if (IS_BROADWELL(dev_priv->dev)) {
5882 set_power_wells(power_domains, bdw_power_wells);
5883 hsw_pwr = power_domains;
5884 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
5885 set_power_wells(power_domains, vlv_power_wells);
5887 set_power_wells(power_domains, i9xx_always_on_power_well);
5893 void intel_power_domains_remove(struct drm_i915_private *dev_priv)
5898 static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
5900 struct i915_power_domains *power_domains = &dev_priv->power_domains;
5901 struct i915_power_well *power_well;
5904 mutex_lock(&power_domains->lock);
5905 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
5906 power_well->ops->sync_hw(dev_priv, power_well);
5907 mutex_unlock(&power_domains->lock);
5910 void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
5912 /* For now, we need the power well to be always enabled. */
5913 intel_display_set_init_power(dev_priv, true);
5914 intel_power_domains_resume(dev_priv);
5917 void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
5919 intel_runtime_pm_get(dev_priv);
5922 void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
5924 intel_runtime_pm_put(dev_priv);
5927 void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
5929 struct drm_device *dev = dev_priv->dev;
5930 struct device *device = &dev->pdev->dev;
5932 if (!HAS_RUNTIME_PM(dev))
5935 pm_runtime_get_sync(device);
5936 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
5939 void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
5941 struct drm_device *dev = dev_priv->dev;
5942 struct device *device = &dev->pdev->dev;
5944 if (!HAS_RUNTIME_PM(dev))
5947 pm_runtime_mark_last_busy(device);
5948 pm_runtime_put_autosuspend(device);
5951 void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
5953 struct drm_device *dev = dev_priv->dev;
5954 struct device *device = &dev->pdev->dev;
5956 if (!HAS_RUNTIME_PM(dev))
5959 pm_runtime_set_active(device);
5961 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
5962 pm_runtime_mark_last_busy(device);
5963 pm_runtime_use_autosuspend(device);
5965 pm_runtime_put_autosuspend(device);
5968 void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
5970 struct drm_device *dev = dev_priv->dev;
5971 struct device *device = &dev->pdev->dev;
5973 if (!HAS_RUNTIME_PM(dev))
5976 /* Make sure we're not suspended first. */
5977 pm_runtime_get_sync(device);
5978 pm_runtime_disable(device);
5981 /* Set up chip specific power management-related functions */
5982 void intel_init_pm(struct drm_device *dev)
5984 struct drm_i915_private *dev_priv = dev->dev_private;
5987 if (INTEL_INFO(dev)->gen >= 7) {
5988 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5989 dev_priv->display.enable_fbc = gen7_enable_fbc;
5990 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5991 } else if (INTEL_INFO(dev)->gen >= 5) {
5992 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
5993 dev_priv->display.enable_fbc = ironlake_enable_fbc;
5994 dev_priv->display.disable_fbc = ironlake_disable_fbc;
5995 } else if (IS_GM45(dev)) {
5996 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
5997 dev_priv->display.enable_fbc = g4x_enable_fbc;
5998 dev_priv->display.disable_fbc = g4x_disable_fbc;
6000 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6001 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6002 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6004 /* This value was pulled out of someone's hat */
6005 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6010 if (IS_PINEVIEW(dev))
6011 i915_pineview_get_mem_freq(dev);
6012 else if (IS_GEN5(dev))
6013 i915_ironlake_get_mem_freq(dev);
6015 /* For FIFO watermark updates */
6016 if (HAS_PCH_SPLIT(dev)) {
6017 ilk_setup_wm_latency(dev);
6019 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
6020 dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
6021 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
6022 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
6023 dev_priv->display.update_wm = ilk_update_wm;
6024 dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
6026 DRM_DEBUG_KMS("Failed to read display plane latency. "
6031 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
6032 else if (IS_GEN6(dev))
6033 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
6034 else if (IS_IVYBRIDGE(dev))
6035 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
6036 else if (IS_HASWELL(dev))
6037 dev_priv->display.init_clock_gating = haswell_init_clock_gating;
6038 else if (INTEL_INFO(dev)->gen == 8)
6039 dev_priv->display.init_clock_gating = gen8_init_clock_gating;
6040 } else if (IS_VALLEYVIEW(dev)) {
6041 dev_priv->display.update_wm = valleyview_update_wm;
6042 dev_priv->display.init_clock_gating =
6043 valleyview_init_clock_gating;
6044 } else if (IS_PINEVIEW(dev)) {
6045 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
6048 dev_priv->mem_freq)) {
6049 DRM_INFO("failed to find known CxSR latency "
6050 "(found ddr%s fsb freq %d, mem freq %d), "
6052 (dev_priv->is_ddr3 == 1) ? "3" : "2",
6053 dev_priv->fsb_freq, dev_priv->mem_freq);
6054 /* Disable CxSR and never update its watermark again */
6055 pineview_disable_cxsr(dev);
6056 dev_priv->display.update_wm = NULL;
6058 dev_priv->display.update_wm = pineview_update_wm;
6059 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6060 } else if (IS_G4X(dev)) {
6061 dev_priv->display.update_wm = g4x_update_wm;
6062 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
6063 } else if (IS_GEN4(dev)) {
6064 dev_priv->display.update_wm = i965_update_wm;
6065 if (IS_CRESTLINE(dev))
6066 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
6067 else if (IS_BROADWATER(dev))
6068 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
6069 } else if (IS_GEN3(dev)) {
6070 dev_priv->display.update_wm = i9xx_update_wm;
6071 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
6072 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
6073 } else if (IS_GEN2(dev)) {
6074 if (INTEL_INFO(dev)->num_pipes == 1) {
6075 dev_priv->display.update_wm = i845_update_wm;
6076 dev_priv->display.get_fifo_size = i845_get_fifo_size;
6078 dev_priv->display.update_wm = i9xx_update_wm;
6079 dev_priv->display.get_fifo_size = i830_get_fifo_size;
6082 if (IS_I85X(dev) || IS_I865G(dev))
6083 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
6085 dev_priv->display.init_clock_gating = i830_init_clock_gating;
6087 DRM_ERROR("unexpected fall-through in intel_init_pm\n");
6091 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
6093 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6095 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6096 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
6100 I915_WRITE(GEN6_PCODE_DATA, *val);
6101 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6103 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6105 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
6109 *val = I915_READ(GEN6_PCODE_DATA);
6110 I915_WRITE(GEN6_PCODE_DATA, 0);
6115 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
6117 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
6119 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
6120 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
6124 I915_WRITE(GEN6_PCODE_DATA, val);
6125 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
6127 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
6129 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
6133 I915_WRITE(GEN6_PCODE_DATA, 0);
6138 int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
6143 switch (dev_priv->mem_freq) {
6157 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div);
6160 int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
6165 switch (dev_priv->mem_freq) {
6179 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6;
6182 void intel_pm_setup(struct drm_device *dev)
6184 struct drm_i915_private *dev_priv = dev->dev_private;
6186 mutex_init(&dev_priv->rps.hw_lock);
6188 INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
6189 intel_gen6_powersave_work);
6191 dev_priv->pm.suspended = false;
6192 dev_priv->pm.irqs_disabled = false;