/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *
 */

#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_drv.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/module.h>
/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, drawing as little as 0V in this state. The
 * state is entered automatically when the GPU is idle and RC6 support is
 * enabled; as soon as a new workload arises, the GPU wakes up automatically.
 *
 * Intel GPUs provide several RC6 modes, which differ in the latency
 * required to enter and leave RC6, and in the voltage consumed by the GPU
 * in each state.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter: RC6 is the normal RC6 state, RC6p is deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to
 * the GPU, BIOS, chipset and platform. RC6 is usually the safest one and
 * the one which brings the most power savings; deeper states save more
 * power, but require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE		(1 << 0)
#define INTEL_RC6p_ENABLE		(1 << 1)
#define INTEL_RC6pp_ENABLE		(1 << 2)
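/*
 * For example, an enable mask of INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE would
 * allow both the normal and the deep RC6 states (but not RC6pp), while a
 * mask of just INTEL_RC6_ENABLE restricts the GPU to plain RC6, the
 * conservative choice described above.
 */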
/* FBC, or Frame Buffer Compression, is a technique employed to compress the
 * framebuffer contents in-memory, aiming at reducing the required bandwidth
 * during in-memory transfers and, therefore, reducing power consumption.
 *
 * The benefits of FBC are mostly visible with solid backgrounds and
 * variation-less patterns.
 *
 * FBC-related functionality can be enabled by means of the
 * i915.enable_fbc module parameter.
 */
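/*
 * For example, booting with i915.enable_fbc=1 forces FBC on where the
 * hardware supports it, i915.enable_fbc=0 disables it, and the default of
 * -1 defers to the per-chip default (see intel_update_fbc() below).
 */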
static void gen9_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * WaDisableSDEUnitClockGating:skl
	 * This seems to be a pre-production w/a.
	 */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	/*
	 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
	 * This is a pre-production w/a.
	 */
	I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
		   I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
		   ~GEN9_DG_MIRROR_FIX_ENABLE);

	/* Wa4x4STCOptimizationDisable:skl */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
}
static void i8xx_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	fbc_ctl = I915_READ(FBC_CONTROL);
	if ((fbc_ctl & FBC_CTL_EN) == 0)
		return;

	fbc_ctl &= ~FBC_CTL_EN;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	/* Wait for compressing bit to clear */
	if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
		DRM_DEBUG_KMS("FBC idle timed out\n");
		return;
	}

	DRM_DEBUG_KMS("disabled FBC\n");
}
static void i8xx_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int cfb_pitch;
	int i;
	u32 fbc_ctl;

	dev_priv->fbc.enabled = true;

	cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
	if (fb->pitches[0] < cfb_pitch)
		cfb_pitch = fb->pitches[0];

	/* FBC_CTL wants 32B or 64B units */
	if (IS_GEN2(dev))
		cfb_pitch = (cfb_pitch / 32) - 1;
	else
		cfb_pitch = (cfb_pitch / 64) - 1;

	/* Clear old tags */
	for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
		I915_WRITE(FBC_TAG + (i * 4), 0);

	if (IS_GEN4(dev)) {
		u32 fbc_ctl2;

		/* Set it up... */
		fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
		fbc_ctl2 |= FBC_CTL_PLANE(intel_crtc->plane);
		I915_WRITE(FBC_CONTROL2, fbc_ctl2);
		I915_WRITE(FBC_FENCE_OFF, crtc->y);
	}

	/* enable it... */
	fbc_ctl = I915_READ(FBC_CONTROL);
	fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
	fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
	if (IS_I945GM(dev))
		fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
	fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
	fbc_ctl |= obj->fence_reg;
	I915_WRITE(FBC_CONTROL, fbc_ctl);

	DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %c\n",
		      cfb_pitch, crtc->y, plane_name(intel_crtc->plane));
}
static bool i8xx_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
	else
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
	dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;

	I915_WRITE(DPFC_FENCE_YOFF, crtc->y);

	/* enable it... */
	I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void g4x_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool g4x_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
}
static void sandybridge_blit_fbc_update(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 blt_ecoskpd;

	/* Make sure blitter notifies FBC of writes */

	/* Blitter is part of Media powerwell on VLV. No impact of
	 * this parameter on other platforms for now */
	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);

	blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
		GEN6_BLITTER_LOCK_SHIFT;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
			 GEN6_BLITTER_LOCK_SHIFT);
	I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
	POSTING_READ(GEN6_BLITTER_ECOSKPD);

	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);
}
static void ironlake_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}
	dpfc_ctl |= DPFC_CTL_FENCE_EN;
	if (IS_GEN5(dev))
		dpfc_ctl |= obj->fence_reg;

	I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
	I915_WRITE(ILK_FBC_RT_BASE, i915_gem_obj_ggtt_offset(obj) | ILK_FBC_RT_VALID);
	/* enable it... */
	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_GEN6(dev)) {
		I915_WRITE(SNB_DPFC_CTL_SA,
			   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
		I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
		sandybridge_blit_fbc_update(dev);
	}

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
static void ironlake_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = false;

	/* Disable compression */
	dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
	if (dpfc_ctl & DPFC_CTL_EN) {
		dpfc_ctl &= ~DPFC_CTL_EN;
		I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);

		DRM_DEBUG_KMS("disabled FBC\n");
	}
}
static bool ironlake_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_enable_fbc(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 dpfc_ctl;

	dev_priv->fbc.enabled = true;

	dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
	if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
		dev_priv->fbc.threshold++;

	switch (dev_priv->fbc.threshold) {
	case 4:
	case 3:
		dpfc_ctl |= DPFC_CTL_LIMIT_4X;
		break;
	case 2:
		dpfc_ctl |= DPFC_CTL_LIMIT_2X;
		break;
	case 1:
		dpfc_ctl |= DPFC_CTL_LIMIT_1X;
		break;
	}

	dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;

	if (dev_priv->fbc.false_color)
		dpfc_ctl |= FBC_CTL_FALSE_COLOR;

	I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);

	if (IS_IVYBRIDGE(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ivb */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
	} else {
		/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
		I915_WRITE(CHICKEN_PIPESL_1(intel_crtc->pipe),
			   I915_READ(CHICKEN_PIPESL_1(intel_crtc->pipe)) |
			   HSW_FBCQ_DIS);
	}

	I915_WRITE(SNB_DPFC_CTL_SA,
		   SNB_CPU_FENCE_ENABLE | obj->fence_reg);
	I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);

	sandybridge_blit_fbc_update(dev);

	DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane));
}
bool intel_fbc_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	return dev_priv->fbc.enabled;
}
void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_GEN8(dev))
		return;

	if (!intel_fbc_enabled(dev))
		return;

	I915_WRITE(MSG_FBC_REND_STATE, value);
}
static void intel_fbc_work_fn(struct work_struct *__work)
{
	struct intel_fbc_work *work =
		container_of(to_delayed_work(__work),
			     struct intel_fbc_work, work);
	struct drm_device *dev = work->crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_lock(&dev->struct_mutex);
	if (work == dev_priv->fbc.fbc_work) {
		/* Double check that we haven't switched fb without cancelling
		 * the prior work.
		 */
		if (work->crtc->primary->fb == work->fb) {
			dev_priv->display.enable_fbc(work->crtc);

			dev_priv->fbc.plane = to_intel_crtc(work->crtc)->plane;
			dev_priv->fbc.fb_id = work->crtc->primary->fb->base.id;
			dev_priv->fbc.y = work->crtc->y;
		}

		dev_priv->fbc.fbc_work = NULL;
	}
	mutex_unlock(&dev->struct_mutex);

	kfree(work);
}
static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
{
	if (dev_priv->fbc.fbc_work == NULL)
		return;

	DRM_DEBUG_KMS("cancelling pending FBC enable\n");

	/* Synchronisation is provided by struct_mutex and checking of
	 * dev_priv->fbc.fbc_work, so we can perform the cancellation
	 * entirely asynchronously.
	 */
	if (cancel_delayed_work(&dev_priv->fbc.fbc_work->work))
		/* tasklet was killed before being run, clean up */
		kfree(dev_priv->fbc.fbc_work);

	/* Mark the work as no longer wanted so that if it does
	 * wake-up (because the work was already running and waiting
	 * for our mutex), it will discover that it is no longer
	 * necessary to run.
	 */
	dev_priv->fbc.fbc_work = NULL;
}
static void intel_enable_fbc(struct drm_crtc *crtc)
{
	struct intel_fbc_work *work;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->display.enable_fbc)
		return;

	intel_cancel_fbc_work(dev_priv);

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL) {
		DRM_ERROR("Failed to allocate FBC work structure\n");
		dev_priv->display.enable_fbc(crtc);
		return;
	}

	work->crtc = crtc;
	work->fb = crtc->primary->fb;
	INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);

	dev_priv->fbc.fbc_work = work;

	/* Delay the actual enabling to let pageflipping cease and the
	 * display to settle before starting the compression. Note that
	 * this delay also serves a second purpose: it allows for a
	 * vblank to pass after disabling the FBC before we attempt
	 * to modify the control registers.
	 *
	 * A more complicated solution would involve tracking vblanks
	 * following the termination of the page-flipping sequence
	 * and indeed performing the enable as a co-routine and not
	 * waiting synchronously upon the vblank.
	 *
	 * WaFbcWaitForVBlankBeforeEnable:ilk,snb
	 */
	schedule_delayed_work(&work->work, msecs_to_jiffies(50));
}
void intel_disable_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_cancel_fbc_work(dev_priv);

	if (!dev_priv->display.disable_fbc)
		return;

	dev_priv->display.disable_fbc(dev);
	dev_priv->fbc.plane = -1;
}
static bool set_no_fbc_reason(struct drm_i915_private *dev_priv,
			      enum no_fbc_reason reason)
{
	if (dev_priv->fbc.no_fbc_reason == reason)
		return false;

	dev_priv->fbc.no_fbc_reason = reason;
	return true;
}
/**
 * intel_update_fbc - enable/disable FBC as needed
 * @dev: the drm_device
 *
 * Set up the framebuffer compression hardware at mode set time. We
 * enable it if possible:
 *   - plane A only (on pre-965)
 *   - no pixel multiply/line duplication
 *   - no alpha buffer discard
 *   - no dual wide
 *   - framebuffer <= max_hdisplay in width, max_vdisplay in height
 *
 * We can't assume that any compression will take place (worst case),
 * so the compressed buffer has to be the same size as the uncompressed
 * one. It also must reside (along with the line length buffer) in
 * stolen memory.
 *
 * We need to enable/disable FBC on a global basis.
 */
void intel_update_fbc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = NULL, *tmp_crtc;
	struct intel_crtc *intel_crtc;
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	const struct drm_display_mode *adjusted_mode;
	unsigned int max_width, max_height;

	if (!HAS_FBC(dev)) {
		set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED);
		return;
	}

	if (!i915.powersave) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		return;
	}

	/*
	 * If FBC is already on, we just have to verify that we can
	 * keep it that way...
	 * Need to disable if:
	 *   - more than one pipe is active
	 *   - changing FBC params (stride, fence, mode)
	 *   - new fb is too large to fit in compressed buffer
	 *   - going to an unsupported config (interlace, pixel multiply, etc.)
	 */
	for_each_crtc(dev, tmp_crtc) {
		if (intel_crtc_active(tmp_crtc) &&
		    to_intel_crtc(tmp_crtc)->primary_enabled) {
			if (crtc) {
				if (set_no_fbc_reason(dev_priv, FBC_MULTIPLE_PIPES))
					DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
				goto out_disable;
			}
			crtc = tmp_crtc;
		}
	}

	if (!crtc || crtc->primary->fb == NULL) {
		if (set_no_fbc_reason(dev_priv, FBC_NO_OUTPUT))
			DRM_DEBUG_KMS("no output, disabling\n");
		goto out_disable;
	}

	intel_crtc = to_intel_crtc(crtc);
	fb = crtc->primary->fb;
	obj = intel_fb_obj(fb);
	adjusted_mode = &intel_crtc->config.adjusted_mode;

	if (i915.enable_fbc < 0) {
		if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
			DRM_DEBUG_KMS("disabled per chip default\n");
		goto out_disable;
	}
	if (!i915.enable_fbc) {
		if (set_no_fbc_reason(dev_priv, FBC_MODULE_PARAM))
			DRM_DEBUG_KMS("fbc disabled per module param\n");
		goto out_disable;
	}
	if ((adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ||
	    (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("mode incompatible with compression, "
				      "disabling\n");
		goto out_disable;
	}

	if (INTEL_INFO(dev)->gen >= 8 || IS_HASWELL(dev)) {
		max_width = 4096;
		max_height = 4096;
	} else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		max_width = 4096;
		max_height = 2048;
	} else {
		max_width = 2048;
		max_height = 1536;
	}
	if (intel_crtc->config.pipe_src_w > max_width ||
	    intel_crtc->config.pipe_src_h > max_height) {
		if (set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE))
			DRM_DEBUG_KMS("mode too large for compression, disabling\n");
		goto out_disable;
	}
	if ((INTEL_INFO(dev)->gen < 4 || HAS_DDI(dev)) &&
	    intel_crtc->plane != PLANE_A) {
		if (set_no_fbc_reason(dev_priv, FBC_BAD_PLANE))
			DRM_DEBUG_KMS("plane not A, disabling compression\n");
		goto out_disable;
	}

	/* The use of a CPU fence is mandatory in order to detect writes
	 * by the CPU to the scanout and trigger updates to the FBC.
	 */
	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		if (set_no_fbc_reason(dev_priv, FBC_NOT_TILED))
			DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
		goto out_disable;
	}
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
	    to_intel_plane(crtc->primary)->rotation != BIT(DRM_ROTATE_0)) {
		if (set_no_fbc_reason(dev_priv, FBC_UNSUPPORTED_MODE))
			DRM_DEBUG_KMS("Rotation unsupported, disabling\n");
		goto out_disable;
	}

	/* If the kernel debugger is active, always disable compression */
	if (in_dbg_master())
		goto out_disable;

	if (i915_gem_stolen_setup_compression(dev, obj->base.size,
					      drm_format_plane_cpp(fb->pixel_format, 0))) {
		if (set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL))
			DRM_DEBUG_KMS("framebuffer too large, disabling compression\n");
		goto out_disable;
	}

	/* If the scanout has not changed, don't modify the FBC settings.
	 * Note that we make the fundamental assumption that the fb->obj
	 * cannot be unpinned (and have its GTT offset and fence revoked)
	 * without first being decoupled from the scanout and FBC disabled.
	 */
	if (dev_priv->fbc.plane == intel_crtc->plane &&
	    dev_priv->fbc.fb_id == fb->base.id &&
	    dev_priv->fbc.y == crtc->y)
		return;

	if (intel_fbc_enabled(dev)) {
		/* We update FBC along two paths, after changing fb/crtc
		 * configuration (modeswitching) and after page-flipping
		 * finishes. For the latter, we know that not only did
		 * we disable the FBC at the start of the page-flip
		 * sequence, but also more than one vblank has passed.
		 *
		 * For the former case of modeswitching, it is possible
		 * to switch between two FBC valid configurations
		 * instantaneously so we do need to disable the FBC
		 * before we can modify its control registers. We also
		 * have to wait for the next vblank for that to take
		 * effect. However, since we delay enabling FBC we can
		 * assume that a vblank has passed since disabling and
		 * that we can safely alter the registers in the deferred
		 * callback.
		 *
		 * In the scenario that we go from a valid to invalid
		 * and then back to valid FBC configuration we have
		 * no strict enforcement that a vblank occurred since
		 * disabling the FBC. However, along all current pipe
		 * disabling paths we do need to wait for a vblank at
		 * some point. And we wait before enabling FBC anyway.
		 */
		DRM_DEBUG_KMS("disabling active FBC for update\n");
		intel_disable_fbc(dev);
	}

	intel_enable_fbc(crtc);
	dev_priv->fbc.no_fbc_reason = FBC_OK;
	return;

out_disable:
	/* Multiple disables should be harmless */
	if (intel_fbc_enabled(dev)) {
		DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
		intel_disable_fbc(dev);
	}
	i915_gem_stolen_cleanup_compression(dev);
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	tmp = I915_READ(CLKCFG);

	switch (tmp & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_533:
		dev_priv->fsb_freq = 533; /* 133*4 */
		break;
	case CLKCFG_FSB_800:
		dev_priv->fsb_freq = 800; /* 200*4 */
		break;
	case CLKCFG_FSB_667:
		dev_priv->fsb_freq = 667; /* 167*4 */
		break;
	case CLKCFG_FSB_400:
		dev_priv->fsb_freq = 400; /* 100*4 */
		break;
	}

	switch (tmp & CLKCFG_MEM_MASK) {
	case CLKCFG_MEM_533:
		dev_priv->mem_freq = 533;
		break;
	case CLKCFG_MEM_667:
		dev_priv->mem_freq = 667;
		break;
	case CLKCFG_MEM_800:
		dev_priv->mem_freq = 800;
		break;
	}

	/* detect pineview DDR3 setting */
	tmp = I915_READ(CSHRDDR3CTL);
	dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 ddrpll, csipll;

	ddrpll = I915_READ16(DDRMPLL1);
	csipll = I915_READ16(CSIPLL0);

	switch (ddrpll & 0xff) {
	case 0xc:
		dev_priv->mem_freq = 800;
		break;
	case 0x10:
		dev_priv->mem_freq = 1066;
		break;
	case 0x14:
		dev_priv->mem_freq = 1333;
		break;
	case 0x18:
		dev_priv->mem_freq = 1600;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
				 ddrpll & 0xff);
		dev_priv->mem_freq = 0;
		break;
	}

	dev_priv->ips.r_t = dev_priv->mem_freq;

	switch (csipll & 0x3ff) {
	case 0x00c:
		dev_priv->fsb_freq = 3200;
		break;
	case 0x00e:
		dev_priv->fsb_freq = 3733;
		break;
	case 0x010:
		dev_priv->fsb_freq = 4266;
		break;
	case 0x012:
		dev_priv->fsb_freq = 4800;
		break;
	case 0x014:
		dev_priv->fsb_freq = 5333;
		break;
	case 0x016:
		dev_priv->fsb_freq = 5866;
		break;
	case 0x018:
		dev_priv->fsb_freq = 6400;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
				 csipll & 0x3ff);
		dev_priv->fsb_freq = 0;
		break;
	}

	if (dev_priv->fsb_freq == 3200) {
		dev_priv->ips.c_m = 0;
	} else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
		dev_priv->ips.c_m = 1;
	} else {
		dev_priv->ips.c_m = 2;
	}
}
static const struct cxsr_latency cxsr_latency_table[] = {
	{1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
	{1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
	{1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
	{1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
	{1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */

	{1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
	{1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
	{1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
	{1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
	{1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */

	{1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
	{1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
	{1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
	{1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
	{1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */

	{0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
	{0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
	{0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
	{0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
	{0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */

	{0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
	{0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
	{0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
	{0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
	{0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */

	{0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
	{0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
	{0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
	{0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
	{0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
};
static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
							 int is_ddr3,
							 int fsb,
							 int mem)
{
	const struct cxsr_latency *latency;
	int i;

	if (fsb == 0 || mem == 0)
		return NULL;

	for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
		latency = &cxsr_latency_table[i];
		if (is_desktop == latency->is_desktop &&
		    is_ddr3 == latency->is_ddr3 &&
		    fsb == latency->fsb_freq && mem == latency->mem_freq)
			return latency;
	}

	DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");

	return NULL;
}
void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
{
	struct drm_device *dev = dev_priv->dev;
	u32 val;

	if (IS_VALLEYVIEW(dev)) {
		I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0);
	} else if (IS_G4X(dev) || IS_CRESTLINE(dev)) {
		I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0);
	} else if (IS_PINEVIEW(dev)) {
		val = I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN;
		val |= enable ? PINEVIEW_SELF_REFRESH_EN : 0;
		I915_WRITE(DSPFW3, val);
	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
			       _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
		I915_WRITE(FW_BLC_SELF, val);
	} else if (IS_I915GM(dev)) {
		val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
			       _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
		I915_WRITE(INSTPM, val);
	} else {
		return;
	}

	DRM_DEBUG_KMS("memory self-refresh is %s\n",
		      enable ? "enabled" : "disabled");
}
/*
 * Latency for FIFO fetches is dependent on several factors:
 *   - memory configuration (speed, channels)
 *   - chipset
 *   - current MCH state
 * It can be fairly high in some situations, so here we assume a fairly
 * pessimal value. It's a tradeoff between extra memory fetches (if we
 * set this value too high, the FIFO will fetch frequently to stay full)
 * and power consumption (set it too low to save power and we might see
 * FIFO underruns and display "flicker").
 *
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
static const int pessimal_latency_ns = 5000;
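/*
 * To put that number in perspective (illustrative figures only): at a
 * 148500 kHz pixel clock with 4 bytes per pixel, the display drains
 * roughly 148.5 * 4 ~= 594 bytes per microsecond, so a 5us fetch latency
 * corresponds to about 2970 bytes, i.e. ~47 64-byte cachelines of FIFO
 * headroom.
 */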
static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	if (plane)
		size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
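/*
 * Illustrative reading (assuming the DSPARB layout in i915_reg.h, where
 * DSPARB_CSTART_SHIFT is 7): a DSPARB value of 0x3020 puts the A/B split
 * at entry 0x20 and the B/C split at entry 0x60, so plane A owns 32
 * entries and plane B owns 0x60 - 0x20 = 64 entries.
 */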
static int i830_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x1ff;
	if (plane)
		size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
	size >>= 1; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
static int i845_get_fifo_size(struct drm_device *dev, int plane)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dsparb = I915_READ(DSPARB);
	int size;

	size = dsparb & 0x7f;
	size >>= 2; /* Convert to cachelines */

	DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
		      plane ? "B" : "A", size);

	return size;
}
/* Pineview has different values for various configs */
static const struct intel_watermark_params pineview_display_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_display_hplloff_wm = {
	.fifo_size = PINEVIEW_DISPLAY_FIFO,
	.max_wm = PINEVIEW_MAX_WM,
	.default_wm = PINEVIEW_DFT_HPLLOFF_WM,
	.guard_size = PINEVIEW_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
	.fifo_size = PINEVIEW_CURSOR_FIFO,
	.max_wm = PINEVIEW_CURSOR_MAX_WM,
	.default_wm = PINEVIEW_CURSOR_DFT_WM,
	.guard_size = PINEVIEW_CURSOR_GUARD_WM,
	.cacheline_size = PINEVIEW_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_wm_info = {
	.fifo_size = G4X_FIFO_SIZE,
	.max_wm = G4X_MAX_WM,
	.default_wm = G4X_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params g4x_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_wm_info = {
	.fifo_size = VALLEYVIEW_FIFO_SIZE,
	.max_wm = VALLEYVIEW_MAX_WM,
	.default_wm = VALLEYVIEW_MAX_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params valleyview_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = VALLEYVIEW_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = G4X_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i965_cursor_wm_info = {
	.fifo_size = I965_CURSOR_FIFO,
	.max_wm = I965_CURSOR_MAX_WM,
	.default_wm = I965_CURSOR_DFT_WM,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i945_wm_info = {
	.fifo_size = I945_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i915_wm_info = {
	.fifo_size = I915_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I915_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_a_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i830_bc_wm_info = {
	.fifo_size = I855GM_FIFO_SIZE,
	.max_wm = I915_MAX_WM/2,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
static const struct intel_watermark_params i845_wm_info = {
	.fifo_size = I830_FIFO_SIZE,
	.max_wm = I915_MAX_WM,
	.default_wm = 1,
	.guard_size = 2,
	.cacheline_size = I830_FIFO_LINE_SIZE,
};
/**
 * intel_calculate_wm - calculate watermark level
 * @clock_in_khz: pixel clock
 * @wm: chip FIFO params
 * @fifo_size: size of the FIFO buffer
 * @pixel_size: display pixel size
 * @latency_ns: memory latency for the platform
 *
 * Calculate the watermark level (the level at which the display plane will
 * start fetching from memory again). Each chip has a different display
 * FIFO size and allocation, so the caller needs to figure that out and pass
 * in the correct intel_watermark_params structure.
 *
 * As the pixel clock runs, the FIFO will be drained at a rate that depends
 * on the pixel size. When it reaches the watermark level, it'll start
 * fetching FIFO line sized chunks from memory until the FIFO fills past
 * the watermark point. If the FIFO drains completely, a FIFO underrun
 * will occur, and a display engine hang could result.
 */
static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
					const struct intel_watermark_params *wm,
					int fifo_size,
					int pixel_size,
					unsigned long latency_ns)
{
	long entries_required, wm_size;

	/*
	 * Note: we need to make sure we don't overflow for various clock &
	 * latency values.
	 * clocks go from a few thousand to several hundred thousand.
	 * latency is usually a few thousand.
	 */
	entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
		1000;
	entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);

	DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);

	wm_size = fifo_size - (entries_required + wm->guard_size);

	DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);

	/* Don't promote wm_size to unsigned... */
	if (wm_size > (long)wm->max_wm)
		wm_size = wm->max_wm;
	if (wm_size <= 0)
		wm_size = wm->default_wm;

	/*
	 * Bspec seems to indicate that the value shouldn't be lower than
	 * 'burst size + 1'. Certainly 830 is quite unhappy with low values.
	 * Let's go for 8, which is the burst size, since certain platforms
	 * already use a hardcoded 8 (which is what the spec says should be
	 * done).
	 */
	if (wm_size <= 8)
		wm_size = 8;

	return wm_size;
}
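/*
 * Worked example with made-up but representative numbers: a 100000 kHz
 * pixel clock at 4 bytes per pixel with the 5000 ns pessimal latency
 * drains (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes while a fetch is
 * outstanding. With 64-byte cachelines that is DIV_ROUND_UP(2000, 64) =
 * 32 entries, so a 96-entry FIFO with a guard size of 2 would yield a
 * watermark of 96 - (32 + 2) = 62.
 */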
static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
{
	struct drm_crtc *crtc, *enabled = NULL;

	for_each_crtc(dev, crtc) {
		if (intel_crtc_active(crtc)) {
			if (enabled)
				return NULL;
			enabled = crtc;
		}
	}

	return enabled;
}
static void pineview_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct cxsr_latency *latency;
	u32 reg;
	unsigned long wm;

	latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
					 dev_priv->fsb_freq, dev_priv->mem_freq);
	if (!latency) {
		DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
		intel_set_memory_cxsr(dev_priv, false);
		return;
	}

	crtc = single_enabled_crtc(dev);
	if (crtc) {
		const struct drm_display_mode *adjusted_mode;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		int clock;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		clock = adjusted_mode->crtc_clock;

		/* Display SR */
		wm = intel_calculate_wm(clock, &pineview_display_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->display_sr);
		reg = I915_READ(DSPFW1);
		reg &= ~DSPFW_SR_MASK;
		reg |= wm << DSPFW_SR_SHIFT;
		I915_WRITE(DSPFW1, reg);
		DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);

		/* cursor SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_wm,
					pineview_display_wm.fifo_size,
					pixel_size, latency->cursor_sr);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_CURSOR_SR_MASK;
		reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
		I915_WRITE(DSPFW3, reg);

		/* Display HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->display_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_SR_MASK;
		reg |= wm & DSPFW_HPLL_SR_MASK;
		I915_WRITE(DSPFW3, reg);

		/* cursor HPLL off SR */
		wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
					pineview_display_hplloff_wm.fifo_size,
					pixel_size, latency->cursor_hpll_disable);
		reg = I915_READ(DSPFW3);
		reg &= ~DSPFW_HPLL_CURSOR_MASK;
		reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
		I915_WRITE(DSPFW3, reg);
		DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);

		intel_set_memory_cxsr(dev_priv, true);
	} else {
		intel_set_memory_cxsr(dev_priv, false);
	}
}
static bool g4x_compute_wm0(struct drm_device *dev,
			    int plane,
			    const struct intel_watermark_params *display,
			    int display_latency_ns,
			    const struct intel_watermark_params *cursor,
			    int cursor_latency_ns,
			    int *plane_wm,
			    int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int htotal, hdisplay, clock, pixel_size;
	int line_time_us, line_count;
	int entries, tlb_miss;

	crtc = intel_get_crtc_for_plane(dev, plane);
	if (!intel_crtc_active(crtc)) {
		*cursor_wm = cursor->guard_size;
		*plane_wm = display->guard_size;
		return false;
	}

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	/* Use the small buffer method to calculate plane watermark */
	entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
	tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, display->cacheline_size);
	*plane_wm = entries + display->guard_size;
	if (*plane_wm > (int)display->max_wm)
		*plane_wm = display->max_wm;

	/* Use the large buffer method to calculate cursor watermark */
	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
	entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size;
	tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;
	if (*cursor_wm > (int)cursor->max_wm)
		*cursor_wm = (int)cursor->max_wm;

	return true;
}
/*
 * Check the wm result.
 *
 * If any calculated watermark value is larger than the maximum value that
 * can be programmed into the associated watermark register, that watermark
 * must be disabled.
 */
static bool g4x_check_srwm(struct drm_device *dev,
			   int display_wm, int cursor_wm,
			   const struct intel_watermark_params *display,
			   const struct intel_watermark_params *cursor)
{
	DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
		      display_wm, cursor_wm);

	if (display_wm > display->max_wm) {
		DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
			      display_wm, display->max_wm);
		return false;
	}

	if (cursor_wm > cursor->max_wm) {
		DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
			      cursor_wm, cursor->max_wm);
		return false;
	}

	if (!(display_wm || cursor_wm)) {
		DRM_DEBUG_KMS("SR latency is 0, disabling\n");
		return false;
	}

	return true;
}
static bool g4x_compute_srwm(struct drm_device *dev,
			     int plane,
			     int latency_ns,
			     const struct intel_watermark_params *display,
			     const struct intel_watermark_params *cursor,
			     int *display_wm, int *cursor_wm)
{
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	int hdisplay, htotal, pixel_size, clock;
	unsigned long line_time_us;
	int line_count, line_size;
	int small, large;
	int entries;

	if (!latency_ns) {
		*display_wm = *cursor_wm = 0;
		return false;
	}

	crtc = intel_get_crtc_for_plane(dev, plane);
	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	clock = adjusted_mode->crtc_clock;
	htotal = adjusted_mode->crtc_htotal;
	hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;

	line_time_us = max(htotal * 1000 / clock, 1);
	line_count = (latency_ns / line_time_us + 1000) / 1000;
	line_size = hdisplay * pixel_size;

	/* Use the minimum of the small and large buffer method for primary */
	small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	large = line_count * line_size;

	entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
	*display_wm = entries + display->guard_size;

	/* calculate the self-refresh watermark for display cursor */
	entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width;
	entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
	*cursor_wm = entries + cursor->guard_size;

	return g4x_check_srwm(dev,
			      *display_wm, *cursor_wm,
			      display, cursor);
}
static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
				      int pixel_size,
				      int *prec_mult,
				      int *drain_latency)
{
	struct drm_device *dev = crtc->dev;
	int entries;
	int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;

	if (WARN(clock == 0, "Pixel clock is zero!\n"))
		return false;

	if (WARN(pixel_size == 0, "Pixel size is zero!\n"))
		return false;

	entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
	if (IS_CHERRYVIEW(dev))
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
					       DRAIN_LATENCY_PRECISION_16;
	else
		*prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
					       DRAIN_LATENCY_PRECISION_32;
	*drain_latency = (64 * (*prec_mult) * 4) / entries;

	if (*drain_latency > DRAIN_LATENCY_MASK)
		*drain_latency = DRAIN_LATENCY_MASK;

	return true;
}
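/*
 * Worked example (illustrative numbers): at a 148500 kHz pixel clock and
 * 4 bytes per pixel, entries = DIV_ROUND_UP(148500, 1000) * 4 = 596.
 * That exceeds 128, so on VLV the high precision multiplier
 * DRAIN_LATENCY_PRECISION_64 is selected and the computed drain latency
 * is (64 * 64 * 4) / 596 = 27.
 */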
/*
 * Update drain latency registers of memory arbiter
 *
 * Valleyview SoC has a new memory arbiter and needs drain latency registers
 * to be programmed. Each plane has a drain latency multiplier and a drain
 * latency value.
 */
static void vlv_update_drain_latency(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pixel_size;
	int drain_latency;
	enum pipe pipe = intel_crtc->pipe;
	int plane_prec, prec_mult, plane_dl;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
		   DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
		   (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));

	if (!intel_crtc_active(crtc)) {
		I915_WRITE(VLV_DDL(pipe), plane_dl);
		return;
	}

	/* Primary plane Drain Latency */
	pixel_size = crtc->primary->fb->bits_per_pixel / 8;	/* BPP */
	if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_PLANE_PRECISION_HIGH :
					   DDL_PLANE_PRECISION_LOW;
		plane_dl |= plane_prec | drain_latency;
	}

	/* Cursor Drain Latency
	 * BPP is always 4 for cursor
	 */
	pixel_size = 4;

	/* Program cursor DL only if it is enabled */
	if (intel_crtc->cursor_base &&
	    vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_CURSOR_PRECISION_HIGH :
					   DDL_CURSOR_PRECISION_LOW;
		plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
	}

	I915_WRITE(VLV_DDL(pipe), plane_dl);
}
#define single_plane_enabled(mask) is_power_of_2(mask)
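/*
 * A mask with exactly one pipe bit set is a power of two, e.g. 1 << PIPE_A
 * or 1 << PIPE_B, while a mask with two active pipes (e.g. 0x3) is not;
 * the callers below therefore only compute self-refresh watermarks for
 * single-pipe configurations.
 */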
static void valleyview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void cherryview_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, planec_wm;
	int cursora_wm, cursorb_wm, cursorc_wm;
	int plane_sr, cursor_sr;
	int ignore_plane_sr, ignore_cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	vlv_update_drain_latency(crtc);

	if (g4x_compute_wm0(dev, PIPE_A,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (g4x_compute_wm0(dev, PIPE_C,
			    &valleyview_wm_info, pessimal_latency_ns,
			    &valleyview_cursor_wm_info, pessimal_latency_ns,
			    &planec_wm, &cursorc_wm))
		enabled |= 1 << PIPE_C;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &plane_sr, &ignore_cursor_sr) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     2*sr_latency_ns,
			     &valleyview_wm_info,
			     &valleyview_cursor_wm_info,
			     &ignore_plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, "
		      "SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      planec_wm, cursorc_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
	I915_WRITE(DSPFW9_CHV,
		   (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK |
					      DSPFW_CURSORC_MASK)) |
		   (planec_wm << DSPFW_PLANEC_SHIFT) |
		   (cursorc_wm << DSPFW_CURSORC_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void valleyview_update_sprite_wm(struct drm_plane *plane,
					struct drm_crtc *crtc,
					uint32_t sprite_width,
					uint32_t sprite_height,
					int pixel_size,
					bool enabled, bool scaled)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_plane(plane)->pipe;
	int sprite = to_intel_plane(plane)->plane;
	int drain_latency;
	int plane_prec;
	int sprite_dl;
	int prec_mult;
	const int high_precision = IS_CHERRYVIEW(dev) ?
		DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;

	sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
		    (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));

	if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
						 &drain_latency)) {
		plane_prec = (prec_mult == high_precision) ?
					   DDL_SPRITE_PRECISION_HIGH(sprite) :
					   DDL_SPRITE_PRECISION_LOW(sprite);
		sprite_dl |= plane_prec |
			     (drain_latency << DDL_SPRITE_SHIFT(sprite));
	}

	I915_WRITE(VLV_DDL(pipe), sprite_dl);
}
static void g4x_update_wm(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	static const int sr_latency_ns = 12000;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
	int plane_sr, cursor_sr;
	unsigned int enabled = 0;
	bool cxsr_enabled;

	if (g4x_compute_wm0(dev, PIPE_A,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planea_wm, &cursora_wm))
		enabled |= 1 << PIPE_A;

	if (g4x_compute_wm0(dev, PIPE_B,
			    &g4x_wm_info, pessimal_latency_ns,
			    &g4x_cursor_wm_info, pessimal_latency_ns,
			    &planeb_wm, &cursorb_wm))
		enabled |= 1 << PIPE_B;

	if (single_plane_enabled(enabled) &&
	    g4x_compute_srwm(dev, ffs(enabled) - 1,
			     sr_latency_ns,
			     &g4x_wm_info,
			     &g4x_cursor_wm_info,
			     &plane_sr, &cursor_sr)) {
		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		intel_set_memory_cxsr(dev_priv, false);
		plane_sr = cursor_sr = 0;
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, "
		      "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
		      planea_wm, cursora_wm,
		      planeb_wm, cursorb_wm,
		      plane_sr, cursor_sr);

	I915_WRITE(DSPFW1,
		   (plane_sr << DSPFW_SR_SHIFT) |
		   (cursorb_wm << DSPFW_CURSORB_SHIFT) |
		   (planeb_wm << DSPFW_PLANEB_SHIFT) |
		   (planea_wm << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2,
		   (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) |
		   (cursora_wm << DSPFW_CURSORA_SHIFT));
	/* HPLL off in SR has some issues on G4x... disable it */
	I915_WRITE(DSPFW3,
		   (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) |
		   (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i965_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	int srwm = 1;
	int cursor_sr = 16;
	bool cxsr_enabled;

	/* Calc sr entries for one plane configs */
	crtc = single_enabled_crtc(dev);
	if (crtc) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 12000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(crtc)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(crtc)->config.pipe_src_w;
		int pixel_size = crtc->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
		srwm = I965_FIFO_SIZE - entries;
		if (srwm < 0)
			srwm = 1;
		srwm &= 0x1ff;
		DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
			      entries, srwm);

		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * to_intel_crtc(crtc)->cursor_width;
		entries = DIV_ROUND_UP(entries,
				       i965_cursor_wm_info.cacheline_size);
		cursor_sr = i965_cursor_wm_info.fifo_size -
			(entries + i965_cursor_wm_info.guard_size);

		if (cursor_sr > i965_cursor_wm_info.max_wm)
			cursor_sr = i965_cursor_wm_info.max_wm;

		DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
			      "cursor %d\n", srwm, cursor_sr);

		cxsr_enabled = true;
	} else {
		cxsr_enabled = false;
		/* Turn off self refresh if both pipes are enabled */
		intel_set_memory_cxsr(dev_priv, false);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
		      srwm);

	/* 965 has limitations... */
	I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
		   (8 << DSPFW_CURSORB_SHIFT) |
		   (8 << DSPFW_PLANEB_SHIFT) |
		   (8 << DSPFW_PLANEA_SHIFT));
	I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) |
		   (8 << DSPFW_PLANEC_SHIFT_OLD));
	/* update cursor SR watermark */
	I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));

	if (cxsr_enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i9xx_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct intel_watermark_params *wm_info;
	uint32_t fwater_lo;
	uint32_t fwater_hi;
	int cwm, srwm = 1;
	int fifo_size;
	int planea_wm, planeb_wm;
	struct drm_crtc *crtc, *enabled = NULL;

	if (IS_I945GM(dev))
		wm_info = &i945_wm_info;
	else if (!IS_GEN2(dev))
		wm_info = &i915_wm_info;
	else
		wm_info = &i830_a_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 0);
	crtc = intel_get_crtc_for_plane(dev, 0);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;

		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		enabled = crtc;
	} else {
		planea_wm = fifo_size - wm_info->guard_size;
		if (planea_wm > (long)wm_info->max_wm)
			planea_wm = wm_info->max_wm;
	}

	if (IS_GEN2(dev))
		wm_info = &i830_bc_wm_info;

	fifo_size = dev_priv->display.get_fifo_size(dev, 1);
	crtc = intel_get_crtc_for_plane(dev, 1);
	if (intel_crtc_active(crtc)) {
		const struct drm_display_mode *adjusted_mode;
		int cpp = crtc->primary->fb->bits_per_pixel / 8;

		if (IS_GEN2(dev))
			cpp = 4;

		adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
		planeb_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
					       wm_info, fifo_size, cpp,
					       pessimal_latency_ns);
		if (enabled == NULL)
			enabled = crtc;
		else
			enabled = NULL;
	} else {
		planeb_wm = fifo_size - wm_info->guard_size;
		if (planeb_wm > (long)wm_info->max_wm)
			planeb_wm = wm_info->max_wm;
	}

	DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);

	if (IS_I915GM(dev) && enabled) {
		struct drm_i915_gem_object *obj;

		obj = intel_fb_obj(enabled->primary->fb);

		/* self-refresh seems busted with untiled */
		if (obj->tiling_mode == I915_TILING_NONE)
			enabled = NULL;
	}

	/*
	 * Overlay gets an aggressive default since video jitter is bad.
	 */
	cwm = 2;

	/* Play safe and disable self-refresh before adjusting watermarks. */
	intel_set_memory_cxsr(dev_priv, false);

	/* Calc sr entries for one plane configs */
	if (HAS_FW_BLC(dev) && enabled) {
		/* self-refresh has much higher latency */
		static const int sr_latency_ns = 6000;
		const struct drm_display_mode *adjusted_mode =
			&to_intel_crtc(enabled)->config.adjusted_mode;
		int clock = adjusted_mode->crtc_clock;
		int htotal = adjusted_mode->crtc_htotal;
		int hdisplay = to_intel_crtc(enabled)->config.pipe_src_w;
		int pixel_size = enabled->primary->fb->bits_per_pixel / 8;
		unsigned long line_time_us;
		int entries;

		line_time_us = max(htotal * 1000 / clock, 1);

		/* Use ns/us then divide to preserve precision */
		entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
			pixel_size * hdisplay;
		entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
		DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
		srwm = wm_info->fifo_size - entries;
		if (srwm < 0)
			srwm = 1;

		if (IS_I945G(dev) || IS_I945GM(dev))
			I915_WRITE(FW_BLC_SELF,
				   FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
		else if (IS_I915GM(dev))
			I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
	}

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
		      planea_wm, planeb_wm, cwm, srwm);

	fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
	fwater_hi = (cwm & 0x1f);

	/* Set request length to 8 cachelines per fetch */
	fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
	fwater_hi = fwater_hi | (1 << 8);

	I915_WRITE(FW_BLC, fwater_lo);
	I915_WRITE(FW_BLC2, fwater_hi);

	if (enabled)
		intel_set_memory_cxsr(dev_priv, true);
}
static void i845_update_wm(struct drm_crtc *unused_crtc)
{
	struct drm_device *dev = unused_crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	const struct drm_display_mode *adjusted_mode;
	uint32_t fwater_lo;
	int planea_wm;

	crtc = single_enabled_crtc(dev);
	if (crtc == NULL)
		return;

	adjusted_mode = &to_intel_crtc(crtc)->config.adjusted_mode;
	planea_wm = intel_calculate_wm(adjusted_mode->crtc_clock,
				       &i845_wm_info,
				       dev_priv->display.get_fifo_size(dev, 0),
				       4, pessimal_latency_ns);
	fwater_lo = I915_READ(FW_BLC) & ~0xfff;
	fwater_lo |= (3<<8) | planea_wm;

	DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);

	I915_WRITE(FW_BLC, fwater_lo);
}
static uint32_t ilk_pipe_pixel_rate(struct drm_device *dev,
				    struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pixel_rate;

	pixel_rate = intel_crtc->config.adjusted_mode.crtc_clock;

	/* We only use IF-ID interlacing. If we ever use PF-ID we'll need to
	 * adjust the pixel_rate here. */

	if (intel_crtc->config.pch_pfit.enabled) {
		uint64_t pipe_w, pipe_h, pfit_w, pfit_h;
		uint32_t pfit_size = intel_crtc->config.pch_pfit.size;

		pipe_w = intel_crtc->config.pipe_src_w;
		pipe_h = intel_crtc->config.pipe_src_h;
		pfit_w = (pfit_size >> 16) & 0xFFFF;
		pfit_h = pfit_size & 0xFFFF;
		if (pipe_w < pfit_w)
			pipe_w = pfit_w;
		if (pipe_h < pfit_h)
			pipe_h = pfit_h;

		pixel_rate = div_u64((uint64_t) pixel_rate * pipe_w * pipe_h,
				     pfit_w * pfit_h);
	}

	return pixel_rate;
}
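/*
 * Illustrative example: with a 1920x1080 pipe downscaled by the panel
 * fitter to 1280x720, the effective rate becomes
 * pixel_rate * (1920 * 1080) / (1280 * 720), i.e. 2.25x the nominal pixel
 * rate, since the same frame time now covers fewer output pixels.
 */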
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint64_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (uint64_t) pixel_rate * bytes_per_pixel * latency;
	ret = DIV_ROUND_UP_ULL(ret, 64 * 10000) + 2;

	return ret;
}
/* latency must be in 0.1us units. */
static uint32_t ilk_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
			       uint32_t horiz_pixels, uint8_t bytes_per_pixel,
			       uint32_t latency)
{
	uint32_t ret;

	if (WARN(latency == 0, "Latency value missing\n"))
		return UINT_MAX;

	ret = (latency * pixel_rate) / (pipe_htotal * 10000);
	ret = (ret + 1) * horiz_pixels * bytes_per_pixel;
	ret = DIV_ROUND_UP(ret, 64) + 2;

	return ret;
}
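/*
 * Worked example (illustrative numbers): with pixel_rate = 108000 (kHz),
 * 4 bytes per pixel and latency = 20 (2us in 0.1us units), method 1 yields
 * DIV_ROUND_UP_ULL(108000 * 4 * 20, 64 * 10000) + 2 = 14 + 2 = 16 FIFO
 * lines. Method 2 instead rounds the latency up to whole scanout lines of
 * the mode; the LP watermarks below take the minimum of the two.
 */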
static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
			   uint8_t bytes_per_pixel)
{
	return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
}
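/*
 * Example (illustrative): a primary watermark of 64 FIFO lines on a
 * 1920-pixel-wide, 4 bytes-per-pixel plane gives
 * DIV_ROUND_UP(64 * 64, 1920 * 4) + 2 = 1 + 2 = 3 compressed lines.
 */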
struct skl_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate; /* in KHz */
	struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
	struct intel_plane_wm_parameters cursor;
};

struct ilk_pipe_wm_parameters {
	bool active;
	uint32_t pipe_htotal;
	uint32_t pixel_rate;
	struct intel_plane_wm_parameters pri;
	struct intel_plane_wm_parameters spr;
	struct intel_plane_wm_parameters cur;
};

struct ilk_wm_maximums {
	uint16_t pri;
	uint16_t spr;
	uint16_t cur;
	uint16_t fbc;
};

/* used in computing the new watermarks state */
struct intel_wm_config {
	unsigned int num_pipes_active;
	bool sprites_enabled;
	bool sprites_scaled;
};
/*
 * For both WM_PIPE and WM_LP.
 * mem_value must be in 0.1us units.
 */
static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params,
				   uint32_t mem_value,
				   bool is_lp)
{
	uint32_t method1, method2;

	if (!params->active || !params->pri.enabled)
		return 0;

	method1 = ilk_wm_method1(params->pixel_rate,
				 params->pri.bytes_per_pixel,
				 mem_value);

	if (!is_lp)
		return method1;

	method2 = ilk_wm_method2(params->pixel_rate,
				 params->pipe_htotal,
				 params->pri.horiz_pixels,
				 params->pri.bytes_per_pixel,
				 mem_value);

	return min(method1, method2);
}
2024 * For both WM_PIPE and WM_LP.
2025 * mem_value must be in 0.1us units.
2027 static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params,
2030 uint32_t method1, method2;
2032 if (!params->active || !params->spr.enabled)
2033 return 0;
2035 method1 = ilk_wm_method1(params->pixel_rate,
2036 params->spr.bytes_per_pixel,
2037 mem_value);
2038 method2 = ilk_wm_method2(params->pixel_rate,
2039 params->pipe_htotal,
2040 params->spr.horiz_pixels,
2041 params->spr.bytes_per_pixel,
2042 mem_value);
2043 return min(method1, method2);
2047 * For both WM_PIPE and WM_LP.
2048 * mem_value must be in 0.1us units.
2050 static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params,
2051 uint32_t mem_value)
2053 if (!params->active || !params->cur.enabled)
2054 return 0;
2056 return ilk_wm_method2(params->pixel_rate,
2057 params->pipe_htotal,
2058 params->cur.horiz_pixels,
2059 params->cur.bytes_per_pixel,
2060 mem_value);
2063 /* Only for WM_LP. */
2064 static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params,
2065 uint32_t pri_val)
2067 if (!params->active || !params->pri.enabled)
2068 return 0;
2070 return ilk_wm_fbc(pri_val,
2071 params->pri.horiz_pixels,
2072 params->pri.bytes_per_pixel);
2075 static unsigned int ilk_display_fifo_size(const struct drm_device *dev)
2077 if (INTEL_INFO(dev)->gen >= 8)
2078 return 512;
2079 else if (INTEL_INFO(dev)->gen >= 7)
2080 return 768;
2081 else
2082 return 512;
2085 static unsigned int ilk_plane_wm_reg_max(const struct drm_device *dev,
2086 int level, bool is_sprite)
2088 if (INTEL_INFO(dev)->gen >= 8)
2089 /* BDW primary/sprite plane watermarks */
2090 return level == 0 ? 255 : 2047;
2091 else if (INTEL_INFO(dev)->gen >= 7)
2092 /* IVB/HSW primary/sprite plane watermarks */
2093 return level == 0 ? 127 : 1023;
2094 else if (!is_sprite)
2095 /* ILK/SNB primary plane watermarks */
2096 return level == 0 ? 127 : 511;
2098 /* ILK/SNB sprite plane watermarks */
2099 return level == 0 ? 63 : 255;
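/*
 * Summary of the limits encoded above, as (WM0 max / WM1+ max):
 * BDW pri/spr 255/2047, IVB/HSW pri/spr 127/1023,
 * ILK/SNB pri 127/511, ILK/SNB spr 63/255.
 */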
2102 static unsigned int ilk_cursor_wm_reg_max(const struct drm_device *dev,
2105 if (INTEL_INFO(dev)->gen >= 7)
2106 return level == 0 ? 63 : 255;
2108 return level == 0 ? 31 : 63;
2111 static unsigned int ilk_fbc_wm_reg_max(const struct drm_device *dev)
2113 if (INTEL_INFO(dev)->gen >= 8)
2114 return 31;
2115 else
2116 return 15;
2119 /* Calculate the maximum primary/sprite plane watermark */
2120 static unsigned int ilk_plane_wm_max(const struct drm_device *dev,
2122 const struct intel_wm_config *config,
2123 enum intel_ddb_partitioning ddb_partitioning,
2126 unsigned int fifo_size = ilk_display_fifo_size(dev);
2128 /* if sprites aren't enabled, sprites get nothing */
2129 if (is_sprite && !config->sprites_enabled)
2130 return 0;
2132 /* HSW allows LP1+ watermarks even with multiple pipes */
2133 if (level == 0 || config->num_pipes_active > 1) {
2134 fifo_size /= INTEL_INFO(dev)->num_pipes;
2137 * For some reason the non self refresh
2138 * FIFO size is only half of the self
2139 * refresh FIFO size on ILK/SNB.
2141 if (INTEL_INFO(dev)->gen <= 6)
2142 fifo_size /= 2;
2145 if (config->sprites_enabled) {
2146 /* level 0 is always calculated with 1:1 split */
2147 if (level > 0 && ddb_partitioning == INTEL_DDB_PART_5_6) {
2148 if (is_sprite)
2149 fifo_size *= 5;
2150 fifo_size /= 6;
2151 } else {
2152 fifo_size /= 2;
2153 }
2154 }
2156 /* clamp to max that the registers can hold */
2157 return min(fifo_size, ilk_plane_wm_reg_max(dev, level, is_sprite));
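/*
 * E.g. ILK, two active pipes, level 0, primary plane: 512 FIFO blocks
 * / 2 pipes / 2 (non-SR halving above) = 128, which the min() then
 * clamps to the 127 register maximum.
 */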
2160 /* Calculate the maximum cursor plane watermark */
2161 static unsigned int ilk_cursor_wm_max(const struct drm_device *dev,
2163 const struct intel_wm_config *config)
2165 /* HSW LP1+ watermarks w/ multiple pipes */
2166 if (level > 0 && config->num_pipes_active > 1)
2167 return 64;
2169 /* otherwise just report max that registers can hold */
2170 return ilk_cursor_wm_reg_max(dev, level);
2173 static void ilk_compute_wm_maximums(const struct drm_device *dev,
2175 const struct intel_wm_config *config,
2176 enum intel_ddb_partitioning ddb_partitioning,
2177 struct ilk_wm_maximums *max)
2179 max->pri = ilk_plane_wm_max(dev, level, config, ddb_partitioning, false);
2180 max->spr = ilk_plane_wm_max(dev, level, config, ddb_partitioning, true);
2181 max->cur = ilk_cursor_wm_max(dev, level, config);
2182 max->fbc = ilk_fbc_wm_reg_max(dev);
2185 static void ilk_compute_wm_reg_maximums(struct drm_device *dev,
2187 struct ilk_wm_maximums *max)
2189 max->pri = ilk_plane_wm_reg_max(dev, level, false);
2190 max->spr = ilk_plane_wm_reg_max(dev, level, true);
2191 max->cur = ilk_cursor_wm_reg_max(dev, level);
2192 max->fbc = ilk_fbc_wm_reg_max(dev);
2195 static bool ilk_validate_wm_level(int level,
2196 const struct ilk_wm_maximums *max,
2197 struct intel_wm_level *result)
2201 /* already determined to be invalid? */
2202 if (!result->enable)
2205 result->enable = result->pri_val <= max->pri &&
2206 result->spr_val <= max->spr &&
2207 result->cur_val <= max->cur;
2209 ret = result->enable;
2212 * HACK until we can pre-compute everything,
2213 * and thus fail gracefully if LP0 watermarks
2214 * are exceeded.
2215 */
2216 if (level == 0 && !result->enable) {
2217 if (result->pri_val > max->pri)
2218 DRM_DEBUG_KMS("Primary WM%d too large %u (max %u)\n",
2219 level, result->pri_val, max->pri);
2220 if (result->spr_val > max->spr)
2221 DRM_DEBUG_KMS("Sprite WM%d too large %u (max %u)\n",
2222 level, result->spr_val, max->spr);
2223 if (result->cur_val > max->cur)
2224 DRM_DEBUG_KMS("Cursor WM%d too large %u (max %u)\n",
2225 level, result->cur_val, max->cur);
2227 result->pri_val = min_t(uint32_t, result->pri_val, max->pri);
2228 result->spr_val = min_t(uint32_t, result->spr_val, max->spr);
2229 result->cur_val = min_t(uint32_t, result->cur_val, max->cur);
2230 result->enable = true;
2236 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2238 const struct ilk_pipe_wm_parameters *p,
2239 struct intel_wm_level *result)
2241 uint16_t pri_latency = dev_priv->wm.pri_latency[level];
2242 uint16_t spr_latency = dev_priv->wm.spr_latency[level];
2243 uint16_t cur_latency = dev_priv->wm.cur_latency[level];
2245 /* WM1+ latency values stored in 0.5us units */
2246 if (level > 0) {
2247 pri_latency *= 5;
2248 spr_latency *= 5;
2249 cur_latency *= 5;
2250 }
2252 result->pri_val = ilk_compute_pri_wm(p, pri_latency, level);
2253 result->spr_val = ilk_compute_spr_wm(p, spr_latency);
2254 result->cur_val = ilk_compute_cur_wm(p, cur_latency);
2255 result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val);
2256 result->enable = true;
2259 static uint32_t
2260 hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2262 struct drm_i915_private *dev_priv = dev->dev_private;
2263 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2264 struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
2265 u32 linetime, ips_linetime;
2267 if (!intel_crtc_active(crtc))
2268 return 0;
2270 /* The watermarks are computed based on how long it takes to fill a single
2271 * row at the given clock rate, multiplied by 8.
2272 */
2273 linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2274 mode->crtc_clock);
2275 ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8,
2276 intel_ddi_get_cdclk_freq(dev_priv));
2278 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2279 PIPE_WM_LINETIME_TIME(linetime);
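/*
 * Illustrative numbers: crtc_htotal = 2200 and crtc_clock = 148500 kHz
 * give linetime = DIV_ROUND_CLOSEST(2200 * 8000, 148500) = 119, i.e.
 * the ~14.8us line time expressed in 1/8 us units.
 */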
2282 static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2286 if (IS_GEN9(dev)) {
2287 uint32_t val;
2288 int ret, i;
2289 int level, max_level = ilk_wm_max_level(dev);
2291 /* read the first set of memory latencies[0:3] */
2292 val = 0; /* data0 to be programmed to 0 for first set */
2293 mutex_lock(&dev_priv->rps.hw_lock);
2294 ret = sandybridge_pcode_read(dev_priv,
2295 GEN9_PCODE_READ_MEM_LATENCY,
2296 &val);
2297 mutex_unlock(&dev_priv->rps.hw_lock);
2299 if (ret) {
2300 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2301 return;
2302 }
2304 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2305 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2306 GEN9_MEM_LATENCY_LEVEL_MASK;
2307 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2308 GEN9_MEM_LATENCY_LEVEL_MASK;
2309 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2310 GEN9_MEM_LATENCY_LEVEL_MASK;
2312 /* read the second set of memory latencies[4:7] */
2313 val = 1; /* data0 to be programmed to 1 for second set */
2314 mutex_lock(&dev_priv->rps.hw_lock);
2315 ret = sandybridge_pcode_read(dev_priv,
2316 GEN9_PCODE_READ_MEM_LATENCY,
2317 &val);
2318 mutex_unlock(&dev_priv->rps.hw_lock);
2319 if (ret) {
2320 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2321 return;
2322 }
2324 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2325 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2326 GEN9_MEM_LATENCY_LEVEL_MASK;
2327 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2328 GEN9_MEM_LATENCY_LEVEL_MASK;
2329 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2330 GEN9_MEM_LATENCY_LEVEL_MASK;
2333 * punit doesn't take into account the read latency so we need
2334 * to add 2us to the various latency levels we retrieve from
2335 * the punit.
2336 * - W0 is a bit special in that it's the only level that
2337 * can't be disabled if we want to have display working, so
2338 * we always add 2us there.
2339 * - For levels >=1, punit returns 0us latency when they are
2340 * disabled, so we respect that and don't add 2us then
2342 * Additionally, if a level n (n > 1) has a 0us latency, all
2343 * levels m (m >= n) need to be disabled. We make sure to
2344 * sanitize the values out of the punit to satisfy this
2345 * requirement.
2346 */
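/*
 * Example of the sanitization below (hypothetical punit values):
 * {4, 6, 0, 10, ...} becomes {6, 8, 0, 0, ...} - levels 0 and 1 get
 * the +2us read latency, level 2 reads back as disabled (0us), so it
 * and every deeper level is forced off.
 */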
2347 wm[0] += 2;
2348 for (level = 1; level <= max_level; level++)
2349 if (wm[level] != 0)
2350 wm[level] += 2;
2351 else {
2352 for (i = level + 1; i <= max_level; i++)
2353 wm[i] = 0;
2354 break;
2355 }
2357 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2358 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2360 wm[0] = (sskpd >> 56) & 0xFF;
2361 if (wm[0] == 0)
2362 wm[0] = sskpd & 0xF;
2363 wm[1] = (sskpd >> 4) & 0xFF;
2364 wm[2] = (sskpd >> 12) & 0xFF;
2365 wm[3] = (sskpd >> 20) & 0x1FF;
2366 wm[4] = (sskpd >> 32) & 0x1FF;
2367 } else if (INTEL_INFO(dev)->gen >= 6) {
2368 uint32_t sskpd = I915_READ(MCH_SSKPD);
2370 wm[0] = (sskpd >> SSKPD_WM0_SHIFT) & SSKPD_WM_MASK;
2371 wm[1] = (sskpd >> SSKPD_WM1_SHIFT) & SSKPD_WM_MASK;
2372 wm[2] = (sskpd >> SSKPD_WM2_SHIFT) & SSKPD_WM_MASK;
2373 wm[3] = (sskpd >> SSKPD_WM3_SHIFT) & SSKPD_WM_MASK;
2374 } else if (INTEL_INFO(dev)->gen >= 5) {
2375 uint32_t mltr = I915_READ(MLTR_ILK);
2377 /* ILK primary LP0 latency is 700 ns */
2378 wm[0] = 7;
2379 wm[1] = (mltr >> MLTR_WM1_SHIFT) & ILK_SRLT_MASK;
2380 wm[2] = (mltr >> MLTR_WM2_SHIFT) & ILK_SRLT_MASK;
2384 static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2386 /* ILK sprite LP0 latency is 1300 ns */
2387 if (INTEL_INFO(dev)->gen == 5)
2388 wm[0] = 13;
2391 static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2393 /* ILK cursor LP0 latency is 1300 ns */
2394 if (INTEL_INFO(dev)->gen == 5)
2395 wm[0] = 13;
2397 /* WaDoubleCursorLP3Latency:ivb */
2398 if (IS_IVYBRIDGE(dev))
2399 wm[3] *= 2;
2402 int ilk_wm_max_level(const struct drm_device *dev)
2404 /* how many WM levels are we expecting */
2405 if (IS_GEN9(dev))
2406 return 7;
2407 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2408 return 4;
2409 else if (INTEL_INFO(dev)->gen >= 6)
2410 return 3;
2411 else
2412 return 2;
2415 static void intel_print_wm_latency(struct drm_device *dev,
2417 const uint16_t wm[8])
2419 int level, max_level = ilk_wm_max_level(dev);
2421 for (level = 0; level <= max_level; level++) {
2422 unsigned int latency = wm[level];
2424 if (latency == 0) {
2425 DRM_ERROR("%s WM%d latency not provided\n",
2426 name, level);
2427 continue;
2428 }
2430 /*
2431 * - latencies are in us on gen9.
2432 * - before then, WM1+ latency values are in 0.5us units
2433 */
2434 if (IS_GEN9(dev))
2435 latency *= 10;
2436 else if (level > 0)
2437 latency *= 5;
2439 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
2440 name, level, wm[level],
2441 latency / 10, latency % 10);
2445 static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2446 uint16_t wm[5], uint16_t min)
2448 int level, max_level = ilk_wm_max_level(dev_priv->dev);
2450 if (wm[0] >= min)
2451 return false;
2453 wm[0] = max(wm[0], min);
2454 for (level = 1; level <= max_level; level++)
2455 wm[level] = max_t(uint16_t, wm[level], DIV_ROUND_UP(min, 5));
2457 return true;
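/*
 * E.g. min = 12 (1.2us in 0.1us units): WM0 is raised to at least 12,
 * while WM1+ - stored in 0.5us units - are raised to at least
 * DIV_ROUND_UP(12, 5) = 3, i.e. a 1.5us floor.
 */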
2460 static void snb_wm_latency_quirk(struct drm_device *dev)
2462 struct drm_i915_private *dev_priv = dev->dev_private;
2466 * The BIOS provided WM memory latency values are often
2467 * inadequate for high resolution displays. Adjust them.
2469 changed = ilk_increase_wm_latency(dev_priv, dev_priv->wm.pri_latency, 12) |
2470 ilk_increase_wm_latency(dev_priv, dev_priv->wm.spr_latency, 12) |
2471 ilk_increase_wm_latency(dev_priv, dev_priv->wm.cur_latency, 12);
2473 if (!changed)
2474 return;
2476 DRM_DEBUG_KMS("WM latency values increased to avoid potential underruns\n");
2477 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2478 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2479 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2482 static void ilk_setup_wm_latency(struct drm_device *dev)
2484 struct drm_i915_private *dev_priv = dev->dev_private;
2486 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2488 memcpy(dev_priv->wm.spr_latency, dev_priv->wm.pri_latency,
2489 sizeof(dev_priv->wm.pri_latency));
2490 memcpy(dev_priv->wm.cur_latency, dev_priv->wm.pri_latency,
2491 sizeof(dev_priv->wm.pri_latency));
2493 intel_fixup_spr_wm_latency(dev, dev_priv->wm.spr_latency);
2494 intel_fixup_cur_wm_latency(dev, dev_priv->wm.cur_latency);
2496 intel_print_wm_latency(dev, "Primary", dev_priv->wm.pri_latency);
2497 intel_print_wm_latency(dev, "Sprite", dev_priv->wm.spr_latency);
2498 intel_print_wm_latency(dev, "Cursor", dev_priv->wm.cur_latency);
2500 if (IS_GEN6(dev))
2501 snb_wm_latency_quirk(dev);
2504 static void skl_setup_wm_latency(struct drm_device *dev)
2506 struct drm_i915_private *dev_priv = dev->dev_private;
2508 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2509 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2512 static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2513 struct ilk_pipe_wm_parameters *p)
2515 struct drm_device *dev = crtc->dev;
2516 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2517 enum pipe pipe = intel_crtc->pipe;
2518 struct drm_plane *plane;
2520 if (!intel_crtc_active(crtc))
2521 return;
2523 p->active = true;
2524 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
2525 p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
2526 p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8;
2527 p->cur.bytes_per_pixel = 4;
2528 p->pri.horiz_pixels = intel_crtc->config.pipe_src_w;
2529 p->cur.horiz_pixels = intel_crtc->cursor_width;
2530 /* TODO: for now, assume primary and cursor planes are always enabled. */
2531 p->pri.enabled = true;
2532 p->cur.enabled = true;
2534 drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) {
2535 struct intel_plane *intel_plane = to_intel_plane(plane);
2537 if (intel_plane->pipe == pipe) {
2538 p->spr = intel_plane->wm;
2539 break;
2544 static void ilk_compute_wm_config(struct drm_device *dev,
2545 struct intel_wm_config *config)
2547 struct intel_crtc *intel_crtc;
2549 /* Compute the currently _active_ config */
2550 for_each_intel_crtc(dev, intel_crtc) {
2551 const struct intel_pipe_wm *wm = &intel_crtc->wm.active;
2553 if (!wm->pipe_enabled)
2554 continue;
2556 config->sprites_enabled |= wm->sprites_enabled;
2557 config->sprites_scaled |= wm->sprites_scaled;
2558 config->num_pipes_active++;
2562 /* Compute new watermarks for the pipe */
2563 static bool intel_compute_pipe_wm(struct drm_crtc *crtc,
2564 const struct ilk_pipe_wm_parameters *params,
2565 struct intel_pipe_wm *pipe_wm)
2567 struct drm_device *dev = crtc->dev;
2568 const struct drm_i915_private *dev_priv = dev->dev_private;
2569 int level, max_level = ilk_wm_max_level(dev);
2570 /* LP0 watermark maximums depend on this pipe alone */
2571 struct intel_wm_config config = {
2572 .num_pipes_active = 1,
2573 .sprites_enabled = params->spr.enabled,
2574 .sprites_scaled = params->spr.scaled,
2576 struct ilk_wm_maximums max;
2578 pipe_wm->pipe_enabled = params->active;
2579 pipe_wm->sprites_enabled = params->spr.enabled;
2580 pipe_wm->sprites_scaled = params->spr.scaled;
2582 /* ILK/SNB: LP2+ watermarks only w/o sprites */
2583 if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled)
2584 max_level = 1;
2586 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */
2587 if (params->spr.scaled)
2588 max_level = 0;
2590 ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]);
2592 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2593 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc);
2595 /* LP0 watermarks always use 1/2 DDB partitioning */
2596 ilk_compute_wm_maximums(dev, 0, &config, INTEL_DDB_PART_1_2, &max);
2598 /* At least LP0 must be valid */
2599 if (!ilk_validate_wm_level(0, &max, &pipe_wm->wm[0]))
2600 return false;
2602 ilk_compute_wm_reg_maximums(dev, 1, &max);
2604 for (level = 1; level <= max_level; level++) {
2605 struct intel_wm_level wm = {};
2607 ilk_compute_wm_level(dev_priv, level, params, &wm);
2610 * Disable any watermark level that exceeds the
2611 * register maximums since such watermarks are
2612 * always invalid.
2613 */
2614 if (!ilk_validate_wm_level(level, &max, &wm))
2615 break;
2617 pipe_wm->wm[level] = wm;
2618 }
2620 return true;
2624 * Merge the watermarks from all active pipes for a specific level.
2626 static void ilk_merge_wm_level(struct drm_device *dev,
2628 struct intel_wm_level *ret_wm)
2630 const struct intel_crtc *intel_crtc;
2632 ret_wm->enable = true;
2634 for_each_intel_crtc(dev, intel_crtc) {
2635 const struct intel_pipe_wm *active = &intel_crtc->wm.active;
2636 const struct intel_wm_level *wm = &active->wm[level];
2638 if (!active->pipe_enabled)
2642 * The watermark values may have been used in the past,
2643 * so we must maintain them in the registers for some
2644 * time even if the level is now disabled.
2645 */
2646 if (!wm->enable)
2647 ret_wm->enable = false;
2649 ret_wm->pri_val = max(ret_wm->pri_val, wm->pri_val);
2650 ret_wm->spr_val = max(ret_wm->spr_val, wm->spr_val);
2651 ret_wm->cur_val = max(ret_wm->cur_val, wm->cur_val);
2652 ret_wm->fbc_val = max(ret_wm->fbc_val, wm->fbc_val);
2657 * Merge all low power watermarks for all active pipes.
2659 static void ilk_wm_merge(struct drm_device *dev,
2660 const struct intel_wm_config *config,
2661 const struct ilk_wm_maximums *max,
2662 struct intel_pipe_wm *merged)
2664 int level, max_level = ilk_wm_max_level(dev);
2665 int last_enabled_level = max_level;
2667 /* ILK/SNB/IVB: LP1+ watermarks only w/ single pipe */
2668 if ((INTEL_INFO(dev)->gen <= 6 || IS_IVYBRIDGE(dev)) &&
2669 config->num_pipes_active > 1)
2670 return;
2672 /* ILK: FBC WM must be disabled always */
2673 merged->fbc_wm_enabled = INTEL_INFO(dev)->gen >= 6;
2675 /* merge each WM1+ level */
2676 for (level = 1; level <= max_level; level++) {
2677 struct intel_wm_level *wm = &merged->wm[level];
2679 ilk_merge_wm_level(dev, level, wm);
2681 if (level > last_enabled_level)
2682 wm->enable = false;
2683 else if (!ilk_validate_wm_level(level, max, wm))
2684 /* make sure all following levels get disabled */
2685 last_enabled_level = level - 1;
2688 * The spec says it is preferred to disable
2689 * FBC WMs instead of disabling a WM level.
2691 if (wm->fbc_val > max->fbc) {
2692 if (wm->enable)
2693 merged->fbc_wm_enabled = false;
2694 wm->fbc_val = 0;
2695 }
2698 /* ILK: LP2+ must be disabled when FBC WM is disabled but FBC enabled */
2700 * FIXME this is racy. FBC might get enabled later.
2701 * What we should check here is whether FBC can be
2702 * enabled sometime later.
2704 if (IS_GEN5(dev) && !merged->fbc_wm_enabled && intel_fbc_enabled(dev)) {
2705 for (level = 2; level <= max_level; level++) {
2706 struct intel_wm_level *wm = &merged->wm[level];
2708 wm->enable = false;
2713 static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2715 /* LP1,LP2,LP3 levels are either 1,2,3 or 1,3,4 */
2716 return wm_lp + (wm_lp >= 2 && pipe_wm->wm[4].enable);
2719 /* The value we need to program into the WM_LPx latency field */
2720 static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2724 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2725 return 2 * level;
2726 else
2727 return dev_priv->wm.pri_latency[level];
2730 static void ilk_compute_wm_results(struct drm_device *dev,
2731 const struct intel_pipe_wm *merged,
2732 enum intel_ddb_partitioning partitioning,
2733 struct ilk_wm_values *results)
2735 struct intel_crtc *intel_crtc;
2738 results->enable_fbc_wm = merged->fbc_wm_enabled;
2739 results->partitioning = partitioning;
2741 /* LP1+ register values */
2742 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2743 const struct intel_wm_level *r;
2745 level = ilk_wm_lp_to_level(wm_lp, merged);
2747 r = &merged->wm[level];
2750 * Maintain the watermark values even if the level is
2751 * disabled. Doing otherwise could cause underruns.
2753 results->wm_lp[wm_lp - 1] =
2754 (ilk_wm_lp_latency(dev, level) << WM1_LP_LATENCY_SHIFT) |
2755 (r->pri_val << WM1_LP_SR_SHIFT) |
2756 r->cur_val;
2758 if (r->enable)
2759 results->wm_lp[wm_lp - 1] |= WM1_LP_SR_EN;
2761 if (INTEL_INFO(dev)->gen >= 8)
2762 results->wm_lp[wm_lp - 1] |=
2763 r->fbc_val << WM1_LP_FBC_SHIFT_BDW;
2764 else
2765 results->wm_lp[wm_lp - 1] |=
2766 r->fbc_val << WM1_LP_FBC_SHIFT;
2769 * Always set WM1S_LP_EN when spr_val != 0, even if the
2770 * level is disabled. Doing otherwise could cause underruns.
2772 if (INTEL_INFO(dev)->gen <= 6 && r->spr_val) {
2773 WARN_ON(wm_lp != 1);
2774 results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
2775 } else
2776 results->wm_lp_spr[wm_lp - 1] = r->spr_val;
2779 /* LP0 register values */
2780 for_each_intel_crtc(dev, intel_crtc) {
2781 enum pipe pipe = intel_crtc->pipe;
2782 const struct intel_wm_level *r =
2783 &intel_crtc->wm.active.wm[0];
2785 if (WARN_ON(!r->enable))
2786 continue;
2788 results->wm_linetime[pipe] = intel_crtc->wm.active.linetime;
2790 results->wm_pipe[pipe] =
2791 (r->pri_val << WM0_PIPE_PLANE_SHIFT) |
2792 (r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
2793 r->cur_val;
2797 /* Find the result with the highest level enabled. Check for enable_fbc_wm in
2798 * case both are at the same level. Prefer r1 in case they're the same. */
2799 static struct intel_pipe_wm *ilk_find_best_result(struct drm_device *dev,
2800 struct intel_pipe_wm *r1,
2801 struct intel_pipe_wm *r2)
2803 int level, max_level = ilk_wm_max_level(dev);
2804 int level1 = 0, level2 = 0;
2806 for (level = 1; level <= max_level; level++) {
2807 if (r1->wm[level].enable)
2808 level1 = level;
2809 if (r2->wm[level].enable)
2810 level2 = level;
2811 }
2813 if (level1 == level2) {
2814 if (r2->fbc_wm_enabled && !r1->fbc_wm_enabled)
2815 return r2;
2816 else
2817 return r1;
2818 } else if (level1 > level2) {
2819 return r1;
2820 } else {
2821 return r2;
2822 }
2825 /* dirty bits used to track which watermarks need changes */
2826 #define WM_DIRTY_PIPE(pipe) (1 << (pipe))
2827 #define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
2828 #define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
2829 #define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
2830 #define WM_DIRTY_FBC (1 << 24)
2831 #define WM_DIRTY_DDB (1 << 25)
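/* Resulting bit layout: bits 0-7 per-pipe WM0, bits 8-15 per-pipe
 * linetime, bits 16-18 LP1-LP3, bit 24 FBC, bit 25 DDB partitioning. */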
2833 static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
2834 const struct ilk_wm_values *old,
2835 const struct ilk_wm_values *new)
2837 unsigned int dirty = 0;
2841 for_each_pipe(dev_priv, pipe) {
2842 if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
2843 dirty |= WM_DIRTY_LINETIME(pipe);
2844 /* Must disable LP1+ watermarks too */
2845 dirty |= WM_DIRTY_LP_ALL;
2848 if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
2849 dirty |= WM_DIRTY_PIPE(pipe);
2850 /* Must disable LP1+ watermarks too */
2851 dirty |= WM_DIRTY_LP_ALL;
2855 if (old->enable_fbc_wm != new->enable_fbc_wm) {
2856 dirty |= WM_DIRTY_FBC;
2857 /* Must disable LP1+ watermarks too */
2858 dirty |= WM_DIRTY_LP_ALL;
2861 if (old->partitioning != new->partitioning) {
2862 dirty |= WM_DIRTY_DDB;
2863 /* Must disable LP1+ watermarks too */
2864 dirty |= WM_DIRTY_LP_ALL;
2867 /* LP1+ watermarks already deemed dirty, no need to continue */
2868 if (dirty & WM_DIRTY_LP_ALL)
2871 /* Find the lowest numbered LP1+ watermark in need of an update... */
2872 for (wm_lp = 1; wm_lp <= 3; wm_lp++) {
2873 if (old->wm_lp[wm_lp - 1] != new->wm_lp[wm_lp - 1] ||
2874 old->wm_lp_spr[wm_lp - 1] != new->wm_lp_spr[wm_lp - 1])
2875 break;
2878 /* ...and mark it and all higher numbered LP1+ watermarks as dirty */
2879 for (; wm_lp <= 3; wm_lp++)
2880 dirty |= WM_DIRTY_LP(wm_lp);
2885 static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2888 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2889 bool changed = false;
2891 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) {
2892 previous->wm_lp[2] &= ~WM1_LP_SR_EN;
2893 I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]);
2894 changed = true;
2896 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) {
2897 previous->wm_lp[1] &= ~WM1_LP_SR_EN;
2898 I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]);
2899 changed = true;
2901 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) {
2902 previous->wm_lp[0] &= ~WM1_LP_SR_EN;
2903 I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]);
2904 changed = true;
2908 * Don't touch WM1S_LP_EN here.
2909 * Doing so could cause underruns.
2910 */
2912 return changed;
2916 * The spec says we shouldn't write when we don't need, because every write
2917 * causes WMs to be re-evaluated, expending some power.
2919 static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2920 struct ilk_wm_values *results)
2922 struct drm_device *dev = dev_priv->dev;
2923 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2927 dirty = ilk_compute_wm_dirty(dev_priv, previous, results);
2928 if (!dirty)
2929 return;
2931 _ilk_disable_lp_wm(dev_priv, dirty);
2933 if (dirty & WM_DIRTY_PIPE(PIPE_A))
2934 I915_WRITE(WM0_PIPEA_ILK, results->wm_pipe[0]);
2935 if (dirty & WM_DIRTY_PIPE(PIPE_B))
2936 I915_WRITE(WM0_PIPEB_ILK, results->wm_pipe[1]);
2937 if (dirty & WM_DIRTY_PIPE(PIPE_C))
2938 I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
2940 if (dirty & WM_DIRTY_LINETIME(PIPE_A))
2941 I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
2942 if (dirty & WM_DIRTY_LINETIME(PIPE_B))
2943 I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
2944 if (dirty & WM_DIRTY_LINETIME(PIPE_C))
2945 I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
2947 if (dirty & WM_DIRTY_DDB) {
2948 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2949 val = I915_READ(WM_MISC);
2950 if (results->partitioning == INTEL_DDB_PART_1_2)
2951 val &= ~WM_MISC_DATA_PARTITION_5_6;
2953 val |= WM_MISC_DATA_PARTITION_5_6;
2954 I915_WRITE(WM_MISC, val);
2956 val = I915_READ(DISP_ARB_CTL2);
2957 if (results->partitioning == INTEL_DDB_PART_1_2)
2958 val &= ~DISP_DATA_PARTITION_5_6;
2960 val |= DISP_DATA_PARTITION_5_6;
2961 I915_WRITE(DISP_ARB_CTL2, val);
2965 if (dirty & WM_DIRTY_FBC) {
2966 val = I915_READ(DISP_ARB_CTL);
2967 if (results->enable_fbc_wm)
2968 val &= ~DISP_FBC_WM_DIS;
2970 val |= DISP_FBC_WM_DIS;
2971 I915_WRITE(DISP_ARB_CTL, val);
2974 if (dirty & WM_DIRTY_LP(1) &&
2975 previous->wm_lp_spr[0] != results->wm_lp_spr[0])
2976 I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]);
2978 if (INTEL_INFO(dev)->gen >= 7) {
2979 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1])
2980 I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]);
2981 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2])
2982 I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]);
2985 if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0])
2986 I915_WRITE(WM1_LP_ILK, results->wm_lp[0]);
2987 if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1])
2988 I915_WRITE(WM2_LP_ILK, results->wm_lp[1]);
2989 if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != results->wm_lp[2])
2990 I915_WRITE(WM3_LP_ILK, results->wm_lp[2]);
2992 dev_priv->wm.hw = *results;
2995 static bool ilk_disable_lp_wm(struct drm_device *dev)
2997 struct drm_i915_private *dev_priv = dev->dev_private;
2999 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
3003 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
3004 * different active planes.
3007 #define SKL_DDB_SIZE 896 /* in blocks */
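/*
 * Illustrative split: with the 4 bypass blocks reserved below, two
 * active pipes get (896 - 4) / 2 = 446 blocks each, while a single
 * active pipe keeps all 892.
 */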
3009 static void
3010 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3011 struct drm_crtc *for_crtc,
3012 const struct intel_wm_config *config,
3013 const struct skl_pipe_wm_parameters *params,
3014 struct skl_ddb_entry *alloc /* out */)
3016 struct drm_crtc *crtc;
3017 unsigned int pipe_size, ddb_size;
3018 int nth_active_pipe;
3020 if (!params->active) {
3021 alloc->start = 0;
3022 alloc->end = 0;
3023 return;
3024 }
3026 ddb_size = SKL_DDB_SIZE;
3028 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3030 nth_active_pipe = 0;
3031 for_each_crtc(dev, crtc) {
3032 if (!intel_crtc_active(crtc))
3033 continue;
3035 if (crtc == for_crtc)
3036 break;
3038 nth_active_pipe++;
3039 }
3041 pipe_size = ddb_size / config->num_pipes_active;
3042 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
3043 alloc->end = alloc->start + pipe_size;
3046 static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
3048 if (config->num_pipes_active == 1)
3049 return 32;
3051 return 8;
3054 static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3056 entry->start = reg & 0x3ff;
3057 entry->end = (reg >> 16) & 0x3ff;
3062 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3063 struct skl_ddb_allocation *ddb /* out */)
3065 struct drm_device *dev = dev_priv->dev;
3070 for_each_pipe(dev_priv, pipe) {
3071 for_each_plane(pipe, plane) {
3072 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
3073 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
3077 val = I915_READ(CUR_BUF_CFG(pipe));
3078 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
3083 skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
3085 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
3089 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3090 * an 8192x4096@32bpp framebuffer:
3091 * 3 * 4096 * 8192 * 4 < 2^32
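 * (3 * 4096 * 8192 * 4 = 402653184 = 0x18000000, comfortably < 2^32.)
 */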
3093 static unsigned int
3094 skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
3095 const struct skl_pipe_wm_parameters *params)
3097 unsigned int total_data_rate = 0;
3100 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
3101 const struct intel_plane_wm_parameters *p;
3103 p = &params->plane[plane];
3104 if (!p->enabled)
3105 continue;
3107 total_data_rate += skl_plane_relative_data_rate(p);
3110 return total_data_rate;
3113 static void
3114 skl_allocate_pipe_ddb(struct drm_crtc *crtc,
3115 const struct intel_wm_config *config,
3116 const struct skl_pipe_wm_parameters *params,
3117 struct skl_ddb_allocation *ddb /* out */)
3119 struct drm_device *dev = crtc->dev;
3120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3121 enum pipe pipe = intel_crtc->pipe;
3122 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3123 uint16_t alloc_size, start, cursor_blocks;
3124 unsigned int total_data_rate;
3127 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
3128 alloc_size = skl_ddb_entry_size(alloc);
3129 if (alloc_size == 0) {
3130 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3131 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
3135 cursor_blocks = skl_cursor_allocation(config);
3136 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
3137 ddb->cursor[pipe].end = alloc->end;
3139 alloc_size -= cursor_blocks;
3140 alloc->end -= cursor_blocks;
3143 * Each active plane get a portion of the remaining space, in
3144 * proportion to the amount of data they need to fetch from memory.
3146 * FIXME: we may not allocate every single block here.
3148 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
3150 start = alloc->start;
3151 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
3152 const struct intel_plane_wm_parameters *p;
3153 unsigned int data_rate;
3154 uint16_t plane_blocks;
3156 p = &params->plane[plane];
3157 if (!p->enabled)
3158 continue;
3160 data_rate = skl_plane_relative_data_rate(p);
3163 * promote the expression to 64 bits to avoid overflowing, the
3164 * result is < available as data_rate / total_data_rate < 1
3166 plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
3167 total_data_rate);
3169 ddb->plane[pipe][plane].start = start;
3170 ddb->plane[pipe][plane].end = start + plane_blocks;
3172 start += plane_blocks;
3177 static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
3179 /* TODO: Take into account the scalers once we support them */
3180 return config->adjusted_mode.crtc_clock;
3184 * The max latency should be 257 (the max the punit can encode is 255 and we add 2us
3185 * for the read latency) and bytes_per_pixel should always be <= 8, so that
3186 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3187 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
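 * Worst case with those bounds: 257 * 2000000 (kHz) * 8 = ~4.11e9,
 * which still fits in the 32-bit intermediate below.
 */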
3189 static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
3192 uint32_t wm_intermediate_val, ret;
3194 if (latency == 0)
3195 return UINT_MAX;
3197 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
3198 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3200 return ret;
3203 static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3204 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
3207 uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
3209 if (latency == 0)
3210 return UINT_MAX;
3212 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
3213 wm_intermediate_val = latency * pixel_rate;
3214 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3215 plane_bytes_per_line;
3217 return ret;
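/*
 * Illustrative numbers: pixel_rate = 337500 kHz at 4 bytes/pixel with
 * a 15us latency gives method1 = DIV_ROUND_UP(15 * 337500 * 4, 1000) =
 * 20250 bytes, which skl_compute_plane_wm() below converts into
 * DIV_ROUND_UP(20250, 512) + 1 = 41 DDB blocks.
 */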
3220 static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3221 const struct intel_crtc *intel_crtc)
3223 struct drm_device *dev = intel_crtc->base.dev;
3224 struct drm_i915_private *dev_priv = dev->dev_private;
3225 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3226 enum pipe pipe = intel_crtc->pipe;
3228 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
3229 sizeof(new_ddb->plane[pipe])))
3230 return true;
3232 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
3233 sizeof(new_ddb->cursor[pipe])))
3234 return true;
3236 return false;
3239 static void skl_compute_wm_global_parameters(struct drm_device *dev,
3240 struct intel_wm_config *config)
3242 struct drm_crtc *crtc;
3243 struct drm_plane *plane;
3245 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3246 config->num_pipes_active += intel_crtc_active(crtc);
3248 /* FIXME: I don't think we need those two global parameters on SKL */
3249 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3250 struct intel_plane *intel_plane = to_intel_plane(plane);
3252 config->sprites_enabled |= intel_plane->wm.enabled;
3253 config->sprites_scaled |= intel_plane->wm.scaled;
3257 static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3258 struct skl_pipe_wm_parameters *p)
3260 struct drm_device *dev = crtc->dev;
3261 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3262 enum pipe pipe = intel_crtc->pipe;
3263 struct drm_plane *plane;
3264 int i = 1; /* Index for sprite planes start */
3266 p->active = intel_crtc_active(crtc);
3268 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
3269 p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
3272 * For now, assume primary and cursor planes are always enabled.
3274 p->plane[0].enabled = true;
3275 p->plane[0].bytes_per_pixel =
3276 crtc->primary->fb->bits_per_pixel / 8;
3277 p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
3278 p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
3280 p->cursor.enabled = true;
3281 p->cursor.bytes_per_pixel = 4;
3282 p->cursor.horiz_pixels = intel_crtc->cursor_width ?
3283 intel_crtc->cursor_width : 64;
3286 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3287 struct intel_plane *intel_plane = to_intel_plane(plane);
3289 if (intel_plane->pipe == pipe)
3290 p->plane[i++] = intel_plane->wm;
3294 static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
3295 struct intel_plane_wm_parameters *p_params,
3296 uint16_t ddb_allocation,
3298 uint16_t *out_blocks, /* out */
3299 uint8_t *out_lines /* out */)
3301 uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
3302 uint32_t result_bytes;
3304 if (mem_value == 0 || !p->active || !p_params->enabled)
3305 return false;
3307 method1 = skl_wm_method1(p->pixel_rate,
3308 p_params->bytes_per_pixel,
3310 method2 = skl_wm_method2(p->pixel_rate,
3312 p_params->horiz_pixels,
3313 p_params->bytes_per_pixel,
3316 plane_bytes_per_line = p_params->horiz_pixels *
3317 p_params->bytes_per_pixel;
3319 /* For now xtile and linear */
3320 if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
3321 result_bytes = min(method1, method2);
3323 result_bytes = method1;
3325 res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
3326 res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
3328 if (res_blocks > ddb_allocation || res_lines > 31)
3329 return false;
3331 *out_blocks = res_blocks;
3332 *out_lines = res_lines;
3334 return true;
3337 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3338 struct skl_ddb_allocation *ddb,
3339 struct skl_pipe_wm_parameters *p,
3343 struct skl_wm_level *result)
3345 uint16_t latency = dev_priv->wm.skl_latency[level];
3346 uint16_t ddb_blocks;
3349 for (i = 0; i < num_planes; i++) {
3350 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3352 result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
3353 ddb_blocks,
3354 latency,
3355 &result->plane_res_b[i],
3356 &result->plane_res_l[i]);
3359 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
3360 result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
3361 latency, &result->cursor_res_b,
3362 &result->cursor_res_l);
3365 static uint32_t
3366 skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
3368 if (!intel_crtc_active(crtc))
3369 return 0;
3371 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
3375 static void skl_compute_transition_wm(struct drm_crtc *crtc,
3376 struct skl_pipe_wm_parameters *params,
3377 struct skl_wm_level *trans_wm /* out */)
3379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3382 if (!params->active)
3383 return;
3385 /* Until we know more, just disable transition WMs */
3386 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3387 trans_wm->plane_en[i] = false;
3388 trans_wm->cursor_en = false;
3391 static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3392 struct skl_ddb_allocation *ddb,
3393 struct skl_pipe_wm_parameters *params,
3394 struct skl_pipe_wm *pipe_wm)
3396 struct drm_device *dev = crtc->dev;
3397 const struct drm_i915_private *dev_priv = dev->dev_private;
3398 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3399 int level, max_level = ilk_wm_max_level(dev);
3401 for (level = 0; level <= max_level; level++) {
3402 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
3403 level, intel_num_planes(intel_crtc),
3404 &pipe_wm->wm[level]);
3406 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
3408 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
3411 static void skl_compute_wm_results(struct drm_device *dev,
3412 struct skl_pipe_wm_parameters *p,
3413 struct skl_pipe_wm *p_wm,
3414 struct skl_wm_values *r,
3415 struct intel_crtc *intel_crtc)
3417 int level, max_level = ilk_wm_max_level(dev);
3418 enum pipe pipe = intel_crtc->pipe;
3419 uint32_t temp;
3420 int i;
3422 for (level = 0; level <= max_level; level++) {
3423 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3424 temp = 0;
3426 temp |= p_wm->wm[level].plane_res_l[i] <<
3427 PLANE_WM_LINES_SHIFT;
3428 temp |= p_wm->wm[level].plane_res_b[i];
3429 if (p_wm->wm[level].plane_en[i])
3430 temp |= PLANE_WM_EN;
3432 r->plane[pipe][i][level] = temp;
3433 }
3435 temp = 0;
3437 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
3438 temp |= p_wm->wm[level].cursor_res_b;
3440 if (p_wm->wm[level].cursor_en)
3441 temp |= PLANE_WM_EN;
3443 r->cursor[pipe][level] = temp;
3447 /* transition WMs */
3448 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3449 temp = 0;
3450 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3451 temp |= p_wm->trans_wm.plane_res_b[i];
3452 if (p_wm->trans_wm.plane_en[i])
3453 temp |= PLANE_WM_EN;
3455 r->plane_trans[pipe][i] = temp;
3456 }
3458 temp = 0;
3459 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
3460 temp |= p_wm->trans_wm.cursor_res_b;
3461 if (p_wm->trans_wm.cursor_en)
3462 temp |= PLANE_WM_EN;
3464 r->cursor_trans[pipe] = temp;
3466 r->wm_linetime[pipe] = p_wm->linetime;
3469 static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3470 const struct skl_ddb_entry *entry)
3471 {
3472 if (entry->end)
3473 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3474 else
3475 I915_WRITE(reg, 0);
3476 }
3478 static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3479 const struct skl_wm_values *new)
3481 struct drm_device *dev = dev_priv->dev;
3482 struct intel_crtc *crtc;
3484 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3485 int i, level, max_level = ilk_wm_max_level(dev);
3486 enum pipe pipe = crtc->pipe;
3488 if (!new->dirty[pipe])
3491 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3493 for (level = 0; level <= max_level; level++) {
3494 for (i = 0; i < intel_num_planes(crtc); i++)
3495 I915_WRITE(PLANE_WM(pipe, i, level),
3496 new->plane[pipe][i][level]);
3497 I915_WRITE(CUR_WM(pipe, level),
3498 new->cursor[pipe][level]);
3500 for (i = 0; i < intel_num_planes(crtc); i++)
3501 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3502 new->plane_trans[pipe][i]);
3503 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
3505 for (i = 0; i < intel_num_planes(crtc); i++)
3506 skl_ddb_entry_write(dev_priv,
3507 PLANE_BUF_CFG(pipe, i),
3508 &new->ddb.plane[pipe][i]);
3510 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3511 &new->ddb.cursor[pipe]);
3516 * When setting up a new DDB allocation arrangement, we need to correctly
3517 * sequence the times at which the new allocations for the pipes are taken into
3518 * account or we'll have pipes fetching from space previously allocated to
3521 * Roughly the sequence looks like:
3522 * 1. re-allocate the pipe(s) with the allocation being reduced and not
3523 * overlapping with a previous light-up pipe (another way to put it is:
3524 * pipes with their new allocation strictly included into their old ones).
3525 * 2. re-allocate the other pipes that get their allocation reduced
3526 * 3. allocate the pipes having their allocation increased
3528 * Steps 1. and 2. are here to take care of the following case:
3529 * - Initially DDB looks like this:
3532 * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
3536 * We need to sequence the re-allocation: C, B, A (and not B, C, A).
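 *
 * The three passes of skl_flush_wm_values() below implement exactly this
 * ordering, waiting for a vblank after passes 1 and 2 so the freed
 * blocks are really idle before being handed out again.
 */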
3539 static void
3540 skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
3542 struct drm_device *dev = dev_priv->dev;
3545 DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
3547 for_each_plane(pipe, plane) {
3548 I915_WRITE(PLANE_SURF(pipe, plane),
3549 I915_READ(PLANE_SURF(pipe, plane)));
3551 I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
3554 static bool
3555 skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3556 const struct skl_ddb_allocation *new,
3559 uint16_t old_size, new_size;
3561 old_size = skl_ddb_entry_size(&old->pipe[pipe]);
3562 new_size = skl_ddb_entry_size(&new->pipe[pipe]);
3564 return old_size != new_size &&
3565 new->pipe[pipe].start >= old->pipe[pipe].start &&
3566 new->pipe[pipe].end <= old->pipe[pipe].end;
3569 static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3570 struct skl_wm_values *new_values)
3572 struct drm_device *dev = dev_priv->dev;
3573 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3574 bool reallocated[I915_MAX_PIPES] = {false, false, false};
3575 struct intel_crtc *crtc;
3578 new_ddb = &new_values->ddb;
3579 cur_ddb = &dev_priv->wm.skl_hw.ddb;
3582 * First pass: flush the pipes with the new allocation contained into
3585 * We'll wait for the vblank on those pipes to ensure we can safely
3586 * re-allocate the freed space without this pipe fetching from it.
3588 for_each_intel_crtc(dev, crtc) {
3594 if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
3595 continue;
3597 skl_wm_flush_pipe(dev_priv, pipe, 1);
3598 intel_wait_for_vblank(dev, pipe);
3600 reallocated[pipe] = true;
3605 * Second pass: flush the pipes that are having their allocation
3606 * reduced, but overlapping with a previous allocation.
3608 * Here as well we need to wait for the vblank to make sure the freed
3609 * space is not used anymore.
3611 for_each_intel_crtc(dev, crtc) {
3617 if (reallocated[pipe])
3618 continue;
3620 if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
3621 skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
3622 skl_wm_flush_pipe(dev_priv, pipe, 2);
3623 intel_wait_for_vblank(dev, pipe);
3626 reallocated[pipe] = true;
3630 * Third pass: flush the pipes that got more space allocated.
3632 * We don't need to actively wait for the update here, next vblank
3633 * will just get more DDB space with the correct WM values.
3635 for_each_intel_crtc(dev, crtc) {
3642 * At this point, only the pipes with more space than before are
3643 * left to re-allocate.
3644 */
3645 if (reallocated[pipe])
3646 continue;
3648 skl_wm_flush_pipe(dev_priv, pipe, 3);
3652 static bool skl_update_pipe_wm(struct drm_crtc *crtc,
3653 struct skl_pipe_wm_parameters *params,
3654 struct intel_wm_config *config,
3655 struct skl_ddb_allocation *ddb, /* out */
3656 struct skl_pipe_wm *pipe_wm /* out */)
3658 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3660 skl_compute_wm_pipe_parameters(crtc, params);
3661 skl_allocate_pipe_ddb(crtc, config, params, ddb);
3662 skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
3664 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
3665 return false;
3667 intel_crtc->wm.skl_active = *pipe_wm;
3669 return true;
3671 static void skl_update_other_pipe_wm(struct drm_device *dev,
3672 struct drm_crtc *crtc,
3673 struct intel_wm_config *config,
3674 struct skl_wm_values *r)
3676 struct intel_crtc *intel_crtc;
3677 struct intel_crtc *this_crtc = to_intel_crtc(crtc);
3680 * If the WM update hasn't changed the allocation for this_crtc (the
3681 * crtc we are currently computing the new WM values for), other
3682 * enabled crtcs will keep the same allocation and we don't need to
3683 * recompute anything for them.
3685 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
3689 * Otherwise, because of this_crtc being freshly enabled/disabled, the
3690 * other active pipes need new DDB allocation and WM values.
3692 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
3694 struct skl_pipe_wm_parameters params = {};
3695 struct skl_pipe_wm pipe_wm = {};
3698 if (this_crtc->pipe == intel_crtc->pipe)
3701 if (!intel_crtc->active)
3704 wm_changed = skl_update_pipe_wm(&intel_crtc->base,
3705 &params, config,
3706 &r->ddb, &pipe_wm);
3709 * If we end up re-computing the other pipe WM values, it's
3710 * because it was really needed, so we expect the WM values to
3713 WARN_ON(!wm_changed);
3715 skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
3716 r->dirty[intel_crtc->pipe] = true;
3720 static void skl_update_wm(struct drm_crtc *crtc)
3722 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3723 struct drm_device *dev = crtc->dev;
3724 struct drm_i915_private *dev_priv = dev->dev_private;
3725 struct skl_pipe_wm_parameters params = {};
3726 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3727 struct skl_pipe_wm pipe_wm = {};
3728 struct intel_wm_config config = {};
3730 memset(results, 0, sizeof(*results));
3732 skl_compute_wm_global_parameters(dev, &config);
3734 if (!skl_update_pipe_wm(crtc, &params, &config,
3735 &results->ddb, &pipe_wm))
3736 return;
3738 skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
3739 results->dirty[intel_crtc->pipe] = true;
3741 skl_update_other_pipe_wm(dev, crtc, &config, results);
3742 skl_write_wm_values(dev_priv, results);
3743 skl_flush_wm_values(dev_priv, results);
3745 /* store the new configuration */
3746 dev_priv->wm.skl_hw = *results;
3749 static void
3750 skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
3751 uint32_t sprite_width, uint32_t sprite_height,
3752 int pixel_size, bool enabled, bool scaled)
3754 struct intel_plane *intel_plane = to_intel_plane(plane);
3756 intel_plane->wm.enabled = enabled;
3757 intel_plane->wm.scaled = scaled;
3758 intel_plane->wm.horiz_pixels = sprite_width;
3759 intel_plane->wm.vert_pixels = sprite_height;
3760 intel_plane->wm.bytes_per_pixel = pixel_size;
3762 skl_update_wm(crtc);
3765 static void ilk_update_wm(struct drm_crtc *crtc)
3767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3768 struct drm_device *dev = crtc->dev;
3769 struct drm_i915_private *dev_priv = dev->dev_private;
3770 struct ilk_wm_maximums max;
3771 struct ilk_pipe_wm_parameters params = {};
3772 struct ilk_wm_values results = {};
3773 enum intel_ddb_partitioning partitioning;
3774 struct intel_pipe_wm pipe_wm = {};
3775 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3776 struct intel_wm_config config = {};
3778 ilk_compute_wm_parameters(crtc, &params);
3780 intel_compute_pipe_wm(crtc, &params, &pipe_wm);
3782 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm)))
3783 return;
3785 intel_crtc->wm.active = pipe_wm;
3787 ilk_compute_wm_config(dev, &config);
3789 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_1_2, &max);
3790 ilk_wm_merge(dev, &config, &max, &lp_wm_1_2);
3792 /* 5/6 split only in single pipe config on IVB+ */
3793 if (INTEL_INFO(dev)->gen >= 7 &&
3794 config.num_pipes_active == 1 && config.sprites_enabled) {
3795 ilk_compute_wm_maximums(dev, 1, &config, INTEL_DDB_PART_5_6, &max);
3796 ilk_wm_merge(dev, &config, &max, &lp_wm_5_6);
3798 best_lp_wm = ilk_find_best_result(dev, &lp_wm_1_2, &lp_wm_5_6);
3799 } else {
3800 best_lp_wm = &lp_wm_1_2;
3801 }
3803 partitioning = (best_lp_wm == &lp_wm_1_2) ?
3804 INTEL_DDB_PART_1_2 : INTEL_DDB_PART_5_6;
3806 ilk_compute_wm_results(dev, best_lp_wm, partitioning, &results);
3808 ilk_write_wm_values(dev_priv, &results);
3811 static void
3812 ilk_update_sprite_wm(struct drm_plane *plane,
3813 struct drm_crtc *crtc,
3814 uint32_t sprite_width, uint32_t sprite_height,
3815 int pixel_size, bool enabled, bool scaled)
3817 struct drm_device *dev = plane->dev;
3818 struct intel_plane *intel_plane = to_intel_plane(plane);
3820 intel_plane->wm.enabled = enabled;
3821 intel_plane->wm.scaled = scaled;
3822 intel_plane->wm.horiz_pixels = sprite_width;
3823 intel_plane->wm.vert_pixels = sprite_height;
3824 intel_plane->wm.bytes_per_pixel = pixel_size;
3827 * IVB workaround: must disable low power watermarks for at least
3828 * one frame before enabling scaling. LP watermarks can be re-enabled
3829 * when scaling is disabled.
3831 * WaCxSRDisabledForSpriteScaling:ivb
3833 if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev))
3834 intel_wait_for_vblank(dev, intel_plane->pipe);
3836 ilk_update_wm(crtc);
3839 static void skl_pipe_wm_active_state(uint32_t val,
3840 struct skl_pipe_wm *active,
3841 bool is_transwm,
3842 bool is_cursor,
3843 int i,
3844 int level)
3846 bool is_enabled = (val & PLANE_WM_EN) != 0;
3848 if (!is_transwm) {
3849 if (!is_cursor) {
3850 active->wm[level].plane_en[i] = is_enabled;
3851 active->wm[level].plane_res_b[i] =
3852 val & PLANE_WM_BLOCKS_MASK;
3853 active->wm[level].plane_res_l[i] =
3854 (val >> PLANE_WM_LINES_SHIFT) &
3855 PLANE_WM_LINES_MASK;
3856 } else {
3857 active->wm[level].cursor_en = is_enabled;
3858 active->wm[level].cursor_res_b =
3859 val & PLANE_WM_BLOCKS_MASK;
3860 active->wm[level].cursor_res_l =
3861 (val >> PLANE_WM_LINES_SHIFT) &
3862 PLANE_WM_LINES_MASK;
3863 }
3864 } else {
3865 if (!is_cursor) {
3866 active->trans_wm.plane_en[i] = is_enabled;
3867 active->trans_wm.plane_res_b[i] =
3868 val & PLANE_WM_BLOCKS_MASK;
3869 active->trans_wm.plane_res_l[i] =
3870 (val >> PLANE_WM_LINES_SHIFT) &
3871 PLANE_WM_LINES_MASK;
3872 } else {
3873 active->trans_wm.cursor_en = is_enabled;
3874 active->trans_wm.cursor_res_b =
3875 val & PLANE_WM_BLOCKS_MASK;
3876 active->trans_wm.cursor_res_l =
3877 (val >> PLANE_WM_LINES_SHIFT) &
3878 PLANE_WM_LINES_MASK;
3883 static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3885 struct drm_device *dev = crtc->dev;
3886 struct drm_i915_private *dev_priv = dev->dev_private;
3887 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3888 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3889 struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
3890 enum pipe pipe = intel_crtc->pipe;
3891 int level, i, max_level;
3892 uint32_t temp;
3894 max_level = ilk_wm_max_level(dev);
3896 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3898 for (level = 0; level <= max_level; level++) {
3899 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3900 hw->plane[pipe][i][level] =
3901 I915_READ(PLANE_WM(pipe, i, level));
3902 hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
3905 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3906 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
3907 hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
3909 if (!intel_crtc_active(crtc))
3912 hw->dirty[pipe] = true;
3914 active->linetime = hw->wm_linetime[pipe];
3916 for (level = 0; level <= max_level; level++) {
3917 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3918 temp = hw->plane[pipe][i][level];
3919 skl_pipe_wm_active_state(temp, active, false,
3922 temp = hw->cursor[pipe][level];
3923 skl_pipe_wm_active_state(temp, active, false, true, i, level);
3926 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3927 temp = hw->plane_trans[pipe][i];
3928 skl_pipe_wm_active_state(temp, active, true, false, i, 0);
3931 temp = hw->cursor_trans[pipe];
3932 skl_pipe_wm_active_state(temp, active, true, true, i, 0);
3935 void skl_wm_get_hw_state(struct drm_device *dev)
3937 struct drm_i915_private *dev_priv = dev->dev_private;
3938 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3939 struct drm_crtc *crtc;
3941 skl_ddb_get_hw_state(dev_priv, ddb);
3942 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3943 skl_pipe_wm_get_hw_state(crtc);
3946 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3948 struct drm_device *dev = crtc->dev;
3949 struct drm_i915_private *dev_priv = dev->dev_private;
3950 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3951 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3952 struct intel_pipe_wm *active = &intel_crtc->wm.active;
3953 enum pipe pipe = intel_crtc->pipe;
3954 static const unsigned int wm0_pipe_reg[] = {
3955 [PIPE_A] = WM0_PIPEA_ILK,
3956 [PIPE_B] = WM0_PIPEB_ILK,
3957 [PIPE_C] = WM0_PIPEC_IVB,
3960 hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
3961 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
3962 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
3964 active->pipe_enabled = intel_crtc_active(crtc);
3966 if (active->pipe_enabled) {
3967 u32 tmp = hw->wm_pipe[pipe];
3970 * For active pipes LP0 watermark is marked as
3971 * enabled, and LP1+ watermarks as disabled since
3972 * we can't really reverse compute them in case
3973 * multiple pipes are active.
3975 active->wm[0].enable = true;
3976 active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
3977 active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
3978 active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
3979 active->linetime = hw->wm_linetime[pipe];
3981 int level, max_level = ilk_wm_max_level(dev);
3984 * For inactive pipes, all watermark levels
3985 * should be marked as enabled but zeroed,
3986 * which is what we'd compute them to.
3988 for (level = 0; level <= max_level; level++)
3989 active->wm[level].enable = true;
3993 void ilk_wm_get_hw_state(struct drm_device *dev)
3995 struct drm_i915_private *dev_priv = dev->dev_private;
3996 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3997 struct drm_crtc *crtc;
3999 for_each_crtc(dev, crtc)
4000 ilk_pipe_wm_get_hw_state(crtc);
4002 hw->wm_lp[0] = I915_READ(WM1_LP_ILK);
4003 hw->wm_lp[1] = I915_READ(WM2_LP_ILK);
4004 hw->wm_lp[2] = I915_READ(WM3_LP_ILK);
4006 hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK);
4007 if (INTEL_INFO(dev)->gen >= 7) {
4008 hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB);
4009 hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB);
4012 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4013 hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ?
4014 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4015 else if (IS_IVYBRIDGE(dev))
4016 hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ?
4017 INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2;
4019 hw->enable_fbc_wm =
4020 !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS);
4024 * intel_update_watermarks - update FIFO watermark values based on current modes
4026 * Calculate watermark values for the various WM regs based on current mode
4027 * and plane configuration.
4029 * There are several cases to deal with here:
4030 * - normal (i.e. non-self-refresh)
4031 * - self-refresh (SR) mode
4032 * - lines are large relative to FIFO size (buffer can hold up to 2)
4033 * - lines are small relative to FIFO size (buffer can hold more than 2
4034 * lines), so need to account for TLB latency
4036 * The normal calculation is:
4037 * watermark = dotclock * bytes per pixel * latency
4038 * where latency is platform & configuration dependent (we assume pessimal
4039 * values here).
4041 * The SR calculation is:
4042 * watermark = (trunc(latency/line time)+1) * surface width *
4043 * bytes per pixel
4044 * where
4045 * line time = htotal / dotclock
4046 * surface width = hdisplay for normal plane and 64 for cursor
4047 * and latency is assumed to be high, as above.
4049 * The final value programmed to the register should always be rounded up,
4050 * and include an extra 2 entries to account for clock crossings.
4052 * We don't use the sprite, so we can ignore that. And on Crestline we have
4053 * to set the non-SR watermarks to 8.
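 *
 * Illustrative numbers for the normal method: a 135000 kHz dot clock at
 * 4 bytes/pixel with a pessimal 2us latency has to buffer about
 * 135000 * 4 * 2 / 1000 = 1080 bytes, which is then converted to FIFO
 * entries and rounded up as described above.
 */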
4055 void intel_update_watermarks(struct drm_crtc *crtc)
4057 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
4059 if (dev_priv->display.update_wm)
4060 dev_priv->display.update_wm(crtc);
4063 void intel_update_sprite_watermarks(struct drm_plane *plane,
4064 struct drm_crtc *crtc,
4065 uint32_t sprite_width,
4066 uint32_t sprite_height,
4068 bool enabled, bool scaled)
4070 struct drm_i915_private *dev_priv = plane->dev->dev_private;
4072 if (dev_priv->display.update_sprite_wm)
4073 dev_priv->display.update_sprite_wm(plane, crtc,
4074 sprite_width, sprite_height,
4075 pixel_size, enabled, scaled);
4078 static struct drm_i915_gem_object *
4079 intel_alloc_context_page(struct drm_device *dev)
4081 struct drm_i915_gem_object *ctx;
4084 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
4086 ctx = i915_gem_alloc_object(dev, 4096);
4087 if (!ctx) {
4088 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
4089 return NULL;
4090 }
4092 ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0);
4093 if (ret) {
4094 DRM_ERROR("failed to pin power context: %d\n", ret);
4095 goto err_unref;
4096 }
4098 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
4099 if (ret) {
4100 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
4101 goto err_unpin;
4102 }
4104 return ctx;
4106 err_unpin:
4107 i915_gem_object_ggtt_unpin(ctx);
4108 err_unref:
4109 drm_gem_object_unreference(&ctx->base);
4110 return NULL;
4114 * Lock protecting IPS related data structures
4116 DEFINE_SPINLOCK(mchdev_lock);
4118 /* Global for IPS driver to get at the current i915 device. Protected by
4119 * mchdev_lock. */
4120 static struct drm_i915_private *i915_mch_dev;
4122 bool ironlake_set_drps(struct drm_device *dev, u8 val)
4124 struct drm_i915_private *dev_priv = dev->dev_private;
4125 u16 rgvswctl;
4127 assert_spin_locked(&mchdev_lock);
4129 rgvswctl = I915_READ16(MEMSWCTL);
4130 if (rgvswctl & MEMCTL_CMD_STS) {
4131 DRM_DEBUG("gpu busy, RCS change rejected\n");
4132 return false; /* still busy with another command */
4135 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
4136 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
4137 I915_WRITE16(MEMSWCTL, rgvswctl);
4138 POSTING_READ16(MEMSWCTL);
4140 rgvswctl |= MEMCTL_CMD_STS;
4141 I915_WRITE16(MEMSWCTL, rgvswctl);
4143 return true;
4146 static void ironlake_enable_drps(struct drm_device *dev)
4148 struct drm_i915_private *dev_priv = dev->dev_private;
4149 u32 rgvmodectl = I915_READ(MEMMODECTL);
4150 u8 fmax, fmin, fstart, vstart;
4152 spin_lock_irq(&mchdev_lock);
4154 /* Enable temp reporting */
4155 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
4156 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
4158 /* 100ms RC evaluation intervals */
4159 I915_WRITE(RCUPEI, 100000);
4160 I915_WRITE(RCDNEI, 100000);
4162 /* Set max/min thresholds to 90ms and 80ms respectively */
4163 I915_WRITE(RCBMAXAVG, 90000);
4164 I915_WRITE(RCBMINAVG, 80000);
4166 I915_WRITE(MEMIHYST, 1);
4168 /* Set up min, max, and cur for interrupt handling */
4169 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
4170 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
4171 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
4172 MEMMODE_FSTART_SHIFT;
4174 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
4177 dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
4178 dev_priv->ips.fstart = fstart;
4180 dev_priv->ips.max_delay = fstart;
4181 dev_priv->ips.min_delay = fmin;
4182 dev_priv->ips.cur_delay = fstart;
4184 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
4185 fmax, fmin, fstart);
4187 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
4190 * Interrupts will be enabled in ironlake_irq_postinstall
4193 I915_WRITE(VIDSTART, vstart);
4194 POSTING_READ(VIDSTART);
4196 rgvmodectl |= MEMMODE_SWMODE_EN;
4197 I915_WRITE(MEMMODECTL, rgvmodectl);
4199 if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
4200 DRM_ERROR("stuck trying to change perf mode\n");
4203 ironlake_set_drps(dev, fstart);
4205 dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
4207 dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
4208 dev_priv->ips.last_count2 = I915_READ(0x112f4);
4209 dev_priv->ips.last_time2 = ktime_get_raw_ns();
4211 spin_unlock_irq(&mchdev_lock);
4214 static void ironlake_disable_drps(struct drm_device *dev)
4216 struct drm_i915_private *dev_priv = dev->dev_private;
4217 u16 rgvswctl;
4219 spin_lock_irq(&mchdev_lock);
4221 rgvswctl = I915_READ16(MEMSWCTL);
4223 /* Ack interrupts, disable EFC interrupt */
4224 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
4225 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
4226 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
4227 I915_WRITE(DEIIR, DE_PCU_EVENT);
4228 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4230 /* Go back to the starting frequency */
4231 ironlake_set_drps(dev, dev_priv->ips.fstart);
4233 rgvswctl |= MEMCTL_CMD_STS;
4234 I915_WRITE(MEMSWCTL, rgvswctl);
4237 spin_unlock_irq(&mchdev_lock);
4240 /* There's a funny hw issue where the hw returns all 0 when reading from
4241 * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
4242 * ourselves, instead of doing a rmw cycle (which might result in us clearing
4243 * all limits and the gpu stuck at whatever frequency it is at atm).
4245 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val)
4247 u32 limits;
4249 /* Only set the down limit when we've reached the lowest level to avoid
4250 * getting more interrupts, otherwise leave this clear. This prevents a
4251 * race in the hw when coming out of rc6: There's a tiny window where
4252 * the hw runs at the minimal clock before selecting the desired
4253 * frequency, if the down threshold expires in that window we will not
4254 * receive a down interrupt. */
4255 limits = dev_priv->rps.max_freq_softlimit << 24;
4256 if (val <= dev_priv->rps.min_freq_softlimit)
4257 limits |= dev_priv->rps.min_freq_softlimit << 16;
4259 return limits;
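/*
 * Illustrative sketch (not driver code): the GEN6_RP_INTERRUPT_LIMITS
 * layout assumed above packs the maximum ratio into bits 31:24 and the
 * minimum ratio into bits 23:16, both in 50 MHz units. A hypothetical
 * decoder, for documentation only:
 */
static inline void example_decode_rps_limits(u32 limits, u8 *max, u8 *min)
{
	*max = (limits >> 24) & 0xff;	/* upper bound ratio */
	*min = (limits >> 16) & 0xff;	/* lower bound, only set at the floor */
}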
4262 static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4264 int new_power;
4266 new_power = dev_priv->rps.power;
4267 switch (dev_priv->rps.power) {
4268 case LOW_POWER:
4269 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq)
4270 new_power = BETWEEN;
4271 break;
4273 case BETWEEN:
4274 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq)
4275 new_power = LOW_POWER;
4276 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq)
4277 new_power = HIGH_POWER;
4278 break;
4280 case HIGH_POWER:
4281 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq)
4282 new_power = BETWEEN;
4283 break;
4284 }
4285 /* Max/min bins are special */
4286 if (val == dev_priv->rps.min_freq_softlimit)
4287 new_power = LOW_POWER;
4288 if (val == dev_priv->rps.max_freq_softlimit)
4289 new_power = HIGH_POWER;
4290 if (new_power == dev_priv->rps.power)
4291 return;
4293 /* Note the units here are not exactly 1us, but 1280ns. */
4294 switch (new_power) {
4295 case LOW_POWER:
4296 /* Upclock if more than 95% busy over 16ms */
4297 I915_WRITE(GEN6_RP_UP_EI, 12500);
4298 I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800);
4300 /* Downclock if less than 85% busy over 32ms */
4301 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
4302 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250);
4304 I915_WRITE(GEN6_RP_CONTROL,
4305 GEN6_RP_MEDIA_TURBO |
4306 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4307 GEN6_RP_MEDIA_IS_GFX |
4308 GEN6_RP_ENABLE |
4309 GEN6_RP_UP_BUSY_AVG |
4310 GEN6_RP_DOWN_IDLE_AVG);
4311 break;
4313 case BETWEEN:
4314 /* Upclock if more than 90% busy over 13ms */
4315 I915_WRITE(GEN6_RP_UP_EI, 10250);
4316 I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225);
4318 /* Downclock if less than 75% busy over 32ms */
4319 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
4320 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750);
4322 I915_WRITE(GEN6_RP_CONTROL,
4323 GEN6_RP_MEDIA_TURBO |
4324 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4325 GEN6_RP_MEDIA_IS_GFX |
4326 GEN6_RP_ENABLE |
4327 GEN6_RP_UP_BUSY_AVG |
4328 GEN6_RP_DOWN_IDLE_AVG);
4329 break;
4331 case HIGH_POWER:
4332 /* Upclock if more than 85% busy over 10ms */
4333 I915_WRITE(GEN6_RP_UP_EI, 8000);
4334 I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800);
4336 /* Downclock if less than 60% busy over 32ms */
4337 I915_WRITE(GEN6_RP_DOWN_EI, 25000);
4338 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000);
4340 I915_WRITE(GEN6_RP_CONTROL,
4341 GEN6_RP_MEDIA_TURBO |
4342 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4343 GEN6_RP_MEDIA_IS_GFX |
4344 GEN6_RP_ENABLE |
4345 GEN6_RP_UP_BUSY_AVG |
4346 GEN6_RP_DOWN_IDLE_AVG);
4347 break;
4348 }
4350 dev_priv->rps.power = new_power;
4351 dev_priv->rps.last_adj = 0;
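/*
 * Illustrative sketch: the EI/threshold registers programmed above
 * count in units of 1.28 us, not 1 us, so e.g. the 16 ms up-evaluation
 * window in LOW_POWER is written as 16000 / 1.28 = 12500. A
 * hypothetical conversion helper:
 */
static inline u32 example_us_to_ei_units(u32 us)
{
	return us * 100 / 128;	/* 1 register unit = 1280 ns */
}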
4354 static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4356 u32 mask = 0;
4358 if (val > dev_priv->rps.min_freq_softlimit)
4359 mask |= GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
4360 if (val < dev_priv->rps.max_freq_softlimit)
4361 mask |= GEN6_PM_RP_UP_THRESHOLD;
4363 mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
4364 mask &= dev_priv->pm_rps_events;
4366 /* IVB and SNB hard hangs on looping batchbuffer
4367 * if GEN6_PM_UP_EI_EXPIRED is masked.
4369 if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
4370 mask |= GEN6_PM_RP_UP_EI_EXPIRED;
4372 if (IS_GEN8(dev_priv->dev))
4373 mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4375 return ~mask;
4378 /* gen6_set_rps is called to update the frequency request, but should also be
4379 * called when the range (min_delay and max_delay) is modified so that we can
4380 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4381 void gen6_set_rps(struct drm_device *dev, u8 val)
4383 struct drm_i915_private *dev_priv = dev->dev_private;
4385 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4386 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
4387 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
4389 /* min/max delay may still have been modified so be sure to
4390 * write the limits value.
4392 if (val != dev_priv->rps.cur_freq) {
4393 gen6_set_rps_thresholds(dev_priv, val);
4395 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4396 I915_WRITE(GEN6_RPNSWREQ,
4397 HSW_FREQUENCY(val));
4398 else
4399 I915_WRITE(GEN6_RPNSWREQ,
4400 GEN6_FREQUENCY(val) |
4401 GEN6_OFFSET(0) |
4402 GEN6_AGGRESSIVE_TURBO);
4405 /* Make sure we continue to get interrupts
4406 * until we hit the minimum or maximum frequencies.
4408 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val));
4409 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4411 POSTING_READ(GEN6_RPNSWREQ);
4413 dev_priv->rps.cur_freq = val;
4414 trace_intel_gpu_freq_change(val * 50);
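/*
 * Illustrative note: on SNB..BDW the frequency request is a ratio in
 * 50 MHz units, which is why the tracepoint above reports val * 50.
 * Hypothetical helper for the conversion:
 */
static inline unsigned int example_gen6_ratio_to_mhz(u8 ratio)
{
	return ratio * 50;	/* one ratio step = 50 MHz */
}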
4417 /* vlv_set_rps_idle: Set the frequency to Rpn if Gfx clocks are down
4419 * If Gfx is idle:
4420 * 1. Mask Turbo interrupts
4421 * 2. Bring up Gfx clock
4422 * 3. Change the freq to Rpn and wait till P-Unit updates freq
4423 * 4. Clear the Force GFX CLK ON bit so that Gfx can clock down
4424 * 5. Unmask Turbo interrupts
4426 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4428 struct drm_device *dev = dev_priv->dev;
4430 /* Latest VLV doesn't need to force the gfx clock */
4431 if (dev->pdev->revision >= 0xd) {
4432 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4433 return;
4434 }
4437 /* When we are idle, drop to the minimum voltage state. */
4440 if (dev_priv->rps.cur_freq <= dev_priv->rps.min_freq_softlimit)
4441 return;
4443 /* Mask turbo interrupt so that they will not come in between */
4444 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4446 vlv_force_gfx_clock(dev_priv, true);
4448 dev_priv->rps.cur_freq = dev_priv->rps.min_freq_softlimit;
4450 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ,
4451 dev_priv->rps.min_freq_softlimit);
4453 if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
4454 & GENFREQSTATUS) == 0, 100))
4455 DRM_ERROR("timed out waiting for Punit\n");
4457 vlv_force_gfx_clock(dev_priv, false);
4459 I915_WRITE(GEN6_PMINTRMSK,
4460 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4463 void gen6_rps_idle(struct drm_i915_private *dev_priv)
4465 struct drm_device *dev = dev_priv->dev;
4467 mutex_lock(&dev_priv->rps.hw_lock);
4468 if (dev_priv->rps.enabled) {
4469 if (IS_CHERRYVIEW(dev))
4470 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4471 else if (IS_VALLEYVIEW(dev))
4472 vlv_set_rps_idle(dev_priv);
4473 else
4474 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4475 dev_priv->rps.last_adj = 0;
4477 mutex_unlock(&dev_priv->rps.hw_lock);
4480 void gen6_rps_boost(struct drm_i915_private *dev_priv)
4482 struct drm_device *dev = dev_priv->dev;
4484 mutex_lock(&dev_priv->rps.hw_lock);
4485 if (dev_priv->rps.enabled) {
4486 if (IS_VALLEYVIEW(dev))
4487 valleyview_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
4488 else
4489 gen6_set_rps(dev_priv->dev, dev_priv->rps.max_freq_softlimit);
4490 dev_priv->rps.last_adj = 0;
4492 mutex_unlock(&dev_priv->rps.hw_lock);
4495 void valleyview_set_rps(struct drm_device *dev, u8 val)
4497 struct drm_i915_private *dev_priv = dev->dev_private;
4499 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4500 WARN_ON(val > dev_priv->rps.max_freq_softlimit);
4501 WARN_ON(val < dev_priv->rps.min_freq_softlimit);
4503 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1),
4504 "Odd GPU freq value\n"))
4505 val &= ~1;
4507 if (val != dev_priv->rps.cur_freq)
4508 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
4510 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
4512 dev_priv->rps.cur_freq = val;
4513 trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
4516 static void gen9_disable_rps(struct drm_device *dev)
4518 struct drm_i915_private *dev_priv = dev->dev_private;
4520 I915_WRITE(GEN6_RC_CONTROL, 0);
4523 static void gen6_disable_rps(struct drm_device *dev)
4525 struct drm_i915_private *dev_priv = dev->dev_private;
4527 I915_WRITE(GEN6_RC_CONTROL, 0);
4528 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4531 static void cherryview_disable_rps(struct drm_device *dev)
4533 struct drm_i915_private *dev_priv = dev->dev_private;
4535 I915_WRITE(GEN6_RC_CONTROL, 0);
4538 static void valleyview_disable_rps(struct drm_device *dev)
4540 struct drm_i915_private *dev_priv = dev->dev_private;
4542 /* We're taking forcewake before disabling RC6;
4543 * this is what the BIOS expects when going into suspend */
4544 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4546 I915_WRITE(GEN6_RC_CONTROL, 0);
4548 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4551 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
4553 if (IS_VALLEYVIEW(dev)) {
4554 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4555 mode = GEN6_RC_CTL_RC6_ENABLE;
4556 else
4557 mode = 0;
4558 }
4559 if (HAS_RC6p(dev))
4560 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
4561 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
4562 (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
4563 (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
4565 else
4566 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
4567 (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
4570 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4572 /* No RC6 before Ironlake */
4573 if (INTEL_INFO(dev)->gen < 5)
4574 return 0;
4576 /* RC6 is only available on Ironlake mobile, not on desktop */
4577 if (INTEL_INFO(dev)->gen == 5 && !IS_IRONLAKE_M(dev))
4578 return 0;
4580 /* Respect the kernel parameter if it is set */
4581 if (enable_rc6 >= 0) {
4582 int mask;
4584 if (HAS_RC6p(dev))
4585 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4586 INTEL_RC6pp_ENABLE;
4587 else
4588 mask = INTEL_RC6_ENABLE;
4590 if ((enable_rc6 & mask) != enable_rc6)
4591 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n",
4592 enable_rc6 & mask, enable_rc6, mask);
4594 return enable_rc6 & mask;
4597 /* Disable RC6 on Ironlake */
4598 if (INTEL_INFO(dev)->gen == 5)
4599 return 0;
4601 if (IS_IVYBRIDGE(dev))
4602 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4604 return INTEL_RC6_ENABLE;
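/*
 * Example of the sanitizing above (hypothetical request): on hardware
 * without RC6p/RC6pp, a user passing i915.enable_rc6=7
 * (RC6 | RC6p | RC6pp) is masked down to 7 & INTEL_RC6_ENABLE = 1,
 * and the adjustment is reported via DRM_DEBUG_KMS.
 */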
4607 int intel_enable_rc6(const struct drm_device *dev)
4609 return i915.enable_rc6;
4612 static void gen6_init_rps_frequencies(struct drm_device *dev)
4614 struct drm_i915_private *dev_priv = dev->dev_private;
4615 uint32_t rp_state_cap;
4616 u32 ddcc_status = 0;
4617 int ret;
4619 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
4620 /* All of these values are in units of 50MHz */
4621 dev_priv->rps.cur_freq = 0;
4622 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4623 dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
4624 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
4625 dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
4626 /* hw_max = RP0 until we check for overclocking */
4627 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4629 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4630 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
4631 ret = sandybridge_pcode_read(dev_priv,
4632 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4633 &ddcc_status);
4634 if (0 == ret)
4635 dev_priv->rps.efficient_freq =
4636 (ddcc_status >> 8) & 0xff;
4639 /* Preserve min/max settings in case of re-init */
4640 if (dev_priv->rps.max_freq_softlimit == 0)
4641 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4643 if (dev_priv->rps.min_freq_softlimit == 0) {
4644 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
4645 dev_priv->rps.min_freq_softlimit =
4646 /* max(RPe, 450 MHz) */
4647 max(dev_priv->rps.efficient_freq, (u8) 9);
4649 dev_priv->rps.min_freq_softlimit =
4650 dev_priv->rps.min_freq;
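/*
 * Illustrative sketch (not driver code): the RP_STATE_CAP decoding used
 * above, with RP0 in bits 7:0, RP1 in bits 15:8 and RPn in bits 23:16,
 * all in 50 MHz units. Hypothetical helper:
 */
static inline void example_decode_rp_state_cap(u32 cap,
					       u8 *rp0, u8 *rp1, u8 *rpn)
{
	*rp0 = (cap >> 0) & 0xff;	/* highest non-overclocked ratio */
	*rp1 = (cap >> 8) & 0xff;	/* "efficient" ratio */
	*rpn = (cap >> 16) & 0xff;	/* minimum ratio */
}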
4654 static void gen9_enable_rps(struct drm_device *dev)
4656 struct drm_i915_private *dev_priv = dev->dev_private;
4657 struct intel_engine_cs *ring;
4658 uint32_t rc6_mask = 0;
4659 int unused;
4661 /* 1a: Software RC state - RC0 */
4662 I915_WRITE(GEN6_RC_STATE, 0);
4664 /* 1b: Get forcewake during program sequence. Although the driver
4665 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4666 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4668 /* 2a: Disable RC states. */
4669 I915_WRITE(GEN6_RC_CONTROL, 0);
4671 /* 2b: Program RC6 thresholds.*/
4672 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
4673 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4674 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4675 for_each_ring(ring, dev_priv, unused)
4676 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4677 I915_WRITE(GEN6_RC_SLEEP, 0);
4678 I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
4680 /* 3a: Enable RC6 */
4681 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4682 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4683 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4684 "on" : "off");
4685 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4686 GEN6_RC_CTL_EI_MODE(1) |
4687 rc6_mask);
4689 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4693 static void gen8_enable_rps(struct drm_device *dev)
4695 struct drm_i915_private *dev_priv = dev->dev_private;
4696 struct intel_engine_cs *ring;
4697 uint32_t rc6_mask = 0;
4698 int unused;
4700 /* 1a: Software RC state - RC0 */
4701 I915_WRITE(GEN6_RC_STATE, 0);
4703 /* 1c & 1d: Get forcewake during program sequence. Although the driver
4704 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
4705 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4707 /* 2a: Disable RC states. */
4708 I915_WRITE(GEN6_RC_CONTROL, 0);
4710 /* Initialize rps frequencies */
4711 gen6_init_rps_frequencies(dev);
4713 /* 2b: Program RC6 thresholds.*/
4714 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
4715 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
4716 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
4717 for_each_ring(ring, dev_priv, unused)
4718 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4719 I915_WRITE(GEN6_RC_SLEEP, 0);
4720 if (IS_BROADWELL(dev))
4721 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4723 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4726 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
4727 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4728 intel_print_rc6_info(dev, rc6_mask);
4729 if (IS_BROADWELL(dev))
4730 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4731 GEN7_RC_CTL_TO_MODE |
4732 rc6_mask);
4733 else
4734 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4735 GEN6_RC_CTL_EI_MODE(1) |
4736 rc6_mask);
4738 /* 4 Program defaults and thresholds for RPS*/
4739 I915_WRITE(GEN6_RPNSWREQ,
4740 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4741 I915_WRITE(GEN6_RC_VIDEO_FREQ,
4742 HSW_FREQUENCY(dev_priv->rps.rp1_freq));
4743 /* NB: Docs say 1s, and 1000000 - which aren't equivalent */
4744 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */
4746 /* Docs recommend 900 MHz and 300 MHz respectively */
4747 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
4748 dev_priv->rps.max_freq_softlimit << 24 |
4749 dev_priv->rps.min_freq_softlimit << 16);
4751 I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */
4752 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/
4753 I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */
4754 I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */
4756 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4759 I915_WRITE(GEN6_RP_CONTROL,
4760 GEN6_RP_MEDIA_TURBO |
4761 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4762 GEN6_RP_MEDIA_IS_GFX |
4764 GEN6_RP_UP_BUSY_AVG |
4765 GEN6_RP_DOWN_IDLE_AVG);
4767 /* 6: Ring frequency + overclocking (our driver does this later) */
4769 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4770 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4772 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4775 static void gen6_enable_rps(struct drm_device *dev)
4777 struct drm_i915_private *dev_priv = dev->dev_private;
4778 struct intel_engine_cs *ring;
4779 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
4780 u32 gtfifodbg;
4781 int rc6_mode;
4782 int i, ret;
4784 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4786 /* Here begins a magic sequence of register writes to enable
4787 * auto-downclocking.
4789 * Perhaps there might be some value in exposing these to
4790 * userspace. */
4792 I915_WRITE(GEN6_RC_STATE, 0);
4794 /* Clear the DBG now so we don't confuse earlier errors */
4795 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
4796 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
4797 I915_WRITE(GTFIFODBG, gtfifodbg);
4800 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
4802 /* Initialize rps frequencies */
4803 gen6_init_rps_frequencies(dev);
4805 /* disable the counters and set deterministic thresholds */
4806 I915_WRITE(GEN6_RC_CONTROL, 0);
4808 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
4809 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
4810 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
4811 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
4812 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
4814 for_each_ring(ring, dev_priv, i)
4815 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
4817 I915_WRITE(GEN6_RC_SLEEP, 0);
4818 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
4819 if (IS_IVYBRIDGE(dev))
4820 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
4822 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
4823 I915_WRITE(GEN6_RC6p_THRESHOLD, 150000);
4824 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
4826 /* Check if we are enabling RC6 */
4827 rc6_mode = intel_enable_rc6(dev_priv->dev);
4828 if (rc6_mode & INTEL_RC6_ENABLE)
4829 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
4831 /* We don't use those on Haswell */
4832 if (!IS_HASWELL(dev)) {
4833 if (rc6_mode & INTEL_RC6p_ENABLE)
4834 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
4836 if (rc6_mode & INTEL_RC6pp_ENABLE)
4837 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
4840 intel_print_rc6_info(dev, rc6_mask);
4842 I915_WRITE(GEN6_RC_CONTROL,
4843 rc6_mask |
4844 GEN6_RC_CTL_EI_MODE(1) |
4845 GEN6_RC_CTL_HW_ENABLE);
4847 /* Power down if completely idle for over 50ms */
4848 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000);
4849 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
4851 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
4853 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
4855 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
4856 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
4857 DRM_DEBUG_DRIVER("Overclocking supported. Max: %dMHz, Overclock max: %dMHz\n",
4858 (dev_priv->rps.max_freq_softlimit & 0xff) * 50,
4859 (pcu_mbox & 0xff) * 50);
4860 dev_priv->rps.max_freq = pcu_mbox & 0xff;
4863 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4864 gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
4867 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
4868 if (IS_GEN6(dev) && ret) {
4869 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
4870 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
4871 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
4872 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
4873 rc6vids &= 0xffff00;
4874 rc6vids |= GEN6_ENCODE_RC6_VID(450);
4875 ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids);
4877 DRM_ERROR("Couldn't fix incorrect rc6 voltage\n");
4880 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
4883 static void __gen6_update_ring_freq(struct drm_device *dev)
4885 struct drm_i915_private *dev_priv = dev->dev_private;
4886 const int min_freq = 15;
4887 unsigned int gpu_freq;
4888 unsigned int max_ia_freq, min_ring_freq;
4889 int scaling_factor = 180;
4890 struct cpufreq_policy *policy;
4892 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4894 policy = cpufreq_cpu_get(0);
4895 if (policy) {
4896 max_ia_freq = policy->cpuinfo.max_freq;
4897 cpufreq_cpu_put(policy);
4898 } else {
4899 /*
4900 * Default to measured freq if none found, PCU will ensure we
4901 * don't go over
4902 */
4903 max_ia_freq = tsc_khz;
4904 }
4906 /* Convert from kHz to MHz */
4907 max_ia_freq /= 1000;
4909 min_ring_freq = I915_READ(DCLK) & 0xf;
4910 /* convert DDR frequency from units of 266.6MHz to bandwidth */
4911 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
4914 * For each potential GPU frequency, load a ring frequency we'd like
4915 * to use for memory access. We do this by specifying the IA frequency
4916 * the PCU should use as a reference to determine the ring frequency.
4918 for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
4919 gpu_freq--) {
4920 int diff = dev_priv->rps.max_freq - gpu_freq;
4921 unsigned int ia_freq = 0, ring_freq = 0;
4923 if (INTEL_INFO(dev)->gen >= 8) {
4924 /* max(2 * GT, DDR). NB: GT is 50MHz units */
4925 ring_freq = max(min_ring_freq, gpu_freq);
4926 } else if (IS_HASWELL(dev)) {
4927 ring_freq = mult_frac(gpu_freq, 5, 4);
4928 ring_freq = max(min_ring_freq, ring_freq);
4929 /* leave ia_freq as the default, chosen by cpufreq */
4931 /* On older processors, there is no separate ring
4932 * clock domain, so in order to boost the bandwidth
4933 * of the ring, we need to upclock the CPU (ia_freq).
4935 * For GPU frequencies less than 750MHz,
4936 * just use the lowest ring freq.
4938 if (gpu_freq < min_freq)
4939 ia_freq = 800;
4940 else
4941 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
4942 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
4945 sandybridge_pcode_write(dev_priv,
4946 GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
4947 ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT |
4948 ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT |
4949 gpu_freq);
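/*
 * Worked example for the scaling above (hypothetical values): with
 * max_ia_freq = 3400 MHz, scaling_factor = 180 and max_freq = 22, a
 * gpu_freq of 16 gives diff = 6, so the pre-HSW IA hint is
 * 3400 - (6 * 180) / 2 = 2860 MHz, then rounded to 100 MHz units by
 * DIV_ROUND_CLOSEST before being sent to the PCU.
 */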
4953 void gen6_update_ring_freq(struct drm_device *dev)
4955 struct drm_i915_private *dev_priv = dev->dev_private;
4957 if (INTEL_INFO(dev)->gen < 6 || IS_VALLEYVIEW(dev))
4958 return;
4960 mutex_lock(&dev_priv->rps.hw_lock);
4961 __gen6_update_ring_freq(dev);
4962 mutex_unlock(&dev_priv->rps.hw_lock);
4965 static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
4967 u32 val, rp0;
4969 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
4970 rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4972 return rp0;
4975 static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv)
4977 u32 val, rpe;
4979 val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG);
4980 rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK;
4982 return rpe;
4985 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv)
4987 u32 val, rp1;
4989 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
4990 rp1 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & PUNIT_GPU_STATUS_MAX_FREQ_MASK;
4992 return rp1;
4995 static int cherryview_rps_min_freq(struct drm_i915_private *dev_priv)
4997 u32 val, rpn;
4999 val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG);
5000 rpn = (val >> PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT) & PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK;
5002 return rpn;
5004 static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv)
5006 u32 val, rp1;
5008 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5010 rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT;
5012 return rp1;
5015 static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv)
5017 u32 val, rp0;
5019 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE);
5021 rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT;
5022 /* Clamp to max */
5023 rp0 = min_t(u32, rp0, 0xea);
5025 return rp0;
5028 static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv)
5030 u32 val, rpe;
5032 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO);
5033 rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT;
5034 val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI);
5035 rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5;
5037 return rpe;
5040 static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv)
5042 return vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff;
5045 /* Check that the pctx buffer wasn't moved under us. */
5046 static void valleyview_check_pctx(struct drm_i915_private *dev_priv)
5048 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5050 WARN_ON(pctx_addr != dev_priv->mm.stolen_base +
5051 dev_priv->vlv_pctx->stolen->start);
5055 /* Check that the pcbr address is not empty. */
5056 static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5058 unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095;
5060 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5063 static void cherryview_setup_pctx(struct drm_device *dev)
5065 struct drm_i915_private *dev_priv = dev->dev_private;
5066 unsigned long pctx_paddr, paddr;
5067 struct i915_gtt *gtt = &dev_priv->gtt;
5069 int pctx_size = 32*1024;
5071 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5073 pcbr = I915_READ(VLV_PCBR);
5074 if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
5075 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5076 paddr = (dev_priv->mm.stolen_base +
5077 (gtt->stolen_size - pctx_size));
5079 pctx_paddr = (paddr & (~4095));
5080 I915_WRITE(VLV_PCBR, pctx_paddr);
5083 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5086 static void valleyview_setup_pctx(struct drm_device *dev)
5088 struct drm_i915_private *dev_priv = dev->dev_private;
5089 struct drm_i915_gem_object *pctx;
5090 unsigned long pctx_paddr;
5092 int pctx_size = 24*1024;
5094 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5096 pcbr = I915_READ(VLV_PCBR);
5097 if (pcbr) {
5098 /* BIOS set it up already, grab the pre-alloc'd space */
5099 int pcbr_offset;
5101 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5102 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev,
5103 pcbr_offset,
5104 I915_GTT_OFFSET_NONE,
5105 pctx_size);
5106 goto out;
5107 }
5109 DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
5112 * From the Gunit register HAS:
5113 * The Gfx driver is expected to program this register and ensure
5114 * proper allocation within Gfx stolen memory. For example, this
5115 * register should be programmed such that the PCBR range does not
5116 * overlap with other ranges, such as the frame buffer, protected
5117 * memory, or any other relevant ranges.
5119 pctx = i915_gem_object_create_stolen(dev, pctx_size);
5120 if (!pctx) {
5121 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5122 return;
5123 }
5125 pctx_paddr = dev_priv->mm.stolen_base + pctx->stolen->start;
5126 I915_WRITE(VLV_PCBR, pctx_paddr);
5128 out:
5129 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5130 dev_priv->vlv_pctx = pctx;
5133 static void valleyview_cleanup_pctx(struct drm_device *dev)
5135 struct drm_i915_private *dev_priv = dev->dev_private;
5137 if (WARN_ON(!dev_priv->vlv_pctx))
5140 drm_gem_object_unreference(&dev_priv->vlv_pctx->base);
5141 dev_priv->vlv_pctx = NULL;
5144 static void valleyview_init_gt_powersave(struct drm_device *dev)
5146 struct drm_i915_private *dev_priv = dev->dev_private;
5149 valleyview_setup_pctx(dev);
5151 mutex_lock(&dev_priv->rps.hw_lock);
5153 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5154 switch ((val >> 6) & 3) {
5155 case 0:
5156 case 1:
5157 dev_priv->mem_freq = 800;
5158 break;
5159 case 2:
5160 dev_priv->mem_freq = 1066;
5161 break;
5162 case 3:
5163 dev_priv->mem_freq = 1333;
5164 break;
5165 }
5166 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5168 dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
5169 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5170 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5171 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5172 dev_priv->rps.max_freq);
5174 dev_priv->rps.efficient_freq = valleyview_rps_rpe_freq(dev_priv);
5175 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5176 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5177 dev_priv->rps.efficient_freq);
5179 dev_priv->rps.rp1_freq = valleyview_rps_guar_freq(dev_priv);
5180 DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n",
5181 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5182 dev_priv->rps.rp1_freq);
5184 dev_priv->rps.min_freq = valleyview_rps_min_freq(dev_priv);
5185 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5186 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5187 dev_priv->rps.min_freq);
5189 /* Preserve min/max settings in case of re-init */
5190 if (dev_priv->rps.max_freq_softlimit == 0)
5191 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5193 if (dev_priv->rps.min_freq_softlimit == 0)
5194 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5196 mutex_unlock(&dev_priv->rps.hw_lock);
5199 static void cherryview_init_gt_powersave(struct drm_device *dev)
5201 struct drm_i915_private *dev_priv = dev->dev_private;
5204 cherryview_setup_pctx(dev);
5206 mutex_lock(&dev_priv->rps.hw_lock);
5208 mutex_lock(&dev_priv->dpio_lock);
5209 val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
5210 mutex_unlock(&dev_priv->dpio_lock);
5212 switch ((val >> 2) & 0x7) {
5213 case 0:
5214 case 1:
5215 dev_priv->rps.cz_freq = 200;
5216 dev_priv->mem_freq = 1600;
5217 break;
5218 case 2:
5219 dev_priv->rps.cz_freq = 267;
5220 dev_priv->mem_freq = 1600;
5221 break;
5222 case 3:
5223 dev_priv->rps.cz_freq = 333;
5224 dev_priv->mem_freq = 2000;
5225 break;
5226 case 4:
5227 dev_priv->rps.cz_freq = 320;
5228 dev_priv->mem_freq = 1600;
5229 break;
5230 case 5:
5231 dev_priv->rps.cz_freq = 400;
5232 dev_priv->mem_freq = 1600;
5233 break;
5234 }
5235 DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
5237 dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
5238 dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
5239 DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n",
5240 vlv_gpu_freq(dev_priv, dev_priv->rps.max_freq),
5241 dev_priv->rps.max_freq);
5243 dev_priv->rps.efficient_freq = cherryview_rps_rpe_freq(dev_priv);
5244 DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n",
5245 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5246 dev_priv->rps.efficient_freq);
5248 dev_priv->rps.rp1_freq = cherryview_rps_guar_freq(dev_priv);
5249 DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n",
5250 vlv_gpu_freq(dev_priv, dev_priv->rps.rp1_freq),
5251 dev_priv->rps.rp1_freq);
5253 dev_priv->rps.min_freq = cherryview_rps_min_freq(dev_priv);
5254 DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n",
5255 vlv_gpu_freq(dev_priv, dev_priv->rps.min_freq),
5256 dev_priv->rps.min_freq);
5258 WARN_ONCE((dev_priv->rps.max_freq |
5259 dev_priv->rps.efficient_freq |
5260 dev_priv->rps.rp1_freq |
5261 dev_priv->rps.min_freq) & 1,
5262 "Odd GPU freq values\n");
5264 /* Preserve min/max settings in case of re-init */
5265 if (dev_priv->rps.max_freq_softlimit == 0)
5266 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
5268 if (dev_priv->rps.min_freq_softlimit == 0)
5269 dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
5271 mutex_unlock(&dev_priv->rps.hw_lock);
5274 static void valleyview_cleanup_gt_powersave(struct drm_device *dev)
5276 valleyview_cleanup_pctx(dev);
5279 static void cherryview_enable_rps(struct drm_device *dev)
5281 struct drm_i915_private *dev_priv = dev->dev_private;
5282 struct intel_engine_cs *ring;
5283 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5284 int i;
5286 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5288 gtfifodbg = I915_READ(GTFIFODBG);
5289 if (gtfifodbg) {
5290 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5291 gtfifodbg);
5292 I915_WRITE(GTFIFODBG, gtfifodbg);
5293 }
5295 cherryview_check_pctx(dev_priv);
5297 /* 1a & 1b: Get forcewake during program sequence. Although the driver
5298 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
5299 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
5301 /* 2a: Program RC6 thresholds.*/
5302 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
5303 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
5304 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
5306 for_each_ring(ring, dev_priv, i)
5307 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5308 I915_WRITE(GEN6_RC_SLEEP, 0);
5310 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
5312 /* allows RC6 residency counter to work */
5313 I915_WRITE(VLV_COUNTER_CONTROL,
5314 _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
5315 VLV_MEDIA_RC6_COUNT_EN |
5316 VLV_RENDER_RC6_COUNT_EN));
5318 /* For now we assume BIOS is allocating and populating the PCBR */
5319 pcbr = I915_READ(VLV_PCBR);
5322 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
5323 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5324 rc6_mode = GEN6_RC_CTL_EI_MODE(1);
5326 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5328 /* 4 Program defaults and thresholds for RPS*/
5329 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5330 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5331 I915_WRITE(GEN6_RP_UP_EI, 66000);
5332 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5334 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5336 /* WaDisablePwrmtrEvent:chv (pre-production hw) */
5337 I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
5338 I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
5341 I915_WRITE(GEN6_RP_CONTROL,
5342 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5343 GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
5345 GEN6_RP_UP_BUSY_AVG |
5346 GEN6_RP_DOWN_IDLE_AVG);
5348 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5350 /* RPS code assumes GPLL is used */
5351 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5353 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5354 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5356 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5357 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5358 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5359 dev_priv->rps.cur_freq);
5361 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5362 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5363 dev_priv->rps.efficient_freq);
5365 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5367 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5370 static void valleyview_enable_rps(struct drm_device *dev)
5372 struct drm_i915_private *dev_priv = dev->dev_private;
5373 struct intel_engine_cs *ring;
5374 u32 gtfifodbg, val, rc6_mode = 0;
5375 int i;
5377 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
5379 valleyview_check_pctx(dev_priv);
5381 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
5382 DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n",
5383 gtfifodbg);
5384 I915_WRITE(GTFIFODBG, gtfifodbg);
5387 /* On VLV, forcewake all wells; else redirect to the regular path */
5388 gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
5390 I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
5391 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
5392 I915_WRITE(GEN6_RP_UP_EI, 66000);
5393 I915_WRITE(GEN6_RP_DOWN_EI, 350000);
5395 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
5396 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240);
5398 I915_WRITE(GEN6_RP_CONTROL,
5399 GEN6_RP_MEDIA_TURBO |
5400 GEN6_RP_MEDIA_HW_NORMAL_MODE |
5401 GEN6_RP_MEDIA_IS_GFX |
5403 GEN6_RP_UP_BUSY_AVG |
5404 GEN6_RP_DOWN_IDLE_CONT);
5406 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000);
5407 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
5408 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
5410 for_each_ring(ring, dev_priv, i)
5411 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
5413 I915_WRITE(GEN6_RC6_THRESHOLD, 0x557);
5415 /* allows RC6 residency counter to work */
5416 I915_WRITE(VLV_COUNTER_CONTROL,
5417 _MASKED_BIT_ENABLE(VLV_MEDIA_RC0_COUNT_EN |
5418 VLV_RENDER_RC0_COUNT_EN |
5419 VLV_MEDIA_RC6_COUNT_EN |
5420 VLV_RENDER_RC6_COUNT_EN));
5422 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
5423 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5425 intel_print_rc6_info(dev, rc6_mode);
5427 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5429 val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
5431 /* RPS code assumes GPLL is used */
5432 WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
5434 DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
5435 DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
5437 dev_priv->rps.cur_freq = (val >> 8) & 0xff;
5438 DRM_DEBUG_DRIVER("current GPU freq: %d MHz (%u)\n",
5439 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
5440 dev_priv->rps.cur_freq);
5442 DRM_DEBUG_DRIVER("setting GPU freq to %d MHz (%u)\n",
5443 vlv_gpu_freq(dev_priv, dev_priv->rps.efficient_freq),
5444 dev_priv->rps.efficient_freq);
5446 valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
5448 gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
5451 void ironlake_teardown_rc6(struct drm_device *dev)
5453 struct drm_i915_private *dev_priv = dev->dev_private;
5455 if (dev_priv->ips.renderctx) {
5456 i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
5457 drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
5458 dev_priv->ips.renderctx = NULL;
5461 if (dev_priv->ips.pwrctx) {
5462 i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
5463 drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
5464 dev_priv->ips.pwrctx = NULL;
5468 static void ironlake_disable_rc6(struct drm_device *dev)
5470 struct drm_i915_private *dev_priv = dev->dev_private;
5472 if (I915_READ(PWRCTXA)) {
5473 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
5474 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
5475 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
5478 I915_WRITE(PWRCTXA, 0);
5479 POSTING_READ(PWRCTXA);
5481 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
5482 POSTING_READ(RSTDBYCTL);
5486 static int ironlake_setup_rc6(struct drm_device *dev)
5488 struct drm_i915_private *dev_priv = dev->dev_private;
5490 if (dev_priv->ips.renderctx == NULL)
5491 dev_priv->ips.renderctx = intel_alloc_context_page(dev);
5492 if (!dev_priv->ips.renderctx)
5495 if (dev_priv->ips.pwrctx == NULL)
5496 dev_priv->ips.pwrctx = intel_alloc_context_page(dev);
5497 if (!dev_priv->ips.pwrctx) {
5498 ironlake_teardown_rc6(dev);
5505 static void ironlake_enable_rc6(struct drm_device *dev)
5507 struct drm_i915_private *dev_priv = dev->dev_private;
5508 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
5509 bool was_interruptible;
5512 /* rc6 disabled by default due to repeated reports of hanging during
5513 * boot and resume.
5514 */
5515 if (!intel_enable_rc6(dev))
5516 return;
5518 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
5520 ret = ironlake_setup_rc6(dev);
5524 was_interruptible = dev_priv->mm.interruptible;
5525 dev_priv->mm.interruptible = false;
5528 * GPU can automatically power down the render unit if given a page
5529 * to save state.
5530 */
5531 ret = intel_ring_begin(ring, 6);
5532 if (ret) {
5533 ironlake_teardown_rc6(dev);
5534 dev_priv->mm.interruptible = was_interruptible;
5535 return;
5536 }
5538 intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
5539 intel_ring_emit(ring, MI_SET_CONTEXT);
5540 intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) |
5542 MI_SAVE_EXT_STATE_EN |
5543 MI_RESTORE_EXT_STATE_EN |
5544 MI_RESTORE_INHIBIT);
5545 intel_ring_emit(ring, MI_SUSPEND_FLUSH);
5546 intel_ring_emit(ring, MI_NOOP);
5547 intel_ring_emit(ring, MI_FLUSH);
5548 intel_ring_advance(ring);
5551 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
5552 * does an implicit flush; combined with the MI_FLUSH above, it should
5553 * be safe to assume that renderctx is valid.
5555 ret = intel_ring_idle(ring);
5556 dev_priv->mm.interruptible = was_interruptible;
5557 if (ret) {
5558 DRM_ERROR("failed to enable ironlake power savings\n");
5559 ironlake_teardown_rc6(dev);
5560 return;
5561 }
5563 I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN);
5564 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
5566 intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE);
5569 static unsigned long intel_pxfreq(u32 vidfreq)
5571 unsigned long freq;
5572 int div = (vidfreq & 0x3f0000) >> 16;
5573 int post = (vidfreq & 0x3000) >> 12;
5574 int pre = (vidfreq & 0x7);
5576 if (!pre)
5577 return 0;
5579 freq = ((div * 133333) / ((1<<post) * pre));
5581 return freq;
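/*
 * Worked example for intel_pxfreq() (hypothetical register value):
 * vidfreq = 0x00041002 gives div = 4, post = 1, pre = 2, so
 * freq = (4 * 133333) / ((1 << 1) * 2) = 133333.
 */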
5584 static const struct cparams {
5585 u16 i;
5586 u16 t;
5587 u16 m;
5588 u16 c;
5589 } cparams[] = {
5590 { 1, 1333, 301, 28664 },
5591 { 1, 1066, 294, 24460 },
5592 { 1, 800, 294, 25192 },
5593 { 0, 1333, 276, 27605 },
5594 { 0, 1066, 276, 27605 },
5595 { 0, 800, 231, 23784 },
5598 static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5600 u64 total_count, diff, ret;
5601 u32 count1, count2, count3, m = 0, c = 0;
5602 unsigned long now = jiffies_to_msecs(jiffies), diff1;
5605 assert_spin_locked(&mchdev_lock);
5607 diff1 = now - dev_priv->ips.last_time1;
5609 /* Prevent division-by-zero if we are asking too fast.
5610 * Also, we don't get interesting results if we are polling
5611 * faster than once in 10ms, so just return the saved value
5612 * in such cases.
5613 */
5614 if (diff1 <= 10)
5615 return dev_priv->ips.chipset_power;
5617 count1 = I915_READ(DMIEC);
5618 count2 = I915_READ(DDREC);
5619 count3 = I915_READ(CSIEC);
5621 total_count = count1 + count2 + count3;
5623 /* FIXME: handle per-counter overflow */
5624 if (total_count < dev_priv->ips.last_count1) {
5625 diff = ~0UL - dev_priv->ips.last_count1;
5626 diff += total_count;
5628 diff = total_count - dev_priv->ips.last_count1;
5631 for (i = 0; i < ARRAY_SIZE(cparams); i++) {
5632 if (cparams[i].i == dev_priv->ips.c_m &&
5633 cparams[i].t == dev_priv->ips.r_t) {
5634 m = cparams[i].m;
5635 c = cparams[i].c;
5636 break;
5637 }
5638 }
5640 diff = div_u64(diff, diff1);
5641 ret = ((m * diff) + c);
5642 ret = div_u64(ret, 10);
5644 dev_priv->ips.last_count1 = total_count;
5645 dev_priv->ips.last_time1 = now;
5647 dev_priv->ips.chipset_power = ret;
5649 return ret;
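/*
 * Sketch of the per-counter overflow handling the FIXME above alludes
 * to (hypothetical, not wired into the driver): each 32-bit energy
 * counter would need its own wraparound test instead of testing only
 * the sum; with unsigned arithmetic the delta survives a single wrap:
 */
static inline u32 example_counter_delta(u32 now, u32 last)
{
	return now - last;	/* unsigned subtraction handles 32-bit wrap */
}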
5652 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5654 struct drm_device *dev = dev_priv->dev;
5657 if (INTEL_INFO(dev)->gen != 5)
5660 spin_lock_irq(&mchdev_lock);
5662 val = __i915_chipset_val(dev_priv);
5664 spin_unlock_irq(&mchdev_lock);
5669 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
5671 unsigned long m, x, b;
5674 tsfs = I915_READ(TSFS);
5676 m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
5677 x = I915_READ8(TR1);
5679 b = tsfs & TSFS_INTR_MASK;
5681 return ((m * x) / 127) - b;
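/*
 * Worked example for the formula above (hypothetical register values):
 * with slope m = 40, TR1 reading x = 100 and intercept b = 10, the
 * reported value is (40 * 100) / 127 - 10 = 21 (integer division).
 */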
5684 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5686 struct drm_device *dev = dev_priv->dev;
5687 static const struct v_table {
5688 u16 vd; /* in .1 mil */
5689 u16 vm; /* in .1 mil */
5820 if (INTEL_INFO(dev)->is_mobile)
5821 return v_table[pxvid].vm;
5823 return v_table[pxvid].vd;
5826 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5828 u64 now, diff, diffms;
5831 assert_spin_locked(&mchdev_lock);
5833 now = ktime_get_raw_ns();
5834 diffms = now - dev_priv->ips.last_time2;
5835 do_div(diffms, NSEC_PER_MSEC);
5837 /* Don't divide by 0 */
5838 if (!diffms)
5839 return;
5841 count = I915_READ(GFXEC);
5843 if (count < dev_priv->ips.last_count2) {
5844 diff = ~0UL - dev_priv->ips.last_count2;
5845 diff += count;
5846 } else {
5847 diff = count - dev_priv->ips.last_count2;
5850 dev_priv->ips.last_count2 = count;
5851 dev_priv->ips.last_time2 = now;
5853 /* More magic constants... */
5854 diff = diff * 1181;
5855 diff = div_u64(diff, diffms * 10);
5856 dev_priv->ips.gfx_power = diff;
5859 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5861 struct drm_device *dev = dev_priv->dev;
5863 if (INTEL_INFO(dev)->gen != 5)
5866 spin_lock_irq(&mchdev_lock);
5868 __i915_update_gfx_val(dev_priv);
5870 spin_unlock_irq(&mchdev_lock);
5873 static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5875 unsigned long t, corr, state1, corr2, state2;
5876 u32 pxvid, ext_v;
5878 assert_spin_locked(&mchdev_lock);
5880 pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_freq * 4));
5881 pxvid = (pxvid >> 24) & 0x7f;
5882 ext_v = pvid_to_extvid(dev_priv, pxvid);
5884 state1 = ext_v;
5886 t = i915_mch_val(dev_priv);
5888 /* Revel in the empirically derived constants */
5890 /* Correction factor in 1/100000 units */
5891 if (t > 80)
5892 corr = ((t * 2349) + 135940);
5893 else if (t >= 50)
5894 corr = ((t * 964) + 29317);
5895 else /* < 50 */
5896 corr = ((t * 301) + 1004);
5898 corr = corr * ((150142 * state1) / 10000 - 78642);
5900 corr2 = (corr * dev_priv->ips.corr);
5902 state2 = (corr2 * state1) / 10000;
5903 state2 /= 100; /* convert to mW */
5905 __i915_update_gfx_val(dev_priv);
5907 return dev_priv->ips.gfx_power + state2;
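/*
 * Worked example for the correction above (hypothetical reading): with
 * t = 60 the middle branch applies, corr = 964 * 60 + 29317 = 87157 in
 * 1/100000 units, which is then scaled by state1 and the fused
 * ips.corr factor before the /10000 and /100 steps yield milliwatts.
 */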
5910 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5912 struct drm_device *dev = dev_priv->dev;
5915 if (INTEL_INFO(dev)->gen != 5)
5918 spin_lock_irq(&mchdev_lock);
5920 val = __i915_gfx_val(dev_priv);
5922 spin_unlock_irq(&mchdev_lock);
5928 * i915_read_mch_val - return value for IPS use
5930 * Calculate and return a value for the IPS driver to use when deciding whether
5931 * we have thermal and power headroom to increase CPU or GPU power budget.
5933 unsigned long i915_read_mch_val(void)
5935 struct drm_i915_private *dev_priv;
5936 unsigned long chipset_val, graphics_val, ret = 0;
5938 spin_lock_irq(&mchdev_lock);
5939 if (!i915_mch_dev)
5940 goto out_unlock;
5941 dev_priv = i915_mch_dev;
5943 chipset_val = __i915_chipset_val(dev_priv);
5944 graphics_val = __i915_gfx_val(dev_priv);
5946 ret = chipset_val + graphics_val;
5948 out_unlock:
5949 spin_unlock_irq(&mchdev_lock);
5951 return ret;
5953 EXPORT_SYMBOL_GPL(i915_read_mch_val);
5956 * i915_gpu_raise - raise GPU frequency limit
5958 * Raise the limit; IPS indicates we have thermal headroom.
5960 bool i915_gpu_raise(void)
5962 struct drm_i915_private *dev_priv;
5963 bool ret = true;
5965 spin_lock_irq(&mchdev_lock);
5966 if (!i915_mch_dev) {
5967 ret = false;
5968 goto out_unlock;
5969 }
5970 dev_priv = i915_mch_dev;
5972 if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
5973 dev_priv->ips.max_delay--;
5975 out_unlock:
5976 spin_unlock_irq(&mchdev_lock);
5978 return ret;
5980 EXPORT_SYMBOL_GPL(i915_gpu_raise);
5983 * i915_gpu_lower - lower GPU frequency limit
5985 * IPS indicates we're close to a thermal limit, so throttle back the GPU
5986 * frequency maximum.
5988 bool i915_gpu_lower(void)
5990 struct drm_i915_private *dev_priv;
5991 bool ret = true;
5993 spin_lock_irq(&mchdev_lock);
5994 if (!i915_mch_dev) {
5995 ret = false;
5996 goto out_unlock;
5997 }
5998 dev_priv = i915_mch_dev;
6000 if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
6001 dev_priv->ips.max_delay++;
6003 out_unlock:
6004 spin_unlock_irq(&mchdev_lock);
6006 return ret;
6008 EXPORT_SYMBOL_GPL(i915_gpu_lower);
6011 * i915_gpu_busy - indicate GPU business to IPS
6013 * Tell the IPS driver whether or not the GPU is busy.
6015 bool i915_gpu_busy(void)
6017 struct drm_i915_private *dev_priv;
6018 struct intel_engine_cs *ring;
6019 bool ret = false;
6020 int i;
6022 spin_lock_irq(&mchdev_lock);
6023 if (!i915_mch_dev)
6024 goto out_unlock;
6025 dev_priv = i915_mch_dev;
6027 for_each_ring(ring, dev_priv, i)
6028 ret |= !list_empty(&ring->request_list);
6030 out_unlock:
6031 spin_unlock_irq(&mchdev_lock);
6033 return ret;
6035 EXPORT_SYMBOL_GPL(i915_gpu_busy);
6038 * i915_gpu_turbo_disable - disable graphics turbo
6040 * Disable graphics turbo by resetting the max frequency and setting the
6041 * current frequency to the default.
6043 bool i915_gpu_turbo_disable(void)
6045 struct drm_i915_private *dev_priv;
6046 bool ret = true;
6048 spin_lock_irq(&mchdev_lock);
6049 if (!i915_mch_dev) {
6050 ret = false;
6051 goto out_unlock;
6052 }
6053 dev_priv = i915_mch_dev;
6055 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6057 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
6058 ret = false;
6060 out_unlock:
6061 spin_unlock_irq(&mchdev_lock);
6063 return ret;
6065 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
6068 * Tells the intel_ips driver that the i915 driver is now loaded, if
6069 * IPS got loaded first.
6071 * This awkward dance is so that neither module has to depend on the
6072 * other in order for IPS to do the appropriate communication of
6073 * GPU turbo limits to i915.
6075 static void
6076 ips_ping_for_i915_load(void)
6078 void (*link)(void);
6080 link = symbol_get(ips_link_to_i915_driver);
6081 if (link) {
6082 link();
6083 symbol_put(ips_link_to_i915_driver);
6084 }
6087 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
6089 /* We only register the i915 ips part with intel-ips once everything is
6090 * set up, to avoid intel-ips sneaking in and reading bogus values. */
6091 spin_lock_irq(&mchdev_lock);
6092 i915_mch_dev = dev_priv;
6093 spin_unlock_irq(&mchdev_lock);
6095 ips_ping_for_i915_load();
6098 void intel_gpu_ips_teardown(void)
6100 spin_lock_irq(&mchdev_lock);
6101 i915_mch_dev = NULL;
6102 spin_unlock_irq(&mchdev_lock);
6105 static void intel_init_emon(struct drm_device *dev)
6107 struct drm_i915_private *dev_priv = dev->dev_private;
6108 u32 lcfuse;
6109 u8 pxw[16];
6110 int i;
6112 /* Disable to program */
6113 I915_WRITE(ECR, 0);
6114 POSTING_READ(ECR);
6116 /* Program energy weights for various events */
6117 I915_WRITE(SDEW, 0x15040d00);
6118 I915_WRITE(CSIEW0, 0x007f0000);
6119 I915_WRITE(CSIEW1, 0x1e220004);
6120 I915_WRITE(CSIEW2, 0x04000004);
6122 for (i = 0; i < 5; i++)
6123 I915_WRITE(PEW + (i * 4), 0);
6124 for (i = 0; i < 3; i++)
6125 I915_WRITE(DEW + (i * 4), 0);
6127 /* Program P-state weights to account for frequency power adjustment */
6128 for (i = 0; i < 16; i++) {
6129 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
6130 unsigned long freq = intel_pxfreq(pxvidfreq);
6131 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
6132 PXVFREQ_PX_SHIFT;
6133 unsigned long val;
6135 val = vid * vid;
6136 val *= (freq / 1000);
6137 val *= 255;
6138 val /= (127*127*900);
6139 if (val > 0xff)
6140 DRM_ERROR("bad pxval: %ld\n", val);
6142 pxw[i] = val;
6143 }
6145 /* Render standby states get 0 weight */
6146 pxw[14] = 0;
6147 pxw[15] = 0;
6147 for (i = 0; i < 4; i++) {
6148 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
6149 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
6150 I915_WRITE(PXW + (i * 4), val);
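/*
 * Illustrative note on the packing above: four 8-bit weights go into
 * one 32-bit PXW register, most significant byte first, e.g.
 * (hypothetical weights) {0x10, 0x20, 0x30, 0x40} -> 0x10203040.
 */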
6153 /* Adjust magic regs to magic values (more experimental results) */
6154 I915_WRITE(OGW0, 0);
6155 I915_WRITE(OGW1, 0);
6156 I915_WRITE(EG0, 0x00007f00);
6157 I915_WRITE(EG1, 0x0000000e);
6158 I915_WRITE(EG2, 0x000e0000);
6159 I915_WRITE(EG3, 0x68000300);
6160 I915_WRITE(EG4, 0x42000000);
6161 I915_WRITE(EG5, 0x00140031);
6162 I915_WRITE(EG6, 0);
6163 I915_WRITE(EG7, 0);
6165 for (i = 0; i < 8; i++)
6166 I915_WRITE(PXWL + (i * 4), 0);
6168 /* Enable PMON + select events */
6169 I915_WRITE(ECR, 0x80000019);
6171 lcfuse = I915_READ(LCFUSE02);
6173 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6176 void intel_init_gt_powersave(struct drm_device *dev)
6178 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6);
6180 if (IS_CHERRYVIEW(dev))
6181 cherryview_init_gt_powersave(dev);
6182 else if (IS_VALLEYVIEW(dev))
6183 valleyview_init_gt_powersave(dev);
6186 void intel_cleanup_gt_powersave(struct drm_device *dev)
6188 if (IS_CHERRYVIEW(dev))
6189 return;
6190 else if (IS_VALLEYVIEW(dev))
6191 valleyview_cleanup_gt_powersave(dev);
6195 * intel_suspend_gt_powersave - suspend PM work and helper threads
6198 * We don't want to disable RC6 or other features here, we just want
6199 * to make sure any work we've queued has finished and won't bother
6200 * us while we're suspended.
6202 void intel_suspend_gt_powersave(struct drm_device *dev)
6204 struct drm_i915_private *dev_priv = dev->dev_private;
6206 if (INTEL_INFO(dev)->gen < 6)
6209 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6212 * TODO: disable RPS interrupts on GEN9+ too once RPS support
6213 * is added for it.
6214 */
6215 if (INTEL_INFO(dev)->gen < 9)
6216 gen6_disable_rps_interrupts(dev);
6218 /* Force GPU to min freq during suspend */
6219 gen6_rps_idle(dev_priv);
6222 void intel_disable_gt_powersave(struct drm_device *dev)
6224 struct drm_i915_private *dev_priv = dev->dev_private;
6226 if (IS_IRONLAKE_M(dev)) {
6227 ironlake_disable_drps(dev);
6228 ironlake_disable_rc6(dev);
6229 } else if (INTEL_INFO(dev)->gen >= 6) {
6230 intel_suspend_gt_powersave(dev);
6232 mutex_lock(&dev_priv->rps.hw_lock);
6233 if (INTEL_INFO(dev)->gen >= 9)
6234 gen9_disable_rps(dev);
6235 else if (IS_CHERRYVIEW(dev))
6236 cherryview_disable_rps(dev);
6237 else if (IS_VALLEYVIEW(dev))
6238 valleyview_disable_rps(dev);
6240 gen6_disable_rps(dev);
6242 dev_priv->rps.enabled = false;
6243 mutex_unlock(&dev_priv->rps.hw_lock);
6247 static void intel_gen6_powersave_work(struct work_struct *work)
6249 struct drm_i915_private *dev_priv =
6250 container_of(work, struct drm_i915_private,
6251 rps.delayed_resume_work.work);
6252 struct drm_device *dev = dev_priv->dev;
6254 mutex_lock(&dev_priv->rps.hw_lock);
6257 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
6258 * added for it.
6259 */
6260 if (INTEL_INFO(dev)->gen < 9)
6261 gen6_reset_rps_interrupts(dev);
6263 if (IS_CHERRYVIEW(dev)) {
6264 cherryview_enable_rps(dev);
6265 } else if (IS_VALLEYVIEW(dev)) {
6266 valleyview_enable_rps(dev);
6267 } else if (INTEL_INFO(dev)->gen >= 9) {
6268 gen9_enable_rps(dev);
6269 } else if (IS_BROADWELL(dev)) {
6270 gen8_enable_rps(dev);
6271 __gen6_update_ring_freq(dev);
6273 gen6_enable_rps(dev);
6274 __gen6_update_ring_freq(dev);
6276 dev_priv->rps.enabled = true;
6278 if (INTEL_INFO(dev)->gen < 9)
6279 gen6_enable_rps_interrupts(dev);
6281 mutex_unlock(&dev_priv->rps.hw_lock);
6283 intel_runtime_pm_put(dev_priv);
6286 void intel_enable_gt_powersave(struct drm_device *dev)
6288 struct drm_i915_private *dev_priv = dev->dev_private;
6290 if (IS_IRONLAKE_M(dev)) {
6291 mutex_lock(&dev->struct_mutex);
6292 ironlake_enable_drps(dev);
6293 ironlake_enable_rc6(dev);
6294 intel_init_emon(dev);
6295 mutex_unlock(&dev->struct_mutex);
6296 } else if (INTEL_INFO(dev)->gen >= 6) {
6298 * PCU communication is slow and this doesn't need to be
6299 * done at any specific time, so do this out of our fast path
6300 * to make resume and init faster.
6302 * We depend on the HW RC6 power context save/restore
6303 * mechanism when entering D3 through runtime PM suspend. So
6304 * disable RPM until RPS/RC6 is properly setup. We can only
6305 * get here via the driver load/system resume/runtime resume
6306 * paths, so the _noresume version is enough (and in case of
6307 * runtime resume it's necessary).
6309 if (schedule_delayed_work(&dev_priv->rps.delayed_resume_work,
6310 round_jiffies_up_relative(HZ)))
6311 intel_runtime_pm_get_noresume(dev_priv);
6315 void intel_reset_gt_powersave(struct drm_device *dev)
6317 struct drm_i915_private *dev_priv = dev->dev_private;
6319 dev_priv->rps.enabled = false;
6320 intel_enable_gt_powersave(dev);
6323 static void ibx_init_clock_gating(struct drm_device *dev)
6325 struct drm_i915_private *dev_priv = dev->dev_private;
6328 * On Ibex Peak and Cougar Point, we need to disable clock
6329 * gating for the panel power sequencer or it will fail to
6330 * start up when no ports are active.
6332 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
6335 static void g4x_disable_trickle_feed(struct drm_device *dev)
6337 struct drm_i915_private *dev_priv = dev->dev_private;
6338 enum pipe pipe;
6340 for_each_pipe(dev_priv, pipe) {
6341 I915_WRITE(DSPCNTR(pipe),
6342 I915_READ(DSPCNTR(pipe)) |
6343 DISPPLANE_TRICKLE_FEED_DISABLE);
6344 intel_flush_primary_plane(dev_priv, pipe);
6348 static void ilk_init_lp_watermarks(struct drm_device *dev)
6350 struct drm_i915_private *dev_priv = dev->dev_private;
6352 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6353 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
6354 I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN);
6357 * Don't touch WM1S_LP_EN here.
6358 * Doing so could cause underruns.
static void ironlake_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	/*
	 * Required for FBC
	 * WaFbcDisableDpfcClockGating:ilk
	 */
	dspclk_gate |= ILK_DPFCRUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFCUNIT_CLOCK_GATE_DISABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE;

	I915_WRITE(PCH_3DCGDIS0,
		   MARIUNIT_CLOCK_GATE_DISABLE |
		   SVSMUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(PCH_3DCGDIS1,
		   VFMUNIT_CLOCK_GATE_DISABLE);

	/*
	 * According to the spec the following bits should be set in
	 * order to enable memory self-refresh
	 * The bit 22/21 of 0x42004
	 * The bit 5 of 0x42020
	 * The bit 15 of 0x45000
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   (I915_READ(ILK_DISPLAY_CHICKEN2) |
		    ILK_DPARB_GATE | ILK_VSDPFD_FULL));
	dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE;
	I915_WRITE(DISP_ARB_CTL,
		   (I915_READ(DISP_ARB_CTL) |
		    DISP_FBC_WM_DIS));

	ilk_init_lp_watermarks(dev);

	/*
	 * Based on the document from hardware guys the following bits
	 * should be set unconditionally in order to enable FBC.
	 * The bit 22 of 0x42000
	 * The bit 22 of 0x42004
	 * The bit 7,8,9 of 0x42020.
	 */
	if (IS_IRONLAKE_M(dev)) {
		/* WaFbcAsynchFlipDisableFbcQueue:ilk */
		I915_WRITE(ILK_DISPLAY_CHICKEN1,
			   I915_READ(ILK_DISPLAY_CHICKEN1) |
			   ILK_FBCQ_DIS);
		I915_WRITE(ILK_DISPLAY_CHICKEN2,
			   I915_READ(ILK_DISPLAY_CHICKEN2) |
			   ILK_DPARB_GATE);
	}

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);

	/* WaDisableRenderCachePipelinedFlush:ilk */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:ilk */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);

	ibx_init_clock_gating(dev);
}
static void cpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	uint32_t val;

	/*
	 * On Ibex Peak and Cougar Point, we need to disable clock
	 * gating for the panel power sequencer or it will fail to
	 * start up when no ports are active.
	 */
	I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE |
		   PCH_DPLUNIT_CLOCK_GATE_DISABLE |
		   PCH_CPUNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
		   DPLS_EDP_PPS_FIX_DIS);
	/* The below fixes the weird display corruption, a few pixels shifted
	 * downward, on (only) LVDS of some HP laptops with IVY.
	 */
	for_each_pipe(dev_priv, pipe) {
		val = I915_READ(TRANS_CHICKEN2(pipe));
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		if (dev_priv->vbt.fdi_rx_polarity_inverted)
			val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED;
		val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER;
		val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH;
		I915_WRITE(TRANS_CHICKEN2(pipe), val);
	}
	/* WADP0ClockGatingDisable */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(TRANS_CHICKEN1(pipe),
			   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
	}
}
static void gen6_check_mch_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(MCH_SSKPD);
	if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL)
		DRM_DEBUG_KMS("Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n",
			      tmp);
}
static void gen6_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;

	I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);

	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_ELPIN_409_SELECT);

	/* WaDisableHiZPlanesWhenMSAAEnabled:snb */
	I915_WRITE(_3D_CHICKEN,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));

	/* WaDisable_RenderCache_OperationalFlush:snb */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN6_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	ilk_init_lp_watermarks(dev);

	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

	I915_WRITE(GEN6_UCGCTL1,
		   I915_READ(GEN6_UCGCTL1) |
		   GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
	 * gating disable must be set. Failure to set it results in
	 * flickering pixels due to Z write ordering failures after
	 * some amount of runtime in the Mesa "fire" demo, and Unigine
	 * Sanctuary and Tropics, and apparently anything else with
	 * alpha test or pixel discard.
	 *
	 * According to the spec, bit 11 (RCCUNIT) must also be set,
	 * but we didn't debug actual testcases to find it out.
	 *
	 * WaDisableRCCUnitClockGating:snb
	 * WaDisableRCPBUnitClockGating:snb
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);

	/* WaStripsFansDisableFastClipPerformanceFix:snb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL));

	/*
	 * Bspec says:
	 * "This bit must be set if 3DSTATE_CLIP clip mode is set to normal and
	 * 3DSTATE_SF number of SF output attributes is more than 16."
	 */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH));

	/*
	 * According to the spec the following bits should be
	 * set in order to enable memory self-refresh and fbc:
	 * The bit21 and bit22 of 0x42000
	 * The bit21 and bit22 of 0x42004
	 * The bit5 and bit7 of 0x42020
	 * The bit14 of 0x70180
	 * The bit14 of 0x71180
	 *
	 * WaFbcAsynchFlipDisableFbcQueue:snb
	 */
	I915_WRITE(ILK_DISPLAY_CHICKEN1,
		   I915_READ(ILK_DISPLAY_CHICKEN1) |
		   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
	I915_WRITE(ILK_DISPLAY_CHICKEN2,
		   I915_READ(ILK_DISPLAY_CHICKEN2) |
		   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
	I915_WRITE(ILK_DSPCLK_GATE_D,
		   I915_READ(ILK_DSPCLK_GATE_D) |
		   ILK_DPARBUNIT_CLOCK_GATE_ENABLE |
		   ILK_DPFDUNIT_CLOCK_GATE_ENABLE);

	g4x_disable_trickle_feed(dev);

	cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
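
/*
 * A minimal sketch of the "masked" register convention behind the
 * _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE()/_MASKED_FIELD() writes used
 * throughout these functions (illustrative; the actual macros live in the
 * driver's register headers): for these registers the upper 16 bits of the
 * written value act as a write-enable mask for the lower 16 bits, so
 * individual bits can be flipped without a read-modify-write cycle:
 *
 *	enable bit b:   write (b << 16) | b
 *	disable bit b:  write (b << 16) | 0
 *	set a field:    write (mask << 16) | value
 */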
static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
{
	uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);

	/*
	 * WaVSThreadDispatchOverride:ivb,vlv
	 *
	 * This actually overrides the dispatch
	 * mode for all thread types.
	 */
	reg &= ~GEN7_FF_SCHED_MASK;
	reg |= GEN7_FF_TS_SCHED_HW;
	reg |= GEN7_FF_VS_SCHED_HW;
	reg |= GEN7_FF_DS_SCHED_HW;

	I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
static void lpt_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/*
	 * TODO: this bit should only be enabled when really needed, then
	 * disabled when not needed anymore in order to save power.
	 */
	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
		I915_WRITE(SOUTH_DSPCLK_GATE_D,
			   I915_READ(SOUTH_DSPCLK_GATE_D) |
			   PCH_LP_PARTITION_LEVEL_DISABLE);

	/* WADPOClockGatingDisable:hsw */
	I915_WRITE(_TRANSA_CHICKEN1,
		   I915_READ(_TRANSA_CHICKEN1) |
		   TRANS_CHICKEN1_DP0UNIT_GC_DISABLE);
}
static void lpt_suspend_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
		uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);

		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
static void broadwell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	I915_WRITE(WM3_LP_ILK, 0);
	I915_WRITE(WM2_LP_ILK, 0);
	I915_WRITE(WM1_LP_ILK, 0);

	/* WaSwitchSolVfFArbitrationPriority:bdw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaPsrDPAMaskVBlankInSRD:bdw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD);

	/* WaPsrDPRSUnmaskVBlankInSRD:bdw */
	for_each_pipe(dev_priv, pipe) {
		I915_WRITE(CHICKEN_PIPESL_1(pipe),
			   I915_READ(CHICKEN_PIPESL_1(pipe)) |
			   BDW_DPRS_MASK_VBLANK_SRD);
	}

	/* WaVSRefCountFullforceMissDisable:bdw */
	/* WaDSRefCountFullforceMissDisable:bdw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableSDEUnitClockGating:bdw */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);

	lpt_init_clock_gating(dev);
}
static void haswell_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ilk_init_lp_watermarks(dev);

	/* L3 caching of data atomics doesn't work -- disable it. */
	I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
	I915_WRITE(HSW_ROW_CHICKEN3,
		   _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:hsw */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	/* WaVSRefCountFullforceMissDisable:hsw */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) & ~GEN7_FF_VS_REF_CNT_FFME);

	/* WaDisable_RenderCache_OperationalFlush:hsw */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* enable HiZ Raw Stall Optimization */
	I915_WRITE(CACHE_MODE_0_GEN7,
		   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));

	/* WaDisable4x2SubspanOptimization:hsw */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	/* WaSwitchSolVfFArbitrationPriority:hsw */
	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);

	/* WaRsPkgCStateDisplayPMReq:hsw */
	I915_WRITE(CHICKEN_PAR1_1,
		   I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);

	lpt_init_clock_gating(dev);
}
static void ivybridge_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t snpcr;

	ilk_init_lp_watermarks(dev);

	I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:ivb */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:ivb */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaDisablePSDDualDispatchEnable:ivb */
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
			   _MASKED_BIT_ENABLE(GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:ivb */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
	I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
		   GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);

	/* WaApplyL3ControlAndL3ChickenMode:ivb */
	I915_WRITE(GEN7_L3CNTLREG1,
		   GEN7_WA_FOR_GEN7_L3_CONTROL);
	I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
		   GEN7_WA_L3_CHICKEN_MODE);
	if (IS_IVB_GT1(dev))
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	else {
		/* must write both registers */
		I915_WRITE(GEN7_ROW_CHICKEN2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
		I915_WRITE(GEN7_ROW_CHICKEN2_GT2,
			   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
	}

	/* WaForceL3Serialization:ivb */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:ivb workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* This is required by WaCatErrorRejectionIssue:ivb */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	g4x_disable_trickle_feed(dev);

	gen7_setup_fixed_func_scheduler(dev_priv);

	if (0) { /* causes HiZ corruption on ivb:gt1 */
		/* enable HiZ Raw Stall Optimization */
		I915_WRITE(CACHE_MODE_0_GEN7,
			   _MASKED_BIT_DISABLE(HIZ_RAW_STALL_OPT_DISABLE));
	}

	/* WaDisable4x2SubspanOptimization:ivb */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * BSpec recommends 8x4 when MSAA is used,
	 * however in practice 16x4 seems fastest.
	 *
	 * Note that PS/WM thread counts depend on the WIZ hashing
	 * disable bit, which we don't touch here, but it's good
	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
	 */
	I915_WRITE(GEN7_GT_MODE,
		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));

	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
	snpcr &= ~GEN6_MBC_SNPCR_MASK;
	snpcr |= GEN6_MBC_SNPCR_MED;
	I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);

	if (!HAS_PCH_NOP(dev))
		cpt_init_clock_gating(dev);

	gen6_check_mch_setup(dev);
}
static void valleyview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableEarlyCull:vlv */
	I915_WRITE(_3D_CHICKEN3,
		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));

	/* WaDisableBackToBackFlipFix:vlv */
	I915_WRITE(IVB_CHICKEN3,
		   CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
		   CHICKEN3_DGMG_DONE_FIX_DISABLE);

	/* WaPsdDispatchEnable:vlv */
	/* WaDisablePSDDualDispatchEnable:vlv */
	I915_WRITE(GEN7_HALF_SLICE_CHICKEN1,
		   _MASKED_BIT_ENABLE(GEN7_MAX_PS_THREAD_DEP |
				      GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE));

	/* WaDisable_RenderCache_OperationalFlush:vlv */
	I915_WRITE(CACHE_MODE_0_GEN7, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	/* WaForceL3Serialization:vlv */
	I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
		   ~L3SQ_URB_READ_CAM_MATCH_DISABLE);

	/* WaDisableDopClockGating:vlv */
	I915_WRITE(GEN7_ROW_CHICKEN2,
		   _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));

	/* This is required by WaCatErrorRejectionIssue:vlv */
	I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
		   I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
		   GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);

	gen7_setup_fixed_func_scheduler(dev_priv);

	/*
	 * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
	 * This implements the WaDisableRCZUnitClockGating:vlv workaround.
	 */
	I915_WRITE(GEN6_UCGCTL2,
		   GEN6_RCZUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableL3Bank2xClockGate:vlv
	 * Disabling L3 clock gating- MMIO 940c[25] = 1
	 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */
	I915_WRITE(GEN7_UCGCTL4,
		   I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/*
	 * BSpec says this must be set, even though
	 * WaDisable4x2SubspanOptimization isn't listed for VLV.
	 */
	I915_WRITE(CACHE_MODE_1,
		   _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));

	/*
	 * WaIncreaseL3CreditsForVLVB0:vlv
	 * This is the hardware default actually.
	 */
	I915_WRITE(GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);

	/*
	 * WaDisableVLVClockGating_VBIIssue:vlv
	 * Disable clock gating on the GCFG unit to prevent a delay
	 * in the reporting of vblank events.
	 */
	I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS);
}
static void cherryview_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);

	/* WaVSRefCountFullforceMissDisable:chv */
	/* WaDSRefCountFullforceMissDisable:chv */
	I915_WRITE(GEN7_FF_THREAD_MODE,
		   I915_READ(GEN7_FF_THREAD_MODE) &
		   ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME));

	/* WaDisableSemaphoreAndSyncFlipWait:chv */
	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
		   _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));

	/* WaDisableCSUnitClockGating:chv */
	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
		   GEN6_CSUNIT_CLOCK_GATE_DISABLE);

	/* WaDisableSDEUnitClockGating:chv */
	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
}
static void g4x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dspclk_gate;

	I915_WRITE(RENCLK_GATE_D1, 0);
	I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
		   GS_UNIT_CLOCK_GATE_DISABLE |
		   CL_UNIT_CLOCK_GATE_DISABLE);
	I915_WRITE(RAMCLK_GATE_D, 0);
	dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
		OVRUNIT_CLOCK_GATE_DISABLE |
		OVCUNIT_CLOCK_GATE_DISABLE;
	if (IS_GM45(dev))
		dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
	I915_WRITE(DSPCLK_GATE_D, dspclk_gate);

	/* WaDisableRenderCachePipelinedFlush */
	I915_WRITE(CACHE_MODE_0,
		   _MASKED_BIT_ENABLE(CM0_PIPELINED_RENDER_FLUSH_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:g4x */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));

	g4x_disable_trickle_feed(dev);
}
static void crestline_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(DSPCLK_GATE_D, 0);
	I915_WRITE(RAMCLK_GATE_D, 0);
	I915_WRITE16(DEUC, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void broadwater_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
		   I965_RCC_CLOCK_GATE_DISABLE |
		   I965_RCPB_CLOCK_GATE_DISABLE |
		   I965_ISC_CLOCK_GATE_DISABLE |
		   I965_FBC_CLOCK_GATE_DISABLE);
	I915_WRITE(RENCLK_GATE_D2, 0);
	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));

	/* WaDisable_RenderCache_OperationalFlush:gen4 */
	I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
}
static void gen3_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dstate = I915_READ(D_STATE);

	dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
		DSTATE_DOT_CLOCK_GATING;
	I915_WRITE(D_STATE, dstate);

	if (IS_PINEVIEW(dev))
		I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));

	/* IIR "flip pending" means done if this bit is set */
	I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));

	/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
	I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));

	I915_WRITE(MI_ARB_STATE,
		   _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);

	/* interrupts should cause a wake up from C3 */
	I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
		   _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i830_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);

	I915_WRITE(MEM_MODE,
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
		   _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->display.init_clock_gating(dev);
}
void intel_suspend_hw(struct drm_device *dev)
{
	if (HAS_PCH_LPT(dev))
		lpt_suspend_hw(dev);
}
static void intel_init_fbc(struct drm_i915_private *dev_priv)
{
	if (!HAS_FBC(dev_priv)) {
		dev_priv->fbc.enabled = false;
		return;
	}

	if (INTEL_INFO(dev_priv)->gen >= 7) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = gen7_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (INTEL_INFO(dev_priv)->gen >= 5) {
		dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
		dev_priv->display.enable_fbc = ironlake_enable_fbc;
		dev_priv->display.disable_fbc = ironlake_disable_fbc;
	} else if (IS_GM45(dev_priv)) {
		dev_priv->display.fbc_enabled = g4x_fbc_enabled;
		dev_priv->display.enable_fbc = g4x_enable_fbc;
		dev_priv->display.disable_fbc = g4x_disable_fbc;
	} else {
		dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
		dev_priv->display.enable_fbc = i8xx_enable_fbc;
		dev_priv->display.disable_fbc = i8xx_disable_fbc;

		/* This value was pulled out of someone's hat */
		I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
	}

	dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
}
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_init_fbc(dev_priv);

	/* For cxsr */
	if (IS_PINEVIEW(dev))
		i915_pineview_get_mem_freq(dev);
	else if (IS_GEN5(dev))
		i915_ironlake_get_mem_freq(dev);

	/* For FIFO watermark updates */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_setup_wm_latency(dev);

		dev_priv->display.init_clock_gating = gen9_init_clock_gating;
		dev_priv->display.update_wm = skl_update_wm;
		dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
	} else if (HAS_PCH_SPLIT(dev)) {
		ilk_setup_wm_latency(dev);

		if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
		     dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
		    (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] &&
		     dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
			dev_priv->display.update_wm = ilk_update_wm;
			dev_priv->display.update_sprite_wm = ilk_update_sprite_wm;
		} else {
			DRM_DEBUG_KMS("Failed to read display plane latency. "
				      "Disable CxSR\n");
		}

		if (IS_GEN5(dev))
			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
		else if (IS_GEN6(dev))
			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
		else if (IS_IVYBRIDGE(dev))
			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
		else if (IS_HASWELL(dev))
			dev_priv->display.init_clock_gating = haswell_init_clock_gating;
		else if (INTEL_INFO(dev)->gen == 8)
			dev_priv->display.init_clock_gating = broadwell_init_clock_gating;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->display.update_wm = cherryview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			cherryview_init_clock_gating;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->display.update_wm = valleyview_update_wm;
		dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm;
		dev_priv->display.init_clock_gating =
			valleyview_init_clock_gating;
	} else if (IS_PINEVIEW(dev)) {
		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
					    dev_priv->is_ddr3,
					    dev_priv->fsb_freq,
					    dev_priv->mem_freq)) {
			DRM_INFO("failed to find known CxSR latency "
				 "(found ddr%s fsb freq %d, mem freq %d), "
				 "disabling CxSR\n",
				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
				 dev_priv->fsb_freq, dev_priv->mem_freq);
			/* Disable CxSR and never update its watermark again */
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->display.update_wm = NULL;
		} else
			dev_priv->display.update_wm = pineview_update_wm;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_G4X(dev)) {
		dev_priv->display.update_wm = g4x_update_wm;
		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
	} else if (IS_GEN4(dev)) {
		dev_priv->display.update_wm = i965_update_wm;
		if (IS_CRESTLINE(dev))
			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
		else if (IS_BROADWATER(dev))
			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
	} else if (IS_GEN3(dev)) {
		dev_priv->display.update_wm = i9xx_update_wm;
		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
	} else if (IS_GEN2(dev)) {
		if (INTEL_INFO(dev)->num_pipes == 1) {
			dev_priv->display.update_wm = i845_update_wm;
			dev_priv->display.get_fifo_size = i845_get_fifo_size;
		} else {
			dev_priv->display.update_wm = i9xx_update_wm;
			dev_priv->display.get_fifo_size = i830_get_fifo_size;
		}

		if (IS_I85X(dev) || IS_I865G(dev))
			dev_priv->display.init_clock_gating = i85x_init_clock_gating;
		else
			dev_priv->display.init_clock_gating = i830_init_clock_gating;
	} else {
		DRM_ERROR("unexpected fall-through in intel_init_pm\n");
	}
}
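
/*
 * Hypothetical call site (not part of this file), sketching how the hooks
 * installed by intel_init_pm() are consumed elsewhere in the driver.
 * Callers are expected to test the pointer first, since e.g. update_wm is
 * left NULL when CxSR is disabled on Pineview:
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 */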
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, *val);
	I915_WRITE(GEN6_PCODE_DATA1, 0);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	*val = I915_READ(GEN6_PCODE_DATA);
	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
{
	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
		DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
		return -EAGAIN;
	}

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
		     500)) {
		DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
		return -ETIMEDOUT;
	}

	I915_WRITE(GEN6_PCODE_DATA, 0);

	return 0;
}
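
/*
 * A minimal usage sketch for the two mailbox helpers above (illustrative,
 * not from the original source). Both must be called with rps.hw_lock
 * held; SOME_PCODE_OPCODE below is a placeholder, not a real mailbox
 * command:
 *
 *	u32 val = 0;
 *
 *	mutex_lock(&dev_priv->rps.hw_lock);
 *	if (sandybridge_pcode_read(dev_priv, SOME_PCODE_OPCODE, &val) == 0)
 *		DRM_DEBUG_DRIVER("pcode replied 0x%08x\n", val);
 *	mutex_unlock(&dev_priv->rps.hw_lock);
 */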
static int vlv_gpu_freq_div(unsigned int czclk_freq)
{
	switch (czclk_freq) {
	case 200:
		return 10;
	case 267:
		return 12;
	case 320:
	case 333:
		return 16;
	case 400:
		return 20;
	default:
		return -1;
	}
}
static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	div = vlv_gpu_freq_div(czclk_freq);
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
}
static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);

	mul = vlv_gpu_freq_div(czclk_freq);
	if (mul < 0)
		return mul;

	return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
}
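
/*
 * Worked example for the two BYT conversions above (assuming a
 * hypothetical mem_freq of 1333 MHz; the opcode 0xc8 is only for
 * illustration):
 *
 *	czclk_freq = DIV_ROUND_CLOSEST(1333, 4) = 333
 *	div        = vlv_gpu_freq_div(333)     = 16
 *
 *	byt_gpu_freq(0xc8)   = ROUND(333 * (0xc8 + 6 - 0xbd) / 16)
 *	                     = ROUND(333 * 17 / 16) = 354 MHz
 *	byt_freq_opcode(354) = ROUND(16 * 354 / 333) + 0xbd - 6
 *	                     = 17 + 0xb7 = 0xc8
 *
 * i.e. rounding aside, the two functions are inverses of each other.
 */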
static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int div, czclk_freq = dev_priv->rps.cz_freq;

	div = vlv_gpu_freq_div(czclk_freq) / 2;
	if (div < 0)
		return div;

	return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
}
static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int mul, czclk_freq = dev_priv->rps.cz_freq;

	mul = vlv_gpu_freq_div(czclk_freq) / 2;
	if (mul < 0)
		return mul;

	/* CHV needs even values */
	return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
}
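
/*
 * Worked example for the CHV variants above (assuming a hypothetical
 * cz_freq of 320 MHz):
 *
 *	div = vlv_gpu_freq_div(320) / 2 = 16 / 2 = 8
 *
 *	chv_gpu_freq(0x2c)   = ROUND(320 * 44 / (2 * 8)) / 2 = 440 MHz
 *	chv_freq_opcode(440) = ROUND(440 * 2 * 8 / 320) * 2  = 44 = 0x2c
 *
 * The trailing "* 2" is what keeps CHV opcodes even.
 */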
int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_gpu_freq(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_gpu_freq(dev_priv, val);

	return ret;
}
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val)
{
	int ret = -1;

	if (IS_CHERRYVIEW(dev_priv->dev))
		ret = chv_freq_opcode(dev_priv, val);
	else if (IS_VALLEYVIEW(dev_priv->dev))
		ret = byt_freq_opcode(dev_priv, val);

	return ret;
}
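
/*
 * Usage note (illustrative): these two wrappers are the public entry
 * points for opcode/MHz conversion, and a round trip such as
 *
 *	vlv_freq_opcode(dev_priv, vlv_gpu_freq(dev_priv, opcode))
 *
 * recovers the original opcode (modulo rounding); both return a negative
 * value when the CZ clock is unrecognized.
 */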
void intel_pm_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	mutex_init(&dev_priv->rps.hw_lock);

	INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
			  intel_gen6_powersave_work);

	dev_priv->pm.suspended = false;
}