drm/i915: Extract PCU communication
firefly-linux-kernel-4.4.55.git: drivers/gpu/drm/i915/intel_pm.c
1 /*
2  * Copyright © 2012 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eugeni Dodonov <eugeni.dodonov@intel.com>
25  *
26  */
27
28 #include <linux/cpufreq.h>
29 #include "i915_drv.h"
30 #include "intel_drv.h"
31 #include "../../../platform/x86/intel_ips.h"
32 #include <linux/module.h>
33
34 #define FORCEWAKE_ACK_TIMEOUT_MS 2
35
36 /* FBC, or Frame Buffer Compression, is a technique employed to compress the
37  * framebuffer contents in-memory, aiming at reducing the required bandwidth
38  * during in-memory transfers and, therefore, reduce the power consumption.
39  *
40  * The benefits of FBC are mostly visible with solid backgrounds and
41  * variation-less patterns.
42  *
43  * FBC-related functionality can be enabled by means of the
44  * i915.i915_enable_fbc parameter
45  */
46
47 static void i8xx_disable_fbc(struct drm_device *dev)
48 {
49         struct drm_i915_private *dev_priv = dev->dev_private;
50         u32 fbc_ctl;
51
52         /* Disable compression */
53         fbc_ctl = I915_READ(FBC_CONTROL);
54         if ((fbc_ctl & FBC_CTL_EN) == 0)
55                 return;
56
57         fbc_ctl &= ~FBC_CTL_EN;
58         I915_WRITE(FBC_CONTROL, fbc_ctl);
59
60         /* Wait for compressing bit to clear */
61         if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
62                 DRM_DEBUG_KMS("FBC idle timed out\n");
63                 return;
64         }
65
66         DRM_DEBUG_KMS("disabled FBC\n");
67 }
68
69 static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
70 {
71         struct drm_device *dev = crtc->dev;
72         struct drm_i915_private *dev_priv = dev->dev_private;
73         struct drm_framebuffer *fb = crtc->fb;
74         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
75         struct drm_i915_gem_object *obj = intel_fb->obj;
76         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
77         int cfb_pitch;
78         int plane, i;
79         u32 fbc_ctl, fbc_ctl2;
80
81         cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
82         if (fb->pitches[0] < cfb_pitch)
83                 cfb_pitch = fb->pitches[0];
84
85         /* FBC_CTL wants 64B units */
86         cfb_pitch = (cfb_pitch / 64) - 1;
87         plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
88
89         /* Clear old tags */
90         for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
91                 I915_WRITE(FBC_TAG + (i * 4), 0);
92
93         /* Set it up... */
94         fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
95         fbc_ctl2 |= plane;
96         I915_WRITE(FBC_CONTROL2, fbc_ctl2);
97         I915_WRITE(FBC_FENCE_OFF, crtc->y);
98
99         /* enable it... */
100         fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
101         if (IS_I945GM(dev))
102                 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
103         fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
104         fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
105         fbc_ctl |= obj->fence_reg;
106         I915_WRITE(FBC_CONTROL, fbc_ctl);
107
108         DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
109                       cfb_pitch, crtc->y, intel_crtc->plane);
110 }
111
112 static bool i8xx_fbc_enabled(struct drm_device *dev)
113 {
114         struct drm_i915_private *dev_priv = dev->dev_private;
115
116         return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
117 }
118
119 static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
120 {
121         struct drm_device *dev = crtc->dev;
122         struct drm_i915_private *dev_priv = dev->dev_private;
123         struct drm_framebuffer *fb = crtc->fb;
124         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
125         struct drm_i915_gem_object *obj = intel_fb->obj;
126         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
127         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
128         unsigned long stall_watermark = 200;
129         u32 dpfc_ctl;
130
131         dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
132         dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
133         I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
134
135         I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
136                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
137                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
138         I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
139
140         /* enable it... */
141         I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
142
143         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
144 }
145
146 static void g4x_disable_fbc(struct drm_device *dev)
147 {
148         struct drm_i915_private *dev_priv = dev->dev_private;
149         u32 dpfc_ctl;
150
151         /* Disable compression */
152         dpfc_ctl = I915_READ(DPFC_CONTROL);
153         if (dpfc_ctl & DPFC_CTL_EN) {
154                 dpfc_ctl &= ~DPFC_CTL_EN;
155                 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
156
157                 DRM_DEBUG_KMS("disabled FBC\n");
158         }
159 }
160
161 static bool g4x_fbc_enabled(struct drm_device *dev)
162 {
163         struct drm_i915_private *dev_priv = dev->dev_private;
164
165         return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
166 }
167
168 static void sandybridge_blit_fbc_update(struct drm_device *dev)
169 {
170         struct drm_i915_private *dev_priv = dev->dev_private;
171         u32 blt_ecoskpd;
172
173         /* Make sure blitter notifies FBC of writes */
174         gen6_gt_force_wake_get(dev_priv);
175         blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
176         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
177                 GEN6_BLITTER_LOCK_SHIFT;
178         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
179         blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
180         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
181         blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
182                          GEN6_BLITTER_LOCK_SHIFT);
183         I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
184         POSTING_READ(GEN6_BLITTER_ECOSKPD);
185         gen6_gt_force_wake_put(dev_priv);
186 }
187
188 static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
189 {
190         struct drm_device *dev = crtc->dev;
191         struct drm_i915_private *dev_priv = dev->dev_private;
192         struct drm_framebuffer *fb = crtc->fb;
193         struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
194         struct drm_i915_gem_object *obj = intel_fb->obj;
195         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
196         int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
197         unsigned long stall_watermark = 200;
198         u32 dpfc_ctl;
199
200         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
201         dpfc_ctl &= DPFC_RESERVED;
202         dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
203         /* Set persistent mode for front-buffer rendering, ala X. */
204         dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
205         dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
206         I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
207
208         I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
209                    (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
210                    (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
211         I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
212         I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
213         /* enable it... */
214         I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
215
216         if (IS_GEN6(dev)) {
217                 I915_WRITE(SNB_DPFC_CTL_SA,
218                            SNB_CPU_FENCE_ENABLE | obj->fence_reg);
219                 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
220                 sandybridge_blit_fbc_update(dev);
221         }
222
223         DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
224 }
225
226 static void ironlake_disable_fbc(struct drm_device *dev)
227 {
228         struct drm_i915_private *dev_priv = dev->dev_private;
229         u32 dpfc_ctl;
230
231         /* Disable compression */
232         dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
233         if (dpfc_ctl & DPFC_CTL_EN) {
234                 dpfc_ctl &= ~DPFC_CTL_EN;
235                 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
236
237                 DRM_DEBUG_KMS("disabled FBC\n");
238         }
239 }
240
241 static bool ironlake_fbc_enabled(struct drm_device *dev)
242 {
243         struct drm_i915_private *dev_priv = dev->dev_private;
244
245         return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
246 }
247
248 bool intel_fbc_enabled(struct drm_device *dev)
249 {
250         struct drm_i915_private *dev_priv = dev->dev_private;
251
252         if (!dev_priv->display.fbc_enabled)
253                 return false;
254
255         return dev_priv->display.fbc_enabled(dev);
256 }
257
258 static void intel_fbc_work_fn(struct work_struct *__work)
259 {
260         struct intel_fbc_work *work =
261                 container_of(to_delayed_work(__work),
262                              struct intel_fbc_work, work);
263         struct drm_device *dev = work->crtc->dev;
264         struct drm_i915_private *dev_priv = dev->dev_private;
265
266         mutex_lock(&dev->struct_mutex);
267         if (work == dev_priv->fbc_work) {
268                 /* Double check that we haven't switched fb without cancelling
269                  * the prior work.
270                  */
271                 if (work->crtc->fb == work->fb) {
272                         dev_priv->display.enable_fbc(work->crtc,
273                                                      work->interval);
274
275                         dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
276                         dev_priv->cfb_fb = work->crtc->fb->base.id;
277                         dev_priv->cfb_y = work->crtc->y;
278                 }
279
280                 dev_priv->fbc_work = NULL;
281         }
282         mutex_unlock(&dev->struct_mutex);
283
284         kfree(work);
285 }
286
287 static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
288 {
289         if (dev_priv->fbc_work == NULL)
290                 return;
291
292         DRM_DEBUG_KMS("cancelling pending FBC enable\n");
293
294         /* Synchronisation is provided by struct_mutex and checking of
295          * dev_priv->fbc_work, so we can perform the cancellation
296          * entirely asynchronously.
297          */
298         if (cancel_delayed_work(&dev_priv->fbc_work->work))
299                 /* tasklet was killed before being run, clean up */
300                 kfree(dev_priv->fbc_work);
301
302         /* Mark the work as no longer wanted so that if it does
303          * wake up (because the work was already running and waiting
304          * for our mutex), it will discover that it is no longer
305          * necessary to run.
306          */
307         dev_priv->fbc_work = NULL;
308 }
309
310 void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
311 {
312         struct intel_fbc_work *work;
313         struct drm_device *dev = crtc->dev;
314         struct drm_i915_private *dev_priv = dev->dev_private;
315
316         if (!dev_priv->display.enable_fbc)
317                 return;
318
319         intel_cancel_fbc_work(dev_priv);
320
321         work = kzalloc(sizeof *work, GFP_KERNEL);
322         if (work == NULL) {
323                 dev_priv->display.enable_fbc(crtc, interval);
324                 return;
325         }
326
327         work->crtc = crtc;
328         work->fb = crtc->fb;
329         work->interval = interval;
330         INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
331
332         dev_priv->fbc_work = work;
333
334         DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
335
336         /* Delay the actual enabling to let pageflipping cease and the
337          * display to settle before starting the compression. Note that
338          * this delay also serves a second purpose: it allows for a
339          * vblank to pass after disabling the FBC before we attempt
340          * to modify the control registers.
341          *
342          * A more complicated solution would involve tracking vblanks
343          * following the termination of the page-flipping sequence
344          * and indeed performing the enable as a co-routine and not
345          * waiting synchronously upon the vblank.
346          */
347         schedule_delayed_work(&work->work, msecs_to_jiffies(50));
348 }
349
350 void intel_disable_fbc(struct drm_device *dev)
351 {
352         struct drm_i915_private *dev_priv = dev->dev_private;
353
354         intel_cancel_fbc_work(dev_priv);
355
356         if (!dev_priv->display.disable_fbc)
357                 return;
358
359         dev_priv->display.disable_fbc(dev);
360         dev_priv->cfb_plane = -1;
361 }
362
363 /**
364  * intel_update_fbc - enable/disable FBC as needed
365  * @dev: the drm_device
366  *
367  * Set up the framebuffer compression hardware at mode set time.  We
368  * enable it if possible:
369  *   - plane A only (on pre-965)
370  *   - no pixel multiply/line duplication
371  *   - no alpha buffer discard
372  *   - no dual wide
373  *   - framebuffer <= 2048 in width, 1536 in height
374  *
375  * We can't assume that any compression will take place (worst case),
376  * so the compressed buffer has to be the same size as the uncompressed
377  * one.  It also must reside (along with the line length buffer) in
378  * stolen memory.
379  *
380  * We need to enable/disable FBC on a global basis.
381  */
382 void intel_update_fbc(struct drm_device *dev)
383 {
384         struct drm_i915_private *dev_priv = dev->dev_private;
385         struct drm_crtc *crtc = NULL, *tmp_crtc;
386         struct intel_crtc *intel_crtc;
387         struct drm_framebuffer *fb;
388         struct intel_framebuffer *intel_fb;
389         struct drm_i915_gem_object *obj;
390         int enable_fbc;
391
392         if (!i915_powersave)
393                 return;
394
395         if (!I915_HAS_FBC(dev))
396                 return;
397
398         /*
399          * If FBC is already on, we just have to verify that we can
400          * keep it that way...
401          * Need to disable if:
402          *   - more than one pipe is active
403          *   - changing FBC params (stride, fence, mode)
404          *   - new fb is too large to fit in compressed buffer
405          *   - going to an unsupported config (interlace, pixel multiply, etc.)
406          */
407         list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
408                 if (tmp_crtc->enabled &&
409                     !to_intel_crtc(tmp_crtc)->primary_disabled &&
410                     tmp_crtc->fb) {
411                         if (crtc) {
412                                 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
413                                 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
414                                 goto out_disable;
415                         }
416                         crtc = tmp_crtc;
417                 }
418         }
419
420         if (!crtc || crtc->fb == NULL) {
421                 DRM_DEBUG_KMS("no output, disabling\n");
422                 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
423                 goto out_disable;
424         }
425
426         intel_crtc = to_intel_crtc(crtc);
427         fb = crtc->fb;
428         intel_fb = to_intel_framebuffer(fb);
429         obj = intel_fb->obj;
430
431         enable_fbc = i915_enable_fbc;
432         if (enable_fbc < 0) {
433                 DRM_DEBUG_KMS("fbc set to per-chip default\n");
434                 enable_fbc = 1;
435                 if (INTEL_INFO(dev)->gen <= 6)
436                         enable_fbc = 0;
437         }
438         if (!enable_fbc) {
439                 DRM_DEBUG_KMS("fbc disabled per module param\n");
440                 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
441                 goto out_disable;
442         }
443         if (intel_fb->obj->base.size > dev_priv->cfb_size) {
444                 DRM_DEBUG_KMS("framebuffer too large, disabling "
445                               "compression\n");
446                 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
447                 goto out_disable;
448         }
449         if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
450             (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
451                 DRM_DEBUG_KMS("mode incompatible with compression, "
452                               "disabling\n");
453                 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
454                 goto out_disable;
455         }
456         if ((crtc->mode.hdisplay > 2048) ||
457             (crtc->mode.vdisplay > 1536)) {
458                 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
459                 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
460                 goto out_disable;
461         }
462         if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
463                 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
464                 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
465                 goto out_disable;
466         }
467
468         /* The use of a CPU fence is mandatory in order to detect writes
469          * by the CPU to the scanout and trigger updates to the FBC.
470          */
471         if (obj->tiling_mode != I915_TILING_X ||
472             obj->fence_reg == I915_FENCE_REG_NONE) {
473                 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
474                 dev_priv->no_fbc_reason = FBC_NOT_TILED;
475                 goto out_disable;
476         }
477
478         /* If the kernel debugger is active, always disable compression */
479         if (in_dbg_master())
480                 goto out_disable;
481
482         /* If the scanout has not changed, don't modify the FBC settings.
483          * Note that we make the fundamental assumption that the fb->obj
484          * cannot be unpinned (and have its GTT offset and fence revoked)
485          * without first being decoupled from the scanout and FBC disabled.
486          */
487         if (dev_priv->cfb_plane == intel_crtc->plane &&
488             dev_priv->cfb_fb == fb->base.id &&
489             dev_priv->cfb_y == crtc->y)
490                 return;
491
492         if (intel_fbc_enabled(dev)) {
493                 /* We update FBC along two paths, after changing fb/crtc
494                  * configuration (modeswitching) and after page-flipping
495                  * finishes. For the latter, we know that not only did
496                  * we disable the FBC at the start of the page-flip
497                  * sequence, but also more than one vblank has passed.
498                  *
499                  * For the former case of modeswitching, it is possible
500                  * to switch between two FBC valid configurations
501                  * instantaneously so we do need to disable the FBC
502                  * before we can modify its control registers. We also
503                  * have to wait for the next vblank for that to take
504                  * effect. However, since we delay enabling FBC we can
505                  * assume that a vblank has passed since disabling and
506                  * that we can safely alter the registers in the deferred
507                  * callback.
508                  *
509                  * In the scenario that we go from a valid to invalid
510                  * and then back to valid FBC configuration we have
511                  * no strict enforcement that a vblank occurred since
512                  * disabling the FBC. However, along all current pipe
513                  * disabling paths we do need to wait for a vblank at
514                  * some point. And we wait before enabling FBC anyway.
515                  */
516                 DRM_DEBUG_KMS("disabling active FBC for update\n");
517                 intel_disable_fbc(dev);
518         }
519
520         intel_enable_fbc(crtc, 500);
521         return;
522
523 out_disable:
524         /* Multiple disables should be harmless */
525         if (intel_fbc_enabled(dev)) {
526                 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
527                 intel_disable_fbc(dev);
528         }
529 }
530
531 static void i915_pineview_get_mem_freq(struct drm_device *dev)
532 {
533         drm_i915_private_t *dev_priv = dev->dev_private;
534         u32 tmp;
535
536         tmp = I915_READ(CLKCFG);
537
538         switch (tmp & CLKCFG_FSB_MASK) {
539         case CLKCFG_FSB_533:
540                 dev_priv->fsb_freq = 533; /* 133*4 */
541                 break;
542         case CLKCFG_FSB_800:
543                 dev_priv->fsb_freq = 800; /* 200*4 */
544                 break;
545         case CLKCFG_FSB_667:
546                 dev_priv->fsb_freq =  667; /* 167*4 */
547                 break;
548         case CLKCFG_FSB_400:
549                 dev_priv->fsb_freq = 400; /* 100*4 */
550                 break;
551         }
552
553         switch (tmp & CLKCFG_MEM_MASK) {
554         case CLKCFG_MEM_533:
555                 dev_priv->mem_freq = 533;
556                 break;
557         case CLKCFG_MEM_667:
558                 dev_priv->mem_freq = 667;
559                 break;
560         case CLKCFG_MEM_800:
561                 dev_priv->mem_freq = 800;
562                 break;
563         }
564
565         /* detect pineview DDR3 setting */
566         tmp = I915_READ(CSHRDDR3CTL);
567         dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
568 }
569
570 static void i915_ironlake_get_mem_freq(struct drm_device *dev)
571 {
572         drm_i915_private_t *dev_priv = dev->dev_private;
573         u16 ddrpll, csipll;
574
575         ddrpll = I915_READ16(DDRMPLL1);
576         csipll = I915_READ16(CSIPLL0);
577
578         switch (ddrpll & 0xff) {
579         case 0xc:
580                 dev_priv->mem_freq = 800;
581                 break;
582         case 0x10:
583                 dev_priv->mem_freq = 1066;
584                 break;
585         case 0x14:
586                 dev_priv->mem_freq = 1333;
587                 break;
588         case 0x18:
589                 dev_priv->mem_freq = 1600;
590                 break;
591         default:
592                 DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
593                                  ddrpll & 0xff);
594                 dev_priv->mem_freq = 0;
595                 break;
596         }
597
598         dev_priv->ips.r_t = dev_priv->mem_freq;
599
600         switch (csipll & 0x3ff) {
601         case 0x00c:
602                 dev_priv->fsb_freq = 3200;
603                 break;
604         case 0x00e:
605                 dev_priv->fsb_freq = 3733;
606                 break;
607         case 0x010:
608                 dev_priv->fsb_freq = 4266;
609                 break;
610         case 0x012:
611                 dev_priv->fsb_freq = 4800;
612                 break;
613         case 0x014:
614                 dev_priv->fsb_freq = 5333;
615                 break;
616         case 0x016:
617                 dev_priv->fsb_freq = 5866;
618                 break;
619         case 0x018:
620                 dev_priv->fsb_freq = 6400;
621                 break;
622         default:
623                 DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
624                                  csipll & 0x3ff);
625                 dev_priv->fsb_freq = 0;
626                 break;
627         }
628
629         if (dev_priv->fsb_freq == 3200) {
630                 dev_priv->ips.c_m = 0;
631         } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
632                 dev_priv->ips.c_m = 1;
633         } else {
634                 dev_priv->ips.c_m = 2;
635         }
636 }
637
638 static const struct cxsr_latency cxsr_latency_table[] = {
639         {1, 0, 800, 400, 3382, 33382, 3983, 33983},    /* DDR2-400 SC */
640         {1, 0, 800, 667, 3354, 33354, 3807, 33807},    /* DDR2-667 SC */
641         {1, 0, 800, 800, 3347, 33347, 3763, 33763},    /* DDR2-800 SC */
642         {1, 1, 800, 667, 6420, 36420, 6873, 36873},    /* DDR3-667 SC */
643         {1, 1, 800, 800, 5902, 35902, 6318, 36318},    /* DDR3-800 SC */
644
645         {1, 0, 667, 400, 3400, 33400, 4021, 34021},    /* DDR2-400 SC */
646         {1, 0, 667, 667, 3372, 33372, 3845, 33845},    /* DDR2-667 SC */
647         {1, 0, 667, 800, 3386, 33386, 3822, 33822},    /* DDR2-800 SC */
648         {1, 1, 667, 667, 6438, 36438, 6911, 36911},    /* DDR3-667 SC */
649         {1, 1, 667, 800, 5941, 35941, 6377, 36377},    /* DDR3-800 SC */
650
651         {1, 0, 400, 400, 3472, 33472, 4173, 34173},    /* DDR2-400 SC */
652         {1, 0, 400, 667, 3443, 33443, 3996, 33996},    /* DDR2-667 SC */
653         {1, 0, 400, 800, 3430, 33430, 3946, 33946},    /* DDR2-800 SC */
654         {1, 1, 400, 667, 6509, 36509, 7062, 37062},    /* DDR3-667 SC */
655         {1, 1, 400, 800, 5985, 35985, 6501, 36501},    /* DDR3-800 SC */
656
657         {0, 0, 800, 400, 3438, 33438, 4065, 34065},    /* DDR2-400 SC */
658         {0, 0, 800, 667, 3410, 33410, 3889, 33889},    /* DDR2-667 SC */
659         {0, 0, 800, 800, 3403, 33403, 3845, 33845},    /* DDR2-800 SC */
660         {0, 1, 800, 667, 6476, 36476, 6955, 36955},    /* DDR3-667 SC */
661         {0, 1, 800, 800, 5958, 35958, 6400, 36400},    /* DDR3-800 SC */
662
663         {0, 0, 667, 400, 3456, 33456, 4103, 34106},    /* DDR2-400 SC */
664         {0, 0, 667, 667, 3428, 33428, 3927, 33927},    /* DDR2-667 SC */
665         {0, 0, 667, 800, 3443, 33443, 3905, 33905},    /* DDR2-800 SC */
666         {0, 1, 667, 667, 6494, 36494, 6993, 36993},    /* DDR3-667 SC */
667         {0, 1, 667, 800, 5998, 35998, 6460, 36460},    /* DDR3-800 SC */
668
669         {0, 0, 400, 400, 3528, 33528, 4255, 34255},    /* DDR2-400 SC */
670         {0, 0, 400, 667, 3500, 33500, 4079, 34079},    /* DDR2-667 SC */
671         {0, 0, 400, 800, 3487, 33487, 4029, 34029},    /* DDR2-800 SC */
672         {0, 1, 400, 667, 6566, 36566, 7145, 37145},    /* DDR3-667 SC */
673         {0, 1, 400, 800, 6042, 36042, 6584, 36584},    /* DDR3-800 SC */
674 };
675
676 static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
677                                                          int is_ddr3,
678                                                          int fsb,
679                                                          int mem)
680 {
681         const struct cxsr_latency *latency;
682         int i;
683
684         if (fsb == 0 || mem == 0)
685                 return NULL;
686
687         for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
688                 latency = &cxsr_latency_table[i];
689                 if (is_desktop == latency->is_desktop &&
690                     is_ddr3 == latency->is_ddr3 &&
691                     fsb == latency->fsb_freq && mem == latency->mem_freq)
692                         return latency;
693         }
694
695         DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
696
697         return NULL;
698 }
699
700 static void pineview_disable_cxsr(struct drm_device *dev)
701 {
702         struct drm_i915_private *dev_priv = dev->dev_private;
703
704         /* deactivate cxsr */
705         I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
706 }
707
708 /*
709  * Latency for FIFO fetches is dependent on several factors:
710  *   - memory configuration (speed, channels)
711  *   - chipset
712  *   - current MCH state
713  * It can be fairly high in some situations, so here we assume a fairly
714  * pessimal value.  It's a tradeoff between extra memory fetches (if we
715  * set this value too high, the FIFO will fetch frequently to stay full)
716  * and power consumption (set it too low to save power and we might see
717  * FIFO underruns and display "flicker").
718  *
719  * A value of 5us seems to be a good balance; safe for very low end
720  * platforms but not overly aggressive on lower latency configs.
721  */
722 static const int latency_ns = 5000;
723
724 static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
725 {
726         struct drm_i915_private *dev_priv = dev->dev_private;
727         uint32_t dsparb = I915_READ(DSPARB);
728         int size;
729
730         size = dsparb & 0x7f;
731         if (plane)
732                 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
733
734         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
735                       plane ? "B" : "A", size);
736
737         return size;
738 }
739
740 static int i85x_get_fifo_size(struct drm_device *dev, int plane)
741 {
742         struct drm_i915_private *dev_priv = dev->dev_private;
743         uint32_t dsparb = I915_READ(DSPARB);
744         int size;
745
746         size = dsparb & 0x1ff;
747         if (plane)
748                 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
749         size >>= 1; /* Convert to cachelines */
750
751         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
752                       plane ? "B" : "A", size);
753
754         return size;
755 }
756
757 static int i845_get_fifo_size(struct drm_device *dev, int plane)
758 {
759         struct drm_i915_private *dev_priv = dev->dev_private;
760         uint32_t dsparb = I915_READ(DSPARB);
761         int size;
762
763         size = dsparb & 0x7f;
764         size >>= 2; /* Convert to cachelines */
765
766         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
767                       plane ? "B" : "A",
768                       size);
769
770         return size;
771 }
772
773 static int i830_get_fifo_size(struct drm_device *dev, int plane)
774 {
775         struct drm_i915_private *dev_priv = dev->dev_private;
776         uint32_t dsparb = I915_READ(DSPARB);
777         int size;
778
779         size = dsparb & 0x7f;
780         size >>= 1; /* Convert to cachelines */
781
782         DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
783                       plane ? "B" : "A", size);
784
785         return size;
786 }
787
788 /* Pineview has different values for various configs */
789 static const struct intel_watermark_params pineview_display_wm = {
790         PINEVIEW_DISPLAY_FIFO,
791         PINEVIEW_MAX_WM,
792         PINEVIEW_DFT_WM,
793         PINEVIEW_GUARD_WM,
794         PINEVIEW_FIFO_LINE_SIZE
795 };
796 static const struct intel_watermark_params pineview_display_hplloff_wm = {
797         PINEVIEW_DISPLAY_FIFO,
798         PINEVIEW_MAX_WM,
799         PINEVIEW_DFT_HPLLOFF_WM,
800         PINEVIEW_GUARD_WM,
801         PINEVIEW_FIFO_LINE_SIZE
802 };
803 static const struct intel_watermark_params pineview_cursor_wm = {
804         PINEVIEW_CURSOR_FIFO,
805         PINEVIEW_CURSOR_MAX_WM,
806         PINEVIEW_CURSOR_DFT_WM,
807         PINEVIEW_CURSOR_GUARD_WM,
808         PINEVIEW_FIFO_LINE_SIZE,
809 };
810 static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
811         PINEVIEW_CURSOR_FIFO,
812         PINEVIEW_CURSOR_MAX_WM,
813         PINEVIEW_CURSOR_DFT_WM,
814         PINEVIEW_CURSOR_GUARD_WM,
815         PINEVIEW_FIFO_LINE_SIZE
816 };
817 static const struct intel_watermark_params g4x_wm_info = {
818         G4X_FIFO_SIZE,
819         G4X_MAX_WM,
820         G4X_MAX_WM,
821         2,
822         G4X_FIFO_LINE_SIZE,
823 };
824 static const struct intel_watermark_params g4x_cursor_wm_info = {
825         I965_CURSOR_FIFO,
826         I965_CURSOR_MAX_WM,
827         I965_CURSOR_DFT_WM,
828         2,
829         G4X_FIFO_LINE_SIZE,
830 };
831 static const struct intel_watermark_params valleyview_wm_info = {
832         VALLEYVIEW_FIFO_SIZE,
833         VALLEYVIEW_MAX_WM,
834         VALLEYVIEW_MAX_WM,
835         2,
836         G4X_FIFO_LINE_SIZE,
837 };
838 static const struct intel_watermark_params valleyview_cursor_wm_info = {
839         I965_CURSOR_FIFO,
840         VALLEYVIEW_CURSOR_MAX_WM,
841         I965_CURSOR_DFT_WM,
842         2,
843         G4X_FIFO_LINE_SIZE,
844 };
845 static const struct intel_watermark_params i965_cursor_wm_info = {
846         I965_CURSOR_FIFO,
847         I965_CURSOR_MAX_WM,
848         I965_CURSOR_DFT_WM,
849         2,
850         I915_FIFO_LINE_SIZE,
851 };
852 static const struct intel_watermark_params i945_wm_info = {
853         I945_FIFO_SIZE,
854         I915_MAX_WM,
855         1,
856         2,
857         I915_FIFO_LINE_SIZE
858 };
859 static const struct intel_watermark_params i915_wm_info = {
860         I915_FIFO_SIZE,
861         I915_MAX_WM,
862         1,
863         2,
864         I915_FIFO_LINE_SIZE
865 };
866 static const struct intel_watermark_params i855_wm_info = {
867         I855GM_FIFO_SIZE,
868         I915_MAX_WM,
869         1,
870         2,
871         I830_FIFO_LINE_SIZE
872 };
873 static const struct intel_watermark_params i830_wm_info = {
874         I830_FIFO_SIZE,
875         I915_MAX_WM,
876         1,
877         2,
878         I830_FIFO_LINE_SIZE
879 };
880
881 static const struct intel_watermark_params ironlake_display_wm_info = {
882         ILK_DISPLAY_FIFO,
883         ILK_DISPLAY_MAXWM,
884         ILK_DISPLAY_DFTWM,
885         2,
886         ILK_FIFO_LINE_SIZE
887 };
888 static const struct intel_watermark_params ironlake_cursor_wm_info = {
889         ILK_CURSOR_FIFO,
890         ILK_CURSOR_MAXWM,
891         ILK_CURSOR_DFTWM,
892         2,
893         ILK_FIFO_LINE_SIZE
894 };
895 static const struct intel_watermark_params ironlake_display_srwm_info = {
896         ILK_DISPLAY_SR_FIFO,
897         ILK_DISPLAY_MAX_SRWM,
898         ILK_DISPLAY_DFT_SRWM,
899         2,
900         ILK_FIFO_LINE_SIZE
901 };
902 static const struct intel_watermark_params ironlake_cursor_srwm_info = {
903         ILK_CURSOR_SR_FIFO,
904         ILK_CURSOR_MAX_SRWM,
905         ILK_CURSOR_DFT_SRWM,
906         2,
907         ILK_FIFO_LINE_SIZE
908 };
909
910 static const struct intel_watermark_params sandybridge_display_wm_info = {
911         SNB_DISPLAY_FIFO,
912         SNB_DISPLAY_MAXWM,
913         SNB_DISPLAY_DFTWM,
914         2,
915         SNB_FIFO_LINE_SIZE
916 };
917 static const struct intel_watermark_params sandybridge_cursor_wm_info = {
918         SNB_CURSOR_FIFO,
919         SNB_CURSOR_MAXWM,
920         SNB_CURSOR_DFTWM,
921         2,
922         SNB_FIFO_LINE_SIZE
923 };
924 static const struct intel_watermark_params sandybridge_display_srwm_info = {
925         SNB_DISPLAY_SR_FIFO,
926         SNB_DISPLAY_MAX_SRWM,
927         SNB_DISPLAY_DFT_SRWM,
928         2,
929         SNB_FIFO_LINE_SIZE
930 };
931 static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
932         SNB_CURSOR_SR_FIFO,
933         SNB_CURSOR_MAX_SRWM,
934         SNB_CURSOR_DFT_SRWM,
935         2,
936         SNB_FIFO_LINE_SIZE
937 };
938
939
940 /**
941  * intel_calculate_wm - calculate watermark level
942  * @clock_in_khz: pixel clock
943  * @wm: chip FIFO params
944  * @pixel_size: display pixel size
945  * @latency_ns: memory latency for the platform
946  *
947  * Calculate the watermark level (the level at which the display plane will
948  * start fetching from memory again).  Each chip has a different display
949  * FIFO size and allocation, so the caller needs to figure that out and pass
950  * in the correct intel_watermark_params structure.
951  *
952  * As the pixel clock runs, the FIFO will be drained at a rate that depends
953  * on the pixel size.  When it reaches the watermark level, it'll start
954  * fetching FIFO line sized based chunks from memory until the FIFO fills
955  * fetching FIFO line sized chunks from memory until the FIFO fills
956  * will occur, and a display engine hang could result.
957  * will occur, and a display engine hang could result. */
958 static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
959                                         const struct intel_watermark_params *wm,
960                                         int fifo_size,
961                                         int pixel_size,
962                                         unsigned long latency_ns)
963 {
964         long entries_required, wm_size;
965
966         /*
967          * Note: we need to make sure we don't overflow for various clock &
968          * latency values.
969          * clocks go from a few thousand to several hundred thousand.
970          * latency is usually a few thousand
971          */
972         entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
973                 1000;
974         entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
975
976         DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
977
978         wm_size = fifo_size - (entries_required + wm->guard_size);
979
980         DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
981
982         /* Don't promote wm_size to unsigned... */
983         if (wm_size > (long)wm->max_wm)
984                 wm_size = wm->max_wm;
985         if (wm_size <= 0)
986                 wm_size = wm->default_wm;
987         return wm_size;
988 }
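/*
 * Worked example (illustrative only; the numbers are assumed, not taken from
 * any particular platform): with clock_in_khz = 100000 (a 100 MHz pixel
 * clock), pixel_size = 4 (32bpp), latency_ns = 5000 and a 64 byte
 * cacheline_size:
 *
 *   entries_required = (100000 / 1000) * 4 * 5000 / 1000 = 2000 bytes
 *                    -> DIV_ROUND_UP(2000, 64)           = 32 cachelines
 *   wm_size          = fifo_size - (32 + wm->guard_size)
 *
 * i.e. the watermark is placed so that, at this pixel rate, the FIFO still
 * holds roughly 5us worth of data (plus the guard) when refetching starts.
 */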
989
990 static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
991 {
992         struct drm_crtc *crtc, *enabled = NULL;
993
994         list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
995                 if (crtc->enabled && crtc->fb) {
996                         if (enabled)
997                                 return NULL;
998                         enabled = crtc;
999                 }
1000         }
1001
1002         return enabled;
1003 }
1004
1005 static void pineview_update_wm(struct drm_device *dev)
1006 {
1007         struct drm_i915_private *dev_priv = dev->dev_private;
1008         struct drm_crtc *crtc;
1009         const struct cxsr_latency *latency;
1010         u32 reg;
1011         unsigned long wm;
1012
1013         latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
1014                                          dev_priv->fsb_freq, dev_priv->mem_freq);
1015         if (!latency) {
1016                 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
1017                 pineview_disable_cxsr(dev);
1018                 return;
1019         }
1020
1021         crtc = single_enabled_crtc(dev);
1022         if (crtc) {
1023                 int clock = crtc->mode.clock;
1024                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1025
1026                 /* Display SR */
1027                 wm = intel_calculate_wm(clock, &pineview_display_wm,
1028                                         pineview_display_wm.fifo_size,
1029                                         pixel_size, latency->display_sr);
1030                 reg = I915_READ(DSPFW1);
1031                 reg &= ~DSPFW_SR_MASK;
1032                 reg |= wm << DSPFW_SR_SHIFT;
1033                 I915_WRITE(DSPFW1, reg);
1034                 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
1035
1036                 /* cursor SR */
1037                 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
1038                                         pineview_display_wm.fifo_size,
1039                                         pixel_size, latency->cursor_sr);
1040                 reg = I915_READ(DSPFW3);
1041                 reg &= ~DSPFW_CURSOR_SR_MASK;
1042                 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
1043                 I915_WRITE(DSPFW3, reg);
1044
1045                 /* Display HPLL off SR */
1046                 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
1047                                         pineview_display_hplloff_wm.fifo_size,
1048                                         pixel_size, latency->display_hpll_disable);
1049                 reg = I915_READ(DSPFW3);
1050                 reg &= ~DSPFW_HPLL_SR_MASK;
1051                 reg |= wm & DSPFW_HPLL_SR_MASK;
1052                 I915_WRITE(DSPFW3, reg);
1053
1054                 /* cursor HPLL off SR */
1055                 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
1056                                         pineview_display_hplloff_wm.fifo_size,
1057                                         pixel_size, latency->cursor_hpll_disable);
1058                 reg = I915_READ(DSPFW3);
1059                 reg &= ~DSPFW_HPLL_CURSOR_MASK;
1060                 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
1061                 I915_WRITE(DSPFW3, reg);
1062                 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
1063
1064                 /* activate cxsr */
1065                 I915_WRITE(DSPFW3,
1066                            I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
1067                 DRM_DEBUG_KMS("Self-refresh is enabled\n");
1068         } else {
1069                 pineview_disable_cxsr(dev);
1070                 DRM_DEBUG_KMS("Self-refresh is disabled\n");
1071         }
1072 }
1073
1074 static bool g4x_compute_wm0(struct drm_device *dev,
1075                             int plane,
1076                             const struct intel_watermark_params *display,
1077                             int display_latency_ns,
1078                             const struct intel_watermark_params *cursor,
1079                             int cursor_latency_ns,
1080                             int *plane_wm,
1081                             int *cursor_wm)
1082 {
1083         struct drm_crtc *crtc;
1084         int htotal, hdisplay, clock, pixel_size;
1085         int line_time_us, line_count;
1086         int entries, tlb_miss;
1087
1088         crtc = intel_get_crtc_for_plane(dev, plane);
1089         if (crtc->fb == NULL || !crtc->enabled) {
1090                 *cursor_wm = cursor->guard_size;
1091                 *plane_wm = display->guard_size;
1092                 return false;
1093         }
1094
1095         htotal = crtc->mode.htotal;
1096         hdisplay = crtc->mode.hdisplay;
1097         clock = crtc->mode.clock;
1098         pixel_size = crtc->fb->bits_per_pixel / 8;
1099
1100         /* Use the small buffer method to calculate plane watermark */
1101         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1102         tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
1103         if (tlb_miss > 0)
1104                 entries += tlb_miss;
1105         entries = DIV_ROUND_UP(entries, display->cacheline_size);
1106         *plane_wm = entries + display->guard_size;
1107         if (*plane_wm > (int)display->max_wm)
1108                 *plane_wm = display->max_wm;
1109
1110         /* Use the large buffer method to calculate cursor watermark */
1111         line_time_us = ((htotal * 1000) / clock);
1112         line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1113         entries = line_count * 64 * pixel_size;
1114         tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1115         if (tlb_miss > 0)
1116                 entries += tlb_miss;
1117         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1118         *cursor_wm = entries + cursor->guard_size;
1119         if (*cursor_wm > (int)cursor->max_wm)
1120                 *cursor_wm = (int)cursor->max_wm;
1121
1122         return true;
1123 }
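/*
 * Illustrative numbers (assumed, not derived from a real mode): for
 * clock = 100000 kHz, pixel_size = 4, hdisplay = 1920 and a 5000 ns latency,
 * the "small buffer" plane estimate is
 *
 *   entries = ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes
 *
 * which, assuming the tlb_miss term comes out negative and is skipped, rounds
 * up to 32 cachelines of 64 bytes, so *plane_wm = 32 + guard_size.  The
 * cursor uses the "large buffer" estimate instead: with htotal = 2200,
 * line_time_us = 22 and line_count = (5000 / 22 + 1000) / 1000 = 1, it needs
 * 1 * 64 * 4 = 256 bytes.
 */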
1124
1125 /*
1126  * Check the wm result.
1127  *
1128  * If any calculated watermark values is larger than the maximum value that
1129  * can be programmed into the associated watermark register, that watermark
1130  * must be disabled.
1131  */
1132 static bool g4x_check_srwm(struct drm_device *dev,
1133                            int display_wm, int cursor_wm,
1134                            const struct intel_watermark_params *display,
1135                            const struct intel_watermark_params *cursor)
1136 {
1137         DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1138                       display_wm, cursor_wm);
1139
1140         if (display_wm > display->max_wm) {
1141                 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
1142                               display_wm, display->max_wm);
1143                 return false;
1144         }
1145
1146         if (cursor_wm > cursor->max_wm) {
1147                 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
1148                               cursor_wm, cursor->max_wm);
1149                 return false;
1150         }
1151
1152         if (!(display_wm || cursor_wm)) {
1153                 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1154                 return false;
1155         }
1156
1157         return true;
1158 }
1159
1160 static bool g4x_compute_srwm(struct drm_device *dev,
1161                              int plane,
1162                              int latency_ns,
1163                              const struct intel_watermark_params *display,
1164                              const struct intel_watermark_params *cursor,
1165                              int *display_wm, int *cursor_wm)
1166 {
1167         struct drm_crtc *crtc;
1168         int hdisplay, htotal, pixel_size, clock;
1169         unsigned long line_time_us;
1170         int line_count, line_size;
1171         int small, large;
1172         int entries;
1173
1174         if (!latency_ns) {
1175                 *display_wm = *cursor_wm = 0;
1176                 return false;
1177         }
1178
1179         crtc = intel_get_crtc_for_plane(dev, plane);
1180         hdisplay = crtc->mode.hdisplay;
1181         htotal = crtc->mode.htotal;
1182         clock = crtc->mode.clock;
1183         pixel_size = crtc->fb->bits_per_pixel / 8;
1184
1185         line_time_us = (htotal * 1000) / clock;
1186         line_count = (latency_ns / line_time_us + 1000) / 1000;
1187         line_size = hdisplay * pixel_size;
1188
1189         /* Use the minimum of the small and large buffer method for primary */
1190         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1191         large = line_count * line_size;
1192
1193         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1194         *display_wm = entries + display->guard_size;
1195
1196         /* calculate the self-refresh watermark for display cursor */
1197         entries = line_count * pixel_size * 64;
1198         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1199         *cursor_wm = entries + cursor->guard_size;
1200
1201         return g4x_check_srwm(dev,
1202                               *display_wm, *cursor_wm,
1203                               display, cursor);
1204 }
1205
1206 static bool vlv_compute_drain_latency(struct drm_device *dev,
1207                                      int plane,
1208                                      int *plane_prec_mult,
1209                                      int *plane_dl,
1210                                      int *cursor_prec_mult,
1211                                      int *cursor_dl)
1212 {
1213         struct drm_crtc *crtc;
1214         int clock, pixel_size;
1215         int entries;
1216
1217         crtc = intel_get_crtc_for_plane(dev, plane);
1218         if (crtc->fb == NULL || !crtc->enabled)
1219                 return false;
1220
1221         clock = crtc->mode.clock;       /* VESA DOT Clock */
1222         pixel_size = crtc->fb->bits_per_pixel / 8;      /* BPP */
1223
1224         entries = (clock / 1000) * pixel_size;
1225         *plane_prec_mult = (entries > 256) ?
1226                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1227         *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1228                                                      pixel_size);
1229
1230         entries = (clock / 1000) * 4;   /* BPP is always 4 for cursor */
1231         *cursor_prec_mult = (entries > 256) ?
1232                 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1233         *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1234
1235         return true;
1236 }
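/*
 * Example with assumed numbers: clock = 100000 kHz and pixel_size = 4 give
 * entries = (100000 / 1000) * 4 = 400, which exceeds 256, so the 32x
 * precision multiplier is selected and
 *
 *   *plane_dl = (64 * 32 * 4) / ((100000 / 1000) * 4) = 8192 / 400 = 20
 *
 * The cursor always uses pixel_size = 4, so it follows the same arithmetic.
 */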
1237
1238 /*
1239  * Update drain latency registers of memory arbiter
1240  *
1241  * Valleyview SoC has a new memory arbiter and needs drain latency registers
1242  * to be programmed. Each plane has a drain latency multiplier and a drain
1243  * latency value.
1244  */
1245
1246 static void vlv_update_drain_latency(struct drm_device *dev)
1247 {
1248         struct drm_i915_private *dev_priv = dev->dev_private;
1249         int planea_prec, planea_dl, planeb_prec, planeb_dl;
1250         int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1251         int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1252                                                         either 16 or 32 */
1253
1254         /* For plane A, Cursor A */
1255         if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1256                                       &cursor_prec_mult, &cursora_dl)) {
1257                 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1258                         DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1259                 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1260                         DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1261
1262                 I915_WRITE(VLV_DDL1, cursora_prec |
1263                                 (cursora_dl << DDL_CURSORA_SHIFT) |
1264                                 planea_prec | planea_dl);
1265         }
1266
1267         /* For plane B, Cursor B */
1268         if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1269                                       &cursor_prec_mult, &cursorb_dl)) {
1270                 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1271                         DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1272                 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1273                         DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1274
1275                 I915_WRITE(VLV_DDL2, cursorb_prec |
1276                                 (cursorb_dl << DDL_CURSORB_SHIFT) |
1277                                 planeb_prec | planeb_dl);
1278         }
1279 }
1280
1281 #define single_plane_enabled(mask) is_power_of_2(mask)
1282
1283 static void valleyview_update_wm(struct drm_device *dev)
1284 {
1285         static const int sr_latency_ns = 12000;
1286         struct drm_i915_private *dev_priv = dev->dev_private;
1287         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1288         int plane_sr, cursor_sr;
1289         unsigned int enabled = 0;
1290
1291         vlv_update_drain_latency(dev);
1292
1293         if (g4x_compute_wm0(dev, 0,
1294                             &valleyview_wm_info, latency_ns,
1295                             &valleyview_cursor_wm_info, latency_ns,
1296                             &planea_wm, &cursora_wm))
1297                 enabled |= 1;
1298
1299         if (g4x_compute_wm0(dev, 1,
1300                             &valleyview_wm_info, latency_ns,
1301                             &valleyview_cursor_wm_info, latency_ns,
1302                             &planeb_wm, &cursorb_wm))
1303                 enabled |= 2;
1304
1305         plane_sr = cursor_sr = 0;
1306         if (single_plane_enabled(enabled) &&
1307             g4x_compute_srwm(dev, ffs(enabled) - 1,
1308                              sr_latency_ns,
1309                              &valleyview_wm_info,
1310                              &valleyview_cursor_wm_info,
1311                              &plane_sr, &cursor_sr))
1312                 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1313         else
1314                 I915_WRITE(FW_BLC_SELF_VLV,
1315                            I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1316
1317         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1318                       planea_wm, cursora_wm,
1319                       planeb_wm, cursorb_wm,
1320                       plane_sr, cursor_sr);
1321
1322         I915_WRITE(DSPFW1,
1323                    (plane_sr << DSPFW_SR_SHIFT) |
1324                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1325                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1326                    planea_wm);
1327         I915_WRITE(DSPFW2,
1328                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1329                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1330         I915_WRITE(DSPFW3,
1331                    (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
1332 }
1333
1334 static void g4x_update_wm(struct drm_device *dev)
1335 {
1336         static const int sr_latency_ns = 12000;
1337         struct drm_i915_private *dev_priv = dev->dev_private;
1338         int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1339         int plane_sr, cursor_sr;
1340         unsigned int enabled = 0;
1341
1342         if (g4x_compute_wm0(dev, 0,
1343                             &g4x_wm_info, latency_ns,
1344                             &g4x_cursor_wm_info, latency_ns,
1345                             &planea_wm, &cursora_wm))
1346                 enabled |= 1;
1347
1348         if (g4x_compute_wm0(dev, 1,
1349                             &g4x_wm_info, latency_ns,
1350                             &g4x_cursor_wm_info, latency_ns,
1351                             &planeb_wm, &cursorb_wm))
1352                 enabled |= 2;
1353
1354         plane_sr = cursor_sr = 0;
1355         if (single_plane_enabled(enabled) &&
1356             g4x_compute_srwm(dev, ffs(enabled) - 1,
1357                              sr_latency_ns,
1358                              &g4x_wm_info,
1359                              &g4x_cursor_wm_info,
1360                              &plane_sr, &cursor_sr))
1361                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1362         else
1363                 I915_WRITE(FW_BLC_SELF,
1364                            I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1365
1366         DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1367                       planea_wm, cursora_wm,
1368                       planeb_wm, cursorb_wm,
1369                       plane_sr, cursor_sr);
1370
1371         I915_WRITE(DSPFW1,
1372                    (plane_sr << DSPFW_SR_SHIFT) |
1373                    (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1374                    (planeb_wm << DSPFW_PLANEB_SHIFT) |
1375                    planea_wm);
1376         I915_WRITE(DSPFW2,
1377                    (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1378                    (cursora_wm << DSPFW_CURSORA_SHIFT));
1379         /* HPLL off in SR has some issues on G4x... disable it */
1380         I915_WRITE(DSPFW3,
1381                    (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1382                    (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1383 }
1384
1385 static void i965_update_wm(struct drm_device *dev)
1386 {
1387         struct drm_i915_private *dev_priv = dev->dev_private;
1388         struct drm_crtc *crtc;
1389         int srwm = 1;
1390         int cursor_sr = 16;
1391
1392         /* Calc sr entries for one plane configs */
1393         crtc = single_enabled_crtc(dev);
1394         if (crtc) {
1395                 /* self-refresh has much higher latency */
1396                 static const int sr_latency_ns = 12000;
1397                 int clock = crtc->mode.clock;
1398                 int htotal = crtc->mode.htotal;
1399                 int hdisplay = crtc->mode.hdisplay;
1400                 int pixel_size = crtc->fb->bits_per_pixel / 8;
1401                 unsigned long line_time_us;
1402                 int entries;
1403
1404                 line_time_us = ((htotal * 1000) / clock);
1405
1406                 /* Use ns/us then divide to preserve precision */
1407                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1408                         pixel_size * hdisplay;
1409                 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1410                 srwm = I965_FIFO_SIZE - entries;
1411                 if (srwm < 0)
1412                         srwm = 1;
1413                 srwm &= 0x1ff;
1414                 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1415                               entries, srwm);
1416
1417                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1418                         pixel_size * 64;
1419                 entries = DIV_ROUND_UP(entries,
1420                                           i965_cursor_wm_info.cacheline_size);
1421                 cursor_sr = i965_cursor_wm_info.fifo_size -
1422                         (entries + i965_cursor_wm_info.guard_size);
1423
1424                 if (cursor_sr > i965_cursor_wm_info.max_wm)
1425                         cursor_sr = i965_cursor_wm_info.max_wm;
1426
1427                 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1428                               "cursor %d\n", srwm, cursor_sr);
1429
1430                 if (IS_CRESTLINE(dev))
1431                         I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1432         } else {
1433                 /* Turn off self refresh if both pipes are enabled */
1434                 if (IS_CRESTLINE(dev))
1435                         I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1436                                    & ~FW_BLC_SELF_EN);
1437         }
1438
1439         DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1440                       srwm);
1441
1442         /* 965 has limitations... */
1443         I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1444                    (8 << 16) | (8 << 8) | (8 << 0));
1445         I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1446         /* update cursor SR watermark */
1447         I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1448 }
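/*
 * Illustration only, not part of the driver: a hedged worked example of the
 * self-refresh calculation above for a hypothetical 1920x1080 mode
 * (clock = 148500 kHz, htotal = 2200, hdisplay = 1920, 4 bytes/pixel),
 * assuming I915_FIFO_LINE_SIZE is 64 and I965_FIFO_SIZE is 512:
 *   line_time_us = (2200 * 1000) / 148500                      = 14
 *   entries      = (((12000 / 14) + 1000) / 1000) * 4 * 1920   = 7680
 *   entries      = DIV_ROUND_UP(7680, 64)                      = 120
 *   srwm         = 512 - 120                                   = 392
 * The "+ 1000) / 1000" step effectively rounds the fractional line count of
 * the latency up to a whole line, which is why the example collapses to a
 * single line's worth of pixels.
 */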
1449
1450 static void i9xx_update_wm(struct drm_device *dev)
1451 {
1452         struct drm_i915_private *dev_priv = dev->dev_private;
1453         const struct intel_watermark_params *wm_info;
1454         uint32_t fwater_lo;
1455         uint32_t fwater_hi;
1456         int cwm, srwm = 1;
1457         int fifo_size;
1458         int planea_wm, planeb_wm;
1459         struct drm_crtc *crtc, *enabled = NULL;
1460
1461         if (IS_I945GM(dev))
1462                 wm_info = &i945_wm_info;
1463         else if (!IS_GEN2(dev))
1464                 wm_info = &i915_wm_info;
1465         else
1466                 wm_info = &i855_wm_info;
1467
1468         fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1469         crtc = intel_get_crtc_for_plane(dev, 0);
1470         if (crtc->enabled && crtc->fb) {
1471                 planea_wm = intel_calculate_wm(crtc->mode.clock,
1472                                                wm_info, fifo_size,
1473                                                crtc->fb->bits_per_pixel / 8,
1474                                                latency_ns);
1475                 enabled = crtc;
1476         } else
1477                 planea_wm = fifo_size - wm_info->guard_size;
1478
1479         fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1480         crtc = intel_get_crtc_for_plane(dev, 1);
1481         if (crtc->enabled && crtc->fb) {
1482                 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1483                                                wm_info, fifo_size,
1484                                                crtc->fb->bits_per_pixel / 8,
1485                                                latency_ns);
1486                 if (enabled == NULL)
1487                         enabled = crtc;
1488                 else
1489                         enabled = NULL;
1490         } else
1491                 planeb_wm = fifo_size - wm_info->guard_size;
1492
1493         DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1494
1495         /*
1496          * Overlay gets an aggressive default since video jitter is bad.
1497          */
1498         cwm = 2;
1499
1500         /* Play safe and disable self-refresh before adjusting watermarks. */
1501         if (IS_I945G(dev) || IS_I945GM(dev))
1502                 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1503         else if (IS_I915GM(dev))
1504                 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1505
1506         /* Calc sr entries for one plane configs */
1507         if (HAS_FW_BLC(dev) && enabled) {
1508                 /* self-refresh has much higher latency */
1509                 static const int sr_latency_ns = 6000;
1510                 int clock = enabled->mode.clock;
1511                 int htotal = enabled->mode.htotal;
1512                 int hdisplay = enabled->mode.hdisplay;
1513                 int pixel_size = enabled->fb->bits_per_pixel / 8;
1514                 unsigned long line_time_us;
1515                 int entries;
1516
1517                 line_time_us = (htotal * 1000) / clock;
1518
1519                 /* Use ns/us then divide to preserve precision */
1520                 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1521                         pixel_size * hdisplay;
1522                 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1523                 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1524                 srwm = wm_info->fifo_size - entries;
1525                 if (srwm < 0)
1526                         srwm = 1;
1527
1528                 if (IS_I945G(dev) || IS_I945GM(dev))
1529                         I915_WRITE(FW_BLC_SELF,
1530                                    FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1531                 else if (IS_I915GM(dev))
1532                         I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1533         }
1534
1535         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1536                       planea_wm, planeb_wm, cwm, srwm);
1537
1538         fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1539         fwater_hi = (cwm & 0x1f);
1540
1541         /* Set request length to 8 cachelines per fetch */
1542         fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1543         fwater_hi = fwater_hi | (1 << 8);
1544
1545         I915_WRITE(FW_BLC, fwater_lo);
1546         I915_WRITE(FW_BLC2, fwater_hi);
1547
1548         if (HAS_FW_BLC(dev)) {
1549                 if (enabled) {
1550                         if (IS_I945G(dev) || IS_I945GM(dev))
1551                                 I915_WRITE(FW_BLC_SELF,
1552                                            FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1553                         else if (IS_I915GM(dev))
1554                                 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1555                         DRM_DEBUG_KMS("memory self refresh enabled\n");
1556                 } else
1557                         DRM_DEBUG_KMS("memory self refresh disabled\n");
1558         }
1559 }
1560
1561 static void i830_update_wm(struct drm_device *dev)
1562 {
1563         struct drm_i915_private *dev_priv = dev->dev_private;
1564         struct drm_crtc *crtc;
1565         uint32_t fwater_lo;
1566         int planea_wm;
1567
1568         crtc = single_enabled_crtc(dev);
1569         if (crtc == NULL)
1570                 return;
1571
1572         planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1573                                        dev_priv->display.get_fifo_size(dev, 0),
1574                                        crtc->fb->bits_per_pixel / 8,
1575                                        latency_ns);
1576         fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1577         fwater_lo |= (3<<8) | planea_wm;
1578
1579         DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1580
1581         I915_WRITE(FW_BLC, fwater_lo);
1582 }
1583
1584 #define ILK_LP0_PLANE_LATENCY           700
1585 #define ILK_LP0_CURSOR_LATENCY          1300
1586
1587 /*
1588  * Check the wm result.
1589  *
1590  * If any calculated watermark value is larger than the maximum value that
1591  * can be programmed into the associated watermark register, that watermark
1592  * must be disabled.
1593  */
1594 static bool ironlake_check_srwm(struct drm_device *dev, int level,
1595                                 int fbc_wm, int display_wm, int cursor_wm,
1596                                 const struct intel_watermark_params *display,
1597                                 const struct intel_watermark_params *cursor)
1598 {
1599         struct drm_i915_private *dev_priv = dev->dev_private;
1600
1601         DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1602                       " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1603
1604         if (fbc_wm > SNB_FBC_MAX_SRWM) {
1605                 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
1606                               fbc_wm, SNB_FBC_MAX_SRWM, level);
1607
1608                 /* fbc has its own way to disable the FBC WM */
1609                 I915_WRITE(DISP_ARB_CTL,
1610                            I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1611                 return false;
1612         }
1613
1614         if (display_wm > display->max_wm) {
1615                 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
1616                               display_wm, SNB_DISPLAY_MAX_SRWM, level);
1617                 return false;
1618         }
1619
1620         if (cursor_wm > cursor->max_wm) {
1621                 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
1622                               cursor_wm, SNB_CURSOR_MAX_SRWM, level);
1623                 return false;
1624         }
1625
1626         if (!(fbc_wm || display_wm || cursor_wm)) {
1627                 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1628                 return false;
1629         }
1630
1631         return true;
1632 }
1633
1634 /*
1635  * Compute the watermark values for WM[1-3].
1636  */
1637 static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1638                                   int latency_ns,
1639                                   const struct intel_watermark_params *display,
1640                                   const struct intel_watermark_params *cursor,
1641                                   int *fbc_wm, int *display_wm, int *cursor_wm)
1642 {
1643         struct drm_crtc *crtc;
1644         unsigned long line_time_us;
1645         int hdisplay, htotal, pixel_size, clock;
1646         int line_count, line_size;
1647         int small, large;
1648         int entries;
1649
1650         if (!latency_ns) {
1651                 *fbc_wm = *display_wm = *cursor_wm = 0;
1652                 return false;
1653         }
1654
1655         crtc = intel_get_crtc_for_plane(dev, plane);
1656         hdisplay = crtc->mode.hdisplay;
1657         htotal = crtc->mode.htotal;
1658         clock = crtc->mode.clock;
1659         pixel_size = crtc->fb->bits_per_pixel / 8;
1660
1661         line_time_us = (htotal * 1000) / clock;
1662         line_count = (latency_ns / line_time_us + 1000) / 1000;
1663         line_size = hdisplay * pixel_size;
1664
1665         /* Use the minimum of the small and large buffer method for primary */
1666         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1667         large = line_count * line_size;
1668
1669         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1670         *display_wm = entries + display->guard_size;
1671
1672         /*
1673          * Spec says:
1674          * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1675          */
1676         *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1677
1678         /* calculate the self-refresh watermark for display cursor */
1679         entries = line_count * pixel_size * 64;
1680         entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1681         *cursor_wm = entries + cursor->guard_size;
1682
1683         return ironlake_check_srwm(dev, level,
1684                                    *fbc_wm, *display_wm, *cursor_wm,
1685                                    display, cursor);
1686 }
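/*
 * Illustration only, not from the original source: a hedged walk-through of
 * ironlake_compute_srwm() for a hypothetical 1920x1080 mode (clock = 148500
 * kHz, htotal = 2200, hdisplay = 1920, 4 bytes/pixel), latency_ns = 3000 and
 * assumed watermark params of cacheline_size = 64, guard_size = 2:
 *   line_time_us = (2200 * 1000) / 148500                 = 14
 *   line_count   = (3000 / 14 + 1000) / 1000              = 1
 *   line_size    = 1920 * 4                               = 7680
 *   small        = ((148500 * 4 / 1000) * 3000) / 1000    = 1782
 *   large        = 1 * 7680                               = 7680
 *   *display_wm  = DIV_ROUND_UP(1782, 64) + 2             = 30
 *   *fbc_wm      = DIV_ROUND_UP(30 * 64, 7680) + 2        = 3
 *   *cursor_wm   = DIV_ROUND_UP(1 * 4 * 64, 64) + 2       = 6
 * The results are then range-checked by ironlake_check_srwm() above.
 */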
1687
1688 static void ironlake_update_wm(struct drm_device *dev)
1689 {
1690         struct drm_i915_private *dev_priv = dev->dev_private;
1691         int fbc_wm, plane_wm, cursor_wm;
1692         unsigned int enabled;
1693
1694         enabled = 0;
1695         if (g4x_compute_wm0(dev, 0,
1696                             &ironlake_display_wm_info,
1697                             ILK_LP0_PLANE_LATENCY,
1698                             &ironlake_cursor_wm_info,
1699                             ILK_LP0_CURSOR_LATENCY,
1700                             &plane_wm, &cursor_wm)) {
1701                 I915_WRITE(WM0_PIPEA_ILK,
1702                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1703                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1704                       " plane %d, cursor: %d\n",
1705                               plane_wm, cursor_wm);
1706                 enabled |= 1;
1707         }
1708
1709         if (g4x_compute_wm0(dev, 1,
1710                             &ironlake_display_wm_info,
1711                             ILK_LP0_PLANE_LATENCY,
1712                             &ironlake_cursor_wm_info,
1713                             ILK_LP0_CURSOR_LATENCY,
1714                             &plane_wm, &cursor_wm)) {
1715                 I915_WRITE(WM0_PIPEB_ILK,
1716                            (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1717                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1718                               " plane %d, cursor: %d\n",
1719                               plane_wm, cursor_wm);
1720                 enabled |= 2;
1721         }
1722
1723         /*
1724          * Calculate and update the self-refresh watermark only when one
1725          * display plane is used.
1726          */
1727         I915_WRITE(WM3_LP_ILK, 0);
1728         I915_WRITE(WM2_LP_ILK, 0);
1729         I915_WRITE(WM1_LP_ILK, 0);
1730
1731         if (!single_plane_enabled(enabled))
1732                 return;
1733         enabled = ffs(enabled) - 1;
1734
1735         /* WM1 */
1736         if (!ironlake_compute_srwm(dev, 1, enabled,
1737                                    ILK_READ_WM1_LATENCY() * 500,
1738                                    &ironlake_display_srwm_info,
1739                                    &ironlake_cursor_srwm_info,
1740                                    &fbc_wm, &plane_wm, &cursor_wm))
1741                 return;
1742
1743         I915_WRITE(WM1_LP_ILK,
1744                    WM1_LP_SR_EN |
1745                    (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1746                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1747                    (plane_wm << WM1_LP_SR_SHIFT) |
1748                    cursor_wm);
1749
1750         /* WM2 */
1751         if (!ironlake_compute_srwm(dev, 2, enabled,
1752                                    ILK_READ_WM2_LATENCY() * 500,
1753                                    &ironlake_display_srwm_info,
1754                                    &ironlake_cursor_srwm_info,
1755                                    &fbc_wm, &plane_wm, &cursor_wm))
1756                 return;
1757
1758         I915_WRITE(WM2_LP_ILK,
1759                    WM2_LP_EN |
1760                    (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1761                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1762                    (plane_wm << WM1_LP_SR_SHIFT) |
1763                    cursor_wm);
1764
1765         /*
1766          * WM3 is unsupported on ILK, probably because we don't have latency
1767          * data for that power state
1768          */
1769 }
1770
1771 static void sandybridge_update_wm(struct drm_device *dev)
1772 {
1773         struct drm_i915_private *dev_priv = dev->dev_private;
1774         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
1775         u32 val;
1776         int fbc_wm, plane_wm, cursor_wm;
1777         unsigned int enabled;
1778
1779         enabled = 0;
1780         if (g4x_compute_wm0(dev, 0,
1781                             &sandybridge_display_wm_info, latency,
1782                             &sandybridge_cursor_wm_info, latency,
1783                             &plane_wm, &cursor_wm)) {
1784                 val = I915_READ(WM0_PIPEA_ILK);
1785                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1786                 I915_WRITE(WM0_PIPEA_ILK, val |
1787                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1788                 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1789                       " plane %d, cursor: %d\n",
1790                               plane_wm, cursor_wm);
1791                 enabled |= 1;
1792         }
1793
1794         if (g4x_compute_wm0(dev, 1,
1795                             &sandybridge_display_wm_info, latency,
1796                             &sandybridge_cursor_wm_info, latency,
1797                             &plane_wm, &cursor_wm)) {
1798                 val = I915_READ(WM0_PIPEB_ILK);
1799                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1800                 I915_WRITE(WM0_PIPEB_ILK, val |
1801                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1802                 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1803                               " plane %d, cursor: %d\n",
1804                               plane_wm, cursor_wm);
1805                 enabled |= 2;
1806         }
1807
1808         if ((dev_priv->num_pipe == 3) &&
1809             g4x_compute_wm0(dev, 2,
1810                             &sandybridge_display_wm_info, latency,
1811                             &sandybridge_cursor_wm_info, latency,
1812                             &plane_wm, &cursor_wm)) {
1813                 val = I915_READ(WM0_PIPEC_IVB);
1814                 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1815                 I915_WRITE(WM0_PIPEC_IVB, val |
1816                            ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1817                 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1818                               " plane %d, cursor: %d\n",
1819                               plane_wm, cursor_wm);
1820                 enabled |= 1 << 2;
1821         }
1822
1823         /*
1824          * Calculate and update the self-refresh watermark only when one
1825          * display plane is used.
1826          *
1827          * SNB supports 3 levels of watermarks.
1828          *
1829          * The WM1/WM2/WM3 watermarks have to be enabled in ascending order
1830          * and disabled in descending order.
1831          *
1832          */
1833         I915_WRITE(WM3_LP_ILK, 0);
1834         I915_WRITE(WM2_LP_ILK, 0);
1835         I915_WRITE(WM1_LP_ILK, 0);
1836
1837         if (!single_plane_enabled(enabled) ||
1838             dev_priv->sprite_scaling_enabled)
1839                 return;
1840         enabled = ffs(enabled) - 1;
1841
1842         /* WM1 */
1843         if (!ironlake_compute_srwm(dev, 1, enabled,
1844                                    SNB_READ_WM1_LATENCY() * 500,
1845                                    &sandybridge_display_srwm_info,
1846                                    &sandybridge_cursor_srwm_info,
1847                                    &fbc_wm, &plane_wm, &cursor_wm))
1848                 return;
1849
1850         I915_WRITE(WM1_LP_ILK,
1851                    WM1_LP_SR_EN |
1852                    (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1853                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1854                    (plane_wm << WM1_LP_SR_SHIFT) |
1855                    cursor_wm);
1856
1857         /* WM2 */
1858         if (!ironlake_compute_srwm(dev, 2, enabled,
1859                                    SNB_READ_WM2_LATENCY() * 500,
1860                                    &sandybridge_display_srwm_info,
1861                                    &sandybridge_cursor_srwm_info,
1862                                    &fbc_wm, &plane_wm, &cursor_wm))
1863                 return;
1864
1865         I915_WRITE(WM2_LP_ILK,
1866                    WM2_LP_EN |
1867                    (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1868                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1869                    (plane_wm << WM1_LP_SR_SHIFT) |
1870                    cursor_wm);
1871
1872         /* WM3 */
1873         if (!ironlake_compute_srwm(dev, 3, enabled,
1874                                    SNB_READ_WM3_LATENCY() * 500,
1875                                    &sandybridge_display_srwm_info,
1876                                    &sandybridge_cursor_srwm_info,
1877                                    &fbc_wm, &plane_wm, &cursor_wm))
1878                 return;
1879
1880         I915_WRITE(WM3_LP_ILK,
1881                    WM3_LP_EN |
1882                    (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1883                    (fbc_wm << WM1_LP_FBC_SHIFT) |
1884                    (plane_wm << WM1_LP_SR_SHIFT) |
1885                    cursor_wm);
1886 }
1887
1888 static void
1889 haswell_update_linetime_wm(struct drm_device *dev, int pipe,
1890                                  struct drm_display_mode *mode)
1891 {
1892         struct drm_i915_private *dev_priv = dev->dev_private;
1893         u32 temp;
1894
1895         temp = I915_READ(PIPE_WM_LINETIME(pipe));
1896         temp &= ~PIPE_WM_LINETIME_MASK;
1897
1898         /* The watermarks are computed based on how long it takes to fill a
1899          * single row at the given clock rate, multiplied by 8.
1900          */
1901         temp |= PIPE_WM_LINETIME_TIME(
1902                 ((mode->crtc_hdisplay * 1000) / mode->clock) * 8);
1903
1904         /* IPS watermarks are only used by pipe A, and are ignored by
1905          * pipes B and C.  They are calculated similarly to the common
1906          * linetime values, except that we are using CD clock frequency
1907          * in MHz instead of pixel rate for the division.
1908          *
1909          * This is a placeholder for the IPS watermark calculation code.
1910          */
1911
1912         I915_WRITE(PIPE_WM_LINETIME(pipe), temp);
1913 }
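/*
 * Illustration only: with a hypothetical mode of crtc_hdisplay = 1920 and
 * clock = 148500 kHz, the linetime value programmed above works out to
 *   ((1920 * 1000) / 148500) * 8 = 12 * 8 = 96
 * i.e. the truncated number of microseconds needed to fill one row,
 * multiplied by 8 as the comment in the function describes.
 */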
1914
1915 static bool
1916 sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1917                               uint32_t sprite_width, int pixel_size,
1918                               const struct intel_watermark_params *display,
1919                               int display_latency_ns, int *sprite_wm)
1920 {
1921         struct drm_crtc *crtc;
1922         int clock;
1923         int entries, tlb_miss;
1924
1925         crtc = intel_get_crtc_for_plane(dev, plane);
1926         if (crtc->fb == NULL || !crtc->enabled) {
1927                 *sprite_wm = display->guard_size;
1928                 return false;
1929         }
1930
1931         clock = crtc->mode.clock;
1932
1933         /* Use the small buffer method to calculate the sprite watermark */
1934         entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1935         tlb_miss = display->fifo_size*display->cacheline_size -
1936                 sprite_width * 8;
1937         if (tlb_miss > 0)
1938                 entries += tlb_miss;
1939         entries = DIV_ROUND_UP(entries, display->cacheline_size);
1940         *sprite_wm = entries + display->guard_size;
1941         if (*sprite_wm > (int)display->max_wm)
1942                 *sprite_wm = display->max_wm;
1943
1944         return true;
1945 }
1946
1947 static bool
1948 sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1949                                 uint32_t sprite_width, int pixel_size,
1950                                 const struct intel_watermark_params *display,
1951                                 int latency_ns, int *sprite_wm)
1952 {
1953         struct drm_crtc *crtc;
1954         unsigned long line_time_us;
1955         int clock;
1956         int line_count, line_size;
1957         int small, large;
1958         int entries;
1959
1960         if (!latency_ns) {
1961                 *sprite_wm = 0;
1962                 return false;
1963         }
1964
1965         crtc = intel_get_crtc_for_plane(dev, plane);
1966         clock = crtc->mode.clock;
1967         if (!clock) {
1968                 *sprite_wm = 0;
1969                 return false;
1970         }
1971
1972         line_time_us = (sprite_width * 1000) / clock;
1973         if (!line_time_us) {
1974                 *sprite_wm = 0;
1975                 return false;
1976         }
1977
1978         line_count = (latency_ns / line_time_us + 1000) / 1000;
1979         line_size = sprite_width * pixel_size;
1980
1981         /* Use the minimum of the small and large buffer method for the sprite */
1982         small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1983         large = line_count * line_size;
1984
1985         entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1986         *sprite_wm = entries + display->guard_size;
1987
1988         return *sprite_wm <= 0x3ff;
1989 }
1990
1991 static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
1992                                          uint32_t sprite_width, int pixel_size)
1993 {
1994         struct drm_i915_private *dev_priv = dev->dev_private;
1995         int latency = SNB_READ_WM0_LATENCY() * 100;     /* In unit 0.1us */
1996         u32 val;
1997         int sprite_wm, reg;
1998         int ret;
1999
2000         switch (pipe) {
2001         case 0:
2002                 reg = WM0_PIPEA_ILK;
2003                 break;
2004         case 1:
2005                 reg = WM0_PIPEB_ILK;
2006                 break;
2007         case 2:
2008                 reg = WM0_PIPEC_IVB;
2009                 break;
2010         default:
2011                 return; /* bad pipe */
2012         }
2013
2014         ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
2015                                             &sandybridge_display_wm_info,
2016                                             latency, &sprite_wm);
2017         if (!ret) {
2018                 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
2019                               pipe);
2020                 return;
2021         }
2022
2023         val = I915_READ(reg);
2024         val &= ~WM0_PIPE_SPRITE_MASK;
2025         I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
2026         DRM_DEBUG_KMS("sprite watermarks For pipe %d - %d\n", pipe, sprite_wm);
2027
2028
2029         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2030                                               pixel_size,
2031                                               &sandybridge_display_srwm_info,
2032                                               SNB_READ_WM1_LATENCY() * 500,
2033                                               &sprite_wm);
2034         if (!ret) {
2035                 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
2036                               pipe);
2037                 return;
2038         }
2039         I915_WRITE(WM1S_LP_ILK, sprite_wm);
2040
2041         /* Only IVB has two more LP watermarks for sprite */
2042         if (!IS_IVYBRIDGE(dev))
2043                 return;
2044
2045         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2046                                               pixel_size,
2047                                               &sandybridge_display_srwm_info,
2048                                               SNB_READ_WM2_LATENCY() * 500,
2049                                               &sprite_wm);
2050         if (!ret) {
2051                 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
2052                               pipe);
2053                 return;
2054         }
2055         I915_WRITE(WM2S_LP_IVB, sprite_wm);
2056
2057         ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
2058                                               pixel_size,
2059                                               &sandybridge_display_srwm_info,
2060                                               SNB_READ_WM3_LATENCY() * 500,
2061                                               &sprite_wm);
2062         if (!ret) {
2063                 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
2064                               pipe);
2065                 return;
2066         }
2067         I915_WRITE(WM3S_LP_IVB, sprite_wm);
2068 }
2069
2070 /**
2071  * intel_update_watermarks - update FIFO watermark values based on current modes
2072  *
2073  * Calculate watermark values for the various WM regs based on current mode
2074  * and plane configuration.
2075  *
2076  * There are several cases to deal with here:
2077  *   - normal (i.e. non-self-refresh)
2078  *   - self-refresh (SR) mode
2079  *   - lines are large relative to FIFO size (buffer can hold up to 2)
2080  *   - lines are small relative to FIFO size (buffer can hold more than 2
2081  *     lines), so need to account for TLB latency
2082  *
2083  *   The normal calculation is:
2084  *     watermark = dotclock * bytes per pixel * latency
2085  *   where latency is platform & configuration dependent (we assume pessimal
2086  *   values here).
2087  *
2088  *   The SR calculation is:
2089  *     watermark = (trunc(latency/line time)+1) * surface width *
2090  *       bytes per pixel
2091  *   where
2092  *     line time = htotal / dotclock
2093  *     surface width = hdisplay for normal plane and 64 for cursor
2094  *   and latency is assumed to be high, as above.
2095  *
2096  * The final value programmed to the register should always be rounded up,
2097  * and include an extra 2 entries to account for clock crossings.
2098  *
2099  * We don't use the sprite, so we can ignore that.  And on Crestline we have
2100  * to set the non-SR watermarks to 8.
2101  */
2102 void intel_update_watermarks(struct drm_device *dev)
2103 {
2104         struct drm_i915_private *dev_priv = dev->dev_private;
2105
2106         if (dev_priv->display.update_wm)
2107                 dev_priv->display.update_wm(dev);
2108 }
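/*
 * Illustration only, using hypothetical numbers to make the formulas in the
 * comment above concrete: for a 148.5 MHz dotclock, 4 bytes per pixel,
 * hdisplay = 1920, htotal = 2200 and an assumed latency of 12 us,
 *   normal: watermark = 148.5 MHz * 4 B * 12 us         = 7128 bytes of FIFO
 *   SR:     line time = 2200 / 148.5 MHz                ~= 14.8 us
 *           watermark = (trunc(12 / 14.8) + 1) * 1920 * 4 = 7680 bytes
 * Both values would then be rounded up and padded by the two extra entries
 * for clock crossings before being programmed, as described above.
 */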
2109
2110 void intel_update_linetime_watermarks(struct drm_device *dev,
2111                 int pipe, struct drm_display_mode *mode)
2112 {
2113         struct drm_i915_private *dev_priv = dev->dev_private;
2114
2115         if (dev_priv->display.update_linetime_wm)
2116                 dev_priv->display.update_linetime_wm(dev, pipe, mode);
2117 }
2118
2119 void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
2120                                     uint32_t sprite_width, int pixel_size)
2121 {
2122         struct drm_i915_private *dev_priv = dev->dev_private;
2123
2124         if (dev_priv->display.update_sprite_wm)
2125                 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
2126                                                    pixel_size);
2127 }
2128
2129 static struct drm_i915_gem_object *
2130 intel_alloc_context_page(struct drm_device *dev)
2131 {
2132         struct drm_i915_gem_object *ctx;
2133         int ret;
2134
2135         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2136
2137         ctx = i915_gem_alloc_object(dev, 4096);
2138         if (!ctx) {
2139                 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
2140                 return NULL;
2141         }
2142
2143         ret = i915_gem_object_pin(ctx, 4096, true, false);
2144         if (ret) {
2145                 DRM_ERROR("failed to pin power context: %d\n", ret);
2146                 goto err_unref;
2147         }
2148
2149         ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2150         if (ret) {
2151                 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2152                 goto err_unpin;
2153         }
2154
2155         return ctx;
2156
2157 err_unpin:
2158         i915_gem_object_unpin(ctx);
2159 err_unref:
2160         drm_gem_object_unreference(&ctx->base);
2161         mutex_unlock(&dev->struct_mutex);
2162         return NULL;
2163 }
2164
2165 /**
2166  * Lock protecting IPS related data structures
2167  */
2168 DEFINE_SPINLOCK(mchdev_lock);
2169
2170 /* Global for IPS driver to get at the current i915 device. Protected by
2171  * mchdev_lock. */
2172 static struct drm_i915_private *i915_mch_dev;
2173
2174 bool ironlake_set_drps(struct drm_device *dev, u8 val)
2175 {
2176         struct drm_i915_private *dev_priv = dev->dev_private;
2177         u16 rgvswctl;
2178
2179         assert_spin_locked(&mchdev_lock);
2180
2181         rgvswctl = I915_READ16(MEMSWCTL);
2182         if (rgvswctl & MEMCTL_CMD_STS) {
2183                 DRM_DEBUG("gpu busy, RCS change rejected\n");
2184                 return false; /* still busy with another command */
2185         }
2186
2187         rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2188                 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2189         I915_WRITE16(MEMSWCTL, rgvswctl);
2190         POSTING_READ16(MEMSWCTL);
2191
2192         rgvswctl |= MEMCTL_CMD_STS;
2193         I915_WRITE16(MEMSWCTL, rgvswctl);
2194
2195         return true;
2196 }
2197
2198 static void ironlake_enable_drps(struct drm_device *dev)
2199 {
2200         struct drm_i915_private *dev_priv = dev->dev_private;
2201         u32 rgvmodectl = I915_READ(MEMMODECTL);
2202         u8 fmax, fmin, fstart, vstart;
2203
2204         spin_lock_irq(&mchdev_lock);
2205
2206         /* Enable temp reporting */
2207         I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2208         I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2209
2210         /* 100ms RC evaluation intervals */
2211         I915_WRITE(RCUPEI, 100000);
2212         I915_WRITE(RCDNEI, 100000);
2213
2214         /* Set max/min thresholds to 90ms and 80ms respectively */
2215         I915_WRITE(RCBMAXAVG, 90000);
2216         I915_WRITE(RCBMINAVG, 80000);
2217
2218         I915_WRITE(MEMIHYST, 1);
2219
2220         /* Set up min, max, and cur for interrupt handling */
2221         fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2222         fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2223         fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2224                 MEMMODE_FSTART_SHIFT;
2225
2226         vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2227                 PXVFREQ_PX_SHIFT;
2228
2229         dev_priv->ips.fmax = fmax; /* IPS callback will increase this */
2230         dev_priv->ips.fstart = fstart;
2231
2232         dev_priv->ips.max_delay = fstart;
2233         dev_priv->ips.min_delay = fmin;
2234         dev_priv->ips.cur_delay = fstart;
2235
2236         DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2237                          fmax, fmin, fstart);
2238
2239         I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2240
2241         /*
2242          * Interrupts will be enabled in ironlake_irq_postinstall
2243          */
2244
2245         I915_WRITE(VIDSTART, vstart);
2246         POSTING_READ(VIDSTART);
2247
2248         rgvmodectl |= MEMMODE_SWMODE_EN;
2249         I915_WRITE(MEMMODECTL, rgvmodectl);
2250
2251         if (wait_for_atomic((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2252                 DRM_ERROR("stuck trying to change perf mode\n");
2253         mdelay(1);
2254
2255         ironlake_set_drps(dev, fstart);
2256
2257         dev_priv->ips.last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2258                 I915_READ(0x112e0);
2259         dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies);
2260         dev_priv->ips.last_count2 = I915_READ(0x112f4);
2261         getrawmonotonic(&dev_priv->ips.last_time2);
2262
2263         spin_unlock_irq(&mchdev_lock);
2264 }
2265
2266 static void ironlake_disable_drps(struct drm_device *dev)
2267 {
2268         struct drm_i915_private *dev_priv = dev->dev_private;
2269         u16 rgvswctl;
2270
2271         spin_lock_irq(&mchdev_lock);
2272
2273         rgvswctl = I915_READ16(MEMSWCTL);
2274
2275         /* Ack interrupts, disable EFC interrupt */
2276         I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2277         I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2278         I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2279         I915_WRITE(DEIIR, DE_PCU_EVENT);
2280         I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2281
2282         /* Go back to the starting frequency */
2283         ironlake_set_drps(dev, dev_priv->ips.fstart);
2284         mdelay(1);
2285         rgvswctl |= MEMCTL_CMD_STS;
2286         I915_WRITE(MEMSWCTL, rgvswctl);
2287         mdelay(1);
2288
2289         spin_unlock_irq(&mchdev_lock);
2290 }
2291
2292 /* There's a funny hw issue where the hw returns all 0 when reading from
2293  * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value
2294  * ourselves, instead of doing an rmw cycle (which might result in us clearing
2295  * all limits and leaving the gpu stuck at whatever frequency it is running at).
2296  */
2297 static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 *val)
2298 {
2299         u32 limits;
2300
2301         limits = 0;
2302
2303         if (*val >= dev_priv->rps.max_delay)
2304                 *val = dev_priv->rps.max_delay;
2305         limits |= dev_priv->rps.max_delay << 24;
2306
2307         /* Only set the down limit when we've reached the lowest level to avoid
2308          * getting more interrupts, otherwise leave this clear. This prevents a
2309          * race in the hw when coming out of rc6: There's a tiny window where
2310          * the hw runs at the minimal clock before selecting the desired
2311          * frequency, if the down threshold expires in that window we will not
2312          * receive a down interrupt. */
2313         if (*val <= dev_priv->rps.min_delay) {
2314                 *val = dev_priv->rps.min_delay;
2315                 limits |= dev_priv->rps.min_delay << 16;
2316         }
2317
2318         return limits;
2319 }
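/*
 * Illustration only, with hypothetical delay values: if rps.max_delay is
 * 0x16 and rps.min_delay is 0x07, then for a requested *val strictly between
 * them the function returns 0x16 << 24 = 0x16000000 (upper limit only),
 * while a request at or below the minimum is clamped to 0x07 and returns
 * 0x16000000 | (0x07 << 16) = 0x16070000, arming the down limit as well.
 */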
2320
2321 void gen6_set_rps(struct drm_device *dev, u8 val)
2322 {
2323         struct drm_i915_private *dev_priv = dev->dev_private;
2324         u32 limits = gen6_rps_limits(dev_priv, &val);
2325
2326         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2327         WARN_ON(val > dev_priv->rps.max_delay);
2328         WARN_ON(val < dev_priv->rps.min_delay);
2329
2330         if (val == dev_priv->rps.cur_delay)
2331                 return;
2332
2333         I915_WRITE(GEN6_RPNSWREQ,
2334                    GEN6_FREQUENCY(val) |
2335                    GEN6_OFFSET(0) |
2336                    GEN6_AGGRESSIVE_TURBO);
2337
2338         /* Make sure we continue to get interrupts
2339          * until we hit the minimum or maximum frequencies.
2340          */
2341         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, limits);
2342
2343         POSTING_READ(GEN6_RPNSWREQ);
2344
2345         dev_priv->rps.cur_delay = val;
2346
2347         trace_intel_gpu_freq_change(val * 50);
2348 }
2349
2350 static void gen6_disable_rps(struct drm_device *dev)
2351 {
2352         struct drm_i915_private *dev_priv = dev->dev_private;
2353
2354         I915_WRITE(GEN6_RC_CONTROL, 0);
2355         I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2356         I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2357         I915_WRITE(GEN6_PMIER, 0);
2358         /* Completely masking the PM interrupts here doesn't race with the rps
2359          * work item unmasking them again, because that item uses a different
2360          * register (PMIMR) for the masking. The only risk is leaving stale
2361          * bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
2362
2363         spin_lock_irq(&dev_priv->rps.lock);
2364         dev_priv->rps.pm_iir = 0;
2365         spin_unlock_irq(&dev_priv->rps.lock);
2366
2367         I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2368 }
2369
2370 int intel_enable_rc6(const struct drm_device *dev)
2371 {
2372         /* Respect the kernel parameter if it is set */
2373         if (i915_enable_rc6 >= 0)
2374                 return i915_enable_rc6;
2375
2376         if (INTEL_INFO(dev)->gen == 5) {
2377 #ifdef CONFIG_INTEL_IOMMU
2378                 /* Disable rc6 on ilk if VT-d is on. */
2379                 if (intel_iommu_gfx_mapped)
2380                         return false;
2381 #endif
2382                 DRM_DEBUG_DRIVER("Ironlake: only RC6 available\n");
2383                 return INTEL_RC6_ENABLE;
2384         }
2385
2386         if (IS_HASWELL(dev)) {
2387                 DRM_DEBUG_DRIVER("Haswell: only RC6 available\n");
2388                 return INTEL_RC6_ENABLE;
2389         }
2390
2391         /* snb/ivb have more than one rc6 state. */
2392         if (INTEL_INFO(dev)->gen == 6) {
2393                 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2394                 return INTEL_RC6_ENABLE;
2395         }
2396
2397         DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2398         return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2399 }
2400
2401 static void gen6_enable_rps(struct drm_device *dev)
2402 {
2403         struct drm_i915_private *dev_priv = dev->dev_private;
2404         struct intel_ring_buffer *ring;
2405         u32 rp_state_cap;
2406         u32 gt_perf_status;
2407         u32 pcu_mbox, rc6_mask = 0;
2408         u32 gtfifodbg;
2409         int rc6_mode;
2410         int i, ret;
2411
2412         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2413
2414         /* Here begins a magic sequence of register writes to enable
2415          * auto-downclocking.
2416          *
2417          * Perhaps there might be some value in exposing these to
2418          * userspace...
2419          */
2420         I915_WRITE(GEN6_RC_STATE, 0);
2421
2422         /* Clear the DBG now so we don't confuse earlier errors */
2423         if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2424                 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2425                 I915_WRITE(GTFIFODBG, gtfifodbg);
2426         }
2427
2428         gen6_gt_force_wake_get(dev_priv);
2429
2430         rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2431         gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2432
2433         /* In units of 50MHz */
2434         dev_priv->rps.max_delay = rp_state_cap & 0xff;
2435         dev_priv->rps.min_delay = (rp_state_cap & 0xff0000) >> 16;
2436         dev_priv->rps.cur_delay = 0;
2437
2438         /* disable the counters and set deterministic thresholds */
2439         I915_WRITE(GEN6_RC_CONTROL, 0);
2440
2441         I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2442         I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2443         I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2444         I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2445         I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2446
2447         for_each_ring(ring, dev_priv, i)
2448                 I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
2449
2450         I915_WRITE(GEN6_RC_SLEEP, 0);
2451         I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2452         I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2453         I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2454         I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2455
2456         /* Check if we are enabling RC6 */
2457         rc6_mode = intel_enable_rc6(dev_priv->dev);
2458         if (rc6_mode & INTEL_RC6_ENABLE)
2459                 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2460
2461         /* We don't use those on Haswell */
2462         if (!IS_HASWELL(dev)) {
2463                 if (rc6_mode & INTEL_RC6p_ENABLE)
2464                         rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2465
2466                 if (rc6_mode & INTEL_RC6pp_ENABLE)
2467                         rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2468         }
2469
2470         DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2471                         (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
2472                         (rc6_mask & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
2473                         (rc6_mask & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
2474
2475         I915_WRITE(GEN6_RC_CONTROL,
2476                    rc6_mask |
2477                    GEN6_RC_CTL_EI_MODE(1) |
2478                    GEN6_RC_CTL_HW_ENABLE);
2479
2480         I915_WRITE(GEN6_RPNSWREQ,
2481                    GEN6_FREQUENCY(10) |
2482                    GEN6_OFFSET(0) |
2483                    GEN6_AGGRESSIVE_TURBO);
2484         I915_WRITE(GEN6_RC_VIDEO_FREQ,
2485                    GEN6_FREQUENCY(12));
2486
2487         I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2488         I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2489                    dev_priv->rps.max_delay << 24 |
2490                    dev_priv->rps.min_delay << 16);
2491
2492         I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400);
2493         I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000);
2494         I915_WRITE(GEN6_RP_UP_EI, 66000);
2495         I915_WRITE(GEN6_RP_DOWN_EI, 350000);
2496
2497         I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2498         I915_WRITE(GEN6_RP_CONTROL,
2499                    GEN6_RP_MEDIA_TURBO |
2500                    GEN6_RP_MEDIA_HW_NORMAL_MODE |
2501                    GEN6_RP_MEDIA_IS_GFX |
2502                    GEN6_RP_ENABLE |
2503                    GEN6_RP_UP_BUSY_AVG |
2504                    (IS_HASWELL(dev) ? GEN7_RP_DOWN_IDLE_AVG : GEN6_RP_DOWN_IDLE_CONT));
2505
2506         ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, 0);
2507         if (!ret) {
2508                 pcu_mbox = 0;
2509                 ret = sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, &pcu_mbox);
2510                 if (!ret && (pcu_mbox & (1<<31))) { /* OC supported */
2511                         dev_priv->rps.max_delay = pcu_mbox & 0xff;
2512                         DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
2513                 }
2514         } else {
2515                 DRM_DEBUG_DRIVER("Failed to set the min frequency\n");
2516         }
2517
2518         gen6_set_rps(dev_priv->dev, (gt_perf_status & 0xff00) >> 8);
2519
2520         /* requires MSI enabled */
2521         I915_WRITE(GEN6_PMIER, GEN6_PM_DEFERRED_EVENTS);
2522         spin_lock_irq(&dev_priv->rps.lock);
2523         WARN_ON(dev_priv->rps.pm_iir != 0);
2524         I915_WRITE(GEN6_PMIMR, 0);
2525         spin_unlock_irq(&dev_priv->rps.lock);
2526         /* enable all PM interrupts */
2527         I915_WRITE(GEN6_PMINTRMSK, 0);
2528
2529         gen6_gt_force_wake_put(dev_priv);
2530 }
2531
2532 static void gen6_update_ring_freq(struct drm_device *dev)
2533 {
2534         struct drm_i915_private *dev_priv = dev->dev_private;
2535         int min_freq = 15;
2536         int gpu_freq, ia_freq, max_ia_freq;
2537         int scaling_factor = 180;
2538
2539         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2540
2541         max_ia_freq = cpufreq_quick_get_max(0);
2542         /*
2543          * Default to measured freq if none found, PCU will ensure we don't go
2544          * over
2545          */
2546         if (!max_ia_freq)
2547                 max_ia_freq = tsc_khz;
2548
2549         /* Convert from kHz to MHz */
2550         max_ia_freq /= 1000;
2551
2552         /*
2553          * For each potential GPU frequency, load a ring frequency we'd like
2554          * to use for memory access.  We do this by specifying the IA frequency
2555          * the PCU should use as a reference to determine the ring frequency.
2556          */
2557         for (gpu_freq = dev_priv->rps.max_delay; gpu_freq >= dev_priv->rps.min_delay;
2558              gpu_freq--) {
2559                 int diff = dev_priv->rps.max_delay - gpu_freq;
2560
2561                 /*
2562                  * For GPU frequencies less than 750MHz, just use the lowest
2563                  * ring freq.
2564                  */
2565                 if (gpu_freq < min_freq)
2566                         ia_freq = 800;
2567                 else
2568                         ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2569                 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2570                 ia_freq <<= GEN6_PCODE_FREQ_IA_RATIO_SHIFT;
2571
2572                 sandybridge_pcode_write(dev_priv,
2573                                         GEN6_PCODE_WRITE_MIN_FREQ_TABLE,
2574                                         ia_freq | gpu_freq);
2575         }
2576 }
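/*
 * Illustration only, with assumed values: for max_ia_freq = 3400 MHz,
 * rps.max_delay = 22 and a table entry at gpu_freq = 18 (so diff = 4),
 *   ia_freq = 3400 - (4 * 180) / 2          = 3040 MHz
 *   ia_freq = DIV_ROUND_CLOSEST(3040, 100)  = 30   (in 100 MHz units)
 * and 30 << GEN6_PCODE_FREQ_IA_RATIO_SHIFT is OR'ed with gpu_freq before
 * being handed to the PCU. Entries below min_freq would instead use the
 * fixed 800 MHz value.
 */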
2577
2578 void ironlake_teardown_rc6(struct drm_device *dev)
2579 {
2580         struct drm_i915_private *dev_priv = dev->dev_private;
2581
2582         if (dev_priv->renderctx) {
2583                 i915_gem_object_unpin(dev_priv->renderctx);
2584                 drm_gem_object_unreference(&dev_priv->renderctx->base);
2585                 dev_priv->renderctx = NULL;
2586         }
2587
2588         if (dev_priv->pwrctx) {
2589                 i915_gem_object_unpin(dev_priv->pwrctx);
2590                 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2591                 dev_priv->pwrctx = NULL;
2592         }
2593 }
2594
2595 static void ironlake_disable_rc6(struct drm_device *dev)
2596 {
2597         struct drm_i915_private *dev_priv = dev->dev_private;
2598
2599         if (I915_READ(PWRCTXA)) {
2600                 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2601                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2602                 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2603                          50);
2604
2605                 I915_WRITE(PWRCTXA, 0);
2606                 POSTING_READ(PWRCTXA);
2607
2608                 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2609                 POSTING_READ(RSTDBYCTL);
2610         }
2611 }
2612
2613 static int ironlake_setup_rc6(struct drm_device *dev)
2614 {
2615         struct drm_i915_private *dev_priv = dev->dev_private;
2616
2617         if (dev_priv->renderctx == NULL)
2618                 dev_priv->renderctx = intel_alloc_context_page(dev);
2619         if (!dev_priv->renderctx)
2620                 return -ENOMEM;
2621
2622         if (dev_priv->pwrctx == NULL)
2623                 dev_priv->pwrctx = intel_alloc_context_page(dev);
2624         if (!dev_priv->pwrctx) {
2625                 ironlake_teardown_rc6(dev);
2626                 return -ENOMEM;
2627         }
2628
2629         return 0;
2630 }
2631
2632 static void ironlake_enable_rc6(struct drm_device *dev)
2633 {
2634         struct drm_i915_private *dev_priv = dev->dev_private;
2635         struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
2636         int ret;
2637
2638         /* rc6 disabled by default due to repeated reports of hanging during
2639          * boot and resume.
2640          */
2641         if (!intel_enable_rc6(dev))
2642                 return;
2643
2644         WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2645
2646         ret = ironlake_setup_rc6(dev);
2647         if (ret)
2648                 return;
2649
2650         /*
2651          * GPU can automatically power down the render unit if given a page
2652          * to save state.
2653          */
2654         ret = intel_ring_begin(ring, 6);
2655         if (ret) {
2656                 ironlake_teardown_rc6(dev);
2657                 return;
2658         }
2659
2660         intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2661         intel_ring_emit(ring, MI_SET_CONTEXT);
2662         intel_ring_emit(ring, dev_priv->renderctx->gtt_offset |
2663                         MI_MM_SPACE_GTT |
2664                         MI_SAVE_EXT_STATE_EN |
2665                         MI_RESTORE_EXT_STATE_EN |
2666                         MI_RESTORE_INHIBIT);
2667         intel_ring_emit(ring, MI_SUSPEND_FLUSH);
2668         intel_ring_emit(ring, MI_NOOP);
2669         intel_ring_emit(ring, MI_FLUSH);
2670         intel_ring_advance(ring);
2671
2672         /*
2673          * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2674          * does an implicit flush; combined with the MI_FLUSH above, it should be
2675          * safe to assume that renderctx is valid.
2676          */
2677         ret = intel_wait_ring_idle(ring);
2678         if (ret) {
2679                 DRM_ERROR("failed to enable ironlake power savings\n");
2680                 ironlake_teardown_rc6(dev);
2681                 return;
2682         }
2683
2684         I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
2685         I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2686 }
2687
2688 static unsigned long intel_pxfreq(u32 vidfreq)
2689 {
2690         unsigned long freq;
2691         int div = (vidfreq & 0x3f0000) >> 16;
2692         int post = (vidfreq & 0x3000) >> 12;
2693         int pre = (vidfreq & 0x7);
2694
2695         if (!pre)
2696                 return 0;
2697
2698         freq = ((div * 133333) / ((1<<post) * pre));
2699
2700         return freq;
2701 }
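/*
 * Illustration only: decoding a hypothetical vidfreq value of 0x00121001
 * (div = 0x12 = 18, post = 1, pre = 1) with the formula above gives
 *   freq = (18 * 133333) / ((1 << 1) * 1) = 1199997
 * i.e. roughly 1.2 GHz, assuming the 133333 constant is expressed in kHz.
 */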
2702
2703 static const struct cparams {
2704         u16 i;
2705         u16 t;
2706         u16 m;
2707         u16 c;
2708 } cparams[] = {
2709         { 1, 1333, 301, 28664 },
2710         { 1, 1066, 294, 24460 },
2711         { 1, 800, 294, 25192 },
2712         { 0, 1333, 276, 27605 },
2713         { 0, 1066, 276, 27605 },
2714         { 0, 800, 231, 23784 },
2715 };
2716
2717 unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
2718 {
2719         u64 total_count, diff, ret;
2720         u32 count1, count2, count3, m = 0, c = 0;
2721         unsigned long now = jiffies_to_msecs(jiffies), diff1;
2722         int i;
2723
2724         assert_spin_locked(&mchdev_lock);
2725
2726         diff1 = now - dev_priv->ips.last_time1;
2727
2728         /* Prevent division-by-zero if we are asking too fast.
2729          * Also, we don't get interesting results if we are polling
2730          * faster than once in 10ms, so just return the saved value
2731          * in such cases.
2732          */
2733         if (diff1 <= 10)
2734                 return dev_priv->ips.chipset_power;
2735
2736         count1 = I915_READ(DMIEC);
2737         count2 = I915_READ(DDREC);
2738         count3 = I915_READ(CSIEC);
2739
2740         total_count = count1 + count2 + count3;
2741
2742         /* FIXME: handle per-counter overflow */
2743         if (total_count < dev_priv->ips.last_count1) {
2744                 diff = ~0UL - dev_priv->ips.last_count1;
2745                 diff += total_count;
2746         } else {
2747                 diff = total_count - dev_priv->ips.last_count1;
2748         }
2749
2750         for (i = 0; i < ARRAY_SIZE(cparams); i++) {
2751                 if (cparams[i].i == dev_priv->ips.c_m &&
2752                     cparams[i].t == dev_priv->ips.r_t) {
2753                         m = cparams[i].m;
2754                         c = cparams[i].c;
2755                         break;
2756                 }
2757         }
2758
2759         diff = div_u64(diff, diff1);
2760         ret = ((m * diff) + c);
2761         ret = div_u64(ret, 10);
2762
2763         dev_priv->ips.last_count1 = total_count;
2764         dev_priv->ips.last_time1 = now;
2765
2766         dev_priv->ips.chipset_power = ret;
2767
2768         return ret;
2769 }
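/*
 * Illustration only, with made-up counter values: if the three energy
 * counters advanced by a combined 50000 over diff1 = 100 ms, and the
 * cparams[] lookup matched the { 1, 1066, 294, 24460 } row (m = 294,
 * c = 24460), then
 *   diff = 50000 / 100               = 500
 *   ret  = (294 * 500 + 24460) / 10  = 17146
 * which is the value cached in ips.chipset_power and handed back to the
 * IPS driver.
 */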
2770
2771 unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
2772 {
2773         unsigned long m, x, b;
2774         u32 tsfs;
2775
2776         tsfs = I915_READ(TSFS);
2777
2778         m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
2779         x = I915_READ8(TR1);
2780
2781         b = tsfs & TSFS_INTR_MASK;
2782
2783         return ((m * x) / 127) - b;
2784 }
2785
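/*
 * Translate a PXVID field into an external voltage (in 0.1 mV units, if
 * the ".1 mil" annotation below is taken at face value), using the mobile
 * (vm) or desktop (vd) column as appropriate.
 */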
2786 static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
2787 {
2788         static const struct v_table {
2789                 u16 vd; /* in .1 mil */
2790                 u16 vm; /* in .1 mil */
2791         } v_table[] = {
2792                 { 0, 0, },
2793                 { 375, 0, },
2794                 { 500, 0, },
2795                 { 625, 0, },
2796                 { 750, 0, },
2797                 { 875, 0, },
2798                 { 1000, 0, },
2799                 { 1125, 0, },
2800                 { 4125, 3000, },
2801                 { 4125, 3000, },
2802                 { 4125, 3000, },
2803                 { 4125, 3000, },
2804                 { 4125, 3000, },
2805                 { 4125, 3000, },
2806                 { 4125, 3000, },
2807                 { 4125, 3000, },
2808                 { 4125, 3000, },
2809                 { 4125, 3000, },
2810                 { 4125, 3000, },
2811                 { 4125, 3000, },
2812                 { 4125, 3000, },
2813                 { 4125, 3000, },
2814                 { 4125, 3000, },
2815                 { 4125, 3000, },
2816                 { 4125, 3000, },
2817                 { 4125, 3000, },
2818                 { 4125, 3000, },
2819                 { 4125, 3000, },
2820                 { 4125, 3000, },
2821                 { 4125, 3000, },
2822                 { 4125, 3000, },
2823                 { 4125, 3000, },
2824                 { 4250, 3125, },
2825                 { 4375, 3250, },
2826                 { 4500, 3375, },
2827                 { 4625, 3500, },
2828                 { 4750, 3625, },
2829                 { 4875, 3750, },
2830                 { 5000, 3875, },
2831                 { 5125, 4000, },
2832                 { 5250, 4125, },
2833                 { 5375, 4250, },
2834                 { 5500, 4375, },
2835                 { 5625, 4500, },
2836                 { 5750, 4625, },
2837                 { 5875, 4750, },
2838                 { 6000, 4875, },
2839                 { 6125, 5000, },
2840                 { 6250, 5125, },
2841                 { 6375, 5250, },
2842                 { 6500, 5375, },
2843                 { 6625, 5500, },
2844                 { 6750, 5625, },
2845                 { 6875, 5750, },
2846                 { 7000, 5875, },
2847                 { 7125, 6000, },
2848                 { 7250, 6125, },
2849                 { 7375, 6250, },
2850                 { 7500, 6375, },
2851                 { 7625, 6500, },
2852                 { 7750, 6625, },
2853                 { 7875, 6750, },
2854                 { 8000, 6875, },
2855                 { 8125, 7000, },
2856                 { 8250, 7125, },
2857                 { 8375, 7250, },
2858                 { 8500, 7375, },
2859                 { 8625, 7500, },
2860                 { 8750, 7625, },
2861                 { 8875, 7750, },
2862                 { 9000, 7875, },
2863                 { 9125, 8000, },
2864                 { 9250, 8125, },
2865                 { 9375, 8250, },
2866                 { 9500, 8375, },
2867                 { 9625, 8500, },
2868                 { 9750, 8625, },
2869                 { 9875, 8750, },
2870                 { 10000, 8875, },
2871                 { 10125, 9000, },
2872                 { 10250, 9125, },
2873                 { 10375, 9250, },
2874                 { 10500, 9375, },
2875                 { 10625, 9500, },
2876                 { 10750, 9625, },
2877                 { 10875, 9750, },
2878                 { 11000, 9875, },
2879                 { 11125, 10000, },
2880                 { 11250, 10125, },
2881                 { 11375, 10250, },
2882                 { 11500, 10375, },
2883                 { 11625, 10500, },
2884                 { 11750, 10625, },
2885                 { 11875, 10750, },
2886                 { 12000, 10875, },
2887                 { 12125, 11000, },
2888                 { 12250, 11125, },
2889                 { 12375, 11250, },
2890                 { 12500, 11375, },
2891                 { 12625, 11500, },
2892                 { 12750, 11625, },
2893                 { 12875, 11750, },
2894                 { 13000, 11875, },
2895                 { 13125, 12000, },
2896                 { 13250, 12125, },
2897                 { 13375, 12250, },
2898                 { 13500, 12375, },
2899                 { 13625, 12500, },
2900                 { 13750, 12625, },
2901                 { 13875, 12750, },
2902                 { 14000, 12875, },
2903                 { 14125, 13000, },
2904                 { 14250, 13125, },
2905                 { 14375, 13250, },
2906                 { 14500, 13375, },
2907                 { 14625, 13500, },
2908                 { 14750, 13625, },
2909                 { 14875, 13750, },
2910                 { 15000, 13875, },
2911                 { 15125, 14000, },
2912                 { 15250, 14125, },
2913                 { 15375, 14250, },
2914                 { 15500, 14375, },
2915                 { 15625, 14500, },
2916                 { 15750, 14625, },
2917                 { 15875, 14750, },
2918                 { 16000, 14875, },
2919                 { 16125, 15000, },
2920         };
2921         if (dev_priv->info->is_mobile)
2922                 return v_table[pxvid].vm;
2923         else
2924                 return v_table[pxvid].vd;
2925 }
2926
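/*
 * Fold the GFXEC energy counter delta into ips.gfx_power.  Callers must
 * hold mchdev_lock; the 1181/10 scaling is another empirical constant.
 */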
2927 static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
2928 {
2929         struct timespec now, diff1;
2930         u64 diff;
2931         unsigned long diffms;
2932         u32 count;
2933
2934         assert_spin_locked(&mchdev_lock);
2935
2936         getrawmonotonic(&now);
2937         diff1 = timespec_sub(now, dev_priv->ips.last_time2);
2938
2939         /* Don't divide by 0 */
2940         diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
2941         if (!diffms)
2942                 return;
2943
2944         count = I915_READ(GFXEC);
2945
2946         if (count < dev_priv->ips.last_count2) {
2947                 diff = ~0UL - dev_priv->ips.last_count2;
2948                 diff += count;
2949         } else {
2950                 diff = count - dev_priv->ips.last_count2;
2951         }
2952
2953         dev_priv->ips.last_count2 = count;
2954         dev_priv->ips.last_time2 = now;
2955
2956         /* More magic constants... */
2957         diff = diff * 1181;
2958         diff = div_u64(diff, diffms * 10);
2959         dev_priv->ips.gfx_power = diff;
2960 }
2961
2962 void i915_update_gfx_val(struct drm_i915_private *dev_priv)
2963 {
2964         if (dev_priv->info->gen != 5)
2965                 return;
2966
2967         spin_lock_irq(&mchdev_lock);
2968
2969         __i915_update_gfx_val(dev_priv);
2970
2971         spin_unlock_irq(&mchdev_lock);
2972 }
2973
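/*
 * Estimate the instantaneous graphics power draw (in mW): combine the
 * current P-state voltage with a thermally dependent correction factor and
 * add the counter-based value maintained by __i915_update_gfx_val().
 */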
2974 unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
2975 {
2976         unsigned long t, corr, state1, corr2, state2;
2977         u32 pxvid, ext_v;
2978
2979         assert_spin_locked(&mchdev_lock);
2980
2981         pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->rps.cur_delay * 4));
2982         pxvid = (pxvid >> 24) & 0x7f;
2983         ext_v = pvid_to_extvid(dev_priv, pxvid);
2984
2985         state1 = ext_v;
2986
2987         t = i915_mch_val(dev_priv);
2988
2989         /* Revel in the empirically derived constants */
2990
2991         /* Correction factor in 1/100000 units */
2992         if (t > 80)
2993                 corr = ((t * 2349) + 135940);
2994         else if (t >= 50)
2995                 corr = ((t * 964) + 29317);
2996         else /* < 50 */
2997                 corr = ((t * 301) + 1004);
2998
2999         corr = corr * ((150142 * state1) / 10000 - 78642);
3000         corr /= 100000;
3001         corr2 = (corr * dev_priv->ips.corr);
3002
3003         state2 = (corr2 * state1) / 10000;
3004         state2 /= 100; /* convert to mW */
3005
3006         __i915_update_gfx_val(dev_priv);
3007
3008         return dev_priv->ips.gfx_power + state2;
3009 }
3010
3011 /**
3012  * i915_read_mch_val - return value for IPS use
3013  *
3014  * Calculate and return a value for the IPS driver to use when deciding whether
3015  * we have thermal and power headroom to increase CPU or GPU power budget.
3016  */
3017 unsigned long i915_read_mch_val(void)
3018 {
3019         struct drm_i915_private *dev_priv;
3020         unsigned long chipset_val, graphics_val, ret = 0;
3021
3022         spin_lock_irq(&mchdev_lock);
3023         if (!i915_mch_dev)
3024                 goto out_unlock;
3025         dev_priv = i915_mch_dev;
3026
3027         chipset_val = i915_chipset_val(dev_priv);
3028         graphics_val = i915_gfx_val(dev_priv);
3029
3030         ret = chipset_val + graphics_val;
3031
3032 out_unlock:
3033         spin_unlock_irq(&mchdev_lock);
3034
3035         return ret;
3036 }
3037 EXPORT_SYMBOL_GPL(i915_read_mch_val);
3038
3039 /**
3040  * i915_gpu_raise - raise GPU frequency limit
3041  *
3042  * Raise the limit; IPS indicates we have thermal headroom.
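 *
 * (Delay values apparently run opposite to frequency on these parts, which
 * is why raising the limit means decrementing max_delay below.)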
3043  */
3044 bool i915_gpu_raise(void)
3045 {
3046         struct drm_i915_private *dev_priv;
3047         bool ret = true;
3048
3049         spin_lock_irq(&mchdev_lock);
3050         if (!i915_mch_dev) {
3051                 ret = false;
3052                 goto out_unlock;
3053         }
3054         dev_priv = i915_mch_dev;
3055
3056         if (dev_priv->ips.max_delay > dev_priv->ips.fmax)
3057                 dev_priv->ips.max_delay--;
3058
3059 out_unlock:
3060         spin_unlock_irq(&mchdev_lock);
3061
3062         return ret;
3063 }
3064 EXPORT_SYMBOL_GPL(i915_gpu_raise);
3065
3066 /**
3067  * i915_gpu_lower - lower GPU frequency limit
3068  *
3069  * IPS indicates we're close to a thermal limit, so throttle back the GPU
3070  * frequency maximum.
3071  */
3072 bool i915_gpu_lower(void)
3073 {
3074         struct drm_i915_private *dev_priv;
3075         bool ret = true;
3076
3077         spin_lock_irq(&mchdev_lock);
3078         if (!i915_mch_dev) {
3079                 ret = false;
3080                 goto out_unlock;
3081         }
3082         dev_priv = i915_mch_dev;
3083
3084         if (dev_priv->ips.max_delay < dev_priv->ips.min_delay)
3085                 dev_priv->ips.max_delay++;
3086
3087 out_unlock:
3088         spin_unlock_irq(&mchdev_lock);
3089
3090         return ret;
3091 }
3092 EXPORT_SYMBOL_GPL(i915_gpu_lower);
3093
3094 /**
3095  * i915_gpu_busy - indicate GPU busyness to IPS
3096  *
3097  * Tell the IPS driver whether or not the GPU is busy.
3098  */
3099 bool i915_gpu_busy(void)
3100 {
3101         struct drm_i915_private *dev_priv;
3102         struct intel_ring_buffer *ring;
3103         bool ret = false;
3104         int i;
3105
3106         spin_lock_irq(&mchdev_lock);
3107         if (!i915_mch_dev)
3108                 goto out_unlock;
3109         dev_priv = i915_mch_dev;
3110
3111         for_each_ring(ring, dev_priv, i)
3112                 ret |= !list_empty(&ring->request_list);
3113
3114 out_unlock:
3115         spin_unlock_irq(&mchdev_lock);
3116
3117         return ret;
3118 }
3119 EXPORT_SYMBOL_GPL(i915_gpu_busy);
3120
3121 /**
3122  * i915_gpu_turbo_disable - disable graphics turbo
3123  *
3124  * Disable graphics turbo by resetting the max frequency and setting the
3125  * current frequency to the default.
3126  */
3127 bool i915_gpu_turbo_disable(void)
3128 {
3129         struct drm_i915_private *dev_priv;
3130         bool ret = true;
3131
3132         spin_lock_irq(&mchdev_lock);
3133         if (!i915_mch_dev) {
3134                 ret = false;
3135                 goto out_unlock;
3136         }
3137         dev_priv = i915_mch_dev;
3138
3139         dev_priv->ips.max_delay = dev_priv->ips.fstart;
3140
3141         if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart))
3142                 ret = false;
3143
3144 out_unlock:
3145         spin_unlock_irq(&mchdev_lock);
3146
3147         return ret;
3148 }
3149 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
3150
3151 /**
3152  * Tells the intel_ips driver that the i915 driver is now loaded, if
3153  * IPS got loaded first.
3154  *
3155  * This awkward dance is so that neither module has to depend on the
3156  * other in order for IPS to do the appropriate communication of
3157  * GPU turbo limits to i915.
3158  */
3159 static void
3160 ips_ping_for_i915_load(void)
3161 {
3162         void (*link)(void);
3163
3164         link = symbol_get(ips_link_to_i915_driver);
3165         if (link) {
3166                 link();
3167                 symbol_put(ips_link_to_i915_driver);
3168         }
3169 }
3170
3171 void intel_gpu_ips_init(struct drm_i915_private *dev_priv)
3172 {
3173         /* We only register the i915 ips part with intel-ips once everything is
3174          * set up, to avoid intel-ips sneaking in and reading bogus values. */
3175         spin_lock_irq(&mchdev_lock);
3176         i915_mch_dev = dev_priv;
3177         spin_unlock_irq(&mchdev_lock);
3178
3179         ips_ping_for_i915_load();
3180 }
3181
3182 void intel_gpu_ips_teardown(void)
3183 {
3184         spin_lock_irq(&mchdev_lock);
3185         i915_mch_dev = NULL;
3186         spin_unlock_irq(&mchdev_lock);
3187 }
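
/*
 * Program the EMON unit that feeds the IPS power estimates above: per-event
 * energy weights, P-state weights derived from the PXVFREQ fuses, and the
 * correction factor read back from LCFUSE02.
 */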
3188 static void intel_init_emon(struct drm_device *dev)
3189 {
3190         struct drm_i915_private *dev_priv = dev->dev_private;
3191         u32 lcfuse;
3192         u8 pxw[16];
3193         int i;
3194
3195         /* Disable PMON while we program the event weights */
3196         I915_WRITE(ECR, 0);
3197         POSTING_READ(ECR);
3198
3199         /* Program energy weights for various events */
3200         I915_WRITE(SDEW, 0x15040d00);
3201         I915_WRITE(CSIEW0, 0x007f0000);
3202         I915_WRITE(CSIEW1, 0x1e220004);
3203         I915_WRITE(CSIEW2, 0x04000004);
3204
3205         for (i = 0; i < 5; i++)
3206                 I915_WRITE(PEW + (i * 4), 0);
3207         for (i = 0; i < 3; i++)
3208                 I915_WRITE(DEW + (i * 4), 0);
3209
3210         /* Program P-state weights to account for frequency power adjustment */
3211         for (i = 0; i < 16; i++) {
3212                 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
3213                 unsigned long freq = intel_pxfreq(pxvidfreq);
3214                 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
3215                         PXVFREQ_PX_SHIFT;
3216                 unsigned long val;
3217
3218                 val = vid * vid;
3219                 val *= (freq / 1000);
3220                 val *= 255;
3221                 val /= (127*127*900);
3222                 if (val > 0xff)
3223                         DRM_ERROR("bad pxval: %lu\n", val);
3224                 pxw[i] = val;
3225         }
3226         /* Render standby states get 0 weight */
3227         pxw[14] = 0;
3228         pxw[15] = 0;
3229
3230         for (i = 0; i < 4; i++) {
3231                 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
3232                         (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
3233                 I915_WRITE(PXW + (i * 4), val);
3234         }
3235
3236         /* Adjust magic regs to magic values (more experimental results) */
3237         I915_WRITE(OGW0, 0);
3238         I915_WRITE(OGW1, 0);
3239         I915_WRITE(EG0, 0x00007f00);
3240         I915_WRITE(EG1, 0x0000000e);
3241         I915_WRITE(EG2, 0x000e0000);
3242         I915_WRITE(EG3, 0x68000300);
3243         I915_WRITE(EG4, 0x42000000);
3244         I915_WRITE(EG5, 0x00140031);
3245         I915_WRITE(EG6, 0);
3246         I915_WRITE(EG7, 0);
3247
3248         for (i = 0; i < 8; i++)
3249                 I915_WRITE(PXWL + (i * 4), 0);
3250
3251         /* Enable PMON + select events */
3252         I915_WRITE(ECR, 0x80000019);
3253
3254         lcfuse = I915_READ(LCFUSE02);
3255
3256         dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
3257 }
3258
3259 void intel_disable_gt_powersave(struct drm_device *dev)
3260 {
3261         if (IS_IRONLAKE_M(dev)) {
3262                 ironlake_disable_drps(dev);
3263                 ironlake_disable_rc6(dev);
3264         } else if (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev)) {
3265                 gen6_disable_rps(dev);
3266         }
3267 }
3268
3269 void intel_enable_gt_powersave(struct drm_device *dev)
3270 {
3271         if (IS_IRONLAKE_M(dev)) {
3272                 ironlake_enable_drps(dev);
3273                 ironlake_enable_rc6(dev);
3274                 intel_init_emon(dev);
3275         } else if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
3276                 gen6_enable_rps(dev);
3277                 gen6_update_ring_freq(dev);
3278         }
3279 }
3280
3281 static void ironlake_init_clock_gating(struct drm_device *dev)
3282 {
3283         struct drm_i915_private *dev_priv = dev->dev_private;
3284         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3285
3286         /* Required for FBC */
3287         dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
3288                 DPFCRUNIT_CLOCK_GATE_DISABLE |
3289                 DPFDUNIT_CLOCK_GATE_DISABLE;
3290         /* Required for CxSR */
3291         dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
3292
3293         I915_WRITE(PCH_3DCGDIS0,
3294                    MARIUNIT_CLOCK_GATE_DISABLE |
3295                    SVSMUNIT_CLOCK_GATE_DISABLE);
3296         I915_WRITE(PCH_3DCGDIS1,
3297                    VFMUNIT_CLOCK_GATE_DISABLE);
3298
3299         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3300
3301         /*
3302          * According to the spec the following bits should be set in
3303          * order to enable memory self-refresh
3304          * The bit 22/21 of 0x42004
3305          * The bit 5 of 0x42020
3306          * The bit 15 of 0x45000
3307          */
3308         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3309                    (I915_READ(ILK_DISPLAY_CHICKEN2) |
3310                     ILK_DPARB_GATE | ILK_VSDPFD_FULL));
3311         I915_WRITE(ILK_DSPCLK_GATE,
3312                    (I915_READ(ILK_DSPCLK_GATE) |
3313                     ILK_DPARB_CLK_GATE));
3314         I915_WRITE(DISP_ARB_CTL,
3315                    (I915_READ(DISP_ARB_CTL) |
3316                     DISP_FBC_WM_DIS));
3317         I915_WRITE(WM3_LP_ILK, 0);
3318         I915_WRITE(WM2_LP_ILK, 0);
3319         I915_WRITE(WM1_LP_ILK, 0);
3320
3321         /*
3322          * Based on the document from hardware guys the following bits
3323          * should be set unconditionally in order to enable FBC.
3324          * The bit 22 of 0x42000
3325          * The bit 22 of 0x42004
3326          * The bit 7,8,9 of 0x42020.
3327          */
3328         if (IS_IRONLAKE_M(dev)) {
3329                 I915_WRITE(ILK_DISPLAY_CHICKEN1,
3330                            I915_READ(ILK_DISPLAY_CHICKEN1) |
3331                            ILK_FBCQ_DIS);
3332                 I915_WRITE(ILK_DISPLAY_CHICKEN2,
3333                            I915_READ(ILK_DISPLAY_CHICKEN2) |
3334                            ILK_DPARB_GATE);
3335                 I915_WRITE(ILK_DSPCLK_GATE,
3336                            I915_READ(ILK_DSPCLK_GATE) |
3337                            ILK_DPFC_DIS1 |
3338                            ILK_DPFC_DIS2 |
3339                            ILK_CLK_FBC);
3340         }
3341
3342         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3343                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3344                    ILK_ELPIN_409_SELECT);
3345         I915_WRITE(_3D_CHICKEN2,
3346                    _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
3347                    _3D_CHICKEN2_WM_READ_PIPELINED);
3348 }
3349
3350 static void gen6_init_clock_gating(struct drm_device *dev)
3351 {
3352         struct drm_i915_private *dev_priv = dev->dev_private;
3353         int pipe;
3354         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3355
3356         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3357
3358         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3359                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3360                    ILK_ELPIN_409_SELECT);
3361
3362         I915_WRITE(WM3_LP_ILK, 0);
3363         I915_WRITE(WM2_LP_ILK, 0);
3364         I915_WRITE(WM1_LP_ILK, 0);
3365
3366         I915_WRITE(CACHE_MODE_0,
3367                    _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
3368
3369         I915_WRITE(GEN6_UCGCTL1,
3370                    I915_READ(GEN6_UCGCTL1) |
3371                    GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
3372                    GEN6_CSUNIT_CLOCK_GATE_DISABLE);
3373
3374         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3375          * gating disable must be set.  Failure to set it results in
3376          * flickering pixels due to Z write ordering failures after
3377          * some amount of runtime in the Mesa "fire" demo, and Unigine
3378          * Sanctuary and Tropics, and apparently anything else with
3379          * alpha test or pixel discard.
3380          *
3381          * According to the spec, bit 11 (RCCUNIT) must also be set,
3382          * but we didn't debug actual testcases to find it out.
3383          *
3384          * Also apply WaDisableVDSUnitClockGating and
3385          * WaDisableRCPBUnitClockGating.
3386          */
3387         I915_WRITE(GEN6_UCGCTL2,
3388                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3389                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3390                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3391
3392         /* Bspec says we need to always set all mask bits. */
3393         I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
3394                    _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
3395
3396         /*
3397          * According to the spec the following bits should be
3398          * set in order to enable memory self-refresh and fbc:
3399          * The bit21 and bit22 of 0x42000
3400          * The bit21 and bit22 of 0x42004
3401          * The bit5 and bit7 of 0x42020
3402          * The bit14 of 0x70180
3403          * The bit14 of 0x71180
3404          */
3405         I915_WRITE(ILK_DISPLAY_CHICKEN1,
3406                    I915_READ(ILK_DISPLAY_CHICKEN1) |
3407                    ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
3408         I915_WRITE(ILK_DISPLAY_CHICKEN2,
3409                    I915_READ(ILK_DISPLAY_CHICKEN2) |
3410                    ILK_DPARB_GATE | ILK_VSDPFD_FULL);
3411         I915_WRITE(ILK_DSPCLK_GATE,
3412                    I915_READ(ILK_DSPCLK_GATE) |
3413                    ILK_DPARB_CLK_GATE  |
3414                    ILK_DPFD_CLK_GATE);
3415
3416         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3417                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
3418
3419         for_each_pipe(pipe) {
3420                 I915_WRITE(DSPCNTR(pipe),
3421                            I915_READ(DSPCNTR(pipe)) |
3422                            DISPPLANE_TRICKLE_FEED_DISABLE);
3423                 intel_flush_display_plane(dev_priv, pipe);
3424         }
3425 }
3426
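/*
 * Move the TS, VS and DS fixed-function thread schedulers to HW scheduling
 * mode (the hardware default is apparently SW scheduling for some of these
 * on gen7).
 */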
3427 static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
3428 {
3429         uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
3430
3431         reg &= ~GEN7_FF_SCHED_MASK;
3432         reg |= GEN7_FF_TS_SCHED_HW;
3433         reg |= GEN7_FF_VS_SCHED_HW;
3434         reg |= GEN7_FF_DS_SCHED_HW;
3435
3436         I915_WRITE(GEN7_FF_THREAD_MODE, reg);
3437 }
3438
3439 static void haswell_init_clock_gating(struct drm_device *dev)
3440 {
3441         struct drm_i915_private *dev_priv = dev->dev_private;
3442         int pipe;
3443
3444         I915_WRITE(WM3_LP_ILK, 0);
3445         I915_WRITE(WM2_LP_ILK, 0);
3446         I915_WRITE(WM1_LP_ILK, 0);
3447
3448         /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3449          * This implements the WaDisableRCZUnitClockGating workaround.
3450          */
3451         I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
3452
3453         /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3454         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3455                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3456
3457         /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3458         I915_WRITE(GEN7_L3CNTLREG1,
3459                         GEN7_WA_FOR_GEN7_L3_CONTROL);
3460         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3461                         GEN7_WA_L3_CHICKEN_MODE);
3462
3463         /* This is required by WaCatErrorRejectionIssue */
3464         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3465                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3466                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3467
3468         for_each_pipe(pipe) {
3469                 I915_WRITE(DSPCNTR(pipe),
3470                            I915_READ(DSPCNTR(pipe)) |
3471                            DISPPLANE_TRICKLE_FEED_DISABLE);
3472                 intel_flush_display_plane(dev_priv, pipe);
3473         }
3474
3475         gen7_setup_fixed_func_scheduler(dev_priv);
3476
3477         /* WaDisable4x2SubspanOptimization */
3478         I915_WRITE(CACHE_MODE_1,
3479                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3480
3481         /* XXX: This is a workaround for early silicon revisions and should be
3482          * removed later.
3483          */
3484         I915_WRITE(WM_DBG,
3485                         I915_READ(WM_DBG) |
3486                         WM_DBG_DISALLOW_MULTIPLE_LP |
3487                         WM_DBG_DISALLOW_SPRITE |
3488                         WM_DBG_DISALLOW_MAXFIFO);
3489
3490 }
3491
3492 static void ivybridge_init_clock_gating(struct drm_device *dev)
3493 {
3494         struct drm_i915_private *dev_priv = dev->dev_private;
3495         int pipe;
3496         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3497         uint32_t snpcr;
3498
3499         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3500
3501         I915_WRITE(WM3_LP_ILK, 0);
3502         I915_WRITE(WM2_LP_ILK, 0);
3503         I915_WRITE(WM1_LP_ILK, 0);
3504
3505         I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3506
3507         /* WaDisableEarlyCull */
3508         I915_WRITE(_3D_CHICKEN3,
3509                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3510
3511         /* WaDisableBackToBackFlipFix */
3512         I915_WRITE(IVB_CHICKEN3,
3513                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3514                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
3515
3516         /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3517         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3518                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3519
3520         /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3521         I915_WRITE(GEN7_L3CNTLREG1,
3522                         GEN7_WA_FOR_GEN7_L3_CONTROL);
3523         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
3524                         GEN7_WA_L3_CHICKEN_MODE);
3525
3526         /* WaForceL3Serialization */
3527         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3528                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3529
3530         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3531          * gating disable must be set.  Failure to set it results in
3532          * flickering pixels due to Z write ordering failures after
3533          * some amount of runtime in the Mesa "fire" demo, and Unigine
3534          * Sanctuary and Tropics, and apparently anything else with
3535          * alpha test or pixel discard.
3536          *
3537          * According to the spec, bit 11 (RCCUNIT) must also be set,
3538          * but we didn't debug actual testcases to find it out.
3539          *
3540          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3541          * This implements the WaDisableRCZUnitClockGating workaround.
3542          */
3543         I915_WRITE(GEN6_UCGCTL2,
3544                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3545                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3546
3547         /* This is required by WaCatErrorRejectionIssue */
3548         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3549                         I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3550                         GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3551
3552         for_each_pipe(pipe) {
3553                 I915_WRITE(DSPCNTR(pipe),
3554                            I915_READ(DSPCNTR(pipe)) |
3555                            DISPPLANE_TRICKLE_FEED_DISABLE);
3556                 intel_flush_display_plane(dev_priv, pipe);
3557         }
3558
3559         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3560                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
3561
3562         gen7_setup_fixed_func_scheduler(dev_priv);
3563
3564         /* WaDisable4x2SubspanOptimization */
3565         I915_WRITE(CACHE_MODE_1,
3566                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3567
3568         snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
3569         snpcr &= ~GEN6_MBC_SNPCR_MASK;
3570         snpcr |= GEN6_MBC_SNPCR_MED;
3571         I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr);
3572 }
3573
3574 static void valleyview_init_clock_gating(struct drm_device *dev)
3575 {
3576         struct drm_i915_private *dev_priv = dev->dev_private;
3577         int pipe;
3578         uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
3579
3580         I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
3581
3582         I915_WRITE(WM3_LP_ILK, 0);
3583         I915_WRITE(WM2_LP_ILK, 0);
3584         I915_WRITE(WM1_LP_ILK, 0);
3585
3586         I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
3587
3588         /* WaDisableEarlyCull */
3589         I915_WRITE(_3D_CHICKEN3,
3590                    _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_DISABLE_OBJEND_CULL));
3591
3592         /* WaDisableBackToBackFlipFix */
3593         I915_WRITE(IVB_CHICKEN3,
3594                    CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
3595                    CHICKEN3_DGMG_DONE_FIX_DISABLE);
3596
3597         /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
3598         I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
3599                    GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
3600
3601         /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
3602         I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
3603         I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
3604
3605         /* WaForceL3Serialization */
3606         I915_WRITE(GEN7_L3SQCREG4, I915_READ(GEN7_L3SQCREG4) &
3607                    ~L3SQ_URB_READ_CAM_MATCH_DISABLE);
3608
3609         /* This is required by WaCatErrorRejectionIssue */
3610         I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
3611                    I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
3612                    GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
3613
3614         I915_WRITE(GEN6_MBCTL, I915_READ(GEN6_MBCTL) |
3615                    GEN6_MBCTL_ENABLE_BOOT_FETCH);
3616
3617
3618         /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
3619          * gating disable must be set.  Failure to set it results in
3620          * flickering pixels due to Z write ordering failures after
3621          * some amount of runtime in the Mesa "fire" demo, and Unigine
3622          * Sanctuary and Tropics, and apparently anything else with
3623          * alpha test or pixel discard.
3624          *
3625          * According to the spec, bit 11 (RCCUNIT) must also be set,
3626          * but we didn't debug actual testcases to find it out.
3627          *
3628          * According to the spec, bit 13 (RCZUNIT) must be set on IVB.
3629          * This implements the WaDisableRCZUnitClockGating workaround.
3630          *
3631          * Also apply WaDisableVDSUnitClockGating and
3632          * WaDisableRCPBUnitClockGating.
3633          */
3634         I915_WRITE(GEN6_UCGCTL2,
3635                    GEN7_VDSUNIT_CLOCK_GATE_DISABLE |
3636                    GEN7_TDLUNIT_CLOCK_GATE_DISABLE |
3637                    GEN6_RCZUNIT_CLOCK_GATE_DISABLE |
3638                    GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
3639                    GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
3640
3641         I915_WRITE(GEN7_UCGCTL4, GEN7_L3BANK2X_CLOCK_GATE_DISABLE);
3642
3643         for_each_pipe(pipe) {
3644                 I915_WRITE(DSPCNTR(pipe),
3645                            I915_READ(DSPCNTR(pipe)) |
3646                            DISPPLANE_TRICKLE_FEED_DISABLE);
3647                 intel_flush_display_plane(dev_priv, pipe);
3648         }
3649
3650         I915_WRITE(CACHE_MODE_1,
3651                    _MASKED_BIT_ENABLE(PIXEL_SUBSPAN_COLLECT_OPT_DISABLE));
3652
3653         /*
3654          * On ValleyView, the GUnit needs to signal the GT
3655          * when flip and other events complete.  So enable
3656          * all the GUnit->GT interrupts here
3657          */
3658         I915_WRITE(VLV_DPFLIPSTAT, PIPEB_LINE_COMPARE_INT_EN |
3659                    PIPEB_HLINE_INT_EN | PIPEB_VBLANK_INT_EN |
3660                    SPRITED_FLIPDONE_INT_EN | SPRITEC_FLIPDONE_INT_EN |
3661                    PLANEB_FLIPDONE_INT_EN | PIPEA_LINE_COMPARE_INT_EN |
3662                    PIPEA_HLINE_INT_EN | PIPEA_VBLANK_INT_EN |
3663                    SPRITEB_FLIPDONE_INT_EN | SPRITEA_FLIPDONE_INT_EN |
3664                    PLANEA_FLIPDONE_INT_EN);
3665 }
3666
3667 static void g4x_init_clock_gating(struct drm_device *dev)
3668 {
3669         struct drm_i915_private *dev_priv = dev->dev_private;
3670         uint32_t dspclk_gate;
3671
3672         I915_WRITE(RENCLK_GATE_D1, 0);
3673         I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
3674                    GS_UNIT_CLOCK_GATE_DISABLE |
3675                    CL_UNIT_CLOCK_GATE_DISABLE);
3676         I915_WRITE(RAMCLK_GATE_D, 0);
3677         dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
3678                 OVRUNIT_CLOCK_GATE_DISABLE |
3679                 OVCUNIT_CLOCK_GATE_DISABLE;
3680         if (IS_GM45(dev))
3681                 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
3682         I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
3683 }
3684
3685 static void crestline_init_clock_gating(struct drm_device *dev)
3686 {
3687         struct drm_i915_private *dev_priv = dev->dev_private;
3688
3689         I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
3690         I915_WRITE(RENCLK_GATE_D2, 0);
3691         I915_WRITE(DSPCLK_GATE_D, 0);
3692         I915_WRITE(RAMCLK_GATE_D, 0);
3693         I915_WRITE16(DEUC, 0);
3694 }
3695
3696 static void broadwater_init_clock_gating(struct drm_device *dev)
3697 {
3698         struct drm_i915_private *dev_priv = dev->dev_private;
3699
3700         I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
3701                    I965_RCC_CLOCK_GATE_DISABLE |
3702                    I965_RCPB_CLOCK_GATE_DISABLE |
3703                    I965_ISC_CLOCK_GATE_DISABLE |
3704                    I965_FBC_CLOCK_GATE_DISABLE);
3705         I915_WRITE(RENCLK_GATE_D2, 0);
3706 }
3707
3708 static void gen3_init_clock_gating(struct drm_device *dev)
3709 {
3710         struct drm_i915_private *dev_priv = dev->dev_private;
3711         u32 dstate = I915_READ(D_STATE);
3712
3713         dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
3714                 DSTATE_DOT_CLOCK_GATING;
3715         I915_WRITE(D_STATE, dstate);
3716
3717         if (IS_PINEVIEW(dev))
3718                 I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
3719
3720         /* IIR "flip pending" means done if this bit is set */
3721         I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
3722 }
3723
3724 static void i85x_init_clock_gating(struct drm_device *dev)
3725 {
3726         struct drm_i915_private *dev_priv = dev->dev_private;
3727
3728         I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
3729 }
3730
3731 static void i830_init_clock_gating(struct drm_device *dev)
3732 {
3733         struct drm_i915_private *dev_priv = dev->dev_private;
3734
3735         I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
3736 }
3737
3738 static void ibx_init_clock_gating(struct drm_device *dev)
3739 {
3740         struct drm_i915_private *dev_priv = dev->dev_private;
3741
3742         /*
3743          * On Ibex Peak and Cougar Point, we need to disable clock
3744          * gating for the panel power sequencer or it will fail to
3745          * start up when no ports are active.
3746          */
3747         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3748 }
3749
3750 static void cpt_init_clock_gating(struct drm_device *dev)
3751 {
3752         struct drm_i915_private *dev_priv = dev->dev_private;
3753         int pipe;
3754
3755         /*
3756          * On Ibex Peak and Cougar Point, we need to disable clock
3757          * gating for the panel power sequencer or it will fail to
3758          * start up when no ports are active.
3759          */
3760         I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
3761         I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
3762                    DPLS_EDP_PPS_FIX_DIS);
3763         /* Without this, mode sets may fail silently on FDI */
3764         for_each_pipe(pipe)
3765                 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
3766 }
3767
3768 void intel_init_clock_gating(struct drm_device *dev)
3769 {
3770         struct drm_i915_private *dev_priv = dev->dev_private;
3771
3772         dev_priv->display.init_clock_gating(dev);
3773
3774         if (dev_priv->display.init_pch_clock_gating)
3775                 dev_priv->display.init_pch_clock_gating(dev);
3776 }
3777
3778 /* Starting with Haswell, we have different power wells for
3779  * different parts of the GPU. This attempts to enable them all.
3780  */
3781 void intel_init_power_wells(struct drm_device *dev)
3782 {
3783         struct drm_i915_private *dev_priv = dev->dev_private;
3784         unsigned long power_wells[] = {
3785                 HSW_PWR_WELL_CTL1,
3786                 HSW_PWR_WELL_CTL2,
3787                 HSW_PWR_WELL_CTL4
3788         };
3789         int i;
3790
3791         if (!IS_HASWELL(dev))
3792                 return;
3793
3794         mutex_lock(&dev->struct_mutex);
3795
3796         for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
3797                 int well = I915_READ(power_wells[i]);
3798
3799                 if ((well & HSW_PWR_WELL_STATE) == 0) {
3800                         I915_WRITE(power_wells[i], well | HSW_PWR_WELL_ENABLE);
3801                         if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
3802                                 DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
3803                 }
3804         }
3805
3806         mutex_unlock(&dev->struct_mutex);
3807 }
3808
3809 /* Set up chip specific power management-related functions */
3810 void intel_init_pm(struct drm_device *dev)
3811 {
3812         struct drm_i915_private *dev_priv = dev->dev_private;
3813
3814         if (I915_HAS_FBC(dev)) {
3815                 if (HAS_PCH_SPLIT(dev)) {
3816                         dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
3817                         dev_priv->display.enable_fbc = ironlake_enable_fbc;
3818                         dev_priv->display.disable_fbc = ironlake_disable_fbc;
3819                 } else if (IS_GM45(dev)) {
3820                         dev_priv->display.fbc_enabled = g4x_fbc_enabled;
3821                         dev_priv->display.enable_fbc = g4x_enable_fbc;
3822                         dev_priv->display.disable_fbc = g4x_disable_fbc;
3823                 } else if (IS_CRESTLINE(dev)) {
3824                         dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
3825                         dev_priv->display.enable_fbc = i8xx_enable_fbc;
3826                         dev_priv->display.disable_fbc = i8xx_disable_fbc;
3827                 }
3828                 /* 855GM needs testing */
3829         }
3830
3831         /* For cxsr */
3832         if (IS_PINEVIEW(dev))
3833                 i915_pineview_get_mem_freq(dev);
3834         else if (IS_GEN5(dev))
3835                 i915_ironlake_get_mem_freq(dev);
3836
3837         /* For FIFO watermark updates */
3838         if (HAS_PCH_SPLIT(dev)) {
3839                 if (HAS_PCH_IBX(dev))
3840                         dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
3841                 else if (HAS_PCH_CPT(dev))
3842                         dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
3843
3844                 if (IS_GEN5(dev)) {
3845                         if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
3846                                 dev_priv->display.update_wm = ironlake_update_wm;
3847                         else {
3848                                 DRM_DEBUG_KMS("Failed to get proper latency. "
3849                                               "Disable CxSR\n");
3850                                 dev_priv->display.update_wm = NULL;
3851                         }
3852                         dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
3853                 } else if (IS_GEN6(dev)) {
3854                         if (SNB_READ_WM0_LATENCY()) {
3855                                 dev_priv->display.update_wm = sandybridge_update_wm;
3856                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3857                         } else {
3858                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3859                                               "Disable CxSR\n");
3860                                 dev_priv->display.update_wm = NULL;
3861                         }
3862                         dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3863                 } else if (IS_IVYBRIDGE(dev)) {
3864                         /* FIXME: detect B0+ stepping and use auto training */
3865                         if (SNB_READ_WM0_LATENCY()) {
3866                                 dev_priv->display.update_wm = sandybridge_update_wm;
3867                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3868                         } else {
3869                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3870                                               "Disable CxSR\n");
3871                                 dev_priv->display.update_wm = NULL;
3872                         }
3873                         dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3874                 } else if (IS_HASWELL(dev)) {
3875                         if (SNB_READ_WM0_LATENCY()) {
3876                                 dev_priv->display.update_wm = sandybridge_update_wm;
3877                                 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3878                                 dev_priv->display.update_linetime_wm = haswell_update_linetime_wm;
3879                         } else {
3880                                 DRM_DEBUG_KMS("Failed to read display plane latency. "
3881                                               "Disable CxSR\n");
3882                                 dev_priv->display.update_wm = NULL;
3883                         }
3884                         dev_priv->display.init_clock_gating = haswell_init_clock_gating;
3885                 } else
3886                         dev_priv->display.update_wm = NULL;
3887         } else if (IS_VALLEYVIEW(dev)) {
3888                 dev_priv->display.update_wm = valleyview_update_wm;
3889                 dev_priv->display.init_clock_gating =
3890                         valleyview_init_clock_gating;
3891         } else if (IS_PINEVIEW(dev)) {
3892                 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
3893                                             dev_priv->is_ddr3,
3894                                             dev_priv->fsb_freq,
3895                                             dev_priv->mem_freq)) {
3896                         DRM_INFO("failed to find known CxSR latency "
3897                                  "(found ddr%s fsb freq %d, mem freq %d), "
3898                                  "disabling CxSR\n",
3899                                  (dev_priv->is_ddr3 == 1) ? "3" : "2",
3900                                  dev_priv->fsb_freq, dev_priv->mem_freq);
3901                         /* Disable CxSR and never update its watermark again */
3902                         pineview_disable_cxsr(dev);
3903                         dev_priv->display.update_wm = NULL;
3904                 } else
3905                         dev_priv->display.update_wm = pineview_update_wm;
3906                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3907         } else if (IS_G4X(dev)) {
3908                 dev_priv->display.update_wm = g4x_update_wm;
3909                 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
3910         } else if (IS_GEN4(dev)) {
3911                 dev_priv->display.update_wm = i965_update_wm;
3912                 if (IS_CRESTLINE(dev))
3913                         dev_priv->display.init_clock_gating = crestline_init_clock_gating;
3914                 else if (IS_BROADWATER(dev))
3915                         dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
3916         } else if (IS_GEN3(dev)) {
3917                 dev_priv->display.update_wm = i9xx_update_wm;
3918                 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
3919                 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3920         } else if (IS_I865G(dev)) {
3921                 dev_priv->display.update_wm = i830_update_wm;
3922                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3923                 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3924         } else if (IS_I85X(dev)) {
3925                 dev_priv->display.update_wm = i9xx_update_wm;
3926                 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
3927                 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3928         } else {
3929                 dev_priv->display.update_wm = i830_update_wm;
3930                 dev_priv->display.init_clock_gating = i830_init_clock_gating;
3931                 if (IS_845G(dev))
3932                         dev_priv->display.get_fifo_size = i845_get_fifo_size;
3933                 else
3934                         dev_priv->display.get_fifo_size = i830_get_fifo_size;
3935         }
3936 }
3937
3938 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
3939 {
3940         u32 gt_thread_status_mask;
3941
3942         if (IS_HASWELL(dev_priv->dev))
3943                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
3944         else
3945                 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
3946
3947         /* w/a for a sporadic read returning 0 by waiting for the GT
3948          * thread to wake up.
3949          */
3950         if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
3951                 DRM_ERROR("GT thread status wait timed out\n");
3952 }
3953
3954 static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
3955 {
3956         u32 forcewake_ack;
3957
3958         if (IS_HASWELL(dev_priv->dev))
3959                 forcewake_ack = FORCEWAKE_ACK_HSW;
3960         else
3961                 forcewake_ack = FORCEWAKE_ACK;
3962
3963         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3964                             FORCEWAKE_ACK_TIMEOUT_MS))
3965                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3966
3967         I915_WRITE_NOTRACE(FORCEWAKE, 1);
3968         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3969
3970         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3971                             FORCEWAKE_ACK_TIMEOUT_MS))
3972                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3973
3974         __gen6_gt_wait_for_thread_c0(dev_priv);
3975 }
3976
3977 static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
3978 {
3979         u32 forcewake_ack;
3980
3981         if (IS_HASWELL(dev_priv->dev))
3982                 forcewake_ack = FORCEWAKE_ACK_HSW;
3983         else
3984                 forcewake_ack = FORCEWAKE_MT_ACK;
3985
3986         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1) == 0,
3987                             FORCEWAKE_ACK_TIMEOUT_MS))
3988                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
3989
3990         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1));
3991         POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
3992
3993         if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & 1),
3994                             FORCEWAKE_ACK_TIMEOUT_MS))
3995                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
3996
3997         __gen6_gt_wait_for_thread_c0(dev_priv);
3998 }
3999
4000 /*
4001  * Generally this is called implicitly by the register read function. However,
4002  * if some sequence requires the GT to not power down then this function should
4003  * be called at the beginning of the sequence followed by a call to
4004  * gen6_gt_force_wake_put() at the end of the sequence.
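 *
 * A rough usage sketch (the register accesses in between are only an
 * illustration):
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	... a sequence of I915_READ()/I915_WRITE() accesses that must not
 *	    observe the GT powering down ...
 *	gen6_gt_force_wake_put(dev_priv);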
4005  */
4006 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
4007 {
4008         unsigned long irqflags;
4009
4010         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4011         if (dev_priv->forcewake_count++ == 0)
4012                 dev_priv->gt.force_wake_get(dev_priv);
4013         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
4014 }
4015
4016 void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
4017 {
4018         u32 gtfifodbg;
4019         gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
4020         if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
4021              "MMIO read or write has been dropped %x\n", gtfifodbg))
4022                 I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
4023 }
4024
4025 static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4026 {
4027         I915_WRITE_NOTRACE(FORCEWAKE, 0);
4028         /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4029         gen6_gt_check_fifodbg(dev_priv);
4030 }
4031
4032 static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
4033 {
4034         I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1));
4035         /* gen6_gt_check_fifodbg doubles as the POSTING_READ */
4036         gen6_gt_check_fifodbg(dev_priv);
4037 }
4038
4039 /*
4040  * see gen6_gt_force_wake_get()
4041  */
4042 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
4043 {
4044         unsigned long irqflags;
4045
4046         spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
4047         if (--dev_priv->forcewake_count == 0)
4048                 dev_priv->gt.force_wake_put(dev_priv);
4049         spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
4050 }
4051
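/*
 * Make sure the GT FIFO that buffers MMIO writes has a free slot before the
 * caller posts another write: spin (up to 500 * 10us) until the free-entry
 * count rises above the reserved threshold.  Returns non-zero if the wait
 * timed out.
 */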
4052 int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
4053 {
4054         int ret = 0;
4055
4056         if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
4057                 int loop = 500;
4058                 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4059                 while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
4060                         udelay(10);
4061                         fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
4062                 }
4063                 if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
4064                         ++ret;
4065                 dev_priv->gt_fifo_count = fifo;
4066         }
4067         dev_priv->gt_fifo_count--;
4068
4069         return ret;
4070 }
4071
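/*
 * Valleyview uses its own forcewake request/ack registers, but the
 * handshake mirrors the gen6 multi-threaded variant above.
 */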
4072 static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
4073 {
4074         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1) == 0,
4075                             FORCEWAKE_ACK_TIMEOUT_MS))
4076                 DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");
4077
4078         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(1));
4079
4080         if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & 1),
4081                             FORCEWAKE_ACK_TIMEOUT_MS))
4082                 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
4083
4084         __gen6_gt_wait_for_thread_c0(dev_priv);
4085 }
4086
4087 static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
4088 {
4089         I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(1));
4090         /* The below doubles as a POSTING_READ */
4091         gen6_gt_check_fifodbg(dev_priv);
4092 }
4093
4094 void intel_gt_init(struct drm_device *dev)
4095 {
4096         struct drm_i915_private *dev_priv = dev->dev_private;
4097
4098         spin_lock_init(&dev_priv->gt_lock);
4099
4100         if (IS_VALLEYVIEW(dev)) {
4101                 dev_priv->gt.force_wake_get = vlv_force_wake_get;
4102                 dev_priv->gt.force_wake_put = vlv_force_wake_put;
4103         } else if (INTEL_INFO(dev)->gen >= 6) {
4104                 dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
4105                 dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
4106
4107                 /* IVB configs may use multi-threaded forcewake */
4108                 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
4109                         u32 ecobus;
4110
4111                         /* A small trick here - if the bios hasn't configured
4112                          * MT forcewake, and if the device is in RC6, then
4113                          * force_wake_mt_get will not wake the device and the
4114                          * ECOBUS read will return zero. Which will be
4115                          * (correctly) interpreted by the test below as MT
4116                          * forcewake being disabled.
4117                          */
4118                         mutex_lock(&dev->struct_mutex);
4119                         __gen6_gt_force_wake_mt_get(dev_priv);
4120                         ecobus = I915_READ_NOTRACE(ECOBUS);
4121                         __gen6_gt_force_wake_mt_put(dev_priv);
4122                         mutex_unlock(&dev->struct_mutex);
4123
4124                         if (ecobus & FORCEWAKE_MT_ENABLE) {
4125                                 DRM_DEBUG_KMS("Using MT version of forcewake\n");
4126                                 dev_priv->gt.force_wake_get =
4127                                         __gen6_gt_force_wake_mt_get;
4128                                 dev_priv->gt.force_wake_put =
4129                                         __gen6_gt_force_wake_mt_put;
4130                         }
4131                 }
4132         }
4133 }
4134
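/*
 * Mailbox protocol for talking to the PCU firmware: place the argument in
 * GEN6_PCODE_DATA, latch the command by writing the mailbox with the READY
 * bit set, wait for the firmware to clear READY, then (for reads) fetch the
 * reply from GEN6_PCODE_DATA.  Callers must hold struct_mutex.
 */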
4135 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
4136 {
4137         WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
4138
4139         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4140                 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
4141                 return -EAGAIN;
4142         }
4143
4144         I915_WRITE(GEN6_PCODE_DATA, *val);
4145         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4146
4147         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4148                      500)) {
4149                 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
4150                 return -ETIMEDOUT;
4151         }
4152
4153         *val = I915_READ(GEN6_PCODE_DATA);
4154         I915_WRITE(GEN6_PCODE_DATA, 0);
4155
4156         return 0;
4157 }
4158
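/* Write-only variant of the mailbox sequence above; no reply is read back. */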
4159 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
4160 {
4161         WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
4162
4163         if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
4164                 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
4165                 return -EAGAIN;
4166         }
4167
4168         I915_WRITE(GEN6_PCODE_DATA, val);
4169         I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
4170
4171         if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
4172                      500)) {
4173                 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
4174                 return -ETIMEDOUT;
4175         }
4176
4177         I915_WRITE(GEN6_PCODE_DATA, 0);
4178
4179         return 0;
4180 }