/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

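/*
 * The tables below map HPD pin numbers to the per-platform hotplug bits in
 * the relevant interrupt register; they are indexed by enum hpd_pin and
 * consumed by intel_hpd_irq_handler() further down in this file.
 */
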
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

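/*
 * Each interrupt bank is driven by three registers: IER enables a source,
 * IMR masks it, and IIR latches which sources have fired. The reset
 * helpers below clear IIR twice because the identity register can latch a
 * second event while the first is being acked, hence the note below.
 */
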
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

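/*
 * A typical postinstall sequence first resets a bank and then arms it,
 * e.g. (illustrative only):
 *
 *	GEN5_IRQ_RESET(GT);
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * i.e. assert IIR is clear, program IER, then unmask via IMR, with a
 * posting read to flush the writes.
 */
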
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

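/*
 * On gen8+ the PM interrupts moved into GT interrupt bank 2 (GEN8_GT_IIR(2)
 * and friends), so the helpers above pick the right register for the
 * running platform and let the PM code below stay generation-agnostic.
 */
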
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang on a looping batchbuffer if
	 * GEN6_PM_UP_EI_EXPIRED is masked; VLV and CHV may do so as well.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}

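/*
 * Note the teardown ordering below: the interrupts_enabled flag is cleared
 * first so that a concurrent gen6_rps_irq_handler() stops queueing new
 * work, then the work item is flushed, and only then are the RPS bits
 * masked and any stale IIR bits cleared.
 */
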
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion handling
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

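/*
 * The helpers below implement the counter semantics from the diagram:
 * gen2 has no hardware frame counter at all, and on gen3/4 the counter
 * increments at the start of active, so i915_get_vblank_counter() cooks
 * up a counter that flips at vblank start by additionally comparing the
 * pixel counter against vbl_start.
 */
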
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config->base.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
				/* fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

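/*
 * Once an HPD interrupt storm has been detected, hotplug detection for the
 * offending pin is switched to polling; after the above delay (two
 * minutes) hotplug_reenable_work tries to re-enable the interrupt again.
 */
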
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

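/*
 * Storm detection: more than HPD_STORM_THRESHOLD interrupts on one pin
 * within an HPD_STORM_DETECT_PERIOD millisecond window mark the pin as
 * HPD_MARK_DISABLED; see intel_hpd_irq_handler() below.
 */
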
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

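/*
 * Note that GMBUS and DP AUX completions both wake the same
 * dev_priv->gmbus_wait_queue above; the waiters are expected to re-check
 * their own completion status after waking.
 */
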
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev) || IS_VALLEYVIEW(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

2069 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2071 struct drm_i915_private *dev_priv = dev->dev_private;
2073 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2074 u32 dig_hotplug_reg;
2076 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2077 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2079 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2081 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2082 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2083 SDE_AUDIO_POWER_SHIFT_CPT);
2084 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2088 if (pch_iir & SDE_AUX_MASK_CPT)
2089 dp_aux_irq_handler(dev);
2091 if (pch_iir & SDE_GMBUS_CPT)
2092 gmbus_irq_handler(dev);
2094 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2095 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2097 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2098 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2100 if (pch_iir & SDE_FDI_MASK_CPT)
2101 for_each_pipe(dev_priv, pipe)
2102 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2103 pipe_name(pipe),
2104 I915_READ(FDI_RX_IIR(pipe)));
2106 if (pch_iir & SDE_ERROR_CPT)
2107 cpt_serr_int_handler(dev);
2110 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2112 struct drm_i915_private *dev_priv = dev->dev_private;
2115 if (de_iir & DE_AUX_CHANNEL_A)
2116 dp_aux_irq_handler(dev);
2118 if (de_iir & DE_GSE)
2119 intel_opregion_asle_intr(dev);
2121 if (de_iir & DE_POISON)
2122 DRM_ERROR("Poison interrupt\n");
2124 for_each_pipe(dev_priv, pipe) {
2125 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2126 intel_pipe_handle_vblank(dev, pipe))
2127 intel_check_page_flip(dev, pipe);
2129 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2130 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2132 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2133 i9xx_pipe_crc_irq_handler(dev, pipe);
2135 /* plane/pipes map 1:1 on ilk+ */
2136 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2137 intel_prepare_page_flip(dev, pipe);
2138 intel_finish_page_flip_plane(dev, pipe);
2142 /* check event from PCH */
2143 if (de_iir & DE_PCH_EVENT) {
2144 u32 pch_iir = I915_READ(SDEIIR);
2146 if (HAS_PCH_CPT(dev))
2147 cpt_irq_handler(dev, pch_iir);
2148 else
2149 ibx_irq_handler(dev, pch_iir);
2151 /* should clear PCH hotplug event before clear CPU irq */
2152 I915_WRITE(SDEIIR, pch_iir);
2155 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2156 ironlake_rps_change_irq_handler(dev);
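/*
 * IVB/HSW variant of the display interrupt dispatch above; the flow
 * matches the ILK version, but with the IVB DE bit layout and the
 * error interrupts routed through ivb_err_int_handler().
 */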
2159 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2161 struct drm_i915_private *dev_priv = dev->dev_private;
2164 if (de_iir & DE_ERR_INT_IVB)
2165 ivb_err_int_handler(dev);
2167 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2168 dp_aux_irq_handler(dev);
2170 if (de_iir & DE_GSE_IVB)
2171 intel_opregion_asle_intr(dev);
2173 for_each_pipe(dev_priv, pipe) {
2174 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2175 intel_pipe_handle_vblank(dev, pipe))
2176 intel_check_page_flip(dev, pipe);
2178 /* plane/pipes map 1:1 on ilk+ */
2179 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2180 intel_prepare_page_flip(dev, pipe);
2181 intel_finish_page_flip_plane(dev, pipe);
2185 /* check event from PCH */
2186 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2187 u32 pch_iir = I915_READ(SDEIIR);
2189 cpt_irq_handler(dev, pch_iir);
2191 /* clear PCH hotplug event before clear CPU irq */
2192 I915_WRITE(SDEIIR, pch_iir);
2196 /*
2197 * To handle irqs with the minimum potential races with fresh interrupts, we:
2198 * 1 - Disable Master Interrupt Control.
2199 * 2 - Find the source(s) of the interrupt.
2200 * 3 - Clear the Interrupt Identity bits (IIR).
2201 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2202 * 5 - Re-enable Master Interrupt Control.
2203 */
2204 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2206 struct drm_device *dev = arg;
2207 struct drm_i915_private *dev_priv = dev->dev_private;
2208 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2209 irqreturn_t ret = IRQ_NONE;
2211 /* We get interrupts on unclaimed registers, so check for this before we
2212 * do any I915_{READ,WRITE}. */
2213 intel_uncore_check_errors(dev);
2215 /* disable master interrupt before clearing iir */
2216 de_ier = I915_READ(DEIER);
2217 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2218 POSTING_READ(DEIER);
2220 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2221 * interrupts will be stored on its back queue, and then we'll be
2222 * able to process them after we restore SDEIER (as soon as we restore
2223 * it, we'll get an interrupt if SDEIIR still has something to process
2224 * due to its back queue). */
2225 if (!HAS_PCH_NOP(dev)) {
2226 sde_ier = I915_READ(SDEIER);
2227 I915_WRITE(SDEIER, 0);
2228 POSTING_READ(SDEIER);
2231 /* Find, clear, then process each source of interrupt */
2233 gt_iir = I915_READ(GTIIR);
2234 if (gt_iir) {
2235 I915_WRITE(GTIIR, gt_iir);
2236 ret = IRQ_HANDLED;
2237 if (INTEL_INFO(dev)->gen >= 6)
2238 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2239 else
2240 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2241 }
2243 de_iir = I915_READ(DEIIR);
2244 if (de_iir) {
2245 I915_WRITE(DEIIR, de_iir);
2246 ret = IRQ_HANDLED;
2247 if (INTEL_INFO(dev)->gen >= 7)
2248 ivb_display_irq_handler(dev, de_iir);
2249 else
2250 ilk_display_irq_handler(dev, de_iir);
2251 }
2253 if (INTEL_INFO(dev)->gen >= 6) {
2254 u32 pm_iir = I915_READ(GEN6_PMIIR);
2255 if (pm_iir) {
2256 I915_WRITE(GEN6_PMIIR, pm_iir);
2257 ret = IRQ_HANDLED;
2258 gen6_rps_irq_handler(dev_priv, pm_iir);
2259 }
2260 }
2262 I915_WRITE(DEIER, de_ier);
2263 POSTING_READ(DEIER);
2264 if (!HAS_PCH_NOP(dev)) {
2265 I915_WRITE(SDEIER, sde_ier);
2266 POSTING_READ(SDEIER);
2267 }
2269 return ret;
2270 }
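/*
 * BDW+ top-level handler: mask the master interrupt control, then
 * find, clear and process every asserted GT/DE/PCH source before
 * re-enabling master control at the end.
 */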
2272 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2274 struct drm_device *dev = arg;
2275 struct drm_i915_private *dev_priv = dev->dev_private;
2276 u32 master_ctl;
2277 irqreturn_t ret = IRQ_NONE;
2278 uint32_t tmp = 0;
2279 enum pipe pipe;
2280 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2282 if (IS_GEN9(dev))
2283 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2284 GEN9_AUX_CHANNEL_D;
2286 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2287 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2288 if (!master_ctl)
2289 return IRQ_NONE;
2291 I915_WRITE(GEN8_MASTER_IRQ, 0);
2292 POSTING_READ(GEN8_MASTER_IRQ);
2294 /* Find, clear, then process each source of interrupt */
2296 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2298 if (master_ctl & GEN8_DE_MISC_IRQ) {
2299 tmp = I915_READ(GEN8_DE_MISC_IIR);
2300 if (tmp) {
2301 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2302 ret = IRQ_HANDLED;
2303 if (tmp & GEN8_DE_MISC_GSE)
2304 intel_opregion_asle_intr(dev);
2305 else
2306 DRM_ERROR("Unexpected DE Misc interrupt\n");
2307 }
2308 else
2309 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2312 if (master_ctl & GEN8_DE_PORT_IRQ) {
2313 tmp = I915_READ(GEN8_DE_PORT_IIR);
2314 if (tmp) {
2315 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2316 ret = IRQ_HANDLED;
2318 if (tmp & aux_mask)
2319 dp_aux_irq_handler(dev);
2320 else
2321 DRM_ERROR("Unexpected DE Port interrupt\n");
2322 }
2323 else
2324 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2327 for_each_pipe(dev_priv, pipe) {
2328 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2330 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2331 continue;
2333 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2334 if (pipe_iir) {
2335 ret = IRQ_HANDLED;
2336 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2338 if (pipe_iir & GEN8_PIPE_VBLANK &&
2339 intel_pipe_handle_vblank(dev, pipe))
2340 intel_check_page_flip(dev, pipe);
2342 if (IS_GEN9(dev))
2343 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2344 else
2345 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2347 if (flip_done) {
2348 intel_prepare_page_flip(dev, pipe);
2349 intel_finish_page_flip_plane(dev, pipe);
2350 }
2352 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2353 hsw_pipe_crc_irq_handler(dev, pipe);
2355 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2356 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2357 pipe);
2360 if (IS_GEN9(dev))
2361 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2362 else
2363 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2365 if (fault_errors)
2366 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2367 pipe_name(pipe),
2368 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2369 } else
2370 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2373 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2374 /*
2375 * FIXME(BDW): Assume for now that the new interrupt handling
2376 * scheme also closed the SDE interrupt handling race we've seen
2377 * on older pch-split platforms. But this needs testing.
2378 */
2379 u32 pch_iir = I915_READ(SDEIIR);
2380 if (pch_iir) {
2381 I915_WRITE(SDEIIR, pch_iir);
2382 ret = IRQ_HANDLED;
2383 cpt_irq_handler(dev, pch_iir);
2384 } else
2385 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2389 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2390 POSTING_READ(GEN8_MASTER_IRQ);
2392 return ret;
2393 }
2395 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2396 bool reset_completed)
2398 struct intel_engine_cs *ring;
2399 int i;
2401 /*
2402 * Notify all waiters for GPU completion events that reset state has
2403 * been changed, and that they need to restart their wait after
2404 * checking for potential errors (and bail out to drop locks if there is
2405 * a gpu reset pending so that i915_error_work_func can acquire them).
2406 */
2408 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2409 for_each_ring(ring, dev_priv, i)
2410 wake_up_all(&ring->irq_queue);
2412 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2413 wake_up_all(&dev_priv->pending_flip_queue);
2416 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2417 * reset state is cleared.
2419 if (reset_completed)
2420 wake_up_all(&dev_priv->gpu_error.reset_queue);
2423 /**
2424 * i915_error_work_func - do process context error handling work
2425 * @work: work struct
2426 *
2427 * Fire an error uevent so userspace can see that a hang or error
2428 * occurred.
2429 */
2430 static void i915_error_work_func(struct work_struct *work)
2432 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2433 work);
2434 struct drm_i915_private *dev_priv =
2435 container_of(error, struct drm_i915_private, gpu_error);
2436 struct drm_device *dev = dev_priv->dev;
2437 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2438 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2439 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2440 int ret;
2442 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2444 /*
2445 * Note that there's only one work item which does gpu resets, so we
2446 * need not worry about concurrent gpu resets potentially incrementing
2447 * error->reset_counter twice. We only need to take care of another
2448 * racing irq/hangcheck declaring the gpu dead for a second time. A
2449 * quick check for that is good enough: schedule_work ensures the
2450 * correct ordering between hang detection and this work item, and since
2451 * the reset in-progress bit is only ever set by code outside of this
2452 * work we don't need to worry about any other races.
2453 */
2454 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2455 DRM_DEBUG_DRIVER("resetting chip\n");
2456 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2457 reset_event);
2459 /*
2460 * In most cases it's guaranteed that we get here with an RPM
2461 * reference held, for example because there is a pending GPU
2462 * request that won't finish until the reset is done. This
2463 * isn't the case at least when we get here by doing a
2464 * simulated reset via debugfs, so get an RPM reference.
2465 */
2466 intel_runtime_pm_get(dev_priv);
2468 intel_prepare_reset(dev);
2470 /*
2471 * All state reset _must_ be completed before we update the
2472 * reset counter, for otherwise waiters might miss the reset
2473 * pending state and not properly drop locks, resulting in
2474 * deadlocks with the reset work.
2475 */
2476 ret = i915_reset(dev);
2478 intel_finish_reset(dev);
2480 intel_runtime_pm_put(dev_priv);
2482 if (ret == 0) {
2483 /*
2484 * After all the gem state is reset, increment the reset
2485 * counter and wake up everyone waiting for the reset to
2486 * complete.
2487 *
2488 * Since unlock operations are a one-sided barrier only,
2489 * we need to insert a barrier here to order any seqno
2490 * updates before
2491 * the counter increment.
2492 */
2493 smp_mb__before_atomic();
2494 atomic_inc(&dev_priv->gpu_error.reset_counter);
2496 kobject_uevent_env(&dev->primary->kdev->kobj,
2497 KOBJ_CHANGE, reset_done_event);
2498 } else {
2499 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2500 }
2502 /*
2503 * Note: The wake_up also serves as a memory barrier so that
2504 * waiters see the updated value of the reset counter atomic_t.
2505 */
2506 i915_error_wake_up(dev_priv, true);
2507 }
2508 }
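/*
 * Dump the asserted EIR error sources to the log and ack them. EIR
 * bits can stick; any source that refuses to clear is masked via EMR
 * below so that it cannot storm.
 */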
2510 static void i915_report_and_clear_eir(struct drm_device *dev)
2512 struct drm_i915_private *dev_priv = dev->dev_private;
2513 uint32_t instdone[I915_NUM_INSTDONE_REG];
2514 u32 eir = I915_READ(EIR);
2515 int pipe, i;
2517 if (!eir)
2518 return;
2520 pr_err("render error detected, EIR: 0x%08x\n", eir);
2522 i915_get_extra_instdone(dev, instdone);
2524 if (IS_G4X(dev)) {
2525 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2526 u32 ipeir = I915_READ(IPEIR_I965);
2528 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2529 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2530 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2531 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2532 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2533 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2534 I915_WRITE(IPEIR_I965, ipeir);
2535 POSTING_READ(IPEIR_I965);
2537 if (eir & GM45_ERROR_PAGE_TABLE) {
2538 u32 pgtbl_err = I915_READ(PGTBL_ER);
2539 pr_err("page table error\n");
2540 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2541 I915_WRITE(PGTBL_ER, pgtbl_err);
2542 POSTING_READ(PGTBL_ER);
2543 }
2544 }
2546 if (!IS_GEN2(dev)) {
2547 if (eir & I915_ERROR_PAGE_TABLE) {
2548 u32 pgtbl_err = I915_READ(PGTBL_ER);
2549 pr_err("page table error\n");
2550 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2551 I915_WRITE(PGTBL_ER, pgtbl_err);
2552 POSTING_READ(PGTBL_ER);
2556 if (eir & I915_ERROR_MEMORY_REFRESH) {
2557 pr_err("memory refresh error:\n");
2558 for_each_pipe(dev_priv, pipe)
2559 pr_err("pipe %c stat: 0x%08x\n",
2560 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2561 /* pipestat has already been acked */
2563 if (eir & I915_ERROR_INSTRUCTION) {
2564 pr_err("instruction error\n");
2565 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2566 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2567 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2568 if (INTEL_INFO(dev)->gen < 4) {
2569 u32 ipeir = I915_READ(IPEIR);
2571 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2572 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2573 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2574 I915_WRITE(IPEIR, ipeir);
2575 POSTING_READ(IPEIR);
2576 } else {
2577 u32 ipeir = I915_READ(IPEIR_I965);
2579 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2580 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2581 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2582 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2583 I915_WRITE(IPEIR_I965, ipeir);
2584 POSTING_READ(IPEIR_I965);
2585 }
2586 }
2588 I915_WRITE(EIR, eir);
2589 POSTING_READ(EIR);
2590 eir = I915_READ(EIR);
2591 if (eir) {
2592 /*
2593 * some errors might have become stuck,
2594 * mask them.
2595 */
2596 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2597 I915_WRITE(EMR, I915_READ(EMR) | eir);
2598 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2602 /**
2603 * i915_handle_error - handle an error interrupt
2604 * @dev: drm device
2605 *
2606 * Do some basic checking of register state at error interrupt time and
2607 * dump it to the syslog. Also call i915_capture_error_state() to make
2608 * sure we get a record and make it available in debugfs. Fire a uevent
2609 * so userspace knows something bad happened (should trigger collection
2610 * of a ring dump etc.).
2611 */
2612 void i915_handle_error(struct drm_device *dev, bool wedged,
2613 const char *fmt, ...)
2615 struct drm_i915_private *dev_priv = dev->dev_private;
2616 va_list args;
2617 char error_msg[80];
2619 va_start(args, fmt);
2620 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2621 va_end(args);
2623 i915_capture_error_state(dev, wedged, error_msg);
2624 i915_report_and_clear_eir(dev);
2626 if (wedged) {
2627 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2628 &dev_priv->gpu_error.reset_counter);
2630 /*
2631 * Wakeup waiting processes so that the reset work function
2632 * i915_error_work_func doesn't deadlock trying to grab various
2633 * locks. By bumping the reset counter first, the woken
2634 * processes will see a reset in progress and back off,
2635 * releasing their locks and then wait for the reset completion.
2636 * We must do this for _all_ gpu waiters that might hold locks
2637 * that the reset work needs to acquire.
2639 * Note: The wake_up serves as the required memory barrier to
2640 * ensure that the waiters see the updated value of the reset
2641 * counter.
2642 */
2643 i915_error_wake_up(dev_priv, false);
2644 }
2646 /*
2647 * Our reset work can grab modeset locks (since it needs to reset the
2648 * state of outstanding pageflips). Hence it must not be run on our own
2649 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2650 * code will deadlock.
2651 */
2652 schedule_work(&dev_priv->gpu_error.work);
2655 /* Called from drm generic code, passed 'crtc' which
2656 * we use as a pipe index
2657 */
2658 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2660 struct drm_i915_private *dev_priv = dev->dev_private;
2661 unsigned long irqflags;
2663 if (!i915_pipe_enabled(dev, pipe))
2664 return -EINVAL;
2666 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2667 if (INTEL_INFO(dev)->gen >= 4)
2668 i915_enable_pipestat(dev_priv, pipe,
2669 PIPE_START_VBLANK_INTERRUPT_STATUS);
2670 else
2671 i915_enable_pipestat(dev_priv, pipe,
2672 PIPE_VBLANK_INTERRUPT_STATUS);
2673 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2675 return 0;
2676 }
2678 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2680 struct drm_i915_private *dev_priv = dev->dev_private;
2681 unsigned long irqflags;
2682 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2683 DE_PIPE_VBLANK(pipe);
2685 if (!i915_pipe_enabled(dev, pipe))
2686 return -EINVAL;
2688 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2689 ironlake_enable_display_irq(dev_priv, bit);
2690 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2692 return 0;
2693 }
2695 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2697 struct drm_i915_private *dev_priv = dev->dev_private;
2698 unsigned long irqflags;
2700 if (!i915_pipe_enabled(dev, pipe))
2701 return -EINVAL;
2703 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2704 i915_enable_pipestat(dev_priv, pipe,
2705 PIPE_START_VBLANK_INTERRUPT_STATUS);
2706 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2708 return 0;
2709 }
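/*
 * GEN8+ has no pipestat; the vblank interrupt is instead unmasked and
 * masked directly in the per-pipe DE IMR register, with the cached
 * de_irq_mask[] kept in sync under irq_lock.
 */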
2711 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2713 struct drm_i915_private *dev_priv = dev->dev_private;
2714 unsigned long irqflags;
2716 if (!i915_pipe_enabled(dev, pipe))
2717 return -EINVAL;
2719 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2720 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2721 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2722 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2723 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2724 return 0;
2725 }
2727 /* Called from drm generic code, passed 'crtc' which
2728 * we use as a pipe index
2729 */
2730 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2732 struct drm_i915_private *dev_priv = dev->dev_private;
2733 unsigned long irqflags;
2735 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2736 i915_disable_pipestat(dev_priv, pipe,
2737 PIPE_VBLANK_INTERRUPT_STATUS |
2738 PIPE_START_VBLANK_INTERRUPT_STATUS);
2739 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2742 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2744 struct drm_i915_private *dev_priv = dev->dev_private;
2745 unsigned long irqflags;
2746 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2747 DE_PIPE_VBLANK(pipe);
2749 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2750 ironlake_disable_display_irq(dev_priv, bit);
2751 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2754 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2756 struct drm_i915_private *dev_priv = dev->dev_private;
2757 unsigned long irqflags;
2759 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2760 i915_disable_pipestat(dev_priv, pipe,
2761 PIPE_START_VBLANK_INTERRUPT_STATUS);
2762 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2765 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2767 struct drm_i915_private *dev_priv = dev->dev_private;
2768 unsigned long irqflags;
2770 if (!i915_pipe_enabled(dev, pipe))
2771 return;
2773 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2774 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2775 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2776 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2777 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2780 static struct drm_i915_gem_request *
2781 ring_last_request(struct intel_engine_cs *ring)
2783 return list_entry(ring->request_list.prev,
2784 struct drm_i915_gem_request, list);
2787 static bool
2788 ring_idle(struct intel_engine_cs *ring)
2790 return (list_empty(&ring->request_list) ||
2791 i915_gem_request_completed(ring_last_request(ring), false));
2794 static bool
2795 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2797 if (INTEL_INFO(dev)->gen >= 8) {
2798 return (ipehr >> 23) == 0x1c;
2799 } else {
2800 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2801 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2802 MI_SEMAPHORE_REGISTER);
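/*
 * Map a semaphore wait back to the engine expected to signal it: on
 * gen8+ by matching the wait offset against each ring's signal ggtt
 * offset, on gen6/7 by matching the MI_SEMAPHORE_SYNC field.
 */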
2806 static struct intel_engine_cs *
2807 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2809 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2810 struct intel_engine_cs *signaller;
2811 int i;
2813 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2814 for_each_ring(signaller, dev_priv, i) {
2815 if (ring == signaller)
2816 continue;
2818 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2819 return signaller;
2820 }
2821 } else {
2822 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2824 for_each_ring(signaller, dev_priv, i) {
2825 if (ring == signaller)
2826 continue;
2828 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2829 return signaller;
2833 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2834 ring->id, ipehr, offset);
2836 return NULL;
2837 }
2839 static struct intel_engine_cs *
2840 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2842 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2843 u32 cmd, ipehr, head;
2844 u64 offset = 0;
2845 int i, backwards;
2847 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2848 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2849 return NULL;
2851 /*
2852 * HEAD is likely pointing to the dword after the actual command,
2853 * so scan backwards until we find the MBOX. But limit it to just 3
2854 * or 4 dwords depending on the semaphore wait command size.
2855 * Note that we don't care about ACTHD here since that might
2856 * point at a batch, and semaphores are always emitted into the
2857 * ringbuffer itself.
2858 */
2859 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2860 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2862 for (i = backwards; i; --i) {
2863 /*
2864 * Be paranoid and presume the hw has gone off into the wild -
2865 * our ring is smaller than what the hardware (and hence
2866 * HEAD_ADDR) allows. Also handles wrap-around.
2867 */
2868 head &= ring->buffer->size - 1;
2870 /* This here seems to blow up */
2871 cmd = ioread32(ring->buffer->virtual_start + head);
2872 if (cmd == ipehr)
2873 break;
2875 head -= 4;
2876 }
2878 if (!i)
2879 return NULL;
2881 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2882 if (INTEL_INFO(ring->dev)->gen >= 8) {
2883 offset = ioread32(ring->buffer->virtual_start + head + 12);
2884 offset <<= 32;
2885 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2886 }
2887 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2890 static int semaphore_passed(struct intel_engine_cs *ring)
2892 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2893 struct intel_engine_cs *signaller;
2894 u32 seqno;
2896 ring->hangcheck.deadlock++;
2898 signaller = semaphore_waits_for(ring, &seqno);
2899 if (signaller == NULL)
2900 return -1;
2902 /* Prevent pathological recursion due to driver bugs */
2903 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2904 return -1;
2906 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2907 return 1;
2909 /* cursory check for an unkickable deadlock */
2910 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2911 semaphore_passed(signaller) < 0)
2912 return -1;
2914 return 0;
2917 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2919 struct intel_engine_cs *ring;
2922 for_each_ring(ring, dev_priv, i)
2923 ring->hangcheck.deadlock = 0;
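/*
 * Classify what a ring whose seqno is not advancing is doing: still
 * making ACTHD progress, looping in place, stuck on a kickable wait or
 * semaphore, or genuinely hung.
 */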
2926 static enum intel_ring_hangcheck_action
2927 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2929 struct drm_device *dev = ring->dev;
2930 struct drm_i915_private *dev_priv = dev->dev_private;
2931 u32 tmp;
2933 if (acthd != ring->hangcheck.acthd) {
2934 if (acthd > ring->hangcheck.max_acthd) {
2935 ring->hangcheck.max_acthd = acthd;
2936 return HANGCHECK_ACTIVE;
2939 return HANGCHECK_ACTIVE_LOOP;
2940 }
2942 if (IS_GEN2(dev))
2943 return HANGCHECK_HUNG;
2945 /* Is the chip hanging on a WAIT_FOR_EVENT?
2946 * If so we can simply poke the RB_WAIT bit
2947 * and break the hang. This should work on
2948 * all but the second generation chipsets.
2950 tmp = I915_READ_CTL(ring);
2951 if (tmp & RING_WAIT) {
2952 i915_handle_error(dev, false,
2953 "Kicking stuck wait on %s",
2954 ring->name);
2955 I915_WRITE_CTL(ring, tmp);
2956 return HANGCHECK_KICK;
2959 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2960 switch (semaphore_passed(ring)) {
2961 default:
2962 return HANGCHECK_HUNG;
2963 case 1:
2964 i915_handle_error(dev, false,
2965 "Kicking stuck semaphore on %s",
2966 ring->name);
2967 I915_WRITE_CTL(ring, tmp);
2968 return HANGCHECK_KICK;
2969 case 0:
2970 return HANGCHECK_WAIT;
2974 return HANGCHECK_HUNG;
2977 /*
2978 * This is called when the chip hasn't reported back with completed
2979 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2980 * if there is no progress, the hangcheck score for that ring is increased.
2981 * Further, acthd is inspected to see if the ring is stuck. On stuck case
2982 * we kick the ring. If we see no progress on three subsequent calls
2983 * we assume chip is wedged and try to fix it by resetting the chip.
2984 */
2985 static void i915_hangcheck_elapsed(struct work_struct *work)
2987 struct drm_i915_private *dev_priv =
2988 container_of(work, typeof(*dev_priv),
2989 gpu_error.hangcheck_work.work);
2990 struct drm_device *dev = dev_priv->dev;
2991 struct intel_engine_cs *ring;
2992 int i;
2993 int busy_count = 0, rings_hung = 0;
2994 bool stuck[I915_NUM_RINGS] = { 0 };
2995 #define BUSY 1
2996 #define KICK 5
2997 #define HUNG 20
2999 if (!i915.enable_hangcheck)
3000 return;
3002 for_each_ring(ring, dev_priv, i) {
3003 u64 acthd;
3004 u32 seqno;
3005 bool busy = true;
3007 semaphore_clear_deadlocks(dev_priv);
3009 seqno = ring->get_seqno(ring, false);
3010 acthd = intel_ring_get_active_head(ring);
3012 if (ring->hangcheck.seqno == seqno) {
3013 if (ring_idle(ring)) {
3014 ring->hangcheck.action = HANGCHECK_IDLE;
3016 if (waitqueue_active(&ring->irq_queue)) {
3017 /* Issue a wake-up to catch stuck h/w. */
3018 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3019 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3020 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3021 ring->name);
3022 else
3023 DRM_INFO("Fake missed irq on %s\n",
3024 ring->name);
3025 wake_up_all(&ring->irq_queue);
3026 }
3027 /* Safeguard against driver failure */
3028 ring->hangcheck.score += BUSY;
3029 } else
3030 busy = false;
3031 } else {
3032 /* We always increment the hangcheck score
3033 * if the ring is busy and still processing
3034 * the same request, so that no single request
3035 * can run indefinitely (such as a chain of
3036 * batches). The only time we do not increment
3037 * the hangcheck score on this ring is if this
3038 * ring is in a legitimate wait for another
3039 * ring. In that case the waiting ring is a
3040 * victim and we want to be sure we catch the
3041 * right culprit. Then every time we do kick
3042 * the ring, add a small increment to the
3043 * score so that we can catch a batch that is
3044 * being repeatedly kicked and so responsible
3045 * for stalling the machine.
3047 ring->hangcheck.action = ring_stuck(ring,
3048 acthd);
3050 switch (ring->hangcheck.action) {
3051 case HANGCHECK_IDLE:
3052 case HANGCHECK_WAIT:
3053 case HANGCHECK_ACTIVE:
3054 break;
3055 case HANGCHECK_ACTIVE_LOOP:
3056 ring->hangcheck.score += BUSY;
3057 break;
3058 case HANGCHECK_KICK:
3059 ring->hangcheck.score += KICK;
3060 break;
3061 case HANGCHECK_HUNG:
3062 ring->hangcheck.score += HUNG;
3063 stuck[i] = true;
3064 break;
3065 }
3066 }
3067 } else {
3068 ring->hangcheck.action = HANGCHECK_ACTIVE;
3070 /* Gradually reduce the count so that we catch DoS
3071 * attempts across multiple batches.
3072 */
3073 if (ring->hangcheck.score > 0)
3074 ring->hangcheck.score--;
3076 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3079 ring->hangcheck.seqno = seqno;
3080 ring->hangcheck.acthd = acthd;
3081 busy_count += busy;
3082 }
3084 for_each_ring(ring, dev_priv, i) {
3085 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3086 DRM_INFO("%s on %s\n",
3087 stuck[i] ? "stuck" : "no progress",
3088 ring->name);
3089 rings_hung++;
3090 }
3091 }
3093 if (rings_hung)
3094 return i915_handle_error(dev, true, "Ring hung");
3096 if (busy_count)
3097 /* Reset timer in case GPU hangs without another request
3098 * being added */
3099 i915_queue_hangcheck(dev);
3102 void i915_queue_hangcheck(struct drm_device *dev)
3104 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3106 if (!i915.enable_hangcheck)
3107 return;
3109 /* Don't continually defer the hangcheck so that it is always run at
3110 * least once after work has been scheduled on any ring. Otherwise,
3111 * we will ignore a hung ring if a second ring is kept busy.
3112 */
3114 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3115 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
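/*
 * Interrupt reset/postinstall helpers follow: the *_reset functions
 * mask everything and clear pending IIR bits, and the *_postinstall
 * functions then unmask only the sources each platform needs.
 */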
3118 static void ibx_irq_reset(struct drm_device *dev)
3120 struct drm_i915_private *dev_priv = dev->dev_private;
3122 if (HAS_PCH_NOP(dev))
3123 return;
3125 GEN5_IRQ_RESET(SDE);
3127 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3128 I915_WRITE(SERR_INT, 0xffffffff);
3129 }
3131 /*
3132 * SDEIER is also touched by the interrupt handler to work around missed PCH
3133 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3134 * instead we unconditionally enable all PCH interrupt sources here, but then
3135 * only unmask them as needed with SDEIMR.
3136 *
3137 * This function needs to be called before interrupts are enabled.
3138 */
3139 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3141 struct drm_i915_private *dev_priv = dev->dev_private;
3143 if (HAS_PCH_NOP(dev))
3144 return;
3146 WARN_ON(I915_READ(SDEIER) != 0);
3147 I915_WRITE(SDEIER, 0xffffffff);
3148 POSTING_READ(SDEIER);
3151 static void gen5_gt_irq_reset(struct drm_device *dev)
3153 struct drm_i915_private *dev_priv = dev->dev_private;
3155 GEN5_IRQ_RESET(GT);
3156 if (INTEL_INFO(dev)->gen >= 6)
3157 GEN5_IRQ_RESET(GEN6_PM);
3162 static void ironlake_irq_reset(struct drm_device *dev)
3164 struct drm_i915_private *dev_priv = dev->dev_private;
3166 I915_WRITE(HWSTAM, 0xffffffff);
3168 GEN5_IRQ_RESET(DE);
3169 if (IS_GEN7(dev))
3170 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3172 gen5_gt_irq_reset(dev);
3174 ibx_irq_reset(dev);
3177 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3179 enum pipe pipe;
3181 I915_WRITE(PORT_HOTPLUG_EN, 0);
3182 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3184 for_each_pipe(dev_priv, pipe)
3185 I915_WRITE(PIPESTAT(pipe), 0xffff);
3187 GEN5_IRQ_RESET(VLV_);
3190 static void valleyview_irq_preinstall(struct drm_device *dev)
3192 struct drm_i915_private *dev_priv = dev->dev_private;
3195 I915_WRITE(VLV_IMR, 0);
3196 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3197 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3198 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3200 gen5_gt_irq_reset(dev);
3202 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3204 vlv_display_irq_reset(dev_priv);
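/*
 * The gen8 GT interrupts live in four banked IMR/IER/IIR sets:
 * 0 = RCS/BCS, 1 = VCS1/VCS2, 2 = PM, 3 = VECS.
 */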
3207 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3209 GEN8_IRQ_RESET_NDX(GT, 0);
3210 GEN8_IRQ_RESET_NDX(GT, 1);
3211 GEN8_IRQ_RESET_NDX(GT, 2);
3212 GEN8_IRQ_RESET_NDX(GT, 3);
3215 static void gen8_irq_reset(struct drm_device *dev)
3217 struct drm_i915_private *dev_priv = dev->dev_private;
3218 int pipe;
3220 I915_WRITE(GEN8_MASTER_IRQ, 0);
3221 POSTING_READ(GEN8_MASTER_IRQ);
3223 gen8_gt_irq_reset(dev_priv);
3225 for_each_pipe(dev_priv, pipe)
3226 if (intel_display_power_is_enabled(dev_priv,
3227 POWER_DOMAIN_PIPE(pipe)))
3228 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3230 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3231 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3232 GEN5_IRQ_RESET(GEN8_PCU_);
3234 ibx_irq_reset(dev);
3237 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3239 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3241 spin_lock_irq(&dev_priv->irq_lock);
3242 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3243 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3244 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3245 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3246 spin_unlock_irq(&dev_priv->irq_lock);
3249 static void cherryview_irq_preinstall(struct drm_device *dev)
3251 struct drm_i915_private *dev_priv = dev->dev_private;
3253 I915_WRITE(GEN8_MASTER_IRQ, 0);
3254 POSTING_READ(GEN8_MASTER_IRQ);
3256 gen8_gt_irq_reset(dev_priv);
3258 GEN5_IRQ_RESET(GEN8_PCU_);
3260 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3262 vlv_display_irq_reset(dev_priv);
3265 static void ibx_hpd_irq_setup(struct drm_device *dev)
3267 struct drm_i915_private *dev_priv = dev->dev_private;
3268 struct intel_encoder *intel_encoder;
3269 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3271 if (HAS_PCH_IBX(dev)) {
3272 hotplug_irqs = SDE_HOTPLUG_MASK;
3273 for_each_intel_encoder(dev, intel_encoder)
3274 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3275 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3276 } else {
3277 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3278 for_each_intel_encoder(dev, intel_encoder)
3279 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3280 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3283 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3285 /*
3286 * Enable digital hotplug on the PCH, and configure the DP short pulse
3287 * duration to 2ms (which is the minimum in the Display Port spec)
3288 *
3289 * This register is the same on all known PCH chips.
3290 */
3291 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3292 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3293 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3294 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3295 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3296 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3299 static void ibx_irq_postinstall(struct drm_device *dev)
3301 struct drm_i915_private *dev_priv = dev->dev_private;
3302 u32 mask;
3304 if (HAS_PCH_NOP(dev))
3305 return;
3307 if (HAS_PCH_IBX(dev))
3308 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3309 else
3310 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3312 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3313 I915_WRITE(SDEIMR, ~mask);
3316 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3318 struct drm_i915_private *dev_priv = dev->dev_private;
3319 u32 pm_irqs, gt_irqs;
3321 pm_irqs = gt_irqs = 0;
3323 dev_priv->gt_irq_mask = ~0;
3324 if (HAS_L3_DPF(dev)) {
3325 /* L3 parity interrupt is always unmasked. */
3326 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3327 gt_irqs |= GT_PARITY_ERROR(dev);
3330 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3331 if (IS_GEN5(dev)) {
3332 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3333 ILK_BSD_USER_INTERRUPT;
3334 } else {
3335 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3336 }
3338 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3340 if (INTEL_INFO(dev)->gen >= 6) {
3341 /*
3342 * RPS interrupts will get enabled/disabled on demand when RPS
3343 * itself is enabled/disabled.
3344 */
3345 if (HAS_VEBOX(dev))
3346 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3348 dev_priv->pm_irq_mask = 0xffffffff;
3349 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3353 static int ironlake_irq_postinstall(struct drm_device *dev)
3355 struct drm_i915_private *dev_priv = dev->dev_private;
3356 u32 display_mask, extra_mask;
3358 if (INTEL_INFO(dev)->gen >= 7) {
3359 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3360 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3361 DE_PLANEB_FLIP_DONE_IVB |
3362 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3363 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3364 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3365 } else {
3366 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3367 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3368 DE_AUX_CHANNEL_A |
3369 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3370 DE_POISON);
3371 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3372 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3373 }
3375 dev_priv->irq_mask = ~display_mask;
3377 I915_WRITE(HWSTAM, 0xeffe);
3379 ibx_irq_pre_postinstall(dev);
3381 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3383 gen5_gt_irq_postinstall(dev);
3385 ibx_irq_postinstall(dev);
3387 if (IS_IRONLAKE_M(dev)) {
3388 /* Enable PCU event interrupts
3390 * spinlocking not required here for correctness since interrupt
3391 * setup is guaranteed to run in single-threaded context. But we
3392 * need it to make the assert_spin_locked happy. */
3393 spin_lock_irq(&dev_priv->irq_lock);
3394 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3395 spin_unlock_irq(&dev_priv->irq_lock);
3396 }
3398 return 0;
3399 }
3401 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3403 u32 pipestat_mask;
3404 u32 iir_mask;
3405 enum pipe pipe;
3407 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3408 PIPE_FIFO_UNDERRUN_STATUS;
3410 for_each_pipe(dev_priv, pipe)
3411 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3412 POSTING_READ(PIPESTAT(PIPE_A));
3414 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3415 PIPE_CRC_DONE_INTERRUPT_STATUS;
3417 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3418 for_each_pipe(dev_priv, pipe)
3419 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3421 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3422 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3423 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3424 if (IS_CHERRYVIEW(dev_priv))
3425 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3426 dev_priv->irq_mask &= ~iir_mask;
3428 I915_WRITE(VLV_IIR, iir_mask);
3429 I915_WRITE(VLV_IIR, iir_mask);
3430 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3431 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3432 POSTING_READ(VLV_IMR);
3435 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3437 u32 pipestat_mask;
3438 u32 iir_mask;
3439 enum pipe pipe;
3441 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3442 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3443 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3444 if (IS_CHERRYVIEW(dev_priv))
3445 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3447 dev_priv->irq_mask |= iir_mask;
3448 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3449 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3450 I915_WRITE(VLV_IIR, iir_mask);
3451 I915_WRITE(VLV_IIR, iir_mask);
3452 POSTING_READ(VLV_IIR);
3454 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3455 PIPE_CRC_DONE_INTERRUPT_STATUS;
3457 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3458 for_each_pipe(dev_priv, pipe)
3459 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3461 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3462 PIPE_FIFO_UNDERRUN_STATUS;
3464 for_each_pipe(dev_priv, pipe)
3465 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3466 POSTING_READ(PIPESTAT(PIPE_A));
3469 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3471 assert_spin_locked(&dev_priv->irq_lock);
3473 if (dev_priv->display_irqs_enabled)
3474 return;
3476 dev_priv->display_irqs_enabled = true;
3478 if (intel_irqs_enabled(dev_priv))
3479 valleyview_display_irqs_install(dev_priv);
3482 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3484 assert_spin_locked(&dev_priv->irq_lock);
3486 if (!dev_priv->display_irqs_enabled)
3487 return;
3489 dev_priv->display_irqs_enabled = false;
3491 if (intel_irqs_enabled(dev_priv))
3492 valleyview_display_irqs_uninstall(dev_priv);
3495 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3497 dev_priv->irq_mask = ~0;
3499 I915_WRITE(PORT_HOTPLUG_EN, 0);
3500 POSTING_READ(PORT_HOTPLUG_EN);
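/*
 * VLV_IIR is deliberately written twice below: the IIR can latch a
 * second pending event behind the one being cleared, so a single ack
 * may not be enough.
 */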
3502 I915_WRITE(VLV_IIR, 0xffffffff);
3503 I915_WRITE(VLV_IIR, 0xffffffff);
3504 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3505 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3506 POSTING_READ(VLV_IMR);
3508 /* Interrupt setup is already guaranteed to be single-threaded, this is
3509 * just to make the assert_spin_locked check happy. */
3510 spin_lock_irq(&dev_priv->irq_lock);
3511 if (dev_priv->display_irqs_enabled)
3512 valleyview_display_irqs_install(dev_priv);
3513 spin_unlock_irq(&dev_priv->irq_lock);
3516 static int valleyview_irq_postinstall(struct drm_device *dev)
3518 struct drm_i915_private *dev_priv = dev->dev_private;
3520 vlv_display_irq_postinstall(dev_priv);
3522 gen5_gt_irq_postinstall(dev);
3524 /* ack & enable invalid PTE error interrupts */
3525 #if 0 /* FIXME: add support to irq handler for checking these bits */
3526 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3527 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3528 #endif
3530 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3532 return 0;
3533 }
3535 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3537 /* These are interrupts we'll toggle with the ring mask register */
3538 uint32_t gt_interrupts[] = {
3539 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3540 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3541 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3542 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3543 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3544 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3545 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3546 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3547 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3548 0,
3549 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3550 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3553 dev_priv->pm_irq_mask = 0xffffffff;
3554 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3555 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3556 /*
3557 * RPS interrupts will get enabled/disabled on demand when RPS itself
3558 * is enabled/disabled.
3559 */
3560 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3561 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3564 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3566 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3567 uint32_t de_pipe_enables;
3568 int pipe;
3569 u32 aux_en = GEN8_AUX_CHANNEL_A;
3571 if (IS_GEN9(dev_priv)) {
3572 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3573 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3574 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3575 GEN9_AUX_CHANNEL_D;
3576 } else
3577 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3578 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3580 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3581 GEN8_PIPE_FIFO_UNDERRUN;
3583 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3584 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3585 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3587 for_each_pipe(dev_priv, pipe)
3588 if (intel_display_power_is_enabled(dev_priv,
3589 POWER_DOMAIN_PIPE(pipe)))
3590 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3591 dev_priv->de_irq_mask[pipe],
3592 de_pipe_enables);
3594 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3597 static int gen8_irq_postinstall(struct drm_device *dev)
3599 struct drm_i915_private *dev_priv = dev->dev_private;
3601 ibx_irq_pre_postinstall(dev);
3603 gen8_gt_irq_postinstall(dev_priv);
3604 gen8_de_irq_postinstall(dev_priv);
3606 ibx_irq_postinstall(dev);
3608 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3609 POSTING_READ(GEN8_MASTER_IRQ);
3611 return 0;
3612 }
3614 static int cherryview_irq_postinstall(struct drm_device *dev)
3616 struct drm_i915_private *dev_priv = dev->dev_private;
3618 vlv_display_irq_postinstall(dev_priv);
3620 gen8_gt_irq_postinstall(dev_priv);
3622 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3623 POSTING_READ(GEN8_MASTER_IRQ);
3625 return 0;
3626 }
3628 static void gen8_irq_uninstall(struct drm_device *dev)
3630 struct drm_i915_private *dev_priv = dev->dev_private;
3632 if (!dev_priv)
3633 return;
3635 gen8_irq_reset(dev);
3638 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3640 /* Interrupt setup is already guaranteed to be single-threaded, this is
3641 * just to make the assert_spin_locked check happy. */
3642 spin_lock_irq(&dev_priv->irq_lock);
3643 if (dev_priv->display_irqs_enabled)
3644 valleyview_display_irqs_uninstall(dev_priv);
3645 spin_unlock_irq(&dev_priv->irq_lock);
3647 vlv_display_irq_reset(dev_priv);
3649 dev_priv->irq_mask = ~0;
3652 static void valleyview_irq_uninstall(struct drm_device *dev)
3654 struct drm_i915_private *dev_priv = dev->dev_private;
3656 if (!dev_priv)
3657 return;
3659 I915_WRITE(VLV_MASTER_IER, 0);
3661 gen5_gt_irq_reset(dev);
3663 I915_WRITE(HWSTAM, 0xffffffff);
3665 vlv_display_irq_uninstall(dev_priv);
3668 static void cherryview_irq_uninstall(struct drm_device *dev)
3670 struct drm_i915_private *dev_priv = dev->dev_private;
3672 if (!dev_priv)
3673 return;
3675 I915_WRITE(GEN8_MASTER_IRQ, 0);
3676 POSTING_READ(GEN8_MASTER_IRQ);
3678 gen8_gt_irq_reset(dev_priv);
3680 GEN5_IRQ_RESET(GEN8_PCU_);
3682 vlv_display_irq_uninstall(dev_priv);
3685 static void ironlake_irq_uninstall(struct drm_device *dev)
3687 struct drm_i915_private *dev_priv = dev->dev_private;
3689 if (!dev_priv)
3690 return;
3692 ironlake_irq_reset(dev);
3695 static void i8xx_irq_preinstall(struct drm_device * dev)
3697 struct drm_i915_private *dev_priv = dev->dev_private;
3700 for_each_pipe(dev_priv, pipe)
3701 I915_WRITE(PIPESTAT(pipe), 0);
3702 I915_WRITE16(IMR, 0xffff);
3703 I915_WRITE16(IER, 0x0);
3704 POSTING_READ16(IER);
3707 static int i8xx_irq_postinstall(struct drm_device *dev)
3709 struct drm_i915_private *dev_priv = dev->dev_private;
3711 I915_WRITE16(EMR,
3712 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3714 /* Unmask the interrupts that we always want on. */
3715 dev_priv->irq_mask =
3716 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3717 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3718 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3719 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3720 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3721 I915_WRITE16(IMR, dev_priv->irq_mask);
3723 I915_WRITE16(IER,
3724 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3725 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3726 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3727 I915_USER_INTERRUPT);
3728 POSTING_READ16(IER);
3730 /* Interrupt setup is already guaranteed to be single-threaded, this is
3731 * just to make the assert_spin_locked check happy. */
3732 spin_lock_irq(&dev_priv->irq_lock);
3733 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3734 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3735 spin_unlock_irq(&dev_priv->irq_lock);
3737 return 0;
3738 }
3740 /*
3741 * Returns true when a page flip has completed.
3742 */
3743 static bool i8xx_handle_vblank(struct drm_device *dev,
3744 int plane, int pipe, u32 iir)
3746 struct drm_i915_private *dev_priv = dev->dev_private;
3747 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3749 if (!intel_pipe_handle_vblank(dev, pipe))
3750 return false;
3752 if ((iir & flip_pending) == 0)
3753 goto check_page_flip;
3755 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3756 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3757 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3758 * the flip is completed (no longer pending). Since this doesn't raise
3759 * an interrupt per se, we watch for the change at vblank.
3761 if (I915_READ16(ISR) & flip_pending)
3762 goto check_page_flip;
3764 intel_prepare_page_flip(dev, plane);
3765 intel_finish_page_flip(dev, pipe);
3769 intel_check_page_flip(dev, pipe);
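/*
 * Legacy gen2 interrupt handler: 16bit IIR, two pipes, and pipestat
 * events that must be acked before IIR since a still-set pipestat bit
 * will not re-assert IIR on its own.
 */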
3773 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3775 struct drm_device *dev = arg;
3776 struct drm_i915_private *dev_priv = dev->dev_private;
3777 u16 iir, new_iir;
3778 u32 pipe_stats[2];
3779 int pipe;
3780 u16 flip_mask =
3781 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3782 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3784 iir = I915_READ16(IIR);
3785 if (iir == 0)
3786 return IRQ_NONE;
3788 while (iir & ~flip_mask) {
3789 /* Can't rely on pipestat interrupt bit in iir as it might
3790 * have been cleared after the pipestat interrupt was received.
3791 * It doesn't set the bit in iir again, but it still produces
3792 * interrupts (for non-MSI).
3793 */
3794 spin_lock(&dev_priv->irq_lock);
3795 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3796 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3798 for_each_pipe(dev_priv, pipe) {
3799 int reg = PIPESTAT(pipe);
3800 pipe_stats[pipe] = I915_READ(reg);
3802 /*
3803 * Clear the PIPE*STAT regs before the IIR
3804 */
3805 if (pipe_stats[pipe] & 0x8000ffff)
3806 I915_WRITE(reg, pipe_stats[pipe]);
3808 spin_unlock(&dev_priv->irq_lock);
3810 I915_WRITE16(IIR, iir & ~flip_mask);
3811 new_iir = I915_READ16(IIR); /* Flush posted writes */
3813 if (iir & I915_USER_INTERRUPT)
3814 notify_ring(dev, &dev_priv->ring[RCS]);
3816 for_each_pipe(dev_priv, pipe) {
3817 int plane = pipe;
3818 if (HAS_FBC(dev))
3819 plane = !plane;
3821 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3822 i8xx_handle_vblank(dev, plane, pipe, iir))
3823 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3825 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3826 i9xx_pipe_crc_irq_handler(dev, pipe);
3828 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3829 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3830 pipe);
3831 }
3833 iir = new_iir;
3834 }
3836 return IRQ_HANDLED;
3837 }
3839 static void i8xx_irq_uninstall(struct drm_device * dev)
3841 struct drm_i915_private *dev_priv = dev->dev_private;
3844 for_each_pipe(dev_priv, pipe) {
3845 /* Clear enable bits; then clear status bits */
3846 I915_WRITE(PIPESTAT(pipe), 0);
3847 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3849 I915_WRITE16(IMR, 0xffff);
3850 I915_WRITE16(IER, 0x0);
3851 I915_WRITE16(IIR, I915_READ16(IIR));
3854 static void i915_irq_preinstall(struct drm_device * dev)
3856 struct drm_i915_private *dev_priv = dev->dev_private;
3859 if (I915_HAS_HOTPLUG(dev)) {
3860 I915_WRITE(PORT_HOTPLUG_EN, 0);
3861 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3864 I915_WRITE16(HWSTAM, 0xeffe);
3865 for_each_pipe(dev_priv, pipe)
3866 I915_WRITE(PIPESTAT(pipe), 0);
3867 I915_WRITE(IMR, 0xffffffff);
3868 I915_WRITE(IER, 0x0);
3872 static int i915_irq_postinstall(struct drm_device *dev)
3874 struct drm_i915_private *dev_priv = dev->dev_private;
3875 u32 enable_mask;
3877 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3879 /* Unmask the interrupts that we always want on. */
3880 dev_priv->irq_mask =
3881 ~(I915_ASLE_INTERRUPT |
3882 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3883 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3884 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3885 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3886 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3888 enable_mask =
3889 I915_ASLE_INTERRUPT |
3890 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3891 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3892 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3893 I915_USER_INTERRUPT;
3895 if (I915_HAS_HOTPLUG(dev)) {
3896 I915_WRITE(PORT_HOTPLUG_EN, 0);
3897 POSTING_READ(PORT_HOTPLUG_EN);
3899 /* Enable in IER... */
3900 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3901 /* and unmask in IMR */
3902 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3905 I915_WRITE(IMR, dev_priv->irq_mask);
3906 I915_WRITE(IER, enable_mask);
3909 i915_enable_asle_pipestat(dev);
3911 /* Interrupt setup is already guaranteed to be single-threaded, this is
3912 * just to make the assert_spin_locked check happy. */
3913 spin_lock_irq(&dev_priv->irq_lock);
3914 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3915 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3916 spin_unlock_irq(&dev_priv->irq_lock);
3918 return 0;
3919 }
3921 /*
3922 * Returns true when a page flip has completed.
3923 */
3924 static bool i915_handle_vblank(struct drm_device *dev,
3925 int plane, int pipe, u32 iir)
3927 struct drm_i915_private *dev_priv = dev->dev_private;
3928 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3930 if (!intel_pipe_handle_vblank(dev, pipe))
3931 return false;
3933 if ((iir & flip_pending) == 0)
3934 goto check_page_flip;
3936 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3937 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3938 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3939 * the flip is completed (no longer pending). Since this doesn't raise
3940 * an interrupt per se, we watch for the change at vblank.
3942 if (I915_READ(ISR) & flip_pending)
3943 goto check_page_flip;
3945 intel_prepare_page_flip(dev, plane);
3946 intel_finish_page_flip(dev, pipe);
3947 return true;
3949 check_page_flip:
3950 intel_check_page_flip(dev, pipe);
3951 return false;
3952 }
3954 static irqreturn_t i915_irq_handler(int irq, void *arg)
3956 struct drm_device *dev = arg;
3957 struct drm_i915_private *dev_priv = dev->dev_private;
3958 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3959 u32 flip_mask =
3960 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3961 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3962 int pipe, ret = IRQ_NONE;
3964 iir = I915_READ(IIR);
3965 do {
3966 bool irq_received = (iir & ~flip_mask) != 0;
3967 bool blc_event = false;
3969 /* Can't rely on pipestat interrupt bit in iir as it might
3970 * have been cleared after the pipestat interrupt was received.
3971 * It doesn't set the bit in iir again, but it still produces
3972 * interrupts (for non-MSI).
3973 */
3974 spin_lock(&dev_priv->irq_lock);
3975 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3976 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3978 for_each_pipe(dev_priv, pipe) {
3979 int reg = PIPESTAT(pipe);
3980 pipe_stats[pipe] = I915_READ(reg);
3982 /* Clear the PIPE*STAT regs before the IIR */
3983 if (pipe_stats[pipe] & 0x8000ffff) {
3984 I915_WRITE(reg, pipe_stats[pipe]);
3985 irq_received = true;
3988 spin_unlock(&dev_priv->irq_lock);
3990 if (!irq_received)
3991 break;
3993 /* Consume port. Then clear IIR or we'll miss events */
3994 if (I915_HAS_HOTPLUG(dev) &&
3995 iir & I915_DISPLAY_PORT_INTERRUPT)
3996 i9xx_hpd_irq_handler(dev);
3998 I915_WRITE(IIR, iir & ~flip_mask);
3999 new_iir = I915_READ(IIR); /* Flush posted writes */
4001 if (iir & I915_USER_INTERRUPT)
4002 notify_ring(dev, &dev_priv->ring[RCS]);
4004 for_each_pipe(dev_priv, pipe) {
4005 int plane = pipe;
4006 if (HAS_FBC(dev))
4007 plane = !plane;
4009 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4010 i915_handle_vblank(dev, plane, pipe, iir))
4011 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4013 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4014 blc_event = true;
4016 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4017 i9xx_pipe_crc_irq_handler(dev, pipe);
4019 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4020 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4021 pipe);
4022 }
4024 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4025 intel_opregion_asle_intr(dev);
4027 /* With MSI, interrupts are only generated when iir
4028 * transitions from zero to nonzero. If another bit got
4029 * set while we were handling the existing iir bits, then
4030 * we would never get another interrupt.
4032 * This is fine on non-MSI as well, as if we hit this path
4033 * we avoid exiting the interrupt handler only to generate
4034 * yet another one.
4035 *
4036 * Note that for MSI this could cause a stray interrupt report
4037 * if an interrupt landed in the time between writing IIR and
4038 * the posting read. This should be rare enough to never
4039 * trigger the 99% of 100,000 interrupts test for disabling
4040 * stray interrupts.
4041 */
4042 ret = IRQ_HANDLED;
4043 iir = new_iir;
4044 } while (iir & ~flip_mask);
4046 return ret;
4047 }
4049 static void i915_irq_uninstall(struct drm_device * dev)
4051 struct drm_i915_private *dev_priv = dev->dev_private;
4054 if (I915_HAS_HOTPLUG(dev)) {
4055 I915_WRITE(PORT_HOTPLUG_EN, 0);
4056 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4059 I915_WRITE16(HWSTAM, 0xffff);
4060 for_each_pipe(dev_priv, pipe) {
4061 /* Clear enable bits; then clear status bits */
4062 I915_WRITE(PIPESTAT(pipe), 0);
4063 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4065 I915_WRITE(IMR, 0xffffffff);
4066 I915_WRITE(IER, 0x0);
4068 I915_WRITE(IIR, I915_READ(IIR));
4071 static void i965_irq_preinstall(struct drm_device * dev)
4073 struct drm_i915_private *dev_priv = dev->dev_private;
4076 I915_WRITE(PORT_HOTPLUG_EN, 0);
4077 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4079 I915_WRITE(HWSTAM, 0xeffe);
4080 for_each_pipe(dev_priv, pipe)
4081 I915_WRITE(PIPESTAT(pipe), 0);
4082 I915_WRITE(IMR, 0xffffffff);
4083 I915_WRITE(IER, 0x0);
4087 static int i965_irq_postinstall(struct drm_device *dev)
4089 struct drm_i915_private *dev_priv = dev->dev_private;
4090 u32 enable_mask;
4091 u32 error_mask;
4093 /* Unmask the interrupts that we always want on. */
4094 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4095 I915_DISPLAY_PORT_INTERRUPT |
4096 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4097 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4098 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4099 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4100 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4102 enable_mask = ~dev_priv->irq_mask;
4103 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4104 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4105 enable_mask |= I915_USER_INTERRUPT;
4107 if (IS_G4X(dev))
4108 enable_mask |= I915_BSD_USER_INTERRUPT;
4110 /* Interrupt setup is already guaranteed to be single-threaded, this is
4111 * just to make the assert_spin_locked check happy. */
4112 spin_lock_irq(&dev_priv->irq_lock);
4113 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4114 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4115 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4116 spin_unlock_irq(&dev_priv->irq_lock);
4118 /*
4119 * Enable some error detection, note the instruction error mask
4120 * bit is reserved, so we leave it masked.
4121 */
4122 if (IS_G4X(dev)) {
4123 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4124 GM45_ERROR_MEM_PRIV |
4125 GM45_ERROR_CP_PRIV |
4126 I915_ERROR_MEMORY_REFRESH);
4127 } else {
4128 error_mask = ~(I915_ERROR_PAGE_TABLE |
4129 I915_ERROR_MEMORY_REFRESH);
4130 }
4131 I915_WRITE(EMR, error_mask);
4133 I915_WRITE(IMR, dev_priv->irq_mask);
4134 I915_WRITE(IER, enable_mask);
4137 I915_WRITE(PORT_HOTPLUG_EN, 0);
4138 POSTING_READ(PORT_HOTPLUG_EN);
4140 i915_enable_asle_pipestat(dev);
4142 return 0;
4143 }
4145 static void i915_hpd_irq_setup(struct drm_device *dev)
4147 struct drm_i915_private *dev_priv = dev->dev_private;
4148 struct intel_encoder *intel_encoder;
4149 u32 hotplug_en;
4151 assert_spin_locked(&dev_priv->irq_lock);
4153 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4154 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4155 /* Note HDMI and DP share hotplug bits */
4156 /* enable bits are the same for all generations */
4157 for_each_intel_encoder(dev, intel_encoder)
4158 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4159 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4160 /* Programming the CRT detection parameters tends
4161 to generate a spurious hotplug event about three
4162 seconds later. So just do it once.
4163 */
4164 if (IS_G4X(dev))
4165 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4166 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4167 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4169 /* Ignore TV since it's buggy */
4170 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4173 static irqreturn_t i965_irq_handler(int irq, void *arg)
4175 struct drm_device *dev = arg;
4176 struct drm_i915_private *dev_priv = dev->dev_private;
4177 u32 iir, new_iir;
4178 u32 pipe_stats[I915_MAX_PIPES];
4179 int ret = IRQ_NONE, pipe;
4180 u32 flip_mask =
4181 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4182 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4184 iir = I915_READ(IIR);
4186 for (;;) {
4187 bool irq_received = (iir & ~flip_mask) != 0;
4188 bool blc_event = false;
4190 /* Can't rely on pipestat interrupt bit in iir as it might
4191 * have been cleared after the pipestat interrupt was received.
4192 * It doesn't set the bit in iir again, but it still produces
4193 * interrupts (for non-MSI).
4195 spin_lock(&dev_priv->irq_lock);
4196 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4197 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4199 for_each_pipe(dev_priv, pipe) {
4200 int reg = PIPESTAT(pipe);
4201 pipe_stats[pipe] = I915_READ(reg);
4204 * Clear the PIPE*STAT regs before the IIR
4206 if (pipe_stats[pipe] & 0x8000ffff) {
4207 I915_WRITE(reg, pipe_stats[pipe]);
4208 irq_received = true;
4211 spin_unlock(&dev_priv->irq_lock);
4218 /* Consume port. Then clear IIR or we'll miss events */
4219 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4220 i9xx_hpd_irq_handler(dev);
4222 I915_WRITE(IIR, iir & ~flip_mask);
4223 new_iir = I915_READ(IIR); /* Flush posted writes */
4225 if (iir & I915_USER_INTERRUPT)
4226 notify_ring(dev, &dev_priv->ring[RCS]);
4227 if (iir & I915_BSD_USER_INTERRUPT)
4228 notify_ring(dev, &dev_priv->ring[VCS]);
4230 for_each_pipe(dev_priv, pipe) {
4231 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4232 i915_handle_vblank(dev, pipe, pipe, iir))
4233 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4235 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4238 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4239 i9xx_pipe_crc_irq_handler(dev, pipe);
4241 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4242 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4245 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4246 intel_opregion_asle_intr(dev);
4248 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4249 gmbus_irq_handler(dev);
4251 /* With MSI, interrupts are only generated when iir
4252 * transitions from zero to nonzero. If another bit got
4253 * set while we were handling the existing iir bits, then
4254 * we would never get another interrupt.
4256 * This is fine on non-MSI as well, as if we hit this path
4257 * we avoid exiting the interrupt handler only to generate
4260 * Note that for MSI this could cause a stray interrupt report
4261 * if an interrupt landed in the time between writing IIR and
4262 * the posting read. This should be rare enough to never
4263 * trigger the 99% of 100,000 interrupts test for disabling
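/*
 * Worked example (an editorial illustration of the comment above, not
 * original text): assume MSI, and that IIR holds only I915_USER_INTERRUPT
 * when the handler writes it back to clear it. If I915_ASLE_INTERRUPT
 * latches in that window, IIR goes from one nonzero value to another
 * without ever reading as zero, so the hardware never generates a fresh
 * zero->nonzero MSI edge. The new_iir re-read and the enclosing loop are
 * what pick up that otherwise-lost event.
 */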
static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
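/*
 * Editorial note, not from the original file: this delayed work is the
 * back half of HPD storm handling -- pins that storm detection marked
 * HPD_DISABLED (whose connectors were presumably demoted to polling) are
 * promoted back to HPD_ENABLED here, each connector's preferred polling
 * mode is restored, and hpd_irq_setup() reprograms the hardware enable
 * bits under irq_lock.
 */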
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself, though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}
	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}
	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
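/*
 * Editorial note on the vtable above (an assumption about the DRM core of
 * this vintage, not text from the original file): drm_irq_install() is
 * expected to invoke driver->irq_preinstall, then request_irq() with
 * driver->irq_handler, then driver->irq_postinstall -- which is why
 * intel_irq_install() below only needs to call drm_irq_install() once
 * these hooks are in place.
 */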
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug
 * and poll requests can run concurrently with other code, so locking rules
 * must be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
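/*
 * Editorial note, not from the original file: the loop above picks
 * event-driven detection (DRM_CONNECTOR_POLL_HPD) for any connector whose
 * encoder has a hotplug pin, keeps the connector's own polling preference
 * otherwise, and forces MST ports onto the HPD path as well, presumably
 * because their topology changes are delivered via hotplug interrupts.
 */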
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
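/*
 * Usage sketch (editorial, derived from the kerneldoc in this file rather
 * than from any single call site): driver load is expected to follow the
 * two-stage pattern
 *
 *	intel_irq_init(dev_priv);	// vtables, work items, timers
 *	intel_irq_install(dev_priv);	// hardware interrupt goes live
 *	intel_hpd_init(dev_priv);	// only now enable hotplug support
 *
 * keeping hotplug processing off until interrupts work is what lets the
 * probe code avoid racing against concurrent hotplug workers.
 */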
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}
/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
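/*
 * Editorial note, not from the original file: the runtime helpers above
 * reuse the driver vtable -- disabling routes through irq_uninstall(),
 * and enabling replays irq_preinstall() + irq_postinstall() -- so a
 * runtime-PM or system resume walks the same reset/enable sequence used
 * at load time, with pm.irqs_enabled toggled around it to keep the
 * ordering checks elsewhere in the driver honest.
 */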