/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

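/*
 * Example of how the token pasting in these helpers plays out: for the
 * display engine registers, GEN5_IRQ_RESET(DE) expands (roughly) to
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * i.e. mask everything, disable everything, then clear IIR twice since it
 * can queue up two events.
 */
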
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

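/*
 * Note: on gen8+ the PM interrupts live in GT interrupt group 2, so the
 * three helpers above select GEN8_GT_{IIR,IMR,IER}(2) there and the
 * dedicated GEN6_PM{IIR,IMR,IER} registers on earlier gens. The RPS code
 * below always goes through these helpers rather than naming the registers
 * directly.
 */
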
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

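/*
 * Purely illustrative sketch (not called anywhere, name is made up): the
 * IMR update rule shared by ilk_update_gt_irq() and snb_update_pm_irq()
 * above. Bits in @interrupt_mask are brought under software control; of
 * those, the ones also set in @enabled_irq_mask end up unmasked (0 in IMR)
 * and the rest masked (1). Bits outside @interrupt_mask keep their
 * previous value.
 */
static inline u32 example_update_imr(u32 old_imr, u32 interrupt_mask,
				     u32 enabled_irq_mask)
{
	u32 new_imr = old_imr;

	new_imr &= ~interrupt_mask;			/* touch only the requested bits */
	new_imr |= ~enabled_irq_mask & interrupt_mask;	/* mask the ones not enabled */

	return new_imr;
}
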
void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

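/*
 * Note on the register layout assumed above and in the helpers below:
 * PIPESTAT keeps the status bits in its low half and the corresponding
 * enable bits 16 positions higher, which is why a status mask is normally
 * turned into an enable mask with a plain << 16. The VLV sprite flip done
 * and PSR bits don't follow that pattern, hence the special casing above.
 */
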
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion handling
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

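/*
 * Worked example for the cooked value above: with vbl_start already
 * converted into a pixel count, a pixel counter at or past vbl_start means
 * the new frame has effectively begun while the hardware frame counter
 * (which only increments at the start of active) still reports the old
 * frame, so (pixel >= vbl_start) contributes the missing +1.
 */
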
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

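/*
 * Deferred work for the long/short HPD pulses that intel_hpd_irq_handler()
 * queued up for digital ports: it drains the long/short port masks under
 * irq_lock, calls each port's ->hpd_pulse() handler outside the lock, and
 * falls back to the legacy hotplug work for any port whose handler asks
 * for it.
 */
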
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

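/*
 * For instance (numbers purely illustrative): 80 ms elapsed with the
 * render well in C0 for 40 ms and media for 20 ms yields
 * max(40, 20) * 100 / 80 = 50% residency, which the caller below compares
 * against the RPS up/down thresholds.
 */
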
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
			(residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

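/*
 * An HPD "storm" is declared when more than HPD_STORM_THRESHOLD interrupts
 * arrive on one pin within HPD_STORM_DETECT_PERIOD msecs; the pin is then
 * marked disabled and the connector is switched over to polling (see
 * intel_hpd_irq_handler() below and i915_hotplug_work_func() above).
 */
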
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);
			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

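/*
 * Note that GMBUS and DP AUX completions share dev_priv->gmbus_wait_queue:
 * both handlers just need to kick whoever is sleeping on an i2c/aux
 * transaction, and the waiters re-check their own completion status.
 */
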
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

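/*
 * The CHV handler below uses the gen8-style master interrupt control:
 * GEN8_MASTER_IRQ is zeroed while the sources it reported are processed and
 * re-armed (with a posting read) once the IIRs have been cleared, so
 * anything that arrived in the meantime re-triggers the handler.
 */
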
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

2088 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2090 struct drm_i915_private *dev_priv = dev->dev_private;
2093 if (de_iir & DE_AUX_CHANNEL_A)
2094 dp_aux_irq_handler(dev);
2096 if (de_iir & DE_GSE)
2097 intel_opregion_asle_intr(dev);
2099 if (de_iir & DE_POISON)
2100 DRM_ERROR("Poison interrupt\n");
2102 for_each_pipe(dev_priv, pipe) {
2103 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2104 intel_pipe_handle_vblank(dev, pipe))
2105 intel_check_page_flip(dev, pipe);
2107 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2108 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2110 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2111 i9xx_pipe_crc_irq_handler(dev, pipe);
2113 /* plane/pipes map 1:1 on ilk+ */
2114 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2115 intel_prepare_page_flip(dev, pipe);
2116 intel_finish_page_flip_plane(dev, pipe);
2120 /* check event from PCH */
2121 if (de_iir & DE_PCH_EVENT) {
2122 u32 pch_iir = I915_READ(SDEIIR);
2124 if (HAS_PCH_CPT(dev))
2125 cpt_irq_handler(dev, pch_iir);
2127 ibx_irq_handler(dev, pch_iir);
2129 /* should clear PCH hotplug event before clearing the CPU irq */
2130 I915_WRITE(SDEIIR, pch_iir);
2133 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2134 ironlake_rps_change_irq_handler(dev);
2137 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2139 struct drm_i915_private *dev_priv = dev->dev_private;
2142 if (de_iir & DE_ERR_INT_IVB)
2143 ivb_err_int_handler(dev);
2145 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2146 dp_aux_irq_handler(dev);
2148 if (de_iir & DE_GSE_IVB)
2149 intel_opregion_asle_intr(dev);
2151 for_each_pipe(dev_priv, pipe) {
2152 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2153 intel_pipe_handle_vblank(dev, pipe))
2154 intel_check_page_flip(dev, pipe);
2156 /* plane/pipes map 1:1 on ilk+ */
2157 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2158 intel_prepare_page_flip(dev, pipe);
2159 intel_finish_page_flip_plane(dev, pipe);
2163 /* check event from PCH */
2164 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2165 u32 pch_iir = I915_READ(SDEIIR);
2167 cpt_irq_handler(dev, pch_iir);
2169 /* clear PCH hotplug event before clearing the CPU irq */
2170 I915_WRITE(SDEIIR, pch_iir);
2175 * To handle irqs with the minimum potential races with fresh interrupts, we:
2176 * 1 - Disable Master Interrupt Control.
2177 * 2 - Find the source(s) of the interrupt.
2178 * 3 - Clear the Interrupt Identity bits (IIR).
2179 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2180 * 5 - Re-enable Master Interrupt Control.
2182 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2184 struct drm_device *dev = arg;
2185 struct drm_i915_private *dev_priv = dev->dev_private;
2186 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2187 irqreturn_t ret = IRQ_NONE;
2189 /* We get interrupts on unclaimed registers, so check for this before we
2190 * do any I915_{READ,WRITE}. */
2191 intel_uncore_check_errors(dev);
2193 /* disable master interrupt before clearing iir */
2194 de_ier = I915_READ(DEIER);
2195 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2196 POSTING_READ(DEIER);
2198 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2199 * interrupts will be stored on its back queue, and then we'll be
2200 * able to process them after we restore SDEIER (as soon as we restore
2201 * it, we'll get an interrupt if SDEIIR still has something to process
2202 * due to its back queue). */
2203 if (!HAS_PCH_NOP(dev)) {
2204 sde_ier = I915_READ(SDEIER);
2205 I915_WRITE(SDEIER, 0);
2206 POSTING_READ(SDEIER);
2209 /* Find, clear, then process each source of interrupt */
2211 gt_iir = I915_READ(GTIIR);
2213 I915_WRITE(GTIIR, gt_iir);
2215 if (INTEL_INFO(dev)->gen >= 6)
2216 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2218 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2221 de_iir = I915_READ(DEIIR);
2223 I915_WRITE(DEIIR, de_iir);
2225 if (INTEL_INFO(dev)->gen >= 7)
2226 ivb_display_irq_handler(dev, de_iir);
2228 ilk_display_irq_handler(dev, de_iir);
2231 if (INTEL_INFO(dev)->gen >= 6) {
2232 u32 pm_iir = I915_READ(GEN6_PMIIR);
2234 I915_WRITE(GEN6_PMIIR, pm_iir);
2236 gen6_rps_irq_handler(dev_priv, pm_iir);
2240 I915_WRITE(DEIER, de_ier);
2241 POSTING_READ(DEIER);
2242 if (!HAS_PCH_NOP(dev)) {
2243 I915_WRITE(SDEIER, sde_ier);
2244 POSTING_READ(SDEIER);
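/*
 * Example (illustrative sketch, not driver code; handle() is a stand-in
 * name): the five-step recipe documented above ironlake_irq_handler()
 * condenses to
 *
 *	de_ier = I915_READ(DEIER);
 *	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);	(1) mute
 *	iir = I915_READ(DEIIR);					(2) find
 *	I915_WRITE(DEIIR, iir);					(3) clear
 *	if (iir)
 *		handle(iir);					(4) process
 *	I915_WRITE(DEIER, de_ier);				(5) unmute
 *
 * Clearing IIR before step 4 means an event arriving mid-handler
 * re-latches in IIR and re-raises the interrupt once the master control
 * bit is restored, so nothing is lost.
 */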
2250 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2252 struct drm_device *dev = arg;
2253 struct drm_i915_private *dev_priv = dev->dev_private;
2255 irqreturn_t ret = IRQ_NONE;
2258 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2261 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2264 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2265 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2269 I915_WRITE(GEN8_MASTER_IRQ, 0);
2270 POSTING_READ(GEN8_MASTER_IRQ);
2272 /* Find, clear, then process each source of interrupt */
2274 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2276 if (master_ctl & GEN8_DE_MISC_IRQ) {
2277 tmp = I915_READ(GEN8_DE_MISC_IIR);
2279 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2281 if (tmp & GEN8_DE_MISC_GSE)
2282 intel_opregion_asle_intr(dev);
2284 DRM_ERROR("Unexpected DE Misc interrupt\n");
2287 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2290 if (master_ctl & GEN8_DE_PORT_IRQ) {
2291 tmp = I915_READ(GEN8_DE_PORT_IIR);
2293 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2297 dp_aux_irq_handler(dev);
2299 DRM_ERROR("Unexpected DE Port interrupt\n");
2302 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2305 for_each_pipe(dev_priv, pipe) {
2306 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2308 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2311 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2314 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2316 if (pipe_iir & GEN8_PIPE_VBLANK &&
2317 intel_pipe_handle_vblank(dev, pipe))
2318 intel_check_page_flip(dev, pipe);
2321 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2323 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2326 intel_prepare_page_flip(dev, pipe);
2327 intel_finish_page_flip_plane(dev, pipe);
2330 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2331 hsw_pipe_crc_irq_handler(dev, pipe);
2333 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2334 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2339 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2341 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2344 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2346 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2348 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2351 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2353 * FIXME(BDW): Assume for now that the new interrupt handling
2354 * scheme also closed the SDE interrupt handling race we've seen
2355 * on older pch-split platforms. But this needs testing.
2357 u32 pch_iir = I915_READ(SDEIIR);
2359 I915_WRITE(SDEIIR, pch_iir);
2361 cpt_irq_handler(dev, pch_iir);
2363 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2367 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2368 POSTING_READ(GEN8_MASTER_IRQ);
2373 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2374 bool reset_completed)
2376 struct intel_engine_cs *ring;
2380 * Notify all waiters for GPU completion events that reset state has
2381 * been changed, and that they need to restart their wait after
2382 * checking for potential errors (and bail out to drop locks if there is
2383 * a gpu reset pending so that i915_error_work_func can acquire them).
2386 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2387 for_each_ring(ring, dev_priv, i)
2388 wake_up_all(&ring->irq_queue);
2390 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2391 wake_up_all(&dev_priv->pending_flip_queue);
2394 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2395 * reset state is cleared.
2397 if (reset_completed)
2398 wake_up_all(&dev_priv->gpu_error.reset_queue);
2402 * i915_error_work_func - do process context error handling work
2403 * @work: work struct
2405 * Fire an error uevent so userspace can see that a hang or error occurred.
2408 static void i915_error_work_func(struct work_struct *work)
2410 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2412 struct drm_i915_private *dev_priv =
2413 container_of(error, struct drm_i915_private, gpu_error);
2414 struct drm_device *dev = dev_priv->dev;
2415 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2416 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2417 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2420 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2423 * Note that there's only one work item which does gpu resets, so we
2424 * need not worry about concurrent gpu resets potentially incrementing
2425 * error->reset_counter twice. We only need to take care of another
2426 * racing irq/hangcheck declaring the gpu dead for a second time. A
2427 * quick check for that is good enough: schedule_work ensures the
2428 * correct ordering between hang detection and this work item, and since
2429 * the reset in-progress bit is only ever set by code outside of this
2430 * work we don't need to worry about any other races.
2432 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2433 DRM_DEBUG_DRIVER("resetting chip\n");
2434 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2438 * In most cases it's guaranteed that we get here with an RPM
2439 * reference held, for example because there is a pending GPU
2440 * request that won't finish until the reset is done. This
2441 * isn't the case at least when we get here by doing a
2442 * simulated reset via debugfs, so get an RPM reference.
2444 intel_runtime_pm_get(dev_priv);
2446 intel_prepare_reset(dev);
2449 * All state reset _must_ be completed before we update the
2450 * reset counter, for otherwise waiters might miss the reset
2451 * pending state and not properly drop locks, resulting in
2452 * deadlocks with the reset work.
2454 ret = i915_reset(dev);
2456 intel_finish_reset(dev);
2458 intel_runtime_pm_put(dev_priv);
2462 * After all the gem state is reset, increment the reset
2463 * counter and wake up everyone waiting for the reset to complete.
2466 * Since unlock operations are a one-sided barrier only,
2467 * we need to insert a barrier here to order any seqno
2469 * updates before the counter increment.
2471 smp_mb__before_atomic();
2472 atomic_inc(&dev_priv->gpu_error.reset_counter);
2474 kobject_uevent_env(&dev->primary->kdev->kobj,
2475 KOBJ_CHANGE, reset_done_event);
2477 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2481 * Note: The wake_up also serves as a memory barrier so that
2482 * waiters see the updated value of the reset counter atomic_t.
2484 i915_error_wake_up(dev_priv, true);
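/*
 * Life cycle of gpu_error.reset_counter as driven by the code above (a
 * sketch; it assumes the usual encoding from i915_drv.h in this era, with
 * I915_RESET_IN_PROGRESS_FLAG as the low bit and I915_WEDGED as the top bit):
 *
 *	atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, &counter);  hang seen
 *	i915_error_wake_up(dev_priv, false);	waiters back off
 *	i915_reset(dev);			actual reset
 *	atomic_inc(&counter);			odd -> even: in-progress cleared
 *	i915_error_wake_up(dev_priv, true);	waiters restart
 *
 * and on failure atomic_set_mask(I915_WEDGED, &counter) marks the GPU
 * terminally wedged instead.
 */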
2488 static void i915_report_and_clear_eir(struct drm_device *dev)
2490 struct drm_i915_private *dev_priv = dev->dev_private;
2491 uint32_t instdone[I915_NUM_INSTDONE_REG];
2492 u32 eir = I915_READ(EIR);
2498 pr_err("render error detected, EIR: 0x%08x\n", eir);
2500 i915_get_extra_instdone(dev, instdone);
2503 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2504 u32 ipeir = I915_READ(IPEIR_I965);
2506 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2507 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2508 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2509 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2510 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2511 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2512 I915_WRITE(IPEIR_I965, ipeir);
2513 POSTING_READ(IPEIR_I965);
2515 if (eir & GM45_ERROR_PAGE_TABLE) {
2516 u32 pgtbl_err = I915_READ(PGTBL_ER);
2517 pr_err("page table error\n");
2518 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2519 I915_WRITE(PGTBL_ER, pgtbl_err);
2520 POSTING_READ(PGTBL_ER);
2524 if (!IS_GEN2(dev)) {
2525 if (eir & I915_ERROR_PAGE_TABLE) {
2526 u32 pgtbl_err = I915_READ(PGTBL_ER);
2527 pr_err("page table error\n");
2528 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2529 I915_WRITE(PGTBL_ER, pgtbl_err);
2530 POSTING_READ(PGTBL_ER);
2534 if (eir & I915_ERROR_MEMORY_REFRESH) {
2535 pr_err("memory refresh error:\n");
2536 for_each_pipe(dev_priv, pipe)
2537 pr_err("pipe %c stat: 0x%08x\n",
2538 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2539 /* pipestat has already been acked */
2541 if (eir & I915_ERROR_INSTRUCTION) {
2542 pr_err("instruction error\n");
2543 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2544 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2545 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2546 if (INTEL_INFO(dev)->gen < 4) {
2547 u32 ipeir = I915_READ(IPEIR);
2549 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2550 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2551 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2552 I915_WRITE(IPEIR, ipeir);
2553 POSTING_READ(IPEIR);
2555 u32 ipeir = I915_READ(IPEIR_I965);
2557 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2558 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2559 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2560 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2561 I915_WRITE(IPEIR_I965, ipeir);
2562 POSTING_READ(IPEIR_I965);
2566 I915_WRITE(EIR, eir);
2568 eir = I915_READ(EIR);
2571 * some errors might have become stuck, so mask them.
2574 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2575 I915_WRITE(EMR, I915_READ(EMR) | eir);
2576 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2581 * i915_handle_error - handle an error interrupt
2584 * Do some basic checking of register state at error interrupt time and
2585 * dump it to the syslog. Also call i915_capture_error_state() to make
2586 * sure we get a record and make it available in debugfs. Fire a uevent
2587 * so userspace knows something bad happened (should trigger collection
2588 * of a ring dump etc.).
2590 void i915_handle_error(struct drm_device *dev, bool wedged,
2591 const char *fmt, ...)
2593 struct drm_i915_private *dev_priv = dev->dev_private;
2597 va_start(args, fmt);
2598 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2601 i915_capture_error_state(dev, wedged, error_msg);
2602 i915_report_and_clear_eir(dev);
2605 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2606 &dev_priv->gpu_error.reset_counter);
2609 * Wakeup waiting processes so that the reset work function
2610 * i915_error_work_func doesn't deadlock trying to grab various
2611 * locks. By bumping the reset counter first, the woken
2612 * processes will see a reset in progress and back off,
2613 * releasing their locks and then wait for the reset completion.
2614 * We must do this for _all_ gpu waiters that might hold locks
2615 * that the reset work needs to acquire.
2617 * Note: The wake_up serves as the required memory barrier to
2618 * ensure that the waiters see the updated value of the reset counter.
2621 i915_error_wake_up(dev_priv, false);
2625 * Our reset work can grab modeset locks (since it needs to reset the
2626 * state of outstanding pageflips). Hence it must not be run on our own
2627 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2628 * code will deadlock.
2630 schedule_work(&dev_priv->gpu_error.work);
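/*
 * Example calls (sketch): the hangcheck and ring-kick paths funnel through
 * this one entry point, e.g.
 *
 *	i915_handle_error(dev, false, "Kicking stuck wait on %s", ring->name);
 *	i915_handle_error(dev, true, "Ring hung");
 *
 * where wedged=true additionally arms the reset machinery above.
 */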
2633 /* Called from drm generic code, passed 'crtc' which
2634 * we use as a pipe index
2636 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2638 struct drm_i915_private *dev_priv = dev->dev_private;
2639 unsigned long irqflags;
2641 if (!i915_pipe_enabled(dev, pipe))
2644 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2645 if (INTEL_INFO(dev)->gen >= 4)
2646 i915_enable_pipestat(dev_priv, pipe,
2647 PIPE_START_VBLANK_INTERRUPT_STATUS);
2649 i915_enable_pipestat(dev_priv, pipe,
2650 PIPE_VBLANK_INTERRUPT_STATUS);
2651 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2656 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2658 struct drm_i915_private *dev_priv = dev->dev_private;
2659 unsigned long irqflags;
2660 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2661 DE_PIPE_VBLANK(pipe);
2663 if (!i915_pipe_enabled(dev, pipe))
2666 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2667 ironlake_enable_display_irq(dev_priv, bit);
2668 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2673 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 unsigned long irqflags;
2678 if (!i915_pipe_enabled(dev, pipe))
2681 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2682 i915_enable_pipestat(dev_priv, pipe,
2683 PIPE_START_VBLANK_INTERRUPT_STATUS);
2684 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2689 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2691 struct drm_i915_private *dev_priv = dev->dev_private;
2692 unsigned long irqflags;
2694 if (!i915_pipe_enabled(dev, pipe))
2697 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2698 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2699 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2700 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2701 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2705 /* Called from drm generic code, passed 'crtc' which
2706 * we use as a pipe index
2708 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2710 struct drm_i915_private *dev_priv = dev->dev_private;
2711 unsigned long irqflags;
2713 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2714 i915_disable_pipestat(dev_priv, pipe,
2715 PIPE_VBLANK_INTERRUPT_STATUS |
2716 PIPE_START_VBLANK_INTERRUPT_STATUS);
2717 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2720 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2722 struct drm_i915_private *dev_priv = dev->dev_private;
2723 unsigned long irqflags;
2724 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2725 DE_PIPE_VBLANK(pipe);
2727 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2728 ironlake_disable_display_irq(dev_priv, bit);
2729 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2732 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2734 struct drm_i915_private *dev_priv = dev->dev_private;
2735 unsigned long irqflags;
2737 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2738 i915_disable_pipestat(dev_priv, pipe,
2739 PIPE_START_VBLANK_INTERRUPT_STATUS);
2740 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2743 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2745 struct drm_i915_private *dev_priv = dev->dev_private;
2746 unsigned long irqflags;
2748 if (!i915_pipe_enabled(dev, pipe))
2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2752 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2753 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2754 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2755 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2758 static struct drm_i915_gem_request *
2759 ring_last_request(struct intel_engine_cs *ring)
2761 return list_entry(ring->request_list.prev,
2762 struct drm_i915_gem_request, list);
2766 ring_idle(struct intel_engine_cs *ring)
2768 return (list_empty(&ring->request_list) ||
2769 i915_gem_request_completed(ring_last_request(ring), false));
2773 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2775 if (INTEL_INFO(dev)->gen >= 8) {
2776 return (ipehr >> 23) == 0x1c;
2778 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2779 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2780 MI_SEMAPHORE_REGISTER);
2784 static struct intel_engine_cs *
2785 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2787 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2788 struct intel_engine_cs *signaller;
2791 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2792 for_each_ring(signaller, dev_priv, i) {
2793 if (ring == signaller)
2796 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2800 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2802 for_each_ring(signaller, dev_priv, i) {
2803 if (ring == signaller)
2806 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2811 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2812 ring->id, ipehr, offset);
2817 static struct intel_engine_cs *
2818 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2820 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2821 u32 cmd, ipehr, head;
2825 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2826 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2830 * HEAD is likely pointing to the dword after the actual command,
2831 * so scan backwards until we find the MBOX. But limit it to just 3
2832 * or 4 dwords depending on the semaphore wait command size.
2833 * Note that we don't care about ACTHD here since that might
2834 * point at a batch, and semaphores are always emitted into the
2835 * ringbuffer itself.
2837 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2838 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2840 for (i = backwards; i; --i) {
2842 * Be paranoid and presume the hw has gone off into the wild -
2843 * our ring is smaller than what the hardware (and hence
2844 * HEAD_ADDR) allows. Also handles wrap-around.
2846 head &= ring->buffer->size - 1;
2848 /* This here seems to blow up */
2849 cmd = ioread32(ring->buffer->virtual_start + head);
2859 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2860 if (INTEL_INFO(ring->dev)->gen >= 8) {
2861 offset = ioread32(ring->buffer->virtual_start + head + 12);
2863 offset = ioread32(ring->buffer->virtual_start + head + 8);
2865 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
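/*
 * Ring layout assumed by the backwards scan above (a sketch; offsets are
 * bytes from the located wait command):
 *
 *	+0:  MI_SEMAPHORE_MBOX wait (gen6/7) or the 0x1c opcode (gen8+)
 *	+4:  one less than the awaited seqno (hence the "+ 1" on read-back)
 *	+8:  gen8+: semaphore GGTT offset, low dword
 *	+12: gen8+: semaphore GGTT offset, high dword
 *
 * On gen6/7 the signaller is identified from the sync bits of the command
 * itself (still visible in IPEHR) rather than from an offset.
 */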
2868 static int semaphore_passed(struct intel_engine_cs *ring)
2870 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2871 struct intel_engine_cs *signaller;
2874 ring->hangcheck.deadlock++;
2876 signaller = semaphore_waits_for(ring, &seqno);
2877 if (signaller == NULL)
2880 /* Prevent pathological recursion due to driver bugs */
2881 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2884 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2887 /* cursory check for an unkickable deadlock */
2888 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2889 semaphore_passed(signaller) < 0)
2895 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2897 struct intel_engine_cs *ring;
2900 for_each_ring(ring, dev_priv, i)
2901 ring->hangcheck.deadlock = 0;
2904 static enum intel_ring_hangcheck_action
2905 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2907 struct drm_device *dev = ring->dev;
2908 struct drm_i915_private *dev_priv = dev->dev_private;
2911 if (acthd != ring->hangcheck.acthd) {
2912 if (acthd > ring->hangcheck.max_acthd) {
2913 ring->hangcheck.max_acthd = acthd;
2914 return HANGCHECK_ACTIVE;
2917 return HANGCHECK_ACTIVE_LOOP;
2921 return HANGCHECK_HUNG;
2923 /* Is the chip hanging on a WAIT_FOR_EVENT?
2924 * If so we can simply poke the RB_WAIT bit
2925 * and break the hang. This should work on
2926 * all but the second generation chipsets.
2928 tmp = I915_READ_CTL(ring);
2929 if (tmp & RING_WAIT) {
2930 i915_handle_error(dev, false,
2931 "Kicking stuck wait on %s",
2933 I915_WRITE_CTL(ring, tmp);
2934 return HANGCHECK_KICK;
2937 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2938 switch (semaphore_passed(ring)) {
2940 return HANGCHECK_HUNG;
2942 i915_handle_error(dev, false,
2943 "Kicking stuck semaphore on %s",
2945 I915_WRITE_CTL(ring, tmp);
2946 return HANGCHECK_KICK;
2948 return HANGCHECK_WAIT;
2952 return HANGCHECK_HUNG;
2956 * This is called when the chip hasn't reported back with completed
2957 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2958 * if there is no progress, the hangcheck score for that ring is increased.
2959 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2960 * we kick the ring. If we see no progress on three subsequent calls
2961 * we assume the chip is wedged and try to fix it by resetting the chip.
2963 static void i915_hangcheck_elapsed(unsigned long data)
2965 struct drm_device *dev = (struct drm_device *)data;
2966 struct drm_i915_private *dev_priv = dev->dev_private;
2967 struct intel_engine_cs *ring;
2969 int busy_count = 0, rings_hung = 0;
2970 bool stuck[I915_NUM_RINGS] = { 0 };
2975 if (!i915.enable_hangcheck)
2978 for_each_ring(ring, dev_priv, i) {
2983 semaphore_clear_deadlocks(dev_priv);
2985 seqno = ring->get_seqno(ring, false);
2986 acthd = intel_ring_get_active_head(ring);
2988 if (ring->hangcheck.seqno == seqno) {
2989 if (ring_idle(ring)) {
2990 ring->hangcheck.action = HANGCHECK_IDLE;
2992 if (waitqueue_active(&ring->irq_queue)) {
2993 /* Issue a wake-up to catch stuck h/w. */
2994 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2995 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2996 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2999 DRM_INFO("Fake missed irq on %s\n",
3001 wake_up_all(&ring->irq_queue);
3003 /* Safeguard against driver failure */
3004 ring->hangcheck.score += BUSY;
3008 /* We always increment the hangcheck score
3009 * if the ring is busy and still processing
3010 * the same request, so that no single request
3011 * can run indefinitely (such as a chain of
3012 * batches). The only time we do not increment
3013 * the hangcheck score on this ring is when it
3014 * is in a legitimate wait for another
3015 * ring. In that case the waiting ring is a
3016 * victim and we want to be sure we catch the
3017 * right culprit. Then every time we do kick
3018 * the ring, add a small increment to the
3019 * score so that we can catch a batch that is
3020 * being repeatedly kicked and so responsible
3021 * for stalling the machine.
3023 ring->hangcheck.action = ring_stuck(ring,
3026 switch (ring->hangcheck.action) {
3027 case HANGCHECK_IDLE:
3028 case HANGCHECK_WAIT:
3029 case HANGCHECK_ACTIVE:
3031 case HANGCHECK_ACTIVE_LOOP:
3032 ring->hangcheck.score += BUSY;
3034 case HANGCHECK_KICK:
3035 ring->hangcheck.score += KICK;
3037 case HANGCHECK_HUNG:
3038 ring->hangcheck.score += HUNG;
3044 ring->hangcheck.action = HANGCHECK_ACTIVE;
3046 /* Gradually reduce the count so that we catch DoS
3047 * attempts across multiple batches.
3049 if (ring->hangcheck.score > 0)
3050 ring->hangcheck.score--;
3052 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3055 ring->hangcheck.seqno = seqno;
3056 ring->hangcheck.acthd = acthd;
3060 for_each_ring(ring, dev_priv, i) {
3061 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3062 DRM_INFO("%s on %s\n",
3063 stuck[i] ? "stuck" : "no progress",
3070 return i915_handle_error(dev, true, "Ring hung");
3073 /* Reset the timer in case the chip hangs without another request being added */
3075 i915_queue_hangcheck(dev);
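/*
 * Scoring sketch for the function above: per hangcheck period, a busy ring
 * whose seqno has not advanced accumulates
 *
 *	score += BUSY;	still active, benefit of the doubt
 *	score += KICK;	we had to kick a stuck wait or semaphore
 *	score += HUNG;	no forward progress at all
 *
 * while a progressing ring leaks score away again (score--). Once a ring
 * crosses HANGCHECK_SCORE_RING_HUNG, i915_handle_error(dev, true, ...)
 * declares it hung and kicks off the reset path. The weights are local
 * #defines in this file; their exact values are an implementation detail.
 */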
3078 void i915_queue_hangcheck(struct drm_device *dev)
3080 struct drm_i915_private *dev_priv = dev->dev_private;
3081 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
3083 if (!i915.enable_hangcheck)
3086 /* Don't continually defer the hangcheck, but make sure it is active */
3087 if (timer_pending(timer))
3090 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3093 static void ibx_irq_reset(struct drm_device *dev)
3095 struct drm_i915_private *dev_priv = dev->dev_private;
3097 if (HAS_PCH_NOP(dev))
3100 GEN5_IRQ_RESET(SDE);
3102 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3103 I915_WRITE(SERR_INT, 0xffffffff);
3107 * SDEIER is also touched by the interrupt handler to work around missed PCH
3108 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3109 * instead we unconditionally enable all PCH interrupt sources here, but then
3110 * only unmask them as needed with SDEIMR.
3112 * This function needs to be called before interrupts are enabled.
3114 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3116 struct drm_i915_private *dev_priv = dev->dev_private;
3118 if (HAS_PCH_NOP(dev))
3121 WARN_ON(I915_READ(SDEIER) != 0);
3122 I915_WRITE(SDEIER, 0xffffffff);
3123 POSTING_READ(SDEIER);
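/*
 * The handler-side pairing referred to above (sketch, mirroring
 * ironlake_irq_handler()): SDEIER is saved and zeroed around SDEIIR
 * processing,
 *
 *	sde_ier = I915_READ(SDEIER);
 *	I915_WRITE(SDEIER, 0);
 *	... read, clear and handle SDEIIR ...
 *	I915_WRITE(SDEIER, sde_ier);
 *
 * which is why this function may only touch SDEIER while interrupts are
 * still disabled.
 */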
3126 static void gen5_gt_irq_reset(struct drm_device *dev)
3128 struct drm_i915_private *dev_priv = dev->dev_private;
3131 if (INTEL_INFO(dev)->gen >= 6)
3132 GEN5_IRQ_RESET(GEN6_PM);
3137 static void ironlake_irq_reset(struct drm_device *dev)
3139 struct drm_i915_private *dev_priv = dev->dev_private;
3141 I915_WRITE(HWSTAM, 0xffffffff);
3145 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3147 gen5_gt_irq_reset(dev);
3152 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3156 I915_WRITE(PORT_HOTPLUG_EN, 0);
3157 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3159 for_each_pipe(dev_priv, pipe)
3160 I915_WRITE(PIPESTAT(pipe), 0xffff);
3162 GEN5_IRQ_RESET(VLV_);
3165 static void valleyview_irq_preinstall(struct drm_device *dev)
3167 struct drm_i915_private *dev_priv = dev->dev_private;
3170 I915_WRITE(VLV_IMR, 0);
3171 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3172 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3173 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3175 gen5_gt_irq_reset(dev);
3177 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3179 vlv_display_irq_reset(dev_priv);
3182 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3184 GEN8_IRQ_RESET_NDX(GT, 0);
3185 GEN8_IRQ_RESET_NDX(GT, 1);
3186 GEN8_IRQ_RESET_NDX(GT, 2);
3187 GEN8_IRQ_RESET_NDX(GT, 3);
3190 static void gen8_irq_reset(struct drm_device *dev)
3192 struct drm_i915_private *dev_priv = dev->dev_private;
3195 I915_WRITE(GEN8_MASTER_IRQ, 0);
3196 POSTING_READ(GEN8_MASTER_IRQ);
3198 gen8_gt_irq_reset(dev_priv);
3200 for_each_pipe(dev_priv, pipe)
3201 if (intel_display_power_is_enabled(dev_priv,
3202 POWER_DOMAIN_PIPE(pipe)))
3203 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3205 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3206 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3207 GEN5_IRQ_RESET(GEN8_PCU_);
3212 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3214 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3216 spin_lock_irq(&dev_priv->irq_lock);
3217 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3218 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3219 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3220 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3221 spin_unlock_irq(&dev_priv->irq_lock);
3224 static void cherryview_irq_preinstall(struct drm_device *dev)
3226 struct drm_i915_private *dev_priv = dev->dev_private;
3228 I915_WRITE(GEN8_MASTER_IRQ, 0);
3229 POSTING_READ(GEN8_MASTER_IRQ);
3231 gen8_gt_irq_reset(dev_priv);
3233 GEN5_IRQ_RESET(GEN8_PCU_);
3235 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3237 vlv_display_irq_reset(dev_priv);
3240 static void ibx_hpd_irq_setup(struct drm_device *dev)
3242 struct drm_i915_private *dev_priv = dev->dev_private;
3243 struct intel_encoder *intel_encoder;
3244 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3246 if (HAS_PCH_IBX(dev)) {
3247 hotplug_irqs = SDE_HOTPLUG_MASK;
3248 for_each_intel_encoder(dev, intel_encoder)
3249 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3250 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3252 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3253 for_each_intel_encoder(dev, intel_encoder)
3254 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3255 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3258 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3261 * Enable digital hotplug on the PCH, and configure the DP short pulse
3262 * duration to 2ms (which is the minimum in the Display Port spec)
3264 * This register is the same on all known PCH chips.
3266 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3267 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3268 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3269 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3270 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3271 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3274 static void ibx_irq_postinstall(struct drm_device *dev)
3276 struct drm_i915_private *dev_priv = dev->dev_private;
3279 if (HAS_PCH_NOP(dev))
3282 if (HAS_PCH_IBX(dev))
3283 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3285 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3287 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3288 I915_WRITE(SDEIMR, ~mask);
3291 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3293 struct drm_i915_private *dev_priv = dev->dev_private;
3294 u32 pm_irqs, gt_irqs;
3296 pm_irqs = gt_irqs = 0;
3298 dev_priv->gt_irq_mask = ~0;
3299 if (HAS_L3_DPF(dev)) {
3300 /* L3 parity interrupt is always unmasked. */
3301 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3302 gt_irqs |= GT_PARITY_ERROR(dev);
3305 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3307 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3308 ILK_BSD_USER_INTERRUPT;
3310 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3313 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3315 if (INTEL_INFO(dev)->gen >= 6) {
3316 pm_irqs |= dev_priv->pm_rps_events;
3319 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3321 dev_priv->pm_irq_mask = 0xffffffff;
3322 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3326 static int ironlake_irq_postinstall(struct drm_device *dev)
3328 struct drm_i915_private *dev_priv = dev->dev_private;
3329 u32 display_mask, extra_mask;
3331 if (INTEL_INFO(dev)->gen >= 7) {
3332 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3333 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3334 DE_PLANEB_FLIP_DONE_IVB |
3335 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3336 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3337 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3339 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3340 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3342 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3344 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3345 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3348 dev_priv->irq_mask = ~display_mask;
3350 I915_WRITE(HWSTAM, 0xeffe);
3352 ibx_irq_pre_postinstall(dev);
3354 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3356 gen5_gt_irq_postinstall(dev);
3358 ibx_irq_postinstall(dev);
3360 if (IS_IRONLAKE_M(dev)) {
3361 /* Enable PCU event interrupts
3363 * spinlocking not required here for correctness since interrupt
3364 * setup is guaranteed to run in single-threaded context. But we
3365 * need it to make the assert_spin_locked happy. */
3366 spin_lock_irq(&dev_priv->irq_lock);
3367 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3368 spin_unlock_irq(&dev_priv->irq_lock);
3374 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3380 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3381 PIPE_FIFO_UNDERRUN_STATUS;
3383 for_each_pipe(dev_priv, pipe)
3384 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3385 POSTING_READ(PIPESTAT(PIPE_A));
3387 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3388 PIPE_CRC_DONE_INTERRUPT_STATUS;
3390 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3391 for_each_pipe(dev_priv, pipe)
3392 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3394 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3395 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3396 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3397 if (IS_CHERRYVIEW(dev_priv))
3398 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3399 dev_priv->irq_mask &= ~iir_mask;
3401 I915_WRITE(VLV_IIR, iir_mask);
3402 I915_WRITE(VLV_IIR, iir_mask);
3403 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3404 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3405 POSTING_READ(VLV_IMR);
3408 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3414 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3415 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3416 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3417 if (IS_CHERRYVIEW(dev_priv))
3418 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3420 dev_priv->irq_mask |= iir_mask;
3421 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3422 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3423 I915_WRITE(VLV_IIR, iir_mask);
3424 I915_WRITE(VLV_IIR, iir_mask);
3425 POSTING_READ(VLV_IIR);
3427 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3428 PIPE_CRC_DONE_INTERRUPT_STATUS;
3430 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3431 for_each_pipe(dev_priv, pipe)
3432 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3434 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3435 PIPE_FIFO_UNDERRUN_STATUS;
3437 for_each_pipe(dev_priv, pipe)
3438 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3439 POSTING_READ(PIPESTAT(PIPE_A));
3442 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3444 assert_spin_locked(&dev_priv->irq_lock);
3446 if (dev_priv->display_irqs_enabled)
3449 dev_priv->display_irqs_enabled = true;
3451 if (intel_irqs_enabled(dev_priv))
3452 valleyview_display_irqs_install(dev_priv);
3455 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3457 assert_spin_locked(&dev_priv->irq_lock);
3459 if (!dev_priv->display_irqs_enabled)
3462 dev_priv->display_irqs_enabled = false;
3464 if (intel_irqs_enabled(dev_priv))
3465 valleyview_display_irqs_uninstall(dev_priv);
3468 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3470 dev_priv->irq_mask = ~0;
3472 I915_WRITE(PORT_HOTPLUG_EN, 0);
3473 POSTING_READ(PORT_HOTPLUG_EN);
3475 I915_WRITE(VLV_IIR, 0xffffffff);
3476 I915_WRITE(VLV_IIR, 0xffffffff);
3477 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3478 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3479 POSTING_READ(VLV_IMR);
3481 /* Interrupt setup is already guaranteed to be single-threaded; this is
3482 * just to make the assert_spin_locked check happy. */
3483 spin_lock_irq(&dev_priv->irq_lock);
3484 if (dev_priv->display_irqs_enabled)
3485 valleyview_display_irqs_install(dev_priv);
3486 spin_unlock_irq(&dev_priv->irq_lock);
3489 static int valleyview_irq_postinstall(struct drm_device *dev)
3491 struct drm_i915_private *dev_priv = dev->dev_private;
3493 vlv_display_irq_postinstall(dev_priv);
3495 gen5_gt_irq_postinstall(dev);
3497 /* ack & enable invalid PTE error interrupts */
3498 #if 0 /* FIXME: add support to irq handler for checking these bits */
3499 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3500 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3503 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3508 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3510 /* These are interrupts we'll toggle with the ring mask register */
3511 uint32_t gt_interrupts[] = {
3512 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3513 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3514 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3515 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3516 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3517 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3518 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3519 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3520 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3522 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3523 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3526 dev_priv->pm_irq_mask = 0xffffffff;
3527 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3528 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3529 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3530 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3533 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3535 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3536 uint32_t de_pipe_enables;
3538 u32 aux_en = GEN8_AUX_CHANNEL_A;
3540 if (IS_GEN9(dev_priv)) {
3541 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3542 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3543 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3546 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3547 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3549 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3550 GEN8_PIPE_FIFO_UNDERRUN;
3552 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3553 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3554 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3556 for_each_pipe(dev_priv, pipe)
3557 if (intel_display_power_is_enabled(dev_priv,
3558 POWER_DOMAIN_PIPE(pipe)))
3559 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3560 dev_priv->de_irq_mask[pipe],
3563 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3566 static int gen8_irq_postinstall(struct drm_device *dev)
3568 struct drm_i915_private *dev_priv = dev->dev_private;
3570 ibx_irq_pre_postinstall(dev);
3572 gen8_gt_irq_postinstall(dev_priv);
3573 gen8_de_irq_postinstall(dev_priv);
3575 ibx_irq_postinstall(dev);
3577 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3578 POSTING_READ(GEN8_MASTER_IRQ);
3583 static int cherryview_irq_postinstall(struct drm_device *dev)
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3587 vlv_display_irq_postinstall(dev_priv);
3589 gen8_gt_irq_postinstall(dev_priv);
3591 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3592 POSTING_READ(GEN8_MASTER_IRQ);
3597 static void gen8_irq_uninstall(struct drm_device *dev)
3599 struct drm_i915_private *dev_priv = dev->dev_private;
3604 gen8_irq_reset(dev);
3607 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3609 /* Interrupt setup is already guaranteed to be single-threaded; this is
3610 * just to make the assert_spin_locked check happy. */
3611 spin_lock_irq(&dev_priv->irq_lock);
3612 if (dev_priv->display_irqs_enabled)
3613 valleyview_display_irqs_uninstall(dev_priv);
3614 spin_unlock_irq(&dev_priv->irq_lock);
3616 vlv_display_irq_reset(dev_priv);
3618 dev_priv->irq_mask = 0;
3621 static void valleyview_irq_uninstall(struct drm_device *dev)
3623 struct drm_i915_private *dev_priv = dev->dev_private;
3628 I915_WRITE(VLV_MASTER_IER, 0);
3630 gen5_gt_irq_reset(dev);
3632 I915_WRITE(HWSTAM, 0xffffffff);
3634 vlv_display_irq_uninstall(dev_priv);
3637 static void cherryview_irq_uninstall(struct drm_device *dev)
3639 struct drm_i915_private *dev_priv = dev->dev_private;
3644 I915_WRITE(GEN8_MASTER_IRQ, 0);
3645 POSTING_READ(GEN8_MASTER_IRQ);
3647 gen8_gt_irq_reset(dev_priv);
3649 GEN5_IRQ_RESET(GEN8_PCU_);
3651 vlv_display_irq_uninstall(dev_priv);
3654 static void ironlake_irq_uninstall(struct drm_device *dev)
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3661 ironlake_irq_reset(dev);
3664 static void i8xx_irq_preinstall(struct drm_device * dev)
3666 struct drm_i915_private *dev_priv = dev->dev_private;
3669 for_each_pipe(dev_priv, pipe)
3670 I915_WRITE(PIPESTAT(pipe), 0);
3671 I915_WRITE16(IMR, 0xffff);
3672 I915_WRITE16(IER, 0x0);
3673 POSTING_READ16(IER);
3676 static int i8xx_irq_postinstall(struct drm_device *dev)
3678 struct drm_i915_private *dev_priv = dev->dev_private;
3681 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3683 /* Unmask the interrupts that we always want on. */
3684 dev_priv->irq_mask =
3685 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3686 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3687 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3688 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3689 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3690 I915_WRITE16(IMR, dev_priv->irq_mask);
3693 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3694 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3695 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3696 I915_USER_INTERRUPT);
3697 POSTING_READ16(IER);
3699 /* Interrupt setup is already guaranteed to be single-threaded; this is
3700 * just to make the assert_spin_locked check happy. */
3701 spin_lock_irq(&dev_priv->irq_lock);
3702 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3703 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3704 spin_unlock_irq(&dev_priv->irq_lock);
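/*
 * A note on the register trio programmed above (sketch of the usual i915
 * interrupt plumbing; the bit definitions live in i915_reg.h): IMR decides
 * which status bits may latch into IIR, IER decides which latched bits
 * actually raise the interrupt line, and IIR bits are acked by writing
 * them back as ones. Hence the canonical enable order:
 *
 *	I915_WRITE16(IMR, dev_priv->irq_mask);	unmask what we handle
 *	I915_WRITE16(IER, enable_mask);		let it interrupt
 *	POSTING_READ16(IER);			flush before the first irq
 */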
3710 * Returns true when a page flip has completed.
3712 static bool i8xx_handle_vblank(struct drm_device *dev,
3713 int plane, int pipe, u32 iir)
3715 struct drm_i915_private *dev_priv = dev->dev_private;
3716 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3718 if (!intel_pipe_handle_vblank(dev, pipe))
3721 if ((iir & flip_pending) == 0)
3722 goto check_page_flip;
3724 intel_prepare_page_flip(dev, plane);
3726 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3727 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3728 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3729 * the flip is completed (no longer pending). Since this doesn't raise
3730 * an interrupt per se, we watch for the change at vblank.
3732 if (I915_READ16(ISR) & flip_pending)
3733 goto check_page_flip;
3735 intel_finish_page_flip(dev, pipe);
3739 intel_check_page_flip(dev, pipe);
3743 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3745 struct drm_device *dev = arg;
3746 struct drm_i915_private *dev_priv = dev->dev_private;
3751 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3752 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3754 iir = I915_READ16(IIR);
3758 while (iir & ~flip_mask) {
3759 /* Can't rely on pipestat interrupt bit in iir as it might
3760 * have been cleared after the pipestat interrupt was received.
3761 * It doesn't set the bit in iir again, but it still produces
3762 * interrupts (for non-MSI).
3764 spin_lock(&dev_priv->irq_lock);
3765 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3766 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3768 for_each_pipe(dev_priv, pipe) {
3769 int reg = PIPESTAT(pipe);
3770 pipe_stats[pipe] = I915_READ(reg);
3773 * Clear the PIPE*STAT regs before the IIR
3775 if (pipe_stats[pipe] & 0x8000ffff)
3776 I915_WRITE(reg, pipe_stats[pipe]);
3778 spin_unlock(&dev_priv->irq_lock);
3780 I915_WRITE16(IIR, iir & ~flip_mask);
3781 new_iir = I915_READ16(IIR); /* Flush posted writes */
3783 if (iir & I915_USER_INTERRUPT)
3784 notify_ring(dev, &dev_priv->ring[RCS]);
3786 for_each_pipe(dev_priv, pipe) {
3791 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3792 i8xx_handle_vblank(dev, plane, pipe, iir))
3793 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3795 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3796 i9xx_pipe_crc_irq_handler(dev, pipe);
3798 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3799 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3809 static void i8xx_irq_uninstall(struct drm_device * dev)
3811 struct drm_i915_private *dev_priv = dev->dev_private;
3814 for_each_pipe(dev_priv, pipe) {
3815 /* Clear enable bits; then clear status bits */
3816 I915_WRITE(PIPESTAT(pipe), 0);
3817 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3819 I915_WRITE16(IMR, 0xffff);
3820 I915_WRITE16(IER, 0x0);
3821 I915_WRITE16(IIR, I915_READ16(IIR));
3824 static void i915_irq_preinstall(struct drm_device * dev)
3826 struct drm_i915_private *dev_priv = dev->dev_private;
3829 if (I915_HAS_HOTPLUG(dev)) {
3830 I915_WRITE(PORT_HOTPLUG_EN, 0);
3831 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3834 I915_WRITE16(HWSTAM, 0xeffe);
3835 for_each_pipe(dev_priv, pipe)
3836 I915_WRITE(PIPESTAT(pipe), 0);
3837 I915_WRITE(IMR, 0xffffffff);
3838 I915_WRITE(IER, 0x0);
3842 static int i915_irq_postinstall(struct drm_device *dev)
3844 struct drm_i915_private *dev_priv = dev->dev_private;
3847 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3849 /* Unmask the interrupts that we always want on. */
3850 dev_priv->irq_mask =
3851 ~(I915_ASLE_INTERRUPT |
3852 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3853 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3854 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3855 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3856 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3859 I915_ASLE_INTERRUPT |
3860 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3861 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3862 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3863 I915_USER_INTERRUPT;
3865 if (I915_HAS_HOTPLUG(dev)) {
3866 I915_WRITE(PORT_HOTPLUG_EN, 0);
3867 POSTING_READ(PORT_HOTPLUG_EN);
3869 /* Enable in IER... */
3870 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3871 /* and unmask in IMR */
3872 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3875 I915_WRITE(IMR, dev_priv->irq_mask);
3876 I915_WRITE(IER, enable_mask);
3879 i915_enable_asle_pipestat(dev);
3881 /* Interrupt setup is already guaranteed to be single-threaded; this is
3882 * just to make the assert_spin_locked check happy. */
3883 spin_lock_irq(&dev_priv->irq_lock);
3884 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3885 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3886 spin_unlock_irq(&dev_priv->irq_lock);
3892 * Returns true when a page flip has completed.
3894 static bool i915_handle_vblank(struct drm_device *dev,
3895 int plane, int pipe, u32 iir)
3897 struct drm_i915_private *dev_priv = dev->dev_private;
3898 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3900 if (!intel_pipe_handle_vblank(dev, pipe))
3903 if ((iir & flip_pending) == 0)
3904 goto check_page_flip;
3906 intel_prepare_page_flip(dev, plane);
3908 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3909 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3910 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3911 * the flip is completed (no longer pending). Since this doesn't raise
3912 * an interrupt per se, we watch for the change at vblank.
3914 if (I915_READ(ISR) & flip_pending)
3915 goto check_page_flip;
3917 intel_finish_page_flip(dev, pipe);
3921 intel_check_page_flip(dev, pipe);
3925 static irqreturn_t i915_irq_handler(int irq, void *arg)
3927 struct drm_device *dev = arg;
3928 struct drm_i915_private *dev_priv = dev->dev_private;
3929 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3931 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3932 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3933 int pipe, ret = IRQ_NONE;
3935 iir = I915_READ(IIR);
3937 bool irq_received = (iir & ~flip_mask) != 0;
3938 bool blc_event = false;
3940 /* Can't rely on pipestat interrupt bit in iir as it might
3941 * have been cleared after the pipestat interrupt was received.
3942 * It doesn't set the bit in iir again, but it still produces
3943 * interrupts (for non-MSI).
3945 spin_lock(&dev_priv->irq_lock);
3946 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3947 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3949 for_each_pipe(dev_priv, pipe) {
3950 int reg = PIPESTAT(pipe);
3951 pipe_stats[pipe] = I915_READ(reg);
3953 /* Clear the PIPE*STAT regs before the IIR */
3954 if (pipe_stats[pipe] & 0x8000ffff) {
3955 I915_WRITE(reg, pipe_stats[pipe]);
3956 irq_received = true;
3959 spin_unlock(&dev_priv->irq_lock);
3964 /* Consume port. Then clear IIR or we'll miss events */
3965 if (I915_HAS_HOTPLUG(dev) &&
3966 iir & I915_DISPLAY_PORT_INTERRUPT)
3967 i9xx_hpd_irq_handler(dev);
3969 I915_WRITE(IIR, iir & ~flip_mask);
3970 new_iir = I915_READ(IIR); /* Flush posted writes */
3972 if (iir & I915_USER_INTERRUPT)
3973 notify_ring(dev, &dev_priv->ring[RCS]);
3975 for_each_pipe(dev_priv, pipe) {
3980 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3981 i915_handle_vblank(dev, plane, pipe, iir))
3982 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3984 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3987 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3988 i9xx_pipe_crc_irq_handler(dev, pipe);
3990 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3991 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3995 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3996 intel_opregion_asle_intr(dev);
3998 /* With MSI, interrupts are only generated when iir
3999 * transitions from zero to nonzero. If another bit got
4000 * set while we were handling the existing iir bits, then
4001 * we would never get another interrupt.
4003 * This is fine on non-MSI as well, as if we hit this path
4004 * we avoid exiting the interrupt handler only to generate
4007 * Note that for MSI this could cause a stray interrupt report
4008 * if an interrupt landed in the time between writing IIR and
4009 * the posting read. This should be rare enough to never
4010 * trigger the 99% of 100,000 interrupts test for disabling MSI.
4015 } while (iir & ~flip_mask);
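/*
 * Shape of the loop above (sketch; why new_iir is re-read before handling):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir & ~flip_mask);	ack this batch
 *		new_iir = I915_READ(IIR);		catch racing events
 *		... handle iir ...
 *		iir = new_iir;
 *	} while (iir & ~flip_mask);
 *
 * With MSI the line only fires on a 0 -> 1 transition of IIR, so returning
 * while IIR is still non-zero would silence interrupts forever.
 */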
4020 static void i915_irq_uninstall(struct drm_device * dev)
4022 struct drm_i915_private *dev_priv = dev->dev_private;
4025 if (I915_HAS_HOTPLUG(dev)) {
4026 I915_WRITE(PORT_HOTPLUG_EN, 0);
4027 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4030 I915_WRITE16(HWSTAM, 0xffff);
4031 for_each_pipe(dev_priv, pipe) {
4032 /* Clear enable bits; then clear status bits */
4033 I915_WRITE(PIPESTAT(pipe), 0);
4034 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4036 I915_WRITE(IMR, 0xffffffff);
4037 I915_WRITE(IER, 0x0);
4039 I915_WRITE(IIR, I915_READ(IIR));
4042 static void i965_irq_preinstall(struct drm_device * dev)
4044 struct drm_i915_private *dev_priv = dev->dev_private;
4047 I915_WRITE(PORT_HOTPLUG_EN, 0);
4048 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4050 I915_WRITE(HWSTAM, 0xeffe);
4051 for_each_pipe(dev_priv, pipe)
4052 I915_WRITE(PIPESTAT(pipe), 0);
4053 I915_WRITE(IMR, 0xffffffff);
4054 I915_WRITE(IER, 0x0);
4058 static int i965_irq_postinstall(struct drm_device *dev)
4060 struct drm_i915_private *dev_priv = dev->dev_private;
4064 /* Unmask the interrupts that we always want on. */
4065 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4066 I915_DISPLAY_PORT_INTERRUPT |
4067 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4068 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4069 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4070 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4071 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4073 enable_mask = ~dev_priv->irq_mask;
4074 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4075 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4076 enable_mask |= I915_USER_INTERRUPT;
4079 enable_mask |= I915_BSD_USER_INTERRUPT;
4081 /* Interrupt setup is already guaranteed to be single-threaded; this is
4082 * just to make the assert_spin_locked check happy. */
4083 spin_lock_irq(&dev_priv->irq_lock);
4084 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4085 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4086 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4087 spin_unlock_irq(&dev_priv->irq_lock);
4090 * Enable some error detection, note the instruction error mask
4091 * bit is reserved, so we leave it masked.
4094 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4095 GM45_ERROR_MEM_PRIV |
4096 GM45_ERROR_CP_PRIV |
4097 I915_ERROR_MEMORY_REFRESH);
4099 error_mask = ~(I915_ERROR_PAGE_TABLE |
4100 I915_ERROR_MEMORY_REFRESH);
4102 I915_WRITE(EMR, error_mask);
4104 I915_WRITE(IMR, dev_priv->irq_mask);
4105 I915_WRITE(IER, enable_mask);
4108 I915_WRITE(PORT_HOTPLUG_EN, 0);
4109 POSTING_READ(PORT_HOTPLUG_EN);
4111 i915_enable_asle_pipestat(dev);
4116 static void i915_hpd_irq_setup(struct drm_device *dev)
4118 struct drm_i915_private *dev_priv = dev->dev_private;
4119 struct intel_encoder *intel_encoder;
4122 assert_spin_locked(&dev_priv->irq_lock);
4124 if (I915_HAS_HOTPLUG(dev)) {
4125 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4126 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4127 /* Note HDMI and DP share hotplug bits */
4128 /* enable bits are the same for all generations */
4129 for_each_intel_encoder(dev, intel_encoder)
4130 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4131 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4132 /* Programming the CRT detection parameters tends
4133 to generate a spurious hotplug event about three
4134 seconds later. So just do it once.
4137 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4138 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4139 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4141 /* Ignore TV since it's buggy */
4142 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4146 static irqreturn_t i965_irq_handler(int irq, void *arg)
4148 struct drm_device *dev = arg;
4149 struct drm_i915_private *dev_priv = dev->dev_private;
4151 u32 pipe_stats[I915_MAX_PIPES];
4152 int ret = IRQ_NONE, pipe;
4154 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4155 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4157 iir = I915_READ(IIR);
4160 bool irq_received = (iir & ~flip_mask) != 0;
4161 bool blc_event = false;
4163 /* Can't rely on pipestat interrupt bit in iir as it might
4164 * have been cleared after the pipestat interrupt was received.
4165 * It doesn't set the bit in iir again, but it still produces
4166 * interrupts (for non-MSI).
4168 spin_lock(&dev_priv->irq_lock);
4169 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4170 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4172 for_each_pipe(dev_priv, pipe) {
4173 int reg = PIPESTAT(pipe);
4174 pipe_stats[pipe] = I915_READ(reg);
4177 * Clear the PIPE*STAT regs before the IIR
4179 if (pipe_stats[pipe] & 0x8000ffff) {
4180 I915_WRITE(reg, pipe_stats[pipe]);
4181 irq_received = true;
4184 spin_unlock(&dev_priv->irq_lock);
		/* Consume port.  Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero.  If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read.  This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}
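
/*
 * Illustrative sketch (not driver code): the IIR ack-and-reread pattern the
 * loop above implements. read_iir()/write_iir() are hypothetical stand-ins
 * for the I915_READ(IIR)/I915_WRITE(IIR) accesses:
 *
 *	iir = read_iir();
 *	while (iir & ~flip_mask) {
 *		write_iir(iir & ~flip_mask);	// ack the bits being handled
 *		new_iir = read_iir();		// posting read; also picks up
 *						// bits that arrived meanwhile
 *		handle(iir);			// dispatch the acked bits
 *		iir = new_iir;			// loop until IIR reads back zero
 *	}
 *
 * With MSI this matters because a new edge is only generated on a zero to
 * nonzero transition of IIR, as the comment above explains.
 */
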
static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
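
/*
 * Illustrative sketch (not driver code): once the vtable above is filled in,
 * drm_irq_install() (invoked from intel_irq_install() below) hands
 * dev->driver->irq_handler to request_irq(), so every interrupt lands in the
 * per-platform handler chosen here, e.g. on Cherryview roughly:
 *
 *	request_irq(irq, dev->driver->irq_handler, ...)
 *		-> cherryview_irq_handler(irq, dev)
 */
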
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug
 * and poll requests can run concurrently to other code, so locking rules must
 * be obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}
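
/*
 * Illustrative sketch (not driver code): any later caller that needs to
 * reprogram hotplug detection must follow the same locking rule as above,
 * since the setup hooks assert that irq_lock is held:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	if (dev_priv->display.hpd_irq_setup)
 *		dev_priv->display.hpd_irq_setup(dev);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */
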
/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the
 * hotplug support disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
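
/*
 * Illustrative sketch (not driver code) of the two-stage bring-up described
 * above, as a driver load path might use it (error handling elided):
 *
 *	intel_irq_init(dev_priv);		// software state, no hw irq yet
 *	ret = intel_irq_install(dev_priv);	// hardware interrupt enabled
 *	intel_hpd_init(dev_priv);		// hotplug enabled last
 */
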
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
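
/*
 * Illustrative sketch (not driver code): the runtime pm and system
 * suspend/resume paths are expected to bracket a sleep cycle with the two
 * helpers above:
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	// ... power down the device ...
 *	// ... power the device back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */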