/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */
static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)
#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
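/*
 * Note on the two-mask convention shared by the update helpers in this
 * file: interrupt_mask selects which IMR bits to touch and
 * enabled_irq_mask says which of those end up enabled. Since a set IMR
 * bit masks an interrupt off, "enable" means clearing the bit, hence the
 * ~enabled_irq_mask inversion above.
 */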
static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}
void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
				dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}
u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can, and VLV and CHV may, hard hang on a looping
	 * batchbuffer if GEN6_PM_RP_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}
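/*
 * Worked example: gen6_disable_rps_interrupts() below passes ~0 ("mask
 * everything") through this sanitizer, which then clears
 * GEN6_PM_RP_UP_EI_EXPIRED from the mask on the affected platforms, i.e.
 * leaves that one event unmasked to avoid the hang described above.
 */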
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
				~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);

	synchronize_irq(dev->irq);
}
/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
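/*
 * The status_mask << 16 default works because PIPESTAT packs the enable
 * bits in the high 16 bits directly above their status bits in the low
 * 16; the sprite flip-done and PSR bits handled above are the exceptions
 * to that pairing.
 */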
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}
/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */
static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode =
		&intel_crtc->config->base.adjusted_mode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
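/*
 * Illustration of the fixup above: the hardware frame counter increments
 * at the start of active, so if the pixel counter has already passed
 * vbl_start then vblank for the current frame has begun and one is added,
 * making the cooked counter behave like the gen4+ counter that increments
 * at the start of vblank.
 */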
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
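/*
 * Example of the sign convention established above (fictitious mode with
 * vbl_start = 768 and vbl_end = vtotal = 771): a raw position of 769,
 * inside vblank, is reported as -2 and counts up towards 0 at vbl_end,
 * while a raw position of 100 in the active area is reported as +100.
 */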
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->state->enable) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}
static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}
static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
				/* fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}
/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
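/* 2*60*1000 ms: storm-disabled HPD pins stay on polling for two minutes
 * before the reenable work restores interrupt-driven detection. */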
static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}
static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;

	if (old->cz_clock == 0)
		return false;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->mem_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  VLV_RP_DOWN_EI_THRESHOLD))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 VLV_RP_UP_EI_THRESHOLD))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}
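/*
 * Roughly, vlv_c0_above() asks whether combined render+media C0 residency
 * exceeded the given threshold percentage of the elapsed CZ timestamp
 * interval, with both sides scaled into the same units; this workaround
 * synthesizes the up/down threshold events that the hardware EI
 * interrupts only hint at.
 */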
static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancelation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
		adj = 0;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	intel_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}
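/*
 * The adj doubling above gives exponential ramping: consecutive up (or
 * down) threshold interrupts grow the step 1, 2, 4, ... (2, 4, 8 on CHV,
 * which needs even encodings), so sustained load swings converge quickly
 * while an isolated event moves the frequency by only one step.
 */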
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}
static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}
#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
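/*
 * More than HPD_STORM_THRESHOLD interrupts on one pin within
 * HPD_STORM_DETECT_PERIOD ms counts as an interrupt storm;
 * intel_hpd_irq_handler() then marks the pin HPD_MARK_DISABLED and
 * i915_hotplug_work_func() demotes the connector to polling.
 */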
static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);
			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}
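/*
 * Structure note: the first pass above routes per-port digital pulses to
 * the dig_port work queue, the second pass does the legacy pin bookkeeping
 * and storm accounting, and the work is only queued after irq_lock is
 * dropped, since the handlers may grab modeset locks.
 */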
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}
#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}
static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}
/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}
static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}
static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}
static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}
static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}
static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	if (!intel_irqs_enabled(dev_priv))
		return IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}
static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port);
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}
static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}
static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}
static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}
static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
2089 * To handle irqs with the minimum potential races with fresh interrupts, we:
2090 * 1 - Disable Master Interrupt Control.
2091 * 2 - Find the source(s) of the interrupt.
2092 * 3 - Clear the Interrupt Identity bits (IIR).
2093 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2094 * 5 - Re-enable Master Interrupt Control.
2096 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2098 struct drm_device *dev = arg;
2099 struct drm_i915_private *dev_priv = dev->dev_private;
2100 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2101 irqreturn_t ret = IRQ_NONE;
2103 if (!intel_irqs_enabled(dev_priv))
2106 /* We get interrupts on unclaimed registers, so check for this before we
2107 * do any I915_{READ,WRITE}. */
2108 intel_uncore_check_errors(dev);
2110 /* disable master interrupt before clearing iir */
2111 de_ier = I915_READ(DEIER);
2112 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2113 POSTING_READ(DEIER);
2115 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2116 	 * interrupts will be stored on its back queue, and then we'll be
2117 * able to process them after we restore SDEIER (as soon as we restore
2118 * it, we'll get an interrupt if SDEIIR still has something to process
2119 * due to its back queue). */
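	/*
	 * Timeline sketch of that SDEIER dance (illustrative only):
	 *
	 *	t0: SDEIER = 0           PCH events stop asserting the CPU irq
	 *	t1: read + clear SDEIIR  handle the queued PCH events
	 *	t2: new PCH event        latched in SDEIIR, no CPU irq yet
	 *	t3: SDEIER restored      bits still pending in SDEIIR re-assert
	 *	                         the interrupt, so nothing is lost
	 */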
2120 if (!HAS_PCH_NOP(dev)) {
2121 sde_ier = I915_READ(SDEIER);
2122 I915_WRITE(SDEIER, 0);
2123 POSTING_READ(SDEIER);
2126 /* Find, clear, then process each source of interrupt */
2128 gt_iir = I915_READ(GTIIR);
2130 I915_WRITE(GTIIR, gt_iir);
2132 if (INTEL_INFO(dev)->gen >= 6)
2133 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2135 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2138 de_iir = I915_READ(DEIIR);
2140 I915_WRITE(DEIIR, de_iir);
2142 if (INTEL_INFO(dev)->gen >= 7)
2143 ivb_display_irq_handler(dev, de_iir);
2145 ilk_display_irq_handler(dev, de_iir);
2148 if (INTEL_INFO(dev)->gen >= 6) {
2149 u32 pm_iir = I915_READ(GEN6_PMIIR);
2151 I915_WRITE(GEN6_PMIIR, pm_iir);
2153 gen6_rps_irq_handler(dev_priv, pm_iir);
2157 I915_WRITE(DEIER, de_ier);
2158 POSTING_READ(DEIER);
2159 if (!HAS_PCH_NOP(dev)) {
2160 I915_WRITE(SDEIER, sde_ier);
2161 POSTING_READ(SDEIER);
2167 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2169 struct drm_device *dev = arg;
2170 struct drm_i915_private *dev_priv = dev->dev_private;
2172 irqreturn_t ret = IRQ_NONE;
2175 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2177 if (!intel_irqs_enabled(dev_priv))
2181 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2184 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2185 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2189 I915_WRITE(GEN8_MASTER_IRQ, 0);
2190 POSTING_READ(GEN8_MASTER_IRQ);
2192 /* Find, clear, then process each source of interrupt */
2194 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2196 if (master_ctl & GEN8_DE_MISC_IRQ) {
2197 tmp = I915_READ(GEN8_DE_MISC_IIR);
2199 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2201 if (tmp & GEN8_DE_MISC_GSE)
2202 intel_opregion_asle_intr(dev);
2204 DRM_ERROR("Unexpected DE Misc interrupt\n");
2207 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2210 if (master_ctl & GEN8_DE_PORT_IRQ) {
2211 tmp = I915_READ(GEN8_DE_PORT_IIR);
2213 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2217 dp_aux_irq_handler(dev);
2219 DRM_ERROR("Unexpected DE Port interrupt\n");
2222 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2225 for_each_pipe(dev_priv, pipe) {
2226 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2228 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2231 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2234 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2236 if (pipe_iir & GEN8_PIPE_VBLANK &&
2237 intel_pipe_handle_vblank(dev, pipe))
2238 intel_check_page_flip(dev, pipe);
2241 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2243 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2246 intel_prepare_page_flip(dev, pipe);
2247 intel_finish_page_flip_plane(dev, pipe);
2250 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2251 hsw_pipe_crc_irq_handler(dev, pipe);
2253 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2254 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2259 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2261 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2264 			DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2266 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2268 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2271 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2273 * FIXME(BDW): Assume for now that the new interrupt handling
2274 * scheme also closed the SDE interrupt handling race we've seen
2275 * on older pch-split platforms. But this needs testing.
2277 u32 pch_iir = I915_READ(SDEIIR);
2279 I915_WRITE(SDEIIR, pch_iir);
2281 cpt_irq_handler(dev, pch_iir);
2283 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2287 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2288 POSTING_READ(GEN8_MASTER_IRQ);
2293 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2294 bool reset_completed)
2296 struct intel_engine_cs *ring;
2300 * Notify all waiters for GPU completion events that reset state has
2301 * been changed, and that they need to restart their wait after
2302 * checking for potential errors (and bail out to drop locks if there is
2303 	 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2306 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2307 for_each_ring(ring, dev_priv, i)
2308 wake_up_all(&ring->irq_queue);
2310 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2311 wake_up_all(&dev_priv->pending_flip_queue);
2314 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2315 * reset state is cleared.
2317 if (reset_completed)
2318 wake_up_all(&dev_priv->gpu_error.reset_queue);
2322 * i915_reset_and_wakeup - do process context error handling work
2324  * Fire an error uevent so userspace can see that a hang or error occurred.
2327 static void i915_reset_and_wakeup(struct drm_device *dev)
2329 struct drm_i915_private *dev_priv = to_i915(dev);
2330 struct i915_gpu_error *error = &dev_priv->gpu_error;
2331 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2332 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2333 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2336 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2339 * Note that there's only one work item which does gpu resets, so we
2340 * need not worry about concurrent gpu resets potentially incrementing
2341 * error->reset_counter twice. We only need to take care of another
2342 * racing irq/hangcheck declaring the gpu dead for a second time. A
2343 * quick check for that is good enough: schedule_work ensures the
2344 * correct ordering between hang detection and this work item, and since
2345 * the reset in-progress bit is only ever set by code outside of this
2346 * work we don't need to worry about any other races.
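	 *
	 * For reference, the reset_counter encoding relied upon here is
	 * roughly (see i915_drv.h for the authoritative definitions):
	 *
	 *	counter even         no reset pending
	 *	low bit set          reset in progress
	 *	I915_WEDGED bit set  reset failed, GPU terminally wedged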
2348 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2349 DRM_DEBUG_DRIVER("resetting chip\n");
2350 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2354 * In most cases it's guaranteed that we get here with an RPM
2355 * reference held, for example because there is a pending GPU
2356 * request that won't finish until the reset is done. This
2357 * isn't the case at least when we get here by doing a
2358 		 * simulated reset via debugfs, so get an RPM reference.
2360 intel_runtime_pm_get(dev_priv);
2362 intel_prepare_reset(dev);
2365 * All state reset _must_ be completed before we update the
2366 * reset counter, for otherwise waiters might miss the reset
2367 * pending state and not properly drop locks, resulting in
2368 * deadlocks with the reset work.
2370 ret = i915_reset(dev);
2372 intel_finish_reset(dev);
2374 intel_runtime_pm_put(dev_priv);
2378 * After all the gem state is reset, increment the reset
2379 			 * counter and wake up everyone waiting for the reset to complete.
2382 * Since unlock operations are a one-sided barrier only,
2383 * we need to insert a barrier here to order any seqno
2385 			 * updates before the counter increment.
2387 smp_mb__before_atomic();
2388 atomic_inc(&dev_priv->gpu_error.reset_counter);
2390 kobject_uevent_env(&dev->primary->kdev->kobj,
2391 KOBJ_CHANGE, reset_done_event);
2393 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2397 * Note: The wake_up also serves as a memory barrier so that
2398 		 * waiters see the updated value of the reset counter atomic_t.
2400 i915_error_wake_up(dev_priv, true);
2404 static void i915_report_and_clear_eir(struct drm_device *dev)
2406 struct drm_i915_private *dev_priv = dev->dev_private;
2407 uint32_t instdone[I915_NUM_INSTDONE_REG];
2408 u32 eir = I915_READ(EIR);
2414 pr_err("render error detected, EIR: 0x%08x\n", eir);
2416 i915_get_extra_instdone(dev, instdone);
2419 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2420 u32 ipeir = I915_READ(IPEIR_I965);
2422 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2423 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2424 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2425 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2426 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2427 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2428 I915_WRITE(IPEIR_I965, ipeir);
2429 POSTING_READ(IPEIR_I965);
2431 if (eir & GM45_ERROR_PAGE_TABLE) {
2432 u32 pgtbl_err = I915_READ(PGTBL_ER);
2433 pr_err("page table error\n");
2434 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2435 I915_WRITE(PGTBL_ER, pgtbl_err);
2436 POSTING_READ(PGTBL_ER);
2440 if (!IS_GEN2(dev)) {
2441 if (eir & I915_ERROR_PAGE_TABLE) {
2442 u32 pgtbl_err = I915_READ(PGTBL_ER);
2443 pr_err("page table error\n");
2444 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2445 I915_WRITE(PGTBL_ER, pgtbl_err);
2446 POSTING_READ(PGTBL_ER);
2450 if (eir & I915_ERROR_MEMORY_REFRESH) {
2451 pr_err("memory refresh error:\n");
2452 for_each_pipe(dev_priv, pipe)
2453 pr_err("pipe %c stat: 0x%08x\n",
2454 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2455 /* pipestat has already been acked */
2457 if (eir & I915_ERROR_INSTRUCTION) {
2458 pr_err("instruction error\n");
2459 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2460 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2461 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2462 if (INTEL_INFO(dev)->gen < 4) {
2463 u32 ipeir = I915_READ(IPEIR);
2465 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2466 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2467 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2468 I915_WRITE(IPEIR, ipeir);
2469 POSTING_READ(IPEIR);
2471 u32 ipeir = I915_READ(IPEIR_I965);
2473 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2474 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2475 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2476 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2477 I915_WRITE(IPEIR_I965, ipeir);
2478 POSTING_READ(IPEIR_I965);
2482 I915_WRITE(EIR, eir);
2484 eir = I915_READ(EIR);
2487 		 * some errors might have become stuck, so mask them.
2490 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2491 I915_WRITE(EMR, I915_READ(EMR) | eir);
2492 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2497 * i915_handle_error - handle a gpu error
2500  * Do some basic checking of register state at error time and
2501 * dump it to the syslog. Also call i915_capture_error_state() to make
2502 * sure we get a record and make it available in debugfs. Fire a uevent
2503 * so userspace knows something bad happened (should trigger collection
2504 * of a ring dump etc.).
2506 void i915_handle_error(struct drm_device *dev, bool wedged,
2507 const char *fmt, ...)
2509 struct drm_i915_private *dev_priv = dev->dev_private;
2513 va_start(args, fmt);
2514 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2517 i915_capture_error_state(dev, wedged, error_msg);
2518 i915_report_and_clear_eir(dev);
2521 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2522 &dev_priv->gpu_error.reset_counter);
2525 * Wakeup waiting processes so that the reset function
2526 * i915_reset_and_wakeup doesn't deadlock trying to grab
2527 * various locks. By bumping the reset counter first, the woken
2528 * processes will see a reset in progress and back off,
2529 		 * releasing their locks and then waiting for the reset to complete.
2530 * We must do this for _all_ gpu waiters that might hold locks
2531 * that the reset work needs to acquire.
2533 * Note: The wake_up serves as the required memory barrier to
2534 		 * ensure that the waiters see the updated value of the reset counter atomic_t.
2537 i915_error_wake_up(dev_priv, false);
2540 i915_reset_and_wakeup(dev);
2543 /* Called from drm generic code, passed 'crtc' which
2544 * we use as a pipe index
2546 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2548 struct drm_i915_private *dev_priv = dev->dev_private;
2549 unsigned long irqflags;
2551 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2552 if (INTEL_INFO(dev)->gen >= 4)
2553 i915_enable_pipestat(dev_priv, pipe,
2554 PIPE_START_VBLANK_INTERRUPT_STATUS);
2556 i915_enable_pipestat(dev_priv, pipe,
2557 PIPE_VBLANK_INTERRUPT_STATUS);
2558 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2563 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2565 struct drm_i915_private *dev_priv = dev->dev_private;
2566 unsigned long irqflags;
2567 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2568 DE_PIPE_VBLANK(pipe);
2570 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2571 ironlake_enable_display_irq(dev_priv, bit);
2572 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2577 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2579 struct drm_i915_private *dev_priv = dev->dev_private;
2580 unsigned long irqflags;
2582 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2583 i915_enable_pipestat(dev_priv, pipe,
2584 PIPE_START_VBLANK_INTERRUPT_STATUS);
2585 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2590 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2592 struct drm_i915_private *dev_priv = dev->dev_private;
2593 unsigned long irqflags;
2595 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2596 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2597 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2598 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2599 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2603 /* Called from drm generic code, passed 'crtc' which
2604 * we use as a pipe index
2606 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2608 struct drm_i915_private *dev_priv = dev->dev_private;
2609 unsigned long irqflags;
2611 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2612 i915_disable_pipestat(dev_priv, pipe,
2613 PIPE_VBLANK_INTERRUPT_STATUS |
2614 PIPE_START_VBLANK_INTERRUPT_STATUS);
2615 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2618 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 unsigned long irqflags;
2622 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2623 DE_PIPE_VBLANK(pipe);
2625 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2626 ironlake_disable_display_irq(dev_priv, bit);
2627 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2630 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2632 struct drm_i915_private *dev_priv = dev->dev_private;
2633 unsigned long irqflags;
2635 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2636 i915_disable_pipestat(dev_priv, pipe,
2637 PIPE_START_VBLANK_INTERRUPT_STATUS);
2638 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2641 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2643 struct drm_i915_private *dev_priv = dev->dev_private;
2644 unsigned long irqflags;
2646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2647 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2648 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2649 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2650 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2653 static struct drm_i915_gem_request *
2654 ring_last_request(struct intel_engine_cs *ring)
2656 return list_entry(ring->request_list.prev,
2657 struct drm_i915_gem_request, list);
2661 ring_idle(struct intel_engine_cs *ring)
2663 return (list_empty(&ring->request_list) ||
2664 i915_gem_request_completed(ring_last_request(ring), false));
2668 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2670 if (INTEL_INFO(dev)->gen >= 8) {
2671 return (ipehr >> 23) == 0x1c;
2673 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2674 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2675 MI_SEMAPHORE_REGISTER);
2679 static struct intel_engine_cs *
2680 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2682 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2683 struct intel_engine_cs *signaller;
2686 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2687 for_each_ring(signaller, dev_priv, i) {
2688 if (ring == signaller)
2691 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2695 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2697 for_each_ring(signaller, dev_priv, i) {
2698 			if (ring == signaller)
2701 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2706 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2707 ring->id, ipehr, offset);
2712 static struct intel_engine_cs *
2713 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2715 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2716 u32 cmd, ipehr, head;
2720 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2721 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2725 * HEAD is likely pointing to the dword after the actual command,
2726 	 * so scan backwards until we find the MBOX. But limit it to just 4
2727 	 * or 5 dwords depending on the semaphore wait command size.
2728 * Note that we don't care about ACTHD here since that might
2729 	 * point at a batch, and semaphores are always emitted into the
2730 * ringbuffer itself.
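	 *
	 * For reference, the gen6/7 wait command that this scan decodes
	 * looks roughly like this in the ring (illustrative only):
	 *
	 *	dword 0: MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | ...
	 *	dword 1: seqno to wait for, minus one
	 *	dword 2: signaller mbox register offset
	 *
	 * hence the reads at head + 4 and head + 8 below (head + 12 for
	 * the longer gen8+ encoding).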
2732 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2733 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2735 for (i = backwards; i; --i) {
2737 * Be paranoid and presume the hw has gone off into the wild -
2738 * our ring is smaller than what the hardware (and hence
2739 * HEAD_ADDR) allows. Also handles wrap-around.
2741 head &= ring->buffer->size - 1;
2743 /* This here seems to blow up */
2744 cmd = ioread32(ring->buffer->virtual_start + head);
2754 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2755 if (INTEL_INFO(ring->dev)->gen >= 8) {
2756 offset = ioread32(ring->buffer->virtual_start + head + 12);
2758 offset = ioread32(ring->buffer->virtual_start + head + 8);
2760 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2763 static int semaphore_passed(struct intel_engine_cs *ring)
2765 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2766 struct intel_engine_cs *signaller;
2769 ring->hangcheck.deadlock++;
2771 signaller = semaphore_waits_for(ring, &seqno);
2772 if (signaller == NULL)
2775 /* Prevent pathological recursion due to driver bugs */
2776 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2779 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2782 /* cursory check for an unkickable deadlock */
2783 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2784 semaphore_passed(signaller) < 0)
2790 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2792 struct intel_engine_cs *ring;
2795 for_each_ring(ring, dev_priv, i)
2796 ring->hangcheck.deadlock = 0;
2799 static enum intel_ring_hangcheck_action
2800 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2802 struct drm_device *dev = ring->dev;
2803 struct drm_i915_private *dev_priv = dev->dev_private;
2806 if (acthd != ring->hangcheck.acthd) {
2807 if (acthd > ring->hangcheck.max_acthd) {
2808 ring->hangcheck.max_acthd = acthd;
2809 return HANGCHECK_ACTIVE;
2812 return HANGCHECK_ACTIVE_LOOP;
2816 return HANGCHECK_HUNG;
2818 /* Is the chip hanging on a WAIT_FOR_EVENT?
2819 * If so we can simply poke the RB_WAIT bit
2820 * and break the hang. This should work on
2821 * all but the second generation chipsets.
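	 *
	 * The "kick" is simply writing the CTL value back with the wait
	 * bit still set; the wait bits in the ring CTL register are
	 * write-one-to-clear.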
2823 tmp = I915_READ_CTL(ring);
2824 if (tmp & RING_WAIT) {
2825 i915_handle_error(dev, false,
2826 "Kicking stuck wait on %s",
2828 I915_WRITE_CTL(ring, tmp);
2829 return HANGCHECK_KICK;
2832 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2833 switch (semaphore_passed(ring)) {
2835 return HANGCHECK_HUNG;
2837 i915_handle_error(dev, false,
2838 "Kicking stuck semaphore on %s",
2840 I915_WRITE_CTL(ring, tmp);
2841 return HANGCHECK_KICK;
2843 return HANGCHECK_WAIT;
2847 return HANGCHECK_HUNG;
2851 * This is called when the chip hasn't reported back with completed
2852  * batchbuffers in a long time. We keep track of seqno progress per ring, and
2853  * if there is no progress the hangcheck score for that ring is increased.
2854  * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2855  * we kick the ring. If we see no progress on three subsequent calls
2856  * we assume the chip is wedged and try to fix it by resetting the chip.
2858 static void i915_hangcheck_elapsed(struct work_struct *work)
2860 struct drm_i915_private *dev_priv =
2861 container_of(work, typeof(*dev_priv),
2862 gpu_error.hangcheck_work.work);
2863 struct drm_device *dev = dev_priv->dev;
2864 struct intel_engine_cs *ring;
2866 int busy_count = 0, rings_hung = 0;
2867 bool stuck[I915_NUM_RINGS] = { 0 };
2872 if (!i915.enable_hangcheck)
2875 for_each_ring(ring, dev_priv, i) {
2880 semaphore_clear_deadlocks(dev_priv);
2882 seqno = ring->get_seqno(ring, false);
2883 acthd = intel_ring_get_active_head(ring);
2885 if (ring->hangcheck.seqno == seqno) {
2886 if (ring_idle(ring)) {
2887 ring->hangcheck.action = HANGCHECK_IDLE;
2889 if (waitqueue_active(&ring->irq_queue)) {
2890 /* Issue a wake-up to catch stuck h/w. */
2891 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2892 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2893 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2896 DRM_INFO("Fake missed irq on %s\n",
2898 wake_up_all(&ring->irq_queue);
2900 /* Safeguard against driver failure */
2901 ring->hangcheck.score += BUSY;
2905 /* We always increment the hangcheck score
2906 * if the ring is busy and still processing
2907 * the same request, so that no single request
2908 * can run indefinitely (such as a chain of
2909 * batches). The only time we do not increment
2910 			 * the hangcheck score on this ring is when this
2911 * ring is in a legitimate wait for another
2912 * ring. In that case the waiting ring is a
2913 * victim and we want to be sure we catch the
2914 * right culprit. Then every time we do kick
2915 * the ring, add a small increment to the
2916 * score so that we can catch a batch that is
2917 * being repeatedly kicked and so responsible
2918 * for stalling the machine.
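			 *
			 * Rough scoring model implemented below, where BUSY,
			 * KICK and HUNG are small weights defined alongside
			 * the hangcheck code:
			 *
			 *	busy, no progress              score += BUSY
			 *	kicked a stuck wait/semaphore  score += KICK
			 *	hung hard                      score += HUNG
			 *	made progress                  score--
			 *
			 * A ring is finally declared hung once its score
			 * reaches HANGCHECK_SCORE_RING_HUNG.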
2920 ring->hangcheck.action = ring_stuck(ring,
2923 switch (ring->hangcheck.action) {
2924 case HANGCHECK_IDLE:
2925 case HANGCHECK_WAIT:
2926 case HANGCHECK_ACTIVE:
2928 case HANGCHECK_ACTIVE_LOOP:
2929 ring->hangcheck.score += BUSY;
2931 case HANGCHECK_KICK:
2932 ring->hangcheck.score += KICK;
2934 case HANGCHECK_HUNG:
2935 ring->hangcheck.score += HUNG;
2941 ring->hangcheck.action = HANGCHECK_ACTIVE;
2943 /* Gradually reduce the count so that we catch DoS
2944 * attempts across multiple batches.
2946 if (ring->hangcheck.score > 0)
2947 ring->hangcheck.score--;
2949 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
2952 ring->hangcheck.seqno = seqno;
2953 ring->hangcheck.acthd = acthd;
2957 for_each_ring(ring, dev_priv, i) {
2958 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
2959 DRM_INFO("%s on %s\n",
2960 stuck[i] ? "stuck" : "no progress",
2967 return i915_handle_error(dev, true, "Ring hung");
2970 	/* Reset the timer in case the chip hangs without another request being added */
2972 i915_queue_hangcheck(dev);
2975 void i915_queue_hangcheck(struct drm_device *dev)
2977 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
2979 if (!i915.enable_hangcheck)
2982 /* Don't continually defer the hangcheck so that it is always run at
2983 * least once after work has been scheduled on any ring. Otherwise,
2984 * we will ignore a hung ring if a second ring is kept busy.
2987 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
2988 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
2991 static void ibx_irq_reset(struct drm_device *dev)
2993 struct drm_i915_private *dev_priv = dev->dev_private;
2995 if (HAS_PCH_NOP(dev))
2998 GEN5_IRQ_RESET(SDE);
3000 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3001 I915_WRITE(SERR_INT, 0xffffffff);
3005 * SDEIER is also touched by the interrupt handler to work around missed PCH
3006 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3007 * instead we unconditionally enable all PCH interrupt sources here, but then
3008 * only unmask them as needed with SDEIMR.
3010 * This function needs to be called before interrupts are enabled.
3012 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3014 struct drm_i915_private *dev_priv = dev->dev_private;
3016 if (HAS_PCH_NOP(dev))
3019 WARN_ON(I915_READ(SDEIER) != 0);
3020 I915_WRITE(SDEIER, 0xffffffff);
3021 POSTING_READ(SDEIER);
3024 static void gen5_gt_irq_reset(struct drm_device *dev)
3026 struct drm_i915_private *dev_priv = dev->dev_private;
3029 if (INTEL_INFO(dev)->gen >= 6)
3030 GEN5_IRQ_RESET(GEN6_PM);
3035 static void ironlake_irq_reset(struct drm_device *dev)
3037 struct drm_i915_private *dev_priv = dev->dev_private;
3039 I915_WRITE(HWSTAM, 0xffffffff);
3043 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3045 gen5_gt_irq_reset(dev);
3050 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3054 I915_WRITE(PORT_HOTPLUG_EN, 0);
3055 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3057 for_each_pipe(dev_priv, pipe)
3058 I915_WRITE(PIPESTAT(pipe), 0xffff);
3060 GEN5_IRQ_RESET(VLV_);
3063 static void valleyview_irq_preinstall(struct drm_device *dev)
3065 struct drm_i915_private *dev_priv = dev->dev_private;
3068 I915_WRITE(VLV_IMR, 0);
3069 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3070 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3071 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3073 gen5_gt_irq_reset(dev);
3075 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3077 vlv_display_irq_reset(dev_priv);
3080 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3082 GEN8_IRQ_RESET_NDX(GT, 0);
3083 GEN8_IRQ_RESET_NDX(GT, 1);
3084 GEN8_IRQ_RESET_NDX(GT, 2);
3085 GEN8_IRQ_RESET_NDX(GT, 3);
3088 static void gen8_irq_reset(struct drm_device *dev)
3090 struct drm_i915_private *dev_priv = dev->dev_private;
3093 I915_WRITE(GEN8_MASTER_IRQ, 0);
3094 POSTING_READ(GEN8_MASTER_IRQ);
3096 gen8_gt_irq_reset(dev_priv);
3098 for_each_pipe(dev_priv, pipe)
3099 if (intel_display_power_is_enabled(dev_priv,
3100 POWER_DOMAIN_PIPE(pipe)))
3101 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3103 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3104 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3105 GEN5_IRQ_RESET(GEN8_PCU_);
3110 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3111 unsigned int pipe_mask)
3113 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3115 spin_lock_irq(&dev_priv->irq_lock);
3116 if (pipe_mask & 1 << PIPE_A)
3117 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A,
3118 dev_priv->de_irq_mask[PIPE_A],
3119 ~dev_priv->de_irq_mask[PIPE_A] | extra_ier);
3120 if (pipe_mask & 1 << PIPE_B)
3121 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B,
3122 dev_priv->de_irq_mask[PIPE_B],
3123 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3124 if (pipe_mask & 1 << PIPE_C)
3125 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C,
3126 dev_priv->de_irq_mask[PIPE_C],
3127 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3128 spin_unlock_irq(&dev_priv->irq_lock);
3131 static void cherryview_irq_preinstall(struct drm_device *dev)
3133 struct drm_i915_private *dev_priv = dev->dev_private;
3135 I915_WRITE(GEN8_MASTER_IRQ, 0);
3136 POSTING_READ(GEN8_MASTER_IRQ);
3138 gen8_gt_irq_reset(dev_priv);
3140 GEN5_IRQ_RESET(GEN8_PCU_);
3142 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3144 vlv_display_irq_reset(dev_priv);
3147 static void ibx_hpd_irq_setup(struct drm_device *dev)
3149 struct drm_i915_private *dev_priv = dev->dev_private;
3150 struct intel_encoder *intel_encoder;
3151 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3153 if (HAS_PCH_IBX(dev)) {
3154 hotplug_irqs = SDE_HOTPLUG_MASK;
3155 for_each_intel_encoder(dev, intel_encoder)
3156 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3157 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3159 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3160 for_each_intel_encoder(dev, intel_encoder)
3161 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3162 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3165 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3168 * Enable digital hotplug on the PCH, and configure the DP short pulse
3169 * duration to 2ms (which is the minimum in the Display Port spec)
3171 * This register is the same on all known PCH chips.
3173 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3174 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3175 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3176 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3177 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3178 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3181 static void ibx_irq_postinstall(struct drm_device *dev)
3183 struct drm_i915_private *dev_priv = dev->dev_private;
3186 if (HAS_PCH_NOP(dev))
3189 if (HAS_PCH_IBX(dev))
3190 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3192 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3194 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3195 I915_WRITE(SDEIMR, ~mask);
3198 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3200 struct drm_i915_private *dev_priv = dev->dev_private;
3201 u32 pm_irqs, gt_irqs;
3203 pm_irqs = gt_irqs = 0;
3205 dev_priv->gt_irq_mask = ~0;
3206 if (HAS_L3_DPF(dev)) {
3207 /* L3 parity interrupt is always unmasked. */
3208 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3209 gt_irqs |= GT_PARITY_ERROR(dev);
3212 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3214 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3215 ILK_BSD_USER_INTERRUPT;
3217 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3220 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3222 if (INTEL_INFO(dev)->gen >= 6) {
3224 * RPS interrupts will get enabled/disabled on demand when RPS
3225 * itself is enabled/disabled.
3228 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3230 dev_priv->pm_irq_mask = 0xffffffff;
3231 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3235 static int ironlake_irq_postinstall(struct drm_device *dev)
3237 struct drm_i915_private *dev_priv = dev->dev_private;
3238 u32 display_mask, extra_mask;
3240 if (INTEL_INFO(dev)->gen >= 7) {
3241 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3242 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3243 DE_PLANEB_FLIP_DONE_IVB |
3244 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3245 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3246 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3248 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3249 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3251 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3253 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3254 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3257 dev_priv->irq_mask = ~display_mask;
3259 I915_WRITE(HWSTAM, 0xeffe);
3261 ibx_irq_pre_postinstall(dev);
3263 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3265 gen5_gt_irq_postinstall(dev);
3267 ibx_irq_postinstall(dev);
3269 if (IS_IRONLAKE_M(dev)) {
3270 /* Enable PCU event interrupts
3272 * spinlocking not required here for correctness since interrupt
3273 * setup is guaranteed to run in single-threaded context. But we
3274 * need it to make the assert_spin_locked happy. */
3275 spin_lock_irq(&dev_priv->irq_lock);
3276 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3277 spin_unlock_irq(&dev_priv->irq_lock);
3283 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3289 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3290 PIPE_FIFO_UNDERRUN_STATUS;
3292 for_each_pipe(dev_priv, pipe)
3293 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3294 POSTING_READ(PIPESTAT(PIPE_A));
3296 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3297 PIPE_CRC_DONE_INTERRUPT_STATUS;
3299 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3300 for_each_pipe(dev_priv, pipe)
3301 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3303 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3304 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3305 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3306 if (IS_CHERRYVIEW(dev_priv))
3307 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3308 dev_priv->irq_mask &= ~iir_mask;
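	/* IIR can queue up to two events, hence the double clear below */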
3310 I915_WRITE(VLV_IIR, iir_mask);
3311 I915_WRITE(VLV_IIR, iir_mask);
3312 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3313 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3314 POSTING_READ(VLV_IMR);
3317 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3323 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3324 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3325 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3326 if (IS_CHERRYVIEW(dev_priv))
3327 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3329 dev_priv->irq_mask |= iir_mask;
3330 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3331 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3332 I915_WRITE(VLV_IIR, iir_mask);
3333 I915_WRITE(VLV_IIR, iir_mask);
3334 POSTING_READ(VLV_IIR);
3336 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3337 PIPE_CRC_DONE_INTERRUPT_STATUS;
3339 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3340 for_each_pipe(dev_priv, pipe)
3341 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3343 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3344 PIPE_FIFO_UNDERRUN_STATUS;
3346 for_each_pipe(dev_priv, pipe)
3347 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3348 POSTING_READ(PIPESTAT(PIPE_A));
3351 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3353 assert_spin_locked(&dev_priv->irq_lock);
3355 if (dev_priv->display_irqs_enabled)
3358 dev_priv->display_irqs_enabled = true;
3360 if (intel_irqs_enabled(dev_priv))
3361 valleyview_display_irqs_install(dev_priv);
3364 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3366 assert_spin_locked(&dev_priv->irq_lock);
3368 if (!dev_priv->display_irqs_enabled)
3371 dev_priv->display_irqs_enabled = false;
3373 if (intel_irqs_enabled(dev_priv))
3374 valleyview_display_irqs_uninstall(dev_priv);
3377 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3379 dev_priv->irq_mask = ~0;
3381 I915_WRITE(PORT_HOTPLUG_EN, 0);
3382 POSTING_READ(PORT_HOTPLUG_EN);
3384 I915_WRITE(VLV_IIR, 0xffffffff);
3385 I915_WRITE(VLV_IIR, 0xffffffff);
3386 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3387 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3388 POSTING_READ(VLV_IMR);
3390 /* Interrupt setup is already guaranteed to be single-threaded, this is
3391 * just to make the assert_spin_locked check happy. */
3392 spin_lock_irq(&dev_priv->irq_lock);
3393 if (dev_priv->display_irqs_enabled)
3394 valleyview_display_irqs_install(dev_priv);
3395 spin_unlock_irq(&dev_priv->irq_lock);
3398 static int valleyview_irq_postinstall(struct drm_device *dev)
3400 struct drm_i915_private *dev_priv = dev->dev_private;
3402 vlv_display_irq_postinstall(dev_priv);
3404 gen5_gt_irq_postinstall(dev);
3406 /* ack & enable invalid PTE error interrupts */
3407 #if 0 /* FIXME: add support to irq handler for checking these bits */
3408 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3409 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3412 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3417 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3419 /* These are interrupts we'll toggle with the ring mask register */
3420 uint32_t gt_interrupts[] = {
3421 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3422 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3423 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3424 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3425 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3426 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3427 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3428 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3429 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3431 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3432 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3435 dev_priv->pm_irq_mask = 0xffffffff;
3436 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3437 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3439 * RPS interrupts will get enabled/disabled on demand when RPS itself
3440 * is enabled/disabled.
3442 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3443 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3446 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3448 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3449 uint32_t de_pipe_enables;
3451 u32 aux_en = GEN8_AUX_CHANNEL_A;
3453 if (IS_GEN9(dev_priv)) {
3454 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3455 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3456 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3459 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3460 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3462 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3463 GEN8_PIPE_FIFO_UNDERRUN;
3465 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3466 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3467 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3469 for_each_pipe(dev_priv, pipe)
3470 if (intel_display_power_is_enabled(dev_priv,
3471 POWER_DOMAIN_PIPE(pipe)))
3472 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3473 dev_priv->de_irq_mask[pipe],
3476 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3479 static int gen8_irq_postinstall(struct drm_device *dev)
3481 struct drm_i915_private *dev_priv = dev->dev_private;
3483 ibx_irq_pre_postinstall(dev);
3485 gen8_gt_irq_postinstall(dev_priv);
3486 gen8_de_irq_postinstall(dev_priv);
3488 ibx_irq_postinstall(dev);
3490 	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3491 POSTING_READ(GEN8_MASTER_IRQ);
3496 static int cherryview_irq_postinstall(struct drm_device *dev)
3498 struct drm_i915_private *dev_priv = dev->dev_private;
3500 vlv_display_irq_postinstall(dev_priv);
3502 gen8_gt_irq_postinstall(dev_priv);
3504 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3505 POSTING_READ(GEN8_MASTER_IRQ);
3510 static void gen8_irq_uninstall(struct drm_device *dev)
3512 struct drm_i915_private *dev_priv = dev->dev_private;
3517 gen8_irq_reset(dev);
3520 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3522 /* Interrupt setup is already guaranteed to be single-threaded, this is
3523 * just to make the assert_spin_locked check happy. */
3524 spin_lock_irq(&dev_priv->irq_lock);
3525 if (dev_priv->display_irqs_enabled)
3526 valleyview_display_irqs_uninstall(dev_priv);
3527 spin_unlock_irq(&dev_priv->irq_lock);
3529 vlv_display_irq_reset(dev_priv);
3531 dev_priv->irq_mask = ~0;
3534 static void valleyview_irq_uninstall(struct drm_device *dev)
3536 struct drm_i915_private *dev_priv = dev->dev_private;
3541 I915_WRITE(VLV_MASTER_IER, 0);
3543 gen5_gt_irq_reset(dev);
3545 I915_WRITE(HWSTAM, 0xffffffff);
3547 vlv_display_irq_uninstall(dev_priv);
3550 static void cherryview_irq_uninstall(struct drm_device *dev)
3552 struct drm_i915_private *dev_priv = dev->dev_private;
3557 I915_WRITE(GEN8_MASTER_IRQ, 0);
3558 POSTING_READ(GEN8_MASTER_IRQ);
3560 gen8_gt_irq_reset(dev_priv);
3562 GEN5_IRQ_RESET(GEN8_PCU_);
3564 vlv_display_irq_uninstall(dev_priv);
3567 static void ironlake_irq_uninstall(struct drm_device *dev)
3569 struct drm_i915_private *dev_priv = dev->dev_private;
3574 ironlake_irq_reset(dev);
3577 static void i8xx_irq_preinstall(struct drm_device * dev)
3579 struct drm_i915_private *dev_priv = dev->dev_private;
3582 for_each_pipe(dev_priv, pipe)
3583 I915_WRITE(PIPESTAT(pipe), 0);
3584 I915_WRITE16(IMR, 0xffff);
3585 I915_WRITE16(IER, 0x0);
3586 POSTING_READ16(IER);
3589 static int i8xx_irq_postinstall(struct drm_device *dev)
3591 struct drm_i915_private *dev_priv = dev->dev_private;
3594 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3596 /* Unmask the interrupts that we always want on. */
3597 dev_priv->irq_mask =
3598 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3599 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3600 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3601 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3602 I915_WRITE16(IMR, dev_priv->irq_mask);
3605 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3606 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3607 I915_USER_INTERRUPT);
3608 POSTING_READ16(IER);
3610 /* Interrupt setup is already guaranteed to be single-threaded, this is
3611 * just to make the assert_spin_locked check happy. */
3612 spin_lock_irq(&dev_priv->irq_lock);
3613 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3614 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3615 spin_unlock_irq(&dev_priv->irq_lock);
3621 * Returns true when a page flip has completed.
3623 static bool i8xx_handle_vblank(struct drm_device *dev,
3624 int plane, int pipe, u32 iir)
3626 struct drm_i915_private *dev_priv = dev->dev_private;
3627 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3629 if (!intel_pipe_handle_vblank(dev, pipe))
3632 if ((iir & flip_pending) == 0)
3633 goto check_page_flip;
3635 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3636 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3637 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3638 * the flip is completed (no longer pending). Since this doesn't raise
3639 * an interrupt per se, we watch for the change at vblank.
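	 *
	 * Timeline sketch of what the check below observes (illustrative):
	 *
	 *	MI_DISPLAY_FLIP queued  ISR.PendingFlip = 1, IIR.PendingFlip = 1
	 *	flip completes          ISR.PendingFlip = 0, no new interrupt
	 *	this vblank irq         IIR bit still set but ISR bit clear,
	 *	                        so the flip is done and finished here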
3641 if (I915_READ16(ISR) & flip_pending)
3642 goto check_page_flip;
3644 intel_prepare_page_flip(dev, plane);
3645 intel_finish_page_flip(dev, pipe);
3649 intel_check_page_flip(dev, pipe);
3653 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3655 struct drm_device *dev = arg;
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3661 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3662 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3664 if (!intel_irqs_enabled(dev_priv))
3667 iir = I915_READ16(IIR);
3671 while (iir & ~flip_mask) {
3672 /* Can't rely on pipestat interrupt bit in iir as it might
3673 * have been cleared after the pipestat interrupt was received.
3674 * It doesn't set the bit in iir again, but it still produces
3675 * interrupts (for non-MSI).
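		 *
		 * In other words: a PIPESTAT bit can latch, raise its IIR
		 * event bit once and stay asserted, so the PIPESTAT
		 * registers are snapshotted and cleared under the lock
		 * before IIR itself is acked.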
3677 spin_lock(&dev_priv->irq_lock);
3678 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3679 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3681 for_each_pipe(dev_priv, pipe) {
3682 int reg = PIPESTAT(pipe);
3683 pipe_stats[pipe] = I915_READ(reg);
3686 * Clear the PIPE*STAT regs before the IIR
3688 if (pipe_stats[pipe] & 0x8000ffff)
3689 I915_WRITE(reg, pipe_stats[pipe]);
3691 spin_unlock(&dev_priv->irq_lock);
3693 I915_WRITE16(IIR, iir & ~flip_mask);
3694 new_iir = I915_READ16(IIR); /* Flush posted writes */
3696 if (iir & I915_USER_INTERRUPT)
3697 notify_ring(dev, &dev_priv->ring[RCS]);
3699 for_each_pipe(dev_priv, pipe) {
3704 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3705 i8xx_handle_vblank(dev, plane, pipe, iir))
3706 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3708 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3709 i9xx_pipe_crc_irq_handler(dev, pipe);
3711 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3712 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3722 static void i8xx_irq_uninstall(struct drm_device * dev)
3724 struct drm_i915_private *dev_priv = dev->dev_private;
3727 for_each_pipe(dev_priv, pipe) {
3728 /* Clear enable bits; then clear status bits */
3729 I915_WRITE(PIPESTAT(pipe), 0);
3730 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3732 I915_WRITE16(IMR, 0xffff);
3733 I915_WRITE16(IER, 0x0);
3734 I915_WRITE16(IIR, I915_READ16(IIR));
3737 static void i915_irq_preinstall(struct drm_device * dev)
3739 struct drm_i915_private *dev_priv = dev->dev_private;
3742 if (I915_HAS_HOTPLUG(dev)) {
3743 I915_WRITE(PORT_HOTPLUG_EN, 0);
3744 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3747 I915_WRITE16(HWSTAM, 0xeffe);
3748 for_each_pipe(dev_priv, pipe)
3749 I915_WRITE(PIPESTAT(pipe), 0);
3750 I915_WRITE(IMR, 0xffffffff);
3751 I915_WRITE(IER, 0x0);
3755 static int i915_irq_postinstall(struct drm_device *dev)
3757 struct drm_i915_private *dev_priv = dev->dev_private;
3760 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3762 /* Unmask the interrupts that we always want on. */
3763 dev_priv->irq_mask =
3764 ~(I915_ASLE_INTERRUPT |
3765 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3766 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3767 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3768 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3771 I915_ASLE_INTERRUPT |
3772 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3773 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3774 I915_USER_INTERRUPT;
3776 if (I915_HAS_HOTPLUG(dev)) {
3777 I915_WRITE(PORT_HOTPLUG_EN, 0);
3778 POSTING_READ(PORT_HOTPLUG_EN);
3780 /* Enable in IER... */
3781 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3782 /* and unmask in IMR */
3783 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3786 I915_WRITE(IMR, dev_priv->irq_mask);
3787 I915_WRITE(IER, enable_mask);
3790 i915_enable_asle_pipestat(dev);
3792 /* Interrupt setup is already guaranteed to be single-threaded, this is
3793 * just to make the assert_spin_locked check happy. */
3794 spin_lock_irq(&dev_priv->irq_lock);
3795 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3796 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3797 spin_unlock_irq(&dev_priv->irq_lock);
3803 * Returns true when a page flip has completed.
3805 static bool i915_handle_vblank(struct drm_device *dev,
3806 int plane, int pipe, u32 iir)
3808 struct drm_i915_private *dev_priv = dev->dev_private;
3809 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3811 if (!intel_pipe_handle_vblank(dev, pipe))
3814 if ((iir & flip_pending) == 0)
3815 goto check_page_flip;
3817 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3818 	 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3819 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3820 * the flip is completed (no longer pending). Since this doesn't raise
3821 * an interrupt per se, we watch for the change at vblank.
3823 if (I915_READ(ISR) & flip_pending)
3824 goto check_page_flip;
3826 intel_prepare_page_flip(dev, plane);
3827 intel_finish_page_flip(dev, pipe);
3831 intel_check_page_flip(dev, pipe);
3835 static irqreturn_t i915_irq_handler(int irq, void *arg)
3837 struct drm_device *dev = arg;
3838 struct drm_i915_private *dev_priv = dev->dev_private;
3839 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3841 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3842 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3843 int pipe, ret = IRQ_NONE;
3845 if (!intel_irqs_enabled(dev_priv))
3848 iir = I915_READ(IIR);
3850 bool irq_received = (iir & ~flip_mask) != 0;
3851 bool blc_event = false;
3853 /* Can't rely on pipestat interrupt bit in iir as it might
3854 * have been cleared after the pipestat interrupt was received.
3855 * It doesn't set the bit in iir again, but it still produces
3856 * interrupts (for non-MSI).
3858 spin_lock(&dev_priv->irq_lock);
3859 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3860 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3862 for_each_pipe(dev_priv, pipe) {
3863 int reg = PIPESTAT(pipe);
3864 pipe_stats[pipe] = I915_READ(reg);
3866 /* Clear the PIPE*STAT regs before the IIR */
3867 if (pipe_stats[pipe] & 0x8000ffff) {
3868 I915_WRITE(reg, pipe_stats[pipe]);
3869 irq_received = true;
3872 spin_unlock(&dev_priv->irq_lock);
3877 /* Consume port. Then clear IIR or we'll miss events */
3878 if (I915_HAS_HOTPLUG(dev) &&
3879 iir & I915_DISPLAY_PORT_INTERRUPT)
3880 i9xx_hpd_irq_handler(dev);
3882 I915_WRITE(IIR, iir & ~flip_mask);
3883 new_iir = I915_READ(IIR); /* Flush posted writes */
3885 if (iir & I915_USER_INTERRUPT)
3886 notify_ring(dev, &dev_priv->ring[RCS]);
3888 for_each_pipe(dev_priv, pipe) {
3893 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3894 i915_handle_vblank(dev, plane, pipe, iir))
3895 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3897 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3900 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3901 i9xx_pipe_crc_irq_handler(dev, pipe);
3903 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3904 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3908 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3909 intel_opregion_asle_intr(dev);
3911 /* With MSI, interrupts are only generated when iir
3912 * transitions from zero to nonzero. If another bit got
3913 * set while we were handling the existing iir bits, then
3914 * we would never get another interrupt.
3916 * This is fine on non-MSI as well, as if we hit this path
3917 		 * we avoid exiting the interrupt handler only to generate another one.
3920 * Note that for MSI this could cause a stray interrupt report
3921 * if an interrupt landed in the time between writing IIR and
3922 * the posting read. This should be rare enough to never
3923 		 * trigger the 99% of 100,000 interrupts test for disabling MSI.
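		 *
		 * The resulting loop shape (illustrative only):
		 *
		 *	iir = I915_READ(IIR);
		 *	do {
		 *		I915_WRITE(IIR, iir & ~flip_mask);
		 *		new_iir = I915_READ(IIR);  -- flush posted write
		 *		... process iir ...
		 *		iir = new_iir;
		 *	} while (iir & ~flip_mask);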
3928 } while (iir & ~flip_mask);
3933 static void i915_irq_uninstall(struct drm_device * dev)
3935 struct drm_i915_private *dev_priv = dev->dev_private;
3938 if (I915_HAS_HOTPLUG(dev)) {
3939 I915_WRITE(PORT_HOTPLUG_EN, 0);
3940 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3943 I915_WRITE16(HWSTAM, 0xffff);
3944 for_each_pipe(dev_priv, pipe) {
3945 /* Clear enable bits; then clear status bits */
3946 I915_WRITE(PIPESTAT(pipe), 0);
3947 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3949 I915_WRITE(IMR, 0xffffffff);
3950 I915_WRITE(IER, 0x0);
3952 I915_WRITE(IIR, I915_READ(IIR));
3955 static void i965_irq_preinstall(struct drm_device * dev)
3957 struct drm_i915_private *dev_priv = dev->dev_private;
3960 I915_WRITE(PORT_HOTPLUG_EN, 0);
3961 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3963 I915_WRITE(HWSTAM, 0xeffe);
3964 for_each_pipe(dev_priv, pipe)
3965 I915_WRITE(PIPESTAT(pipe), 0);
3966 I915_WRITE(IMR, 0xffffffff);
3967 I915_WRITE(IER, 0x0);
3971 static int i965_irq_postinstall(struct drm_device *dev)
3973 struct drm_i915_private *dev_priv = dev->dev_private;
3977 /* Unmask the interrupts that we always want on. */
3978 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
3979 I915_DISPLAY_PORT_INTERRUPT |
3980 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3981 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3982 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3983 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3984 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3986 enable_mask = ~dev_priv->irq_mask;
3987 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3988 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3989 enable_mask |= I915_USER_INTERRUPT;
3992 enable_mask |= I915_BSD_USER_INTERRUPT;
3994 /* Interrupt setup is already guaranteed to be single-threaded, this is
3995 * just to make the assert_spin_locked check happy. */
3996 spin_lock_irq(&dev_priv->irq_lock);
3997 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3998 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3999 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4000 spin_unlock_irq(&dev_priv->irq_lock);
4003 * Enable some error detection, note the instruction error mask
4004 * bit is reserved, so we leave it masked.
4007 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4008 GM45_ERROR_MEM_PRIV |
4009 GM45_ERROR_CP_PRIV |
4010 I915_ERROR_MEMORY_REFRESH);
4012 error_mask = ~(I915_ERROR_PAGE_TABLE |
4013 I915_ERROR_MEMORY_REFRESH);
4015 I915_WRITE(EMR, error_mask);
4017 I915_WRITE(IMR, dev_priv->irq_mask);
4018 I915_WRITE(IER, enable_mask);
4021 I915_WRITE(PORT_HOTPLUG_EN, 0);
4022 POSTING_READ(PORT_HOTPLUG_EN);
4024 i915_enable_asle_pipestat(dev);
4029 static void i915_hpd_irq_setup(struct drm_device *dev)
4031 struct drm_i915_private *dev_priv = dev->dev_private;
4032 struct intel_encoder *intel_encoder;
4035 assert_spin_locked(&dev_priv->irq_lock);
4037 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4038 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4039 /* Note HDMI and DP share hotplug bits */
4040 /* enable bits are the same for all generations */
4041 for_each_intel_encoder(dev, intel_encoder)
4042 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4043 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4044 /* Programming the CRT detection parameters tends
4045 to generate a spurious hotplug event about three
4046 seconds later. So just do it once.
4049 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4050 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4051 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4053 /* Ignore TV since it's buggy */
4054 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4057 static irqreturn_t i965_irq_handler(int irq, void *arg)
4059 struct drm_device *dev = arg;
4060 struct drm_i915_private *dev_priv = dev->dev_private;
4062 u32 pipe_stats[I915_MAX_PIPES];
4063 int ret = IRQ_NONE, pipe;
4065 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4066 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4068 if (!intel_irqs_enabled(dev_priv))
4071 iir = I915_READ(IIR);
4074 bool irq_received = (iir & ~flip_mask) != 0;
4075 bool blc_event = false;
4077 /* Can't rely on pipestat interrupt bit in iir as it might
4078 * have been cleared after the pipestat interrupt was received.
4079 * It doesn't set the bit in iir again, but it still produces
4080 * interrupts (for non-MSI).
4082 spin_lock(&dev_priv->irq_lock);
4083 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4084 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4086 for_each_pipe(dev_priv, pipe) {
4087 int reg = PIPESTAT(pipe);
4088 pipe_stats[pipe] = I915_READ(reg);
4091 * Clear the PIPE*STAT regs before the IIR
4093 if (pipe_stats[pipe] & 0x8000ffff) {
4094 I915_WRITE(reg, pipe_stats[pipe]);
4095 irq_received = true;
4098 spin_unlock(&dev_priv->irq_lock);
4105 /* Consume port. Then clear IIR or we'll miss events */
4106 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4107 i9xx_hpd_irq_handler(dev);
4109 I915_WRITE(IIR, iir & ~flip_mask);
4110 new_iir = I915_READ(IIR); /* Flush posted writes */
4112 if (iir & I915_USER_INTERRUPT)
4113 notify_ring(dev, &dev_priv->ring[RCS]);
4114 if (iir & I915_BSD_USER_INTERRUPT)
4115 notify_ring(dev, &dev_priv->ring[VCS]);
4117 for_each_pipe(dev_priv, pipe) {
4118 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4119 i915_handle_vblank(dev, pipe, pipe, iir))
4120 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4122 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4125 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4126 i9xx_pipe_crc_irq_handler(dev, pipe);
4128 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4129 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4132 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4133 intel_opregion_asle_intr(dev);
4135 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4136 gmbus_irq_handler(dev);
4138 /* With MSI, interrupts are only generated when iir
4139 * transitions from zero to nonzero. If another bit got
4140 * set while we were handling the existing iir bits, then
4141 * we would never get another interrupt.
4143 * This is fine on non-MSI as well, as if we hit this path
4144 		 * we avoid exiting the interrupt handler only to generate another one.
4147 * Note that for MSI this could cause a stray interrupt report
4148 * if an interrupt landed in the time between writing IIR and
4149 * the posting read. This should be rare enough to never
4150 		 * trigger the 99% of 100,000 interrupts test for disabling MSI.
4159 static void i965_irq_uninstall(struct drm_device * dev)
4161 struct drm_i915_private *dev_priv = dev->dev_private;
4167 I915_WRITE(PORT_HOTPLUG_EN, 0);
4168 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4170 I915_WRITE(HWSTAM, 0xffffffff);
4171 for_each_pipe(dev_priv, pipe)
4172 I915_WRITE(PIPESTAT(pipe), 0);
4173 I915_WRITE(IMR, 0xffffffff);
4174 I915_WRITE(IER, 0x0);
4176 for_each_pipe(dev_priv, pipe)
4177 I915_WRITE(PIPESTAT(pipe),
4178 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4179 I915_WRITE(IIR, I915_READ(IIR));
static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
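
/*
 * Editor's sketch (not in the original source): how the hotplug-storm
 * detection elsewhere in this file is expected to hand off to the work item
 * above -- mark the pin disabled under irq_lock, then schedule the delayed
 * re-enable. The helper name and the literal two-minute delay here are
 * illustrative assumptions.
 */
static void __maybe_unused i915_hpd_storm_sketch(struct drm_i915_private *dev_priv,
						 enum hpd_pin pin)
{
	assert_spin_locked(&dev_priv->irq_lock);

	/* Stop servicing this pin; connectors fall back to polling. */
	dev_priv->hpd_stats[pin].hpd_mark = HPD_DISABLED;

	/* Re-enable once the storm has (hopefully) passed. */
	mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
			 msecs_to_jiffies(2 * 60 * 1000));
}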
/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
			  i915_hangcheck_elapsed);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;

	dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
	dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
		}
		if (I915_HAS_HOTPLUG(dev_priv))
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug and
 * poll requests can run concurrently with other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);
		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked checks happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
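
/*
 * Editor's sketch (not in the original source): the two-stage bring-up the
 * kerneldoc above describes, as it would appear in the driver load path.
 * The wrapper function is hypothetical; the call ordering is what the
 * documentation of intel_irq_init(), intel_irq_install() and
 * intel_hpd_init() requires.
 */
static int __maybe_unused i915_irq_load_sketch(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Stage 0: vtables, work items and timers only; no hardware yet. */
	intel_irq_init(dev_priv);

	/* Stage 1: request the irq and run the postinstall hooks. */
	ret = intel_irq_install(dev_priv);
	if (ret)
		return ret;

	/* Stage 2: with interrupts working, it is safe to enable hotplug. */
	intel_hpd_init(dev_priv);

	return 0;
}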
/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
	synchronize_irq(dev_priv->dev->irq);
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
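
/*
 * Editor's sketch (not in the original source): how the two helpers above
 * are expected to bracket a runtime-suspend/resume cycle. The wrapper
 * function is a hypothetical stand-in for the real suspend/resume code.
 */
static void __maybe_unused i915_runtime_irq_cycle_sketch(struct drm_i915_private *dev_priv)
{
	/* Suspending: tear down irq handling and drain in-flight handlers. */
	intel_runtime_pm_disable_interrupts(dev_priv);

	/* ... hardware is powered down and later powered back up ... */

	/* Resuming: mark irqs enabled, then reset and re-program the hw. */
	intel_runtime_pm_enable_interrupts(dev_priv);
}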