/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

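/*
 * Background for the helpers below: each interrupt source group exposes
 * three registers. IER enables a source, IMR masks its delivery to the
 * CPU, and IIR latches the pending ("identity") bits, which the handler
 * must write back to clear. Most of the helpers in this file just shuffle
 * bits between these three registers.
 */
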
static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

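/*
 * Usage sketch: GEN5_IRQ_RESET(GT) token-pastes the type into the register
 * names and expands to masked/disabled writes plus posting reads of GTIMR,
 * GTIER and GTIIR. IIR is written twice on purpose, since (per the comment
 * above) it can queue a second event behind the first.
 */
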
/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

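/*
 * Worked example of the IMR math above, with made-up masks: take
 * gt_irq_mask = 0b1100, interrupt_mask = 0b0110, enabled_irq_mask = 0b0010.
 * The &= ~interrupt_mask step yields 0b1000 (forget the updated bits), and
 * the |= (~enabled_irq_mask & interrupt_mask) step yields 0b1100: bit 1 is
 * now unmasked (enabled), bit 2 stays masked (disabled).
 */
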
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

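/*
 * PIPESTAT layout reminder: the low 16 bits of the register hold the
 * latched status bits and the high 16 bits hold the matching enable bits,
 * which is why enable_mask is normally just status_mask << 16. VLV is the
 * exception and gets its enable mask fixed up above.
 */
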
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

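/*
 * Worked example of the fixup above, using a made-up mode: with
 * htotal = 100, hsync_start = 90 and vblank_start = 50, the start of
 * vblank in pixels is 50 * 100 - (100 - 90) = 4990. A pixel counter
 * reading of, say, 5010 is past the point where the frame counter will
 * increment, so (pixel >= vbl_start) adds one to the cooked vblank
 * count; a reading of 4900 does not.
 */
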
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i))  {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true fallback to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into common unit of milli sec */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}

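/*
 * Example of the residency calculation above, with made-up numbers: if
 * 10 ms of wall clock elapsed while the render well spent 7 ms and the
 * media well 4 ms in C0, the reported busy-ness is
 * max(7, 4) * 100 / 10 = 70%.
 */
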
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

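/*
 * Note on the ramping above: 'adj' doubles on every consecutive up (or
 * down) event, giving an exponential ramp towards the frequency limits,
 * while clamp_t() keeps the result inside the sysfs-visible softlimit
 * window. last_adj records the delta that was actually applied, so the
 * next interrupt continues the ramp from there.
 */
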
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_execlists_handle_ctx_events(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	}
}

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

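/*
 * The shift helpers above locate a port's hotplug detect bits within the
 * hotplug register so the handler can test PORTB_HOTPLUG_LONG_DETECT
 * against them. A long pulse signals plug/unplug and needs full
 * re-detection; a short pulse is how DisplayPort sinks signal events such
 * as link status changes.
 */
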
static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);
			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			i915_handle_error(dev_priv->dev, false,
					  "VEBOX CS error interrupt 0x%08x",
					  pm_iir);
		}
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	if (!drm_handle_vblank(dev, pipe))
		return false;

	return true;
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip(dev, pipe);
		}

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev);
}

static void i9xx_hpd_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status) {
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
		/*
		 * Make sure hotplug status is cleared before we clear IIR, or else we
		 * may miss hotplug events.
		 */
		POSTING_READ(PORT_HOTPLUG_STAT);

		if (IS_G4X(dev)) {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
		} else {
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
		}

		if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
		    hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
			dp_aux_irq_handler(dev);
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;

	while (true) {
		/* Find, clear, then process each source of interrupt */

		gt_iir = I915_READ(GTIIR);
		if (gt_iir)
			I915_WRITE(GTIIR, gt_iir);

		pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir)
			I915_WRITE(GEN6_PMIIR, pm_iir);

		iir = I915_READ(VLV_IIR);
		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		if (gt_iir)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);
		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);
	}

out:
	return ret;
}

static irqreturn_t cherryview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl, iir;
	irqreturn_t ret = IRQ_NONE;

	for (;;) {
		master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
		iir = I915_READ(VLV_IIR);

		if (master_ctl == 0 && iir == 0)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(GEN8_MASTER_IRQ, 0);

		/* Find, clear, then process each source of interrupt */

		if (iir) {
			/* Consume port before clearing IIR or we'll miss events */
			if (iir & I915_DISPLAY_PORT_INTERRUPT)
				i9xx_hpd_irq_handler(dev);
			I915_WRITE(VLV_IIR, iir);
		}

		gen8_gt_irq_handler(dev, dev_priv, master_ctl);

		/* Call regardless, as some status bits might not be
		 * signalled in iir */
		valleyview_pipestat_irq_handler(dev, iir);

		I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
		POSTING_READ(GEN8_MASTER_IRQ);
	}

	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);
	enum pipe pipe;

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
			if (IS_IVYBRIDGE(dev))
				ivb_pipe_crc_irq_handler(dev, pipe);
			else
				hsw_pipe_crc_irq_handler(dev, pipe);
		}
	}

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
	u32 dig_hotplug_reg;

	dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
	I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);

	intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(dev_priv, pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & DE_PIPE_VBLANK(pipe) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);

		if (de_iir & DE_PIPE_CRC_DONE(pipe))
			i9xx_pipe_crc_irq_handler(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;

	if (de_iir & DE_ERR_INT_IVB)
		ivb_err_int_handler(dev);

	if (de_iir & DE_AUX_CHANNEL_A_IVB)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE_IVB)
		intel_opregion_asle_intr(dev);

	for_each_pipe(dev_priv, pipe) {
		if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
		    intel_pipe_handle_vblank(dev, pipe))
			intel_check_page_flip(dev, pipe);

		/* plane/pipes map 1:1 on ilk+ */
		if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
			intel_prepare_page_flip(dev, pipe);
			intel_finish_page_flip_plane(dev, pipe);
		}
	}

	/* check event from PCH */
	if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
		u32 pch_iir = I915_READ(SDEIIR);

		cpt_irq_handler(dev, pch_iir);

		/* clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}
}
/*
 * To handle irqs with the minimum potential races with fresh interrupts, we:
 * 1 - Disable Master Interrupt Control.
 * 2 - Find the source(s) of the interrupt.
 * 3 - Clear the Interrupt Identity bits (IIR).
 * 4 - Process the interrupt(s) that had bits set in the IIRs.
 * 5 - Re-enable Master Interrupt Control.
 */
static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 de_iir, gt_iir, de_ier, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	intel_uncore_check_errors(dev);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* Find, clear, then process each source of interrupt */

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 6)
			snb_gt_irq_handler(dev, dev_priv, gt_iir);
		else
			ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
		if (INTEL_INFO(dev)->gen >= 7)
			ivb_display_irq_handler(dev, de_iir);
		else
			ilk_display_irq_handler(dev, de_iir);
	}

	if (INTEL_INFO(dev)->gen >= 6) {
		u32 pm_iir = I915_READ(GEN6_PMIIR);
		if (pm_iir) {
			I915_WRITE(GEN6_PMIIR, pm_iir);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, pm_iir);
		}
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}
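
/*
 * On gen8+ a single master control register gates the per-domain IIRs; the
 * "master control interrupt lied" errors below flag the case where
 * master_ctl claimed a source whose IIR turned out to be empty.
 */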
static irqreturn_t gen8_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 master_ctl;
	irqreturn_t ret = IRQ_NONE;
	uint32_t tmp = 0;
	enum pipe pipe;
	u32 aux_mask = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev))
		aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			GEN9_AUX_CHANNEL_D;

	master_ctl = I915_READ(GEN8_MASTER_IRQ);
	master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
	if (!master_ctl)
		return IRQ_NONE;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	/* Find, clear, then process each source of interrupt */

	ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);

	if (master_ctl & GEN8_DE_MISC_IRQ) {
		tmp = I915_READ(GEN8_DE_MISC_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_MISC_IIR, tmp);
			ret = IRQ_HANDLED;
			if (tmp & GEN8_DE_MISC_GSE)
				intel_opregion_asle_intr(dev);
			else
				DRM_ERROR("Unexpected DE Misc interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
	}

	if (master_ctl & GEN8_DE_PORT_IRQ) {
		tmp = I915_READ(GEN8_DE_PORT_IIR);
		if (tmp) {
			I915_WRITE(GEN8_DE_PORT_IIR, tmp);
			ret = IRQ_HANDLED;

			if (tmp & aux_mask)
				dp_aux_irq_handler(dev);
			else
				DRM_ERROR("Unexpected DE Port interrupt\n");
		} else
			DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
	}

	for_each_pipe(dev_priv, pipe) {
		uint32_t pipe_iir, flip_done = 0, fault_errors = 0;

		if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
			continue;

		pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
		if (pipe_iir) {
			ret = IRQ_HANDLED;
			I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);

			if (pipe_iir & GEN8_PIPE_VBLANK &&
			    intel_pipe_handle_vblank(dev, pipe))
				intel_check_page_flip(dev, pipe);

			if (IS_GEN9(dev))
				flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
			else
				flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;

			if (flip_done) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip_plane(dev, pipe);
			}

			if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
				hsw_pipe_crc_irq_handler(dev, pipe);

			if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);

			if (IS_GEN9(dev))
				fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
			else
				fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

			if (fault_errors)
				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
					  pipe_name(pipe),
					  fault_errors);
		} else
			DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
	}

	if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
		/*
		 * FIXME(BDW): Assume for now that the new interrupt handling
		 * scheme also closed the SDE interrupt handling race we've seen
		 * on older pch-split platforms. But this needs testing.
		 */
		u32 pch_iir = I915_READ(SDEIIR);
		if (pch_iir) {
			I915_WRITE(SDEIIR, pch_iir);
			ret = IRQ_HANDLED;
			cpt_irq_handler(dev, pch_iir);
		} else
			DRM_ERROR("The master control interrupt lied (SDE)!\n");
	}

	I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return ret;
}
static void i915_error_wake_up(struct drm_i915_private *dev_priv,
			       bool reset_completed)
{
	struct intel_engine_cs *ring;
	int i;

	/*
	 * Notify all waiters for GPU completion events that reset state has
	 * been changed, and that they need to restart their wait after
	 * checking for potential errors (and bail out to drop locks if there is
	 * a gpu reset pending so that i915_error_work_func can acquire them).
	 */

	/* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
	for_each_ring(ring, dev_priv, i)
		wake_up_all(&ring->irq_queue);

	/* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
	wake_up_all(&dev_priv->pending_flip_queue);

	/*
	 * Signal tasks blocked in i915_gem_wait_for_error that the pending
	 * reset state is cleared.
	 */
	if (reset_completed)
		wake_up_all(&dev_priv->gpu_error.reset_queue);
}
/**
 * i915_error_work_func - do process context error handling work
 * @work: work struct
 *
 * Fire an error uevent so userspace can see that a hang or error
 * was detected.
 */
static void i915_error_work_func(struct work_struct *work)
{
	struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
						    work);
	struct drm_i915_private *dev_priv =
		container_of(error, struct drm_i915_private, gpu_error);
	struct drm_device *dev = dev_priv->dev;
	char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
	char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
	char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
	int ret;

	kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);

	/*
	 * Note that there's only one work item which does gpu resets, so we
	 * need not worry about concurrent gpu resets potentially incrementing
	 * error->reset_counter twice. We only need to take care of another
	 * racing irq/hangcheck declaring the gpu dead for a second time. A
	 * quick check for that is good enough: schedule_work ensures the
	 * correct ordering between hang detection and this work item, and since
	 * the reset in-progress bit is only ever set by code outside of this
	 * work we don't need to worry about any other races.
	 */
	if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
		DRM_DEBUG_DRIVER("resetting chip\n");
		kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
				   reset_event);

		/*
		 * In most cases it's guaranteed that we get here with an RPM
		 * reference held, for example because there is a pending GPU
		 * request that won't finish until the reset is done. This
		 * isn't the case at least when we get here by doing a
		 * simulated reset via debugfs, so get an RPM reference.
		 */
		intel_runtime_pm_get(dev_priv);

		/*
		 * All state reset _must_ be completed before we update the
		 * reset counter, for otherwise waiters might miss the reset
		 * pending state and not properly drop locks, resulting in
		 * deadlocks with the reset work.
		 */
		ret = i915_reset(dev);

		intel_display_handle_reset(dev);

		intel_runtime_pm_put(dev_priv);

		if (ret == 0) {
			/*
			 * After all the gem state is reset, increment the reset
			 * counter and wake up everyone waiting for the reset to
			 * complete.
			 *
			 * Since unlock operations are a one-sided barrier only,
			 * we need to insert a barrier here to order any seqno
			 * updates before the counter increment.
			 */
			smp_mb__before_atomic();
			atomic_inc(&dev_priv->gpu_error.reset_counter);

			kobject_uevent_env(&dev->primary->kdev->kobj,
					   KOBJ_CHANGE, reset_done_event);
		} else {
			atomic_set_mask(I915_WEDGED, &error->reset_counter);
		}

		/*
		 * Note: The wake_up also serves as a memory barrier so that
		 * waiters see the updated value of the reset counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, true);
	}
}
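
/*
 * i915_report_and_clear_eir() below decodes the EIR error bits, dumps the
 * related registers to the log and writes the bits back to ack them; bits
 * that refuse to clear are masked off in EMR so they cannot keep firing.
 */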
static void i915_report_and_clear_eir(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t instdone[I915_NUM_INSTDONE_REG];
	u32 eir = I915_READ(EIR);
	int pipe, i;

	if (!eir)
		return;

	pr_err("render error detected, EIR: 0x%08x\n", eir);

	i915_get_extra_instdone(dev, instdone);

	if (IS_G4X(dev)) {
		if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			for (i = 0; i < ARRAY_SIZE(instdone); i++)
				pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
		if (eir & GM45_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (!IS_GEN2(dev)) {
		if (eir & I915_ERROR_PAGE_TABLE) {
			u32 pgtbl_err = I915_READ(PGTBL_ER);
			pr_err("page table error\n");
			pr_err("  PGTBL_ER: 0x%08x\n", pgtbl_err);
			I915_WRITE(PGTBL_ER, pgtbl_err);
			POSTING_READ(PGTBL_ER);
		}
	}

	if (eir & I915_ERROR_MEMORY_REFRESH) {
		pr_err("memory refresh error:\n");
		for_each_pipe(dev_priv, pipe)
			pr_err("pipe %c stat: 0x%08x\n",
			       pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
		/* pipestat has already been acked */
	}
	if (eir & I915_ERROR_INSTRUCTION) {
		pr_err("instruction error\n");
		pr_err("  INSTPM: 0x%08x\n", I915_READ(INSTPM));
		for (i = 0; i < ARRAY_SIZE(instdone); i++)
			pr_err("  INSTDONE_%d: 0x%08x\n", i, instdone[i]);
		if (INTEL_INFO(dev)->gen < 4) {
			u32 ipeir = I915_READ(IPEIR);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD));
			I915_WRITE(IPEIR, ipeir);
			POSTING_READ(IPEIR);
		} else {
			u32 ipeir = I915_READ(IPEIR_I965);

			pr_err("  IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
			pr_err("  IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
			pr_err("  INSTPS: 0x%08x\n", I915_READ(INSTPS));
			pr_err("  ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
			I915_WRITE(IPEIR_I965, ipeir);
			POSTING_READ(IPEIR_I965);
		}
	}

	I915_WRITE(EIR, eir);
	POSTING_READ(EIR);
	eir = I915_READ(EIR);
	if (eir) {
		/*
		 * some errors might have become stuck,
		 * mask them.
		 */
		DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
		I915_WRITE(EMR, I915_READ(EMR) | eir);
		I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	}
}
/**
 * i915_handle_error - handle an error interrupt
 * @dev: drm device
 * @wedged: whether the hang caused the gpu to become wedged
 * @fmt: printf-style format string describing the error
 *
 * Do some basic checking of register state at error interrupt time and
 * dump it to the syslog. Also call i915_capture_error_state() to make
 * sure we get a record and make it available in debugfs. Fire a uevent
 * so userspace knows something bad happened (should trigger collection
 * of a ring dump etc.).
 */
void i915_handle_error(struct drm_device *dev, bool wedged,
		       const char *fmt, ...)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	va_list args;
	char error_msg[80];

	va_start(args, fmt);
	vscnprintf(error_msg, sizeof(error_msg), fmt, args);
	va_end(args);

	i915_capture_error_state(dev, wedged, error_msg);
	i915_report_and_clear_eir(dev);

	if (wedged) {
		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
				&dev_priv->gpu_error.reset_counter);

		/*
		 * Wakeup waiting processes so that the reset work function
		 * i915_error_work_func doesn't deadlock trying to grab various
		 * locks. By bumping the reset counter first, the woken
		 * processes will see a reset in progress and back off,
		 * releasing their locks and then wait for the reset completion.
		 * We must do this for _all_ gpu waiters that might hold locks
		 * that the reset work needs to acquire.
		 *
		 * Note: The wake_up serves as the required memory barrier to
		 * ensure that the waiters see the updated value of the reset
		 * counter atomic_t.
		 */
		i915_error_wake_up(dev_priv, false);
	}

	/*
	 * Our reset work can grab modeset locks (since it needs to reset the
	 * state of outstanding pageflips). Hence it must not be run on our own
	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
	 * code will deadlock.
	 */
	schedule_work(&dev_priv->gpu_error.work);
}
/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_STATUS);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_enable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_enable_pipestat(dev_priv, pipe,
			     PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

static int gen8_enable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
static void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_STATUS |
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;
	uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
						     DE_PIPE_VBLANK(pipe);

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	ironlake_disable_display_irq(dev_priv, bit);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_START_VBLANK_INTERRUPT_STATUS);
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

static void gen8_disable_vblank(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!i915_pipe_enabled(dev, pipe))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
static u32
ring_last_seqno(struct intel_engine_cs *ring)
{
	return list_entry(ring->request_list.prev,
			  struct drm_i915_gem_request, list)->seqno;
}

static bool
ring_idle(struct intel_engine_cs *ring, u32 seqno)
{
	return (list_empty(&ring->request_list) ||
		i915_seqno_passed(seqno, ring_last_seqno(ring)));
}

static bool
ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
{
	if (INTEL_INFO(dev)->gen >= 8) {
		return (ipehr >> 23) == 0x1c;
	} else {
		ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
		return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
				 MI_SEMAPHORE_REGISTER);
	}
}

static struct intel_engine_cs *
semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	int i;

	if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (offset == signaller->semaphore.signal_ggtt[ring->id])
				return signaller;
		}
	} else {
		u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;

		for_each_ring(signaller, dev_priv, i) {
			if (ring == signaller)
				continue;

			if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
				return signaller;
		}
	}

	DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
		  ring->id, ipehr, offset);

	return NULL;
}
static struct intel_engine_cs *
semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	u32 cmd, ipehr, head;
	u64 offset = 0;
	int i, backwards;

	ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
	if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
		return NULL;

	/*
	 * HEAD is likely pointing to the dword after the actual command,
	 * so scan backwards until we find the MBOX. But limit it to just 3
	 * or 4 dwords depending on the semaphore wait command size.
	 * Note that we don't care about ACTHD here since that might
	 * point at a batch, and semaphores are always emitted into the
	 * ringbuffer itself.
	 */
	head = I915_READ_HEAD(ring) & HEAD_ADDR;
	backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;

	for (i = backwards; i; --i) {
		/*
		 * Be paranoid and presume the hw has gone off into the wild -
		 * our ring is smaller than what the hardware (and hence
		 * HEAD_ADDR) allows. Also handles wrap-around.
		 */
		head &= ring->buffer->size - 1;

		/* This here seems to blow up */
		cmd = ioread32(ring->buffer->virtual_start + head);
		if (cmd == ipehr)
			break;

		head -= 4;
	}

	if (!i)
		return NULL;

	*seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
	if (INTEL_INFO(ring->dev)->gen >= 8) {
		offset = ioread32(ring->buffer->virtual_start + head + 12);
		offset <<= 32;
		offset |= ioread32(ring->buffer->virtual_start + head + 8);
	}

	return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
}
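
/*
 * semaphore_passed() returns 1 if the signaller has already passed the
 * seqno being waited for, 0 if the wait is still legitimate, and -1 on a
 * (potential) deadlock; hangcheck.deadlock bounds the recursion across
 * the ring graph.
 */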
static int semaphore_passed(struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	struct intel_engine_cs *signaller;
	u32 seqno;

	ring->hangcheck.deadlock++;

	signaller = semaphore_waits_for(ring, &seqno);
	if (signaller == NULL)
		return -1;

	/* Prevent pathological recursion due to driver bugs */
	if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
		return -1;

	if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
		return 1;

	/* cursory check for an unkickable deadlock */
	if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
	    semaphore_passed(signaller) < 0)
		return -1;

	return 0;
}

static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *ring;
	int i;

	for_each_ring(ring, dev_priv, i)
		ring->hangcheck.deadlock = 0;
}
static enum intel_ring_hangcheck_action
ring_stuck(struct intel_engine_cs *ring, u64 acthd)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;

	if (acthd != ring->hangcheck.acthd) {
		if (acthd > ring->hangcheck.max_acthd) {
			ring->hangcheck.max_acthd = acthd;
			return HANGCHECK_ACTIVE;
		}

		return HANGCHECK_ACTIVE_LOOP;
	}

	if (IS_GEN2(dev))
		return HANGCHECK_HUNG;

	/* Is the chip hanging on a WAIT_FOR_EVENT?
	 * If so we can simply poke the RB_WAIT bit
	 * and break the hang. This should work on
	 * all but the second generation chipsets.
	 */
	tmp = I915_READ_CTL(ring);
	if (tmp & RING_WAIT) {
		i915_handle_error(dev, false,
				  "Kicking stuck wait on %s",
				  ring->name);
		I915_WRITE_CTL(ring, tmp);
		return HANGCHECK_KICK;
	}

	if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
		switch (semaphore_passed(ring)) {
		default:
			return HANGCHECK_HUNG;
		case 1:
			i915_handle_error(dev, false,
					  "Kicking stuck semaphore on %s",
					  ring->name);
			I915_WRITE_CTL(ring, tmp);
			return HANGCHECK_KICK;
		case 0:
			return HANGCHECK_WAIT;
		}
	}

	return HANGCHECK_HUNG;
}
/*
 * This is called when the chip hasn't reported back with completed
 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 * if there is no progress, the hangcheck score for that ring is increased.
 * Further, acthd is inspected to see if the ring is stuck. If it is, we
 * kick the ring. If we see no progress on three subsequent calls
 * we assume the chip is wedged and try to fix it by resetting the chip.
 */
static void i915_hangcheck_elapsed(unsigned long data)
{
	struct drm_device *dev = (struct drm_device *)data;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;
	int busy_count = 0, rings_hung = 0;
	bool stuck[I915_NUM_RINGS] = { 0 };
#define BUSY 1
#define KICK 5
#define HUNG 20

	if (!i915.enable_hangcheck)
		return;

	for_each_ring(ring, dev_priv, i) {
		u64 acthd;
		u32 seqno;
		bool busy = true;

		semaphore_clear_deadlocks(dev_priv);

		seqno = ring->get_seqno(ring, false);
		acthd = intel_ring_get_active_head(ring);

		if (ring->hangcheck.seqno == seqno) {
			if (ring_idle(ring, seqno)) {
				ring->hangcheck.action = HANGCHECK_IDLE;

				if (waitqueue_active(&ring->irq_queue)) {
					/* Issue a wake-up to catch stuck h/w. */
					if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
						if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
							DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
								  ring->name);
						else
							DRM_INFO("Fake missed irq on %s\n",
								 ring->name);
						wake_up_all(&ring->irq_queue);
					}
					/* Safeguard against driver failure */
					ring->hangcheck.score += BUSY;
				} else
					busy = false;
			} else {
				/* We always increment the hangcheck score
				 * if the ring is busy and still processing
				 * the same request, so that no single request
				 * can run indefinitely (such as a chain of
				 * batches). The only time we do not increment
				 * the hangcheck score on this ring is if this
				 * ring is in a legitimate wait for another
				 * ring. In that case the waiting ring is a
				 * victim and we want to be sure we catch the
				 * right culprit. Then every time we do kick
				 * the ring, add a small increment to the
				 * score so that we can catch a batch that is
				 * being repeatedly kicked and so responsible
				 * for stalling the machine.
				 */
				ring->hangcheck.action = ring_stuck(ring,
								    acthd);

				switch (ring->hangcheck.action) {
				case HANGCHECK_IDLE:
				case HANGCHECK_WAIT:
				case HANGCHECK_ACTIVE:
					break;
				case HANGCHECK_ACTIVE_LOOP:
					ring->hangcheck.score += BUSY;
					break;
				case HANGCHECK_KICK:
					ring->hangcheck.score += KICK;
					break;
				case HANGCHECK_HUNG:
					ring->hangcheck.score += HUNG;
					stuck[i] = true;
					break;
				}
			}
		} else {
			ring->hangcheck.action = HANGCHECK_ACTIVE;

			/* Gradually reduce the count so that we catch DoS
			 * attempts across multiple batches.
			 */
			if (ring->hangcheck.score > 0)
				ring->hangcheck.score--;

			ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
		}

		ring->hangcheck.seqno = seqno;
		ring->hangcheck.acthd = acthd;
		busy_count += busy;
	}

	for_each_ring(ring, dev_priv, i) {
		if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
			DRM_INFO("%s on %s\n",
				 stuck[i] ? "stuck" : "no progress",
				 ring->name);
			rings_hung++;
		}
	}

	if (rings_hung)
		return i915_handle_error(dev, true, "Ring hung");

	if (busy_count)
		/* Reset timer in case the chip hangs without another request
		 * being added */
		i915_queue_hangcheck(dev);
}
void i915_queue_hangcheck(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;

	if (!i915.enable_hangcheck)
		return;

	/* Don't continually defer the hangcheck, but make sure it is active */
	if (!timer_pending(timer))
		timer->expires = round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES);
	mod_timer(timer, timer->expires);
}
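
/*
 * Note that i915_queue_hangcheck() only (re-)arms the timer; it is
 * typically invoked again from the request submission paths so hangcheck
 * keeps running for as long as there is work outstanding.
 */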
static void ibx_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	GEN5_IRQ_RESET(SDE);

	if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
		I915_WRITE(SERR_INT, 0xffffffff);
}

/*
 * SDEIER is also touched by the interrupt handler to work around missed PCH
 * interrupts. Hence we can't update it after the interrupt handler is enabled -
 * instead we unconditionally enable all PCH interrupt sources here, but then
 * only unmask them as needed with SDEIMR.
 *
 * This function needs to be called before interrupts are enabled.
 */
static void ibx_irq_pre_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_PCH_NOP(dev))
		return;

	WARN_ON(I915_READ(SDEIER) != 0);
	I915_WRITE(SDEIER, 0xffffffff);
	POSTING_READ(SDEIER);
}
static void gen5_gt_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	GEN5_IRQ_RESET(GT);
	if (INTEL_INFO(dev)->gen >= 6)
		GEN5_IRQ_RESET(GEN6_PM);
}

/* drm_dma.h hooks */
static void ironlake_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(HWSTAM, 0xffffffff);

	GEN5_IRQ_RESET(DE);
	if (IS_GEN7(dev))
		I915_WRITE(GEN7_ERR_INT, 0xffffffff);

	gen5_gt_irq_reset(dev);

	ibx_irq_reset(dev);
}
static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
{
	enum pipe pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0xffff);

	GEN5_IRQ_RESET(VLV_);
}

static void valleyview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(VLV_IMR, 0);
	I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
	I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
	I915_WRITE(RING_IMR(BLT_RING_BASE), 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);

	vlv_display_irq_reset(dev_priv);
}
static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
{
	GEN8_IRQ_RESET_NDX(GT, 0);
	GEN8_IRQ_RESET_NDX(GT, 1);
	GEN8_IRQ_RESET_NDX(GT, 2);
	GEN8_IRQ_RESET_NDX(GT, 3);
}

static void gen8_irq_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);

	GEN5_IRQ_RESET(GEN8_DE_PORT_);
	GEN5_IRQ_RESET(GEN8_DE_MISC_);
	GEN5_IRQ_RESET(GEN8_PCU_);

	ibx_irq_reset(dev);
}
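
/*
 * Called when a display power well comes back up: the pipe B/C interrupt
 * registers lose their contents with the power well, so reprogram them
 * from the cached de_irq_mask[] (plus vblank/underrun bits) under irq_lock.
 */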
void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
{
	uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;

	spin_lock_irq(&dev_priv->irq_lock);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
			  ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
	GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
			  ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static void cherryview_irq_preinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);

	vlv_display_irq_reset(dev_priv);
}
static void ibx_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_irqs, hotplug, enabled_irqs = 0;

	if (HAS_PCH_IBX(dev)) {
		hotplug_irqs = SDE_HOTPLUG_MASK;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
	} else {
		hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
	}

	ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);

	/*
	 * Enable digital hotplug on the PCH, and configure the DP short pulse
	 * duration to 2ms (which is the minimum in the Display Port spec).
	 *
	 * This register is the same on all known PCH chips.
	 */
	hotplug = I915_READ(PCH_PORT_HOTPLUG);
	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
}
static void ibx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 mask;

	if (HAS_PCH_NOP(dev))
		return;

	if (HAS_PCH_IBX(dev))
		mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
	else
		mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;

	GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
	I915_WRITE(SDEIMR, ~mask);
}

static void gen5_gt_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pm_irqs, gt_irqs;

	pm_irqs = gt_irqs = 0;

	dev_priv->gt_irq_mask = ~0;
	if (HAS_L3_DPF(dev)) {
		/* L3 parity interrupt is always unmasked. */
		dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
		gt_irqs |= GT_PARITY_ERROR(dev);
	}

	gt_irqs |= GT_RENDER_USER_INTERRUPT;
	if (IS_GEN5(dev)) {
		gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
			   ILK_BSD_USER_INTERRUPT;
	} else {
		gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
	}

	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

	if (INTEL_INFO(dev)->gen >= 6) {
		pm_irqs |= dev_priv->pm_rps_events;

		if (HAS_VEBOX(dev))
			pm_irqs |= PM_VEBOX_USER_INTERRUPT;

		dev_priv->pm_irq_mask = 0xffffffff;
		GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
	}
}
static int ironlake_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 display_mask, extra_mask;

	if (INTEL_INFO(dev)->gen >= 7) {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
				DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
				DE_PLANEB_FLIP_DONE_IVB |
				DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
		extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
			      DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
	} else {
		display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
				DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
				DE_AUX_CHANNEL_A |
				DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
				DE_POISON);
		extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
			     DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
	}

	dev_priv->irq_mask = ~display_mask;

	I915_WRITE(HWSTAM, 0xeffe);

	ibx_irq_pre_postinstall(dev);

	GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);

	gen5_gt_irq_postinstall(dev);

	ibx_irq_postinstall(dev);

	if (IS_IRONLAKE_M(dev)) {
		/* Enable PCU event interrupts
		 *
		 * spinlocking not required here for correctness since interrupt
		 * setup is guaranteed to run in single-threaded context. But we
		 * need it to make the assert_spin_locked happy. */
		spin_lock_irq(&dev_priv->irq_lock);
		ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
		spin_unlock_irq(&dev_priv->irq_lock);
	}

	return 0;
}
static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_enable_pipestat(dev_priv, pipe, pipestat_mask);

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
	dev_priv->irq_mask &= ~iir_mask;

	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);
}

static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
{
	u32 pipestat_mask;
	u32 iir_mask;
	enum pipe pipe;

	iir_mask = I915_DISPLAY_PORT_INTERRUPT |
		   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
	if (IS_CHERRYVIEW(dev_priv))
		iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;

	dev_priv->irq_mask |= iir_mask;
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	I915_WRITE(VLV_IIR, iir_mask);
	POSTING_READ(VLV_IIR);

	pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
			PIPE_CRC_DONE_INTERRUPT_STATUS;

	i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	for_each_pipe(dev_priv, pipe)
		i915_disable_pipestat(dev_priv, pipe, pipestat_mask);

	pipestat_mask = PIPESTAT_INT_STATUS_MASK |
			PIPE_FIFO_UNDERRUN_STATUS;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), pipestat_mask);
	POSTING_READ(PIPESTAT(PIPE_A));
}
void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = true;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_install(dev_priv);
}

void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled)
		return;

	dev_priv->display_irqs_enabled = false;

	if (intel_irqs_enabled(dev_priv))
		valleyview_display_irqs_uninstall(dev_priv);
}

static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
{
	dev_priv->irq_mask = ~0;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IIR, 0xffffffff);
	I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
	I915_WRITE(VLV_IMR, dev_priv->irq_mask);
	POSTING_READ(VLV_IMR);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_install(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);
}

static int valleyview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen5_gt_irq_postinstall(dev);

	/* ack & enable invalid PTE error interrupts */
#if 0 /* FIXME: add support to irq handler for checking these bits */
	I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
	I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
#endif

	I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);

	return 0;
}
static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
{
	/* These are interrupts we'll toggle with the ring mask register */
	uint32_t gt_interrupts[] = {
		GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
			GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
		GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
		0,
		GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
			GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
		};

	dev_priv->pm_irq_mask = 0xffffffff;
	GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
	GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
	GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
	GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
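
/*
 * GT index 2 above carries the PM/RPS interrupts: everything starts out
 * masked (pm_irq_mask = 0xffffffff) with only pm_rps_events enabled, so
 * the RPS code can unmask what it needs at runtime.
 */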
static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
{
	uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
	uint32_t de_pipe_enables;
	int pipe;
	u32 aux_en = GEN8_AUX_CHANNEL_A;

	if (IS_GEN9(dev_priv)) {
		de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
				  GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
		aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
			  GEN9_AUX_CHANNEL_D;
	} else
		de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
				  GEN8_DE_PIPE_IRQ_FAULT_ERRORS;

	de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
			  GEN8_PIPE_FIFO_UNDERRUN;

	dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
	dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;

	for_each_pipe(dev_priv, pipe)
		if (intel_display_power_is_enabled(dev_priv,
						   POWER_DOMAIN_PIPE(pipe)))
			GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
					  dev_priv->de_irq_mask[pipe],
					  de_pipe_enables);

	GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
}

static int gen8_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ibx_irq_pre_postinstall(dev);

	gen8_gt_irq_postinstall(dev_priv);
	gen8_de_irq_postinstall(dev_priv);

	ibx_irq_postinstall(dev);

	I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}

static int cherryview_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	vlv_display_irq_postinstall(dev_priv);

	gen8_gt_irq_postinstall(dev_priv);

	I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
	POSTING_READ(GEN8_MASTER_IRQ);

	return 0;
}
static void gen8_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	gen8_irq_reset(dev);
}

static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
{
	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display_irqs_enabled)
		valleyview_display_irqs_uninstall(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	vlv_display_irq_reset(dev_priv);

	dev_priv->irq_mask = 0;
}

static void valleyview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(VLV_MASTER_IER, 0);

	gen5_gt_irq_reset(dev);

	I915_WRITE(HWSTAM, 0xffffffff);

	vlv_display_irq_uninstall(dev_priv);
}

static void cherryview_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	I915_WRITE(GEN8_MASTER_IRQ, 0);
	POSTING_READ(GEN8_MASTER_IRQ);

	gen8_gt_irq_reset(dev_priv);

	GEN5_IRQ_RESET(GEN8_PCU_);

	vlv_display_irq_uninstall(dev_priv);
}

static void ironlake_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	ironlake_irq_reset(dev);
}
static void i8xx_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	POSTING_READ16(IER);
}

static int i8xx_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE16(EMR,
		     ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
	I915_WRITE16(IMR, dev_priv->irq_mask);

	I915_WRITE16(IER,
		     I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		     I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		     I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		     I915_USER_INTERRUPT);
	POSTING_READ16(IER);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i8xx_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ16(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u16 iir, new_iir;
	u32 pipe_stats[2];
	int pipe;
	u16 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;

	iir = I915_READ16(IIR);
	if (iir == 0)
		return IRQ_NONE;

	while (iir & ~flip_mask) {
		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff)
				I915_WRITE(reg, pipe_stats[pipe]);
		}
		spin_unlock(&dev_priv->irq_lock);

		I915_WRITE16(IIR, iir & ~flip_mask);
		new_iir = I915_READ16(IIR); /* Flush posted writes */

		i915_update_dri1_breadcrumb(dev);

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i8xx_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		iir = new_iir;
	}

	return IRQ_HANDLED;
}

static void i8xx_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE16(IMR, 0xffff);
	I915_WRITE16(IER, 0x0);
	I915_WRITE16(IIR, I915_READ16(IIR));
}
static void i915_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}

static int i915_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;

	I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask =
		~(I915_ASLE_INTERRUPT |
		  I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		  I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		  I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		  I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
		  I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask =
		I915_ASLE_INTERRUPT |
		I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
		I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
		I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
		I915_USER_INTERRUPT;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		POSTING_READ(PORT_HOTPLUG_EN);

		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
		dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
	}

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	i915_enable_asle_pipestat(dev);

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	return 0;
}
/*
 * Returns true when a page flip has completed.
 */
static bool i915_handle_vblank(struct drm_device *dev,
			       int plane, int pipe, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);

	if (!intel_pipe_handle_vblank(dev, pipe))
		return false;

	if ((iir & flip_pending) == 0)
		goto check_page_flip;

	intel_prepare_page_flip(dev, plane);

	/* We detect FlipDone by looking for the change in PendingFlip from '1'
	 * to '0' on the following vblank, i.e. IIR has the Pendingflip
	 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
	 * the flip is completed (no longer pending). Since this doesn't raise
	 * an interrupt per se, we watch for the change at vblank.
	 */
	if (I915_READ(ISR) & flip_pending)
		goto check_page_flip;

	intel_finish_page_flip(dev, pipe);
	return true;

check_page_flip:
	intel_check_page_flip(dev, pipe);
	return false;
}
static irqreturn_t i915_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
	u32 flip_mask =
		I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
		I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
	int pipe, ret = IRQ_NONE;

	iir = I915_READ(IIR);
	do {
		bool irq_received = (iir & ~flip_mask) != 0;
		bool blc_event = false;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock(&dev_priv->irq_lock);
		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
			i915_handle_error(dev, false,
					  "Command parser error, iir 0x%08x",
					  iir);

		for_each_pipe(dev_priv, pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/* Clear the PIPE*STAT regs before the IIR */
			if (pipe_stats[pipe] & 0x8000ffff) {
				I915_WRITE(reg, pipe_stats[pipe]);
				irq_received = true;
			}
		}
		spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		/* Consume port. Then clear IIR or we'll miss events */
		if (I915_HAS_HOTPLUG(dev) &&
		    iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);

		for_each_pipe(dev_priv, pipe) {
			int plane = pipe;
			if (HAS_FBC(dev))
				plane = !plane;

			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, plane, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv,
								    pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * a spurious one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		ret = IRQ_HANDLED;
		iir = new_iir;
	} while (iir & ~flip_mask);

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
static void i915_irq_uninstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (I915_HAS_HOTPLUG(dev)) {
		I915_WRITE(PORT_HOTPLUG_EN, 0);
		I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
	}

	I915_WRITE16(HWSTAM, 0xffff);
	for_each_pipe(dev_priv, pipe) {
		/* Clear enable bits; then clear status bits */
		I915_WRITE(PIPESTAT(pipe), 0);
		I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
	}
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(IIR, I915_READ(IIR));
}

static void i965_irq_preinstall(struct drm_device * dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xeffe);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	POSTING_READ(IER);
}
static int i965_irq_postinstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 enable_mask;
	u32 error_mask;

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
			       I915_DISPLAY_PORT_INTERRUPT |
			       I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
			       I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
			       I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			       I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
			       I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);

	enable_mask = ~dev_priv->irq_mask;
	enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
			 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
	enable_mask |= I915_USER_INTERRUPT;

	if (IS_G4X(dev))
		enable_mask |= I915_BSD_USER_INTERRUPT;

	/* Interrupt setup is already guaranteed to be single-threaded, this is
	 * just to make the assert_spin_locked check happy. */
	spin_lock_irq(&dev_priv->irq_lock);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/*
	 * Enable some error detection, note the instruction error mask
	 * bit is reserved, so we leave it masked.
	 */
	if (IS_G4X(dev)) {
		error_mask = ~(GM45_ERROR_PAGE_TABLE |
			       GM45_ERROR_MEM_PRIV |
			       GM45_ERROR_CP_PRIV |
			       I915_ERROR_MEMORY_REFRESH);
	} else {
		error_mask = ~(I915_ERROR_PAGE_TABLE |
			       I915_ERROR_MEMORY_REFRESH);
	}
	I915_WRITE(EMR, error_mask);

	I915_WRITE(IMR, dev_priv->irq_mask);
	I915_WRITE(IER, enable_mask);
	POSTING_READ(IER);

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	POSTING_READ(PORT_HOTPLUG_EN);

	i915_enable_asle_pipestat(dev);

	return 0;
}
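
/*
 * i915_hpd_irq_setup() must run under irq_lock (it asserts as much); it is
 * also reachable through the display.hpd_irq_setup vtable, which is how
 * intel_hpd_irq_reenable_work() further below re-applies it.
 */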
static void i915_hpd_irq_setup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *intel_encoder;
	u32 hotplug_en;

	assert_spin_locked(&dev_priv->irq_lock);

	if (I915_HAS_HOTPLUG(dev)) {
		hotplug_en = I915_READ(PORT_HOTPLUG_EN);
		hotplug_en &= ~HOTPLUG_INT_EN_MASK;
		/* Note HDMI and DP share hotplug bits */
		/* enable bits are the same for all generations */
		for_each_intel_encoder(dev, intel_encoder)
			if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
				hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
		/* Programming the CRT detection parameters tends
		 * to generate a spurious hotplug event about three
		 * seconds later. So just do it once.
		 */
		if (IS_G4X(dev))
			hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
		hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
		hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;

		/* Ignore TV since it's buggy */
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
	}
}
4136 static irqreturn_t i965_irq_handler(int irq, void *arg)
4138 struct drm_device *dev = arg;
4139 struct drm_i915_private *dev_priv = dev->dev_private;
4141 u32 pipe_stats[I915_MAX_PIPES];
4142 int ret = IRQ_NONE, pipe;
4144 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4145 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4147 iir = I915_READ(IIR);
4150 bool irq_received = (iir & ~flip_mask) != 0;
4151 bool blc_event = false;
4153 /* Can't rely on pipestat interrupt bit in iir as it might
4154 * have been cleared after the pipestat interrupt was received.
4155 * It doesn't set the bit in iir again, but it still produces
4156 * interrupts (for non-MSI).
4158 spin_lock(&dev_priv->irq_lock);
4159 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4160 i915_handle_error(dev, false,
4161 "Command parser error, iir 0x%08x",
4164 for_each_pipe(dev_priv, pipe) {
4165 int reg = PIPESTAT(pipe);
4166 pipe_stats[pipe] = I915_READ(reg);
4169 * Clear the PIPE*STAT regs before the IIR
4171 if (pipe_stats[pipe] & 0x8000ffff) {
4172 I915_WRITE(reg, pipe_stats[pipe]);
4173 irq_received = true;
4176 spin_unlock(&dev_priv->irq_lock);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT)
			i9xx_hpd_irq_handler(dev);

		I915_WRITE(IIR, iir & ~flip_mask);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (iir & I915_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[RCS]);
		if (iir & I915_BSD_USER_INTERRUPT)
			notify_ring(dev, &dev_priv->ring[VCS]);

		for_each_pipe(dev_priv, pipe) {
			if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
			    i915_handle_vblank(dev, pipe, pipe, iir))
				flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);

			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
				blc_event = true;

			if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
				i9xx_pipe_crc_irq_handler(dev, pipe);

			if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
				intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
		}

		if (blc_event || (iir & I915_ASLE_INTERRUPT))
			intel_opregion_asle_intr(dev);

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	i915_update_dri1_breadcrumb(dev);

	return ret;
}
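
/*
 * A minimal sketch of the IIR loop shape used by i965_irq_handler() above
 * (and, with different event bits, by the other i8xx/i915 handlers in this
 * file), assuming the MSI edge behaviour described in the comment at the
 * bottom of the loop:
 *
 *	iir = I915_READ(IIR);
 *	for (;;) {
 *		if (nothing interesting in iir)
 *			break;
 *		... ack PIPESTAT under irq_lock, then write IIR ...
 *		new_iir = I915_READ(IIR);	(flush the posted write)
 *		... handle the events recorded in iir ...
 *		iir = new_iir;			(rescan anything that landed
 *						 while we were busy)
 *	}
 */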

static void i965_irq_uninstall(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe;

	if (!dev_priv)
		return;

	I915_WRITE(PORT_HOTPLUG_EN, 0);
	I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));

	I915_WRITE(HWSTAM, 0xffffffff);
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe), 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	/* Clear any residual status bits before acking the IIR */
	for_each_pipe(dev_priv, pipe)
		I915_WRITE(PIPESTAT(pipe),
			   I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}

static void intel_hpd_irq_reenable_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv),
			     hotplug_reenable_work.work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	int i;

	intel_runtime_pm_get(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
		struct drm_connector *connector;

		if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
			continue;

		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;

		list_for_each_entry(connector, &mode_config->connector_list, head) {
			struct intel_connector *intel_connector = to_intel_connector(connector);

			if (intel_connector->encoder->hpd_pin == i) {
				if (connector->polled != intel_connector->polled)
					DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
							 connector->name);
				connector->polled = intel_connector->polled;
				if (!connector->polled)
					connector->polled = DRM_CONNECTOR_POLL_HPD;
			}
		}
	}
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_runtime_pm_put(dev_priv);
}
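
/*
 * Note: this is the delayed counterpart to the hotplug storm handling. The
 * assumption here is that pins were marked HPD_DISABLED by the storm
 * detection in the interrupt path; this work runs after a cool-down delay
 * and flips them (and their interrupt enable bits, via hpd_irq_setup) back
 * on once the storm has presumably passed.
 */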

/**
 * intel_irq_init - initializes irq support
 * @dev_priv: i915 device instance
 *
 * This function initializes all the irq support including work items, timers
 * and all the vtables. It does not set up the interrupt itself though.
 */
void intel_irq_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
	INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
	INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
	INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
	INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);

	/* Let's track the enabled rps events */
	if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		/* WaGsvRC0ResidencyMethod:vlv */
		dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
	else
		dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;

	setup_timer(&dev_priv->gpu_error.hangcheck_timer,
		    i915_hangcheck_elapsed,
		    (unsigned long) dev);
	INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
			  intel_hpd_irq_reenable_work);

	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	if (IS_GEN2(dev_priv)) {
		dev->max_vblank_count = 0;
		dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
	} else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
	} else {
		dev->driver->get_vblank_counter = i915_get_vblank_counter;
		dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
	}

	/*
	 * Opt out of the vblank disable timer on everything except gen2.
	 * Gen2 doesn't have a hardware frame counter and so depends on
	 * vblank interrupts to produce sane vblank sequence numbers.
	 */
	if (!IS_GEN2(dev_priv))
		dev->vblank_disable_immediate = true;
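
	/*
	 * Sketch of why max_vblank_count matters (this describes the DRM
	 * core's vblank code, not anything in this file): the core computes
	 * missed frames with a masked subtraction, roughly
	 *
	 *	diff = (cur_count - last_count) & dev->max_vblank_count;
	 *
	 * so 0xffffff lets the 24-bit hardware counter wrap correctly, while
	 * max_vblank_count = 0 on gen2 disables the hardware counter
	 * entirely in favour of counting interrupts.
	 */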

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
		dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
	}

	if (IS_CHERRYVIEW(dev_priv)) {
		dev->driver->irq_handler = cherryview_irq_handler;
		dev->driver->irq_preinstall = cherryview_irq_preinstall;
		dev->driver->irq_postinstall = cherryview_irq_postinstall;
		dev->driver->irq_uninstall = cherryview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev->driver->irq_handler = valleyview_irq_handler;
		dev->driver->irq_preinstall = valleyview_irq_preinstall;
		dev->driver->irq_postinstall = valleyview_irq_postinstall;
		dev->driver->irq_uninstall = valleyview_irq_uninstall;
		dev->driver->enable_vblank = valleyview_enable_vblank;
		dev->driver->disable_vblank = valleyview_disable_vblank;
		dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
	} else if (INTEL_INFO(dev_priv)->gen >= 8) {
		dev->driver->irq_handler = gen8_irq_handler;
		dev->driver->irq_preinstall = gen8_irq_reset;
		dev->driver->irq_postinstall = gen8_irq_postinstall;
		dev->driver->irq_uninstall = gen8_irq_uninstall;
		dev->driver->enable_vblank = gen8_enable_vblank;
		dev->driver->disable_vblank = gen8_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else if (HAS_PCH_SPLIT(dev)) {
		dev->driver->irq_handler = ironlake_irq_handler;
		dev->driver->irq_preinstall = ironlake_irq_reset;
		dev->driver->irq_postinstall = ironlake_irq_postinstall;
		dev->driver->irq_uninstall = ironlake_irq_uninstall;
		dev->driver->enable_vblank = ironlake_enable_vblank;
		dev->driver->disable_vblank = ironlake_disable_vblank;
		dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
	} else {
		if (INTEL_INFO(dev_priv)->gen == 2) {
			dev->driver->irq_preinstall = i8xx_irq_preinstall;
			dev->driver->irq_postinstall = i8xx_irq_postinstall;
			dev->driver->irq_handler = i8xx_irq_handler;
			dev->driver->irq_uninstall = i8xx_irq_uninstall;
		} else if (INTEL_INFO(dev_priv)->gen == 3) {
			dev->driver->irq_preinstall = i915_irq_preinstall;
			dev->driver->irq_postinstall = i915_irq_postinstall;
			dev->driver->irq_uninstall = i915_irq_uninstall;
			dev->driver->irq_handler = i915_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		} else {
			dev->driver->irq_preinstall = i965_irq_preinstall;
			dev->driver->irq_postinstall = i965_irq_postinstall;
			dev->driver->irq_uninstall = i965_irq_uninstall;
			dev->driver->irq_handler = i965_irq_handler;
			dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
		}
		dev->driver->enable_vblank = i915_enable_vblank;
		dev->driver->disable_vblank = i915_disable_vblank;
	}
}
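
/*
 * The hooks installed above are invoked by the DRM core rather than called
 * directly: roughly, drm_irq_install() runs ->irq_preinstall, registers
 * ->irq_handler with request_irq() and then runs ->irq_postinstall, while
 * ->enable_vblank/->disable_vblank are driven by drm_vblank_get()/put().
 * (A sketch of the DRM core of this era; see drm_irq.c for the details.)
 */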

/**
 * intel_hpd_init - initializes and enables hpd support
 * @dev_priv: i915 device instance
 *
 * This function enables the hotplug support. It requires that interrupts have
 * already been enabled with intel_irq_install(). From this point on hotplug and
 * poll request can run concurrently to other code, so locking rules must be
 * obeyed.
 *
 * This is a separate step from interrupt enabling to simplify the locking rules
 * in the driver load and resume code.
 */
void intel_hpd_init(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct drm_connector *connector;
	int i;

	for (i = 1; i < HPD_NUM_PINS; i++) {
		dev_priv->hpd_stats[i].hpd_cnt = 0;
		dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
	}
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		struct intel_connector *intel_connector = to_intel_connector(connector);

		connector->polled = intel_connector->polled;
		if (connector->encoder && !connector->polled &&
		    I915_HAS_HOTPLUG(dev) &&
		    intel_connector->encoder->hpd_pin > HPD_NONE)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
		if (intel_connector->mst_port)
			connector->polled = DRM_CONNECTOR_POLL_HPD;
	}

	/*
	 * Interrupt setup is already guaranteed to be single-threaded,
	 * this is just to make the assert_spin_locked checks happy.
	 */
	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * intel_irq_install - enables the hardware interrupt
 * @dev_priv: i915 device instance
 *
 * This function enables the hardware interrupt handling, but leaves the hotplug
 * handling still disabled. It is called after intel_irq_init().
 *
 * In the driver load and resume code we need working interrupts in a few places
 * but don't want to deal with the hassle of concurrent probe and hotplug
 * workers. Hence the split into this two-stage approach.
 */
int intel_irq_install(struct drm_i915_private *dev_priv)
{
	/*
	 * We enable some interrupt sources in our postinstall hooks, so mark
	 * interrupts as enabled _before_ actually enabling them to avoid
	 * special cases in our ordering checks.
	 */
	dev_priv->pm.irqs_enabled = true;

	return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
}
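
/*
 * A sketch of the intended bring-up order on driver load and resume,
 * assuming a caller shaped like the i915 load path (the real callers live
 * elsewhere in the driver and do more work between the steps):
 *
 *	intel_irq_init(dev_priv);	(vtables, work items, timers)
 *	intel_irq_install(dev_priv);	(hardware interrupt enabled)
 *	... probe outputs, set up connectors ...
 *	intel_hpd_init(dev_priv);	(hotplug last, once probing is done)
 */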

/**
 * intel_irq_uninstall - finalizes all irq handling
 * @dev_priv: i915 device instance
 *
 * This stops interrupt and hotplug handling and unregisters and frees all
 * resources acquired in the init functions.
 */
void intel_irq_uninstall(struct drm_i915_private *dev_priv)
{
	drm_irq_uninstall(dev_priv->dev);
	intel_hpd_cancel_work(dev_priv);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
 * @dev_priv: i915 device instance
 *
 * This function is used to disable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
	dev_priv->pm.irqs_enabled = false;
}

/**
 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
 * @dev_priv: i915 device instance
 *
 * This function is used to enable interrupts at runtime, both in the runtime
 * pm and the system suspend/resume code.
 */
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
{
	dev_priv->pm.irqs_enabled = true;
	dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
	dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
}
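
/*
 * These two helpers are meant to be used as a pair, e.g. in a runtime
 * suspend/resume sequence (a sketch, assuming a caller like the PM code in
 * i915_drv.c):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... power the device down and back up ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 *
 * Re-enabling goes through the full preinstall/postinstall cycle so that
 * the interrupt registers are reprogrammed from scratch.
 */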