1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
/* Sentinel "no PID" value: all bits set in a 32-bit word. */
34 #define MAX_NOPID ((u32)~0)
36 /** These are the interrupts used by the driver: user (breadcrumb),
 * vblank on both pipes, ASLE (opregion backlight), and pipe B events.
 * Used by irq_postinstall to bound irq_mask_reg and to program IER. */
37 #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \
38 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | \
39 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT | \
40 I915_ASLE_INTERRUPT | \
41 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
/*
 * Unmask (enable) the interrupts in @mask: clear those bits in the
 * software IMR shadow (irq_mask_reg) and write the shadow to IMR.
 * The IMR read-back presumably flushes the posted MMIO write (same
 * pattern as the "Flush posted writes" read in the IRQ handler).
 * NOTE(review): this excerpt elides lines (storage class / return type,
 * closing braces); comments cover only the visible statements.
 */
44 i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
46 if ((dev_priv->irq_mask_reg & mask) != 0) { /* skip MMIO if already unmasked */
47 dev_priv->irq_mask_reg &= ~mask;
48 I915_WRITE(IMR, dev_priv->irq_mask_reg);
49 (void) I915_READ(IMR);
/*
 * Mask (disable) the interrupts in @mask: set those bits in the IMR
 * shadow and push the shadow to hardware.  Mirror image of
 * i915_enable_irq above; the IMR read-back presumably flushes the
 * posted write.
 * NOTE(review): excerpt elides the return type and closing braces.
 */
54 i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
56 if ((dev_priv->irq_mask_reg & mask) != mask) { /* skip MMIO if already fully masked */
57 dev_priv->irq_mask_reg |= mask;
58 I915_WRITE(IMR, dev_priv->irq_mask_reg);
59 (void) I915_READ(IMR);
/*
64 * Emit blits for scheduled buffer swaps.
 *
66 * This function will be called with the HW lock held.
 *
 * Walks dev_priv->vbl_swaps for swaps whose target vblank sequence has
 * been reached, sorts the hits by drawable y-position, then emits one
 * back->front XY_SRC_COPY blit per clip rect, slicing each pipe's
 * scanout area so concurrent swaps tear less.
 *
 * NOTE(review): this excerpt elides many lines (closing braces, ring
 * BEGIN/ADVANCE bookkeeping, several initializations such as nhits,
 * rect, top, and the pre-loop hit-counting); comments below describe
 * only what is visible and hedge where the elided code matters.
 */
68 static void i915_vblank_tasklet(struct drm_device *dev)
70 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
71 unsigned long irqflags;
72 struct list_head *list, *tmp, hits, *hit;
73 int nhits, nrects, slice[2], upper[2], lower[2], i;
/* Snapshot both pipes' vblank counters; indexed by vbl_swap->pipe below. */
74 unsigned counter[2] = { atomic_read(&dev->vbl_received),
75 atomic_read(&dev->vbl_received2) };
76 struct drm_drawable_info *drw;
77 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
78 u32 cpp = dev_priv->cpp;
/* 32bpp blits also write alpha+RGB channels; other depths use the bare cmd. */
79 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
80 XY_SRC_COPY_BLT_WRITE_ALPHA |
81 XY_SRC_COPY_BLT_WRITE_RGB)
82 : XY_SRC_COPY_BLT_CMD;
83 u32 src_pitch = sarea_priv->pitch * cpp;
84 u32 dst_pitch = sarea_priv->pitch * cpp;
/* 0xcc = SRCCOPY ROP; high byte encodes color depth for the blitter. */
85 u32 ropcpp = (0xcc << 16) | ((cpp - 1) << 24);
/* On 965, tiled front/back buffers need the tiled variants of the blit. */
88 if (IS_I965G(dev) && sarea_priv->front_tiled) {
89 cmd |= XY_SRC_COPY_BLT_DST_TILED;
92 if (IS_I965G(dev) && sarea_priv->back_tiled) {
93 cmd |= XY_SRC_COPY_BLT_SRC_TILED;
99 INIT_LIST_HEAD(&hits);
103 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
105 /* Find buffer swaps scheduled for this vertical blank */
106 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
107 drm_i915_vbl_swap_t *vbl_swap =
108 list_entry(list, drm_i915_vbl_swap_t, head);
/* Unsigned wraparound compare: swap not yet due if sequence is in the future. */
110 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
114 dev_priv->swaps_pending--;
/* Trade swaps_lock for drw_lock to look up the drawable (lock ordering). */
116 spin_unlock(&dev_priv->swaps_lock);
117 spin_lock(&dev->drw_lock);
119 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
/* Presumably the drawable vanished: drop the swap entry and continue.
 * TODO(review): elided lines around here likely contain the NULL check. */
122 spin_unlock(&dev->drw_lock);
123 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
124 spin_lock(&dev_priv->swaps_lock);
/* Insertion sort into `hits`, ordered by the first clip rect's y1. */
128 list_for_each(hit, &hits) {
129 drm_i915_vbl_swap_t *swap_cmp =
130 list_entry(hit, drm_i915_vbl_swap_t, head);
131 struct drm_drawable_info *drw_cmp =
132 drm_get_drawable_info(dev, swap_cmp->drw_id);
135 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
136 list_add_tail(list, hit);
141 spin_unlock(&dev->drw_lock);
143 /* List of hits was empty, or we reached the end of it */
145 list_add_tail(list, hits.prev);
149 spin_lock(&dev_priv->swaps_lock);
153 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
157 spin_unlock(&dev_priv->swaps_lock);
159 i915_kernel_lost_context(dev);
/* Program the drawing rectangle: 965 and pre-965 use different packets. */
164 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
166 OUT_RING(((sarea_priv->width - 1) & 0xffff) | ((sarea_priv->height - 1) << 16));
172 OUT_RING(GFX_OP_DRAWRECT_INFO);
/* Emitted twice per the pre-965 DRAWRECT_INFO packet layout -- TODO confirm
 * against the elided dwords around these lines. */
175 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
176 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
182 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
/* Partition each pipe's height into nhits slices (at least 1 line each). */
184 upper[0] = upper[1] = 0;
185 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
186 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
187 lower[0] = sarea_priv->pipeA_y + slice[0];
/* NOTE(review): slice[0] here looks like a typo for slice[1] (pipe B
 * should advance by its own slice height) -- known upstream fix; left
 * untouched because surrounding code is elided in this excerpt. */
188 lower[1] = sarea_priv->pipeB_y + slice[0];
190 spin_lock(&dev->drw_lock);
192 /* Emit blits for buffer swaps, partitioning both outputs into as many
193 * slices as there are buffer swaps scheduled in order to avoid tearing
194 * (based on the assumption that a single buffer swap would always
195 * complete before scanout starts).
 */
/* `i++ < nhits` runs the body exactly nhits times; the third clause
 * steps both pipes' slice windows downward each iteration. */
197 for (i = 0; i++ < nhits;
198 upper[0] = lower[0], lower[0] += slice[0],
199 upper[1] = lower[1], lower[1] += slice[1]) {
/* Last slice: extend to the bottom of the framebuffer (elided condition). */
201 lower[0] = lower[1] = sarea_priv->height;
203 list_for_each(hit, &hits) {
204 drm_i915_vbl_swap_t *swap_hit =
205 list_entry(hit, drm_i915_vbl_swap_t, head);
206 struct drm_clip_rect *rect;
208 unsigned short top, bottom;
210 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
216 pipe = swap_hit->pipe;
218 bottom = lower[pipe];
/* Clip every drawable rect against this slice's [top, bottom) band.
 * NOTE(review): rect/top initializations are elided in this excerpt. */
220 for (num_rects = drw->num_rects; num_rects--; rect++) {
221 int y1 = max(rect->y1, top);
222 int y2 = min(rect->y2, bottom);
/* Blit parameters: dst pitch+ROP, clipped dst rect, front (dst) offset,
 * src origin, then back (src) offset -- XY_SRC_COPY operand order. */
230 OUT_RING(ropcpp | dst_pitch);
231 OUT_RING((y1 << 16) | rect->x1);
232 OUT_RING((y2 << 16) | rect->x2);
233 OUT_RING(sarea_priv->front_offset);
234 OUT_RING((y1 << 16) | rect->x1);
236 OUT_RING(sarea_priv->back_offset);
243 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
/* All blits emitted: free the sorted hit entries. */
245 list_for_each_safe(hit, tmp, &hits) {
246 drm_i915_vbl_swap_t *swap_hit =
247 list_entry(hit, drm_i915_vbl_swap_t, head);
251 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
/*
 * Top-level interrupt handler.  Reads and acknowledges IIR and the pipe
 * status registers, then dispatches: breadcrumb wakeups for user
 * interrupts, vblank counting/wakeups (scheduling the swap tasklet when
 * swaps are pending), and opregion ASLE events.
 * NOTE(review): excerpt elides lines (iir declaration, the iir==0 early
 * return, masking under MSI, return statement, braces); comments cover
 * only the visible statements.
 */
255 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
257 struct drm_device *dev = (struct drm_device *) arg;
258 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
259 u32 pipea_stats, pipeb_stats;
262 pipea_stats = I915_READ(PIPEASTAT);
263 pipeb_stats = I915_READ(PIPEBSTAT);
/* Under MSI, interrupts are presumably masked around handling (elided
 * write here); the matching restore is below. */
265 if (dev->pdev->msi_enabled)
267 iir = I915_READ(IIR);
269 DRM_DEBUG("iir=%08x\n", iir);
/* Restore the IRQ mask before bailing out (elided: the iir==0 path). */
272 if (dev->pdev->msi_enabled) {
273 I915_WRITE(IMR, dev_priv->irq_mask_reg);
274 (void) I915_READ(IMR);
/* Ack pipe status bits by writing back what we read. */
279 I915_WRITE(PIPEASTAT, pipea_stats);
280 I915_WRITE(PIPEBSTAT, pipeb_stats);
/* Ack the handled interrupt identity bits. */
282 I915_WRITE(IIR, iir);
283 if (dev->pdev->msi_enabled)
284 I915_WRITE(IMR, dev_priv->irq_mask_reg);
285 (void) I915_READ(IIR); /* Flush posted writes */
/* Publish the latest completed breadcrumb to the shared sarea. */
287 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
288 if (iir & I915_USER_INTERRUPT)
290 DRM_WAKEUP(&dev_priv->irq_queue);
292 if (iir & (I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
293 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)) {
294 int vblank_pipe = dev_priv->vblank_pipe;
/* Both pipes enabled: count each pipe's vblank in its own counter. */
297 (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
298 == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
299 if (iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
300 atomic_inc(&dev->vbl_received);
301 if (iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
302 atomic_inc(&dev->vbl_received2);
/* Single-pipe mode: fold whichever enabled pipe fired into the primary
 * counter. */
303 } else if (((iir & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT) &&
304 (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
305 ((iir & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT) &&
306 (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
307 atomic_inc(&dev->vbl_received);
309 DRM_WAKEUP(&dev->vbl_queue);
310 drm_vbl_send_signals(dev);
/* Defer swap blits to the tasklet, which runs with the HW lock held. */
312 if (dev_priv->swaps_pending > 0)
313 drm_locked_tasklet(dev, i915_vblank_tasklet);
316 if (iir & I915_ASLE_INTERRUPT)
317 opregion_asle_intr(dev);
/* Pipe B events are also routed to the ASLE handler here. */
319 if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
320 opregion_asle_intr(dev);
/*
 * Emit a breadcrumb store plus a user-interrupt command into the ring
 * and return the new breadcrumb value.  Callers later wait for
 * READ_BREADCRUMB to reach this value (see i915_wait_irq).
 * NOTE(review): excerpt elides BEGIN_LP_RING/ADVANCE_LP_RING bookkeeping
 * and braces; comments cover only visible statements.
 */
325 static int i915_emit_irq(struct drm_device * dev)
327 drm_i915_private_t *dev_priv = dev->dev_private;
330 i915_kernel_lost_context(dev);
334 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
/* Keep the counter positive: wrap back to 1 before it exceeds INT_MAX,
 * since the return type (and waiters' irq_nr) is a signed int. */
336 if (dev_priv->counter > 0x7FFFFFFFUL)
337 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
/* Store the counter into hw status page dword 5 (the breadcrumb slot). */
340 OUT_RING(MI_STORE_DWORD_INDEX);
341 OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
342 OUT_RING(dev_priv->counter);
345 OUT_RING(MI_USER_INTERRUPT);
348 return dev_priv->counter;
/*
 * Take a reference on the user interrupt: the first holder unmasks
 * I915_USER_INTERRUPT.  Refcount is protected by user_irq_lock.
 * NOTE(review): closing brace elided in this excerpt.
 */
351 static void i915_user_irq_get(struct drm_device *dev)
353 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
355 spin_lock(&dev_priv->user_irq_lock);
356 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
357 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
358 spin_unlock(&dev_priv->user_irq_lock);
/*
 * Drop a reference on the user interrupt: the last holder masks
 * I915_USER_INTERRUPT again.  BUG if the refcount would underflow
 * while IRQs are enabled.
 * NOTE(review): closing brace elided in this excerpt.
 */
361 static void i915_user_irq_put(struct drm_device *dev)
363 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
365 spin_lock(&dev_priv->user_irq_lock);
366 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
367 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
368 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
369 spin_unlock(&dev_priv->user_irq_lock);
/*
 * Block (up to 3 seconds) until the hardware breadcrumb reaches
 * @irq_nr.  Fast path returns immediately if it already has; otherwise
 * hold a user-irq reference while sleeping on irq_queue.  On timeout
 * an EBUSY diagnostic is logged (elided error return presumed).
 * NOTE(review): excerpt elides the ret declaration, early return, and
 * braces; comments cover only visible statements.
 */
372 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
374 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
377 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
378 READ_BREADCRUMB(dev_priv));
/* Already satisfied: just refresh last_dispatch in the sarea. */
380 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
381 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
385 dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
/* Keep the user interrupt unmasked for the duration of the wait. */
387 i915_user_irq_get(dev);
388 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
389 READ_BREADCRUMB(dev_priv) >= irq_nr);
390 i915_user_irq_put(dev);
393 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
394 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
397 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
/*
 * Wait (up to 3 seconds) for the given vblank counter to pass
 * *@sequence, using a wraparound-safe unsigned comparison, then return
 * the counter value reached through *@sequence.
 * NOTE(review): excerpt elides the `counter` parameter line, ret
 * declaration, the no-init guard condition, return, and braces.
 */
401 static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
404 drm_i915_private_t *dev_priv = dev->dev_private;
405 unsigned int cur_vblank;
409 DRM_ERROR("called with no initialization\n");
/* (cur - *sequence) <= 2^23 treats up to half the 24-bit range as
 * "passed", which is robust against counter wraparound. */
413 DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
414 (((cur_vblank = atomic_read(counter))
415 - *sequence) <= (1<<23)));
417 *sequence = cur_vblank;
/* Wait for a vblank on the primary pipe's counter (vbl_received). */
423 int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
425 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
/* Wait for a vblank on the secondary pipe's counter (vbl_received2). */
428 int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
430 return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
433 /* Needs the lock as it touches the ring.
 *
 * Ioctl: emit a breadcrumb interrupt and copy the resulting sequence
 * number back to userspace via emit->irq_seq.
 * NOTE(review): excerpt elides the result declaration, the no-init
 * guard, the error/success returns, and braces.
 */
435 int i915_irq_emit(struct drm_device *dev, void *data,
436 struct drm_file *file_priv)
438 drm_i915_private_t *dev_priv = dev->dev_private;
439 drm_i915_irq_emit_t *emit = data;
/* Caller must hold the HW lock -- bail with a retry error otherwise. */
442 LOCK_TEST_WITH_RETURN(dev, file_priv);
445 DRM_ERROR("called with no initialization\n");
449 result = i915_emit_irq(dev);
451 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
452 DRM_ERROR("copy_to_user\n");
459 /* Doesn't need the hardware lock.
 *
 * Ioctl: wait for the breadcrumb to reach irqwait->irq_seq.
 * NOTE(review): excerpt elides the no-init guard condition, its return,
 * and braces.
 */
461 int i915_irq_wait(struct drm_device *dev, void *data,
462 struct drm_file *file_priv)
464 drm_i915_private_t *dev_priv = dev->dev_private;
465 drm_i915_irq_wait_t *irqwait = data;
468 DRM_ERROR("called with no initialization\n");
472 return i915_wait_irq(dev, irqwait->irq_seq);
475 /* Set the vblank monitor pipe
 *
 * Ioctl: validate pipe->pipe, then enable the vblank interrupt for the
 * requested pipe(s) and disable it for the other(s), recording the
 * selection in dev_priv->vblank_pipe.
 * NOTE(review): excerpt elides the no-init guard condition, error
 * returns, and braces; the enable/disable calls appear without the
 * user_irq_lock visible here -- TODO confirm locking in full source.
 */
477 int i915_vblank_pipe_set(struct drm_device *dev, void *data,
478 struct drm_file *file_priv)
480 drm_i915_private_t *dev_priv = dev->dev_private;
481 drm_i915_vblank_pipe_t *pipe = data;
482 u32 enable_mask = 0, disable_mask = 0;
485 DRM_ERROR("called with no initialization\n");
/* Reject any bits beyond the two defined pipe flags. */
489 if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
490 DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
/* Build complementary masks: requested pipes go on, the rest go off. */
494 if (pipe->pipe & DRM_I915_VBLANK_PIPE_A)
495 enable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
497 disable_mask |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
499 if (pipe->pipe & DRM_I915_VBLANK_PIPE_B)
500 enable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
502 disable_mask |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
504 i915_enable_irq(dev_priv, enable_mask);
505 i915_disable_irq(dev_priv, disable_mask);
507 dev_priv->vblank_pipe = pipe->pipe;
/*
 * Ioctl: report which pipes have vblank interrupts enabled, derived
 * from the hardware IMR register rather than the cached vblank_pipe.
 * NOTE(review): excerpt elides the flag declaration, no-init guard,
 * return, and braces.  IMR is a *mask* register, so testing set bits
 * as "enabled" looks inverted -- TODO verify against full source.
 */
512 int i915_vblank_pipe_get(struct drm_device *dev, void *data,
513 struct drm_file *file_priv)
515 drm_i915_private_t *dev_priv = dev->dev_private;
516 drm_i915_vblank_pipe_t *pipe = data;
520 DRM_ERROR("called with no initialization\n");
524 flag = I915_READ(IMR);
526 if (flag & I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT)
527 pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
528 if (flag & I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT)
529 pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
/*
535 * Schedule buffer swap at given vertical blank.
 *
 * Ioctl: validate the request (no rotation, known seqtype flags, pipe
 * enabled, drawable exists), resolve relative sequences, reject or
 * bump missed targets, dedupe against already-queued swaps, cap the
 * queue at 100 entries, then allocate and enqueue a drm_i915_vbl_swap_t
 * for the IRQ tasklet to service.
 * NOTE(review): excerpt elides error-return statements and braces;
 * comments cover only visible statements.
 */
537 int i915_vblank_swap(struct drm_device *dev, void *data,
538 struct drm_file *file_priv)
540 drm_i915_private_t *dev_priv = dev->dev_private;
541 drm_i915_vblank_swap_t *swap = data;
542 drm_i915_vbl_swap_t *vbl_swap;
543 unsigned int pipe, seqtype, curseq;
544 unsigned long irqflags;
545 struct list_head *list;
548 DRM_ERROR("%s called with no initialization\n", __func__);
/* The blit path can't handle rotated scanout. */
552 if (dev_priv->sarea_priv->rotation) {
553 DRM_DEBUG("Rotation not supported\n");
557 if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
558 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
559 DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
/* SECONDARY flag selects pipe 1; otherwise pipe 0. */
563 pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
565 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
/* (1 << pipe) maps pipe 0/1 onto DRM_I915_VBLANK_PIPE_A/B bits. */
567 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
568 DRM_ERROR("Invalid pipe %d\n", pipe);
572 spin_lock_irqsave(&dev->drw_lock, irqflags);
574 if (!drm_get_drawable_info(dev, swap->drawable)) {
575 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
576 DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
580 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
582 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
/* Convert a relative request into an absolute target sequence. */
584 if (seqtype == _DRM_VBLANK_RELATIVE)
585 swap->sequence += curseq;
/* Target already passed (wraparound-safe test): either retarget to the
 * next vblank (NEXTONMISS) or report the miss. */
587 if ((curseq - swap->sequence) <= (1<<23)) {
588 if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
589 swap->sequence = curseq + 1;
591 DRM_DEBUG("Missed target sequence\n");
596 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
/* Dedupe: an identical (drawable, pipe, sequence) entry is a no-op. */
598 list_for_each(list, &dev_priv->vbl_swaps.head) {
599 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
601 if (vbl_swap->drw_id == swap->drawable &&
602 vbl_swap->pipe == pipe &&
603 vbl_swap->sequence == swap->sequence) {
604 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
605 DRM_DEBUG("Already scheduled\n");
610 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/* Hard cap on outstanding swaps to bound kernel memory use. */
612 if (dev_priv->swaps_pending >= 100) {
613 DRM_DEBUG("Too many swaps queued\n");
617 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
620 DRM_ERROR("Failed to allocate memory to queue swap\n");
626 vbl_swap->drw_id = swap->drawable;
627 vbl_swap->pipe = pipe;
628 vbl_swap->sequence = swap->sequence;
630 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
632 list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);
633 dev_priv->swaps_pending++;
635 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
/*
 * Quiesce interrupt generation before the IRQ handler is installed:
 * mask hardware status reporting (HWSTAM bit 0 left clear), clear IMR,
 * and disable all interrupt enables in IER.
 * NOTE(review): closing brace elided in this excerpt.
 */
642 void i915_driver_irq_preinstall(struct drm_device * dev)
644 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
646 I915_WRITE(HWSTAM, 0xfffe);
647 I915_WRITE(IMR, 0x0);
648 I915_WRITE(IER, 0x0);
/*
 * Finish IRQ setup after the handler is installed: initialize the swap
 * queue state, default the vblank pipe selection to pipe A, compute the
 * initial IMR shadow (everything masked except the selected vblank
 * pipes, restricted to the driver's enable mask), program IMR/IER,
 * enable ASLE events, and create the breadcrumb wait queue.
 * NOTE(review): closing brace elided in this excerpt.
 */
651 void i915_driver_irq_postinstall(struct drm_device * dev)
653 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
655 spin_lock_init(&dev_priv->swaps_lock);
656 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
657 dev_priv->swaps_pending = 0;
659 if (!dev_priv->vblank_pipe)
660 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
662 /* Set initial unmasked IRQs to just the selected vblank pipes. */
663 dev_priv->irq_mask_reg = ~0;
664 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
665 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
666 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
667 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
/* Never unmask anything outside the driver's supported interrupt set. */
669 dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
671 I915_WRITE(IMR, dev_priv->irq_mask_reg);
672 I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
673 (void) I915_READ(IER);
675 opregion_enable_asle(dev);
677 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
/*
 * Tear down interrupt delivery: fully mask HWSTAM and IMR, disable IER,
 * then read-and-write-back IIR to acknowledge any interrupt bits left
 * pending.
 * NOTE(review): excerpt elides lines (e.g. the temp declaration, a
 * likely dev_priv NULL guard, closing brace).
 */
680 void i915_driver_irq_uninstall(struct drm_device * dev)
682 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
688 I915_WRITE(HWSTAM, 0xffff);
689 I915_WRITE(IMR, 0xffff);
690 I915_WRITE(IER, 0x0);
/* Ack any stale identity bits so nothing fires on re-install. */
692 temp = I915_READ(IIR);
693 I915_WRITE(IIR, temp);