/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Really want an OS-independent resettable timer.  Would like to have
 * this loop run for (eg) 3 sec, but have the timer reset every time
 * the head pointer changes, so that EBUSY only happens if the ring
 * actually stalls for (eg) 3 seconds.
 */
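/* Note: ring->space below is the number of free bytes between the cached
 * tail and the hardware head; the 8 bytes of slack keep the tail from ever
 * advancing right up to the head, so an empty ring stays distinguishable
 * from a full one.
 */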
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
	u32 last_acthd = I915_READ(acthd_reg);
	u32 acthd;
	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	int i;

	for (i = 0; i < 100000; i++) {
		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
		acthd = I915_READ(acthd_reg);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->Size;
		if (ring->space >= n)
			return 0;

		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

		if (ring->head != last_head)
			i = 0;
		if (acthd != last_acthd)
			i = 0;

		last_head = ring->head;
		last_acthd = acthd;
		msleep_interruptible(10);
	}

	return -EBUSY;
}

/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
int i915_init_phys_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Program Hardware Status Page */
	dev_priv->status_page_dmah =
		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);

	if (!dev_priv->status_page_dmah) {
		DRM_ERROR("Can not allocate hardware status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);

	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");
	return 0;
}

/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (dev_priv->status_gfx_addr) {
		dev_priv->status_gfx_addr = 0;
		drm_core_ioremapfree(&dev_priv->hws_map, dev);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

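/* Resynchronize the driver's ring bookkeeping (head, tail, free space) with
 * the hardware pointers, e.g. after another context has touched the ring.
 */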
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);

	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + 8);
	if (ring->space < 0)
		ring->space += ring->Size;

	if (ring->head == ring->tail)
		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}

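/* Tear down DMA state: disable interrupts, unmap the ring buffer and, on
 * chipsets that keep the status page in graphics memory, free it.
 */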
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	if (dev_priv->ring.virtual_start) {
		drm_core_ioremapfree(&dev_priv->ring.map, dev);
		dev_priv->ring.virtual_start = 0;
		dev_priv->ring.map.handle = 0;
		dev_priv->ring.map.size = 0;
	}

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

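/* Set up the classic (non-GEM) DMA state from the userspace init request:
 * locate the SAREA, map the ring buffer and record the buffer offsets.
 */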
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	dev_priv->sarea = drm_getsarea(dev);
	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		i915_dma_cleanup(dev);
		return -EINVAL;
	}

	dev_priv->sarea_priv = (drm_i915_sarea_t *)
	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);

	if (init->ring_size != 0) {
		if (dev_priv->ring.ring_obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		dev_priv->ring.Size = init->ring_size;
		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;

		dev_priv->ring.map.offset = init->ring_start;
		dev_priv->ring.map.size = init->ring_size;
		dev_priv->ring.map.type = 0;
		dev_priv->ring.map.flags = 0;
		dev_priv->ring.map.mtrr = 0;

		drm_core_ioremap(&dev_priv->ring.map, dev);

		if (dev_priv->ring.map.handle == NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("can not ioremap virtual address for"
				  " ring buffer\n");
			return -ENOMEM;
		}
	}

	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;

	dev_priv->cpp = init->cpp;
	dev_priv->back_offset = init->back_offset;
	dev_priv->front_offset = init->front_offset;
	dev_priv->current_page = 0;
	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->allow_batchbuffer = 1;

	return 0;
}

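/* Re-arm an already initialized engine (e.g. after resume) by re-pointing
 * the hardware at the existing ring mapping and status page.
 */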
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	DRM_DEBUG("%s\n", __func__);

	if (!dev_priv->sarea) {
		DRM_ERROR("can not find sarea!\n");
		return -EINVAL;
	}

	if (dev_priv->ring.map.handle == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!dev_priv->hw_status_page) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);

	if (dev_priv->status_gfx_addr != 0)
		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
	DRM_DEBUG("Enabled hardware status page\n");

	return 0;
}

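/* DRM_I915_INIT ioctl: dispatch to initialize, cleanup or resume according
 * to the func code supplied by userspace.
 */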
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
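/* Example: a 2D command (top three opcode bits 0x2) carries its remaining
 * length in the low byte, so its total size is (cmd & 0xff) + 2 dwords,
 * while MI_NOOP and MI_FLUSH are a single dword each.
 */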
static int do_validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}

static int validate_cmd(int cmd)
{
	int ret = do_validate_cmd(cmd);

	/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
	return ret;
}

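/* Copy a user-supplied command buffer into the ring, validating the length
 * of every command with validate_cmd() before it is emitted.
 */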
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	RING_LOCALS;

	if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
		return -EINVAL;

	BEGIN_LP_RING((dwords+1)&~1);

	for (i = 0; i < dwords;) {
		int cmd, sz;

		if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
			return -EINVAL;

		if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
			return -EINVAL;

		OUT_RING(cmd);

		while (++i, --sz) {
			if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
							 sizeof(cmd))) {
				return -EINVAL;
			}
			OUT_RING(cmd);
		}
	}

	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

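/* Emit a GFX_OP_DRAWRECT_INFO packet for one userspace cliprect, after
 * checking that the box is non-empty.
 */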
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect __user *boxes,
	      int i, int DR1, int DR4)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect box;
	RING_LOCALS;

	if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
		return -EFAULT;
	}

	if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box.x1, box.y1, box.x2, box.y2);
		return -EINVAL;
	}

	if (IS_I965G(dev)) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
		OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */
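/* The breadcrumb is a monotonically increasing counter that the GPU stores
 * into dword 5 of the hardware status page; READ_BREADCRUMB() and the
 * last_dispatch SAREA field read it back.
 */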
static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;

	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();
}

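/* Run a validated command buffer once per cliprect (or once if there are
 * no cliprects), then emit a breadcrumb.
 */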
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t * cmd)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, cmd->cliprects, i,
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

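/* Start a userspace batch buffer, using MI_BATCH_BUFFER_START where the
 * hardware supports it and the older MI_BATCH_BUFFER form on 830/845G.
 */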
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_clip_rect __user *boxes = batch->cliprects;
	int nbox = batch->num_cliprects;
	int i = 0, count;
	RING_LOCALS;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, boxes, i,
						batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
			if (IS_I965G(dev)) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
			ADVANCE_LP_RING();
		} else {
			BEGIN_LP_RING(4);
			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);

	return 0;
}

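/* Queue a front/back flip through the ring: flush, point the display at
 * the other buffer, wait for the flip and record a breadcrumb.
 */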
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	RING_LOCALS;

	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
		  __func__,
		  dev_priv->current_page,
		  dev_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	BEGIN_LP_RING(2);
	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(6);
	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->current_page == 0) {
		OUT_RING(dev_priv->back_offset);
		dev_priv->current_page = 1;
	} else {
		OUT_RING(dev_priv->front_offset);
		dev_priv->current_page = 0;
	}
	OUT_RING(0);
	ADVANCE_LP_RING();

	BEGIN_LP_RING(2);
	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(0);
	ADVANCE_LP_RING();

	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
	return 0;
}

static int i915_quiescent(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	i915_kernel_lost_context(dev);
	return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__);
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_quiescent(dev);
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;

	if (!dev_priv->allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
		  batch->start, batch->used, batch->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
							batch->num_cliprects *
							sizeof(struct drm_clip_rect)))
		return -EFAULT;

	ret = i915_dispatch_batchbuffer(dev, batch);

	sarea_priv->last_dispatch = (int)hw_status[5];
	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 *hw_status = dev_priv->hw_status_page;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    dev_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	int ret;

	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects &&
	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
				cmdbuf->num_cliprects *
				sizeof(struct drm_clip_rect))) {
		DRM_ERROR("Fault accessing cliprects\n");
		return -EFAULT;
	}

	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		return ret;
	}

	sarea_priv->last_dispatch = (int)hw_status[5];
	return 0;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	DRM_DEBUG("%s\n", __func__);

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	return i915_dispatch_flip(dev);
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_HAS_GEM:
		value = dev_priv->has_gem;
		break;
	default:
		DRM_ERROR("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		dev_priv->tex_lru_log_granularity = param->value;
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->allow_batchbuffer = param->value;
		break;
	default:
		DRM_ERROR("unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	return 0;
}

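/* DRM_I915_HWS_ADDR ioctl: on chipsets that keep the hardware status page
 * in graphics memory (e.g. G33), map the page at the offset supplied by
 * userspace and point HWS_PGA at it.
 */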
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws->addr);

	dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->hws_map.offset = dev->agp->base + hws->addr;
	dev_priv->hws_map.size = 4*1024;
	dev_priv->hws_map.type = 0;
	dev_priv->hws_map.flags = 0;
	dev_priv->hws_map.mtrr = 0;

	drm_core_ioremap(&dev_priv->hws_map, dev);
	if (dev_priv->hws_map.handle == NULL) {
		i915_dma_cleanup(dev);
		dev_priv->status_gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}
	dev_priv->hw_status_page = dev_priv->hws_map.handle;

	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
			dev_priv->status_gfx_addr);
	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
	return 0;
}

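/* Early driver load: allocate the private structure, map the MMIO BAR, set
 * up the physical status page where needed, and enable MSI except on
 * 945G/GM where it does not work reliably.
 */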
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long base, size;
	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;

	/* i915 has 4 more counters */
	dev->counters += 4;
	dev->types[6] = _DRM_STAT_IRQ;
	dev->types[7] = _DRM_STAT_PRIMARY;
	dev->types[8] = _DRM_STAT_SECONDARY;
	dev->types[9] = _DRM_STAT_DMA;

	dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
	if (dev_priv == NULL)
		return -ENOMEM;

	memset(dev_priv, 0, sizeof(drm_i915_private_t));

	dev->dev_private = (void *)dev_priv;

	/* Add register map (needed for suspend/resume) */
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);

	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
			 _DRM_KERNEL | _DRM_DRIVER,
			 &dev_priv->mmio_map);

	/* Init HWS */
	if (!I915_NEED_GFX_HWS(dev)) {
		ret = i915_init_phys_hws(dev);
		if (ret != 0)
			return ret;
	}

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		if (pci_enable_msi(dev->pdev))
			DRM_ERROR("failed to enable MSI\n");

	intel_opregion_init(dev);

	spin_lock_init(&dev_priv->user_irq_lock);

	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	i915_free_hws(dev);

	if (dev_priv->mmio_map)
		drm_rmmap(dev, dev_priv->mmio_map);

	intel_opregion_free(dev);

	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
		 DRM_MEM_DRIVER);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv;

	i915_file_priv = (struct drm_i915_file_private *)
	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);

	if (!i915_file_priv)
		return -ENOMEM;

	file_priv->driver_priv = i915_file_priv;

	i915_file_priv->mm.last_gem_seqno = 0;
	i915_file_priv->mm.last_gem_throttle_seqno = 0;

	return 0;
}

void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv)
		return;

	i915_gem_lastclose(dev);

	if (dev_priv->agp_heap)
		i915_mem_takedown(&(dev_priv->agp_heap));

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
{
	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;

	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
}

struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}