2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
32 #include <linux/swap.h>
33 #include <linux/pci.h>
35 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
38 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
39 uint32_t read_domains,
40 uint32_t write_domain);
41 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
42 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
43 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
44 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
49 static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
50 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
51 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
52 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
53 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
55 static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
56 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
57 static int i915_gem_evict_something(struct drm_device *dev);
58 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
59 struct drm_i915_gem_pwrite *args,
60 struct drm_file *file_priv);
62 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
65 drm_i915_private_t *dev_priv = dev->dev_private;
68 (start & (PAGE_SIZE - 1)) != 0 ||
69 (end & (PAGE_SIZE - 1)) != 0) {
73 drm_mm_init(&dev_priv->mm.gtt_space, start,
76 dev->gtt_total = (uint32_t) (end - start);
82 i915_gem_init_ioctl(struct drm_device *dev, void *data,
83 struct drm_file *file_priv)
85 struct drm_i915_gem_init *args = data;
88 mutex_lock(&dev->struct_mutex);
89 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
90 mutex_unlock(&dev->struct_mutex);
96 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
97 struct drm_file *file_priv)
99 struct drm_i915_gem_get_aperture *args = data;
101 if (!(dev->driver->driver_features & DRIVER_GEM))
104 args->aper_size = dev->gtt_total;
105 args->aper_available_size = (args->aper_size -
106 atomic_read(&dev->pin_memory));
113 * Creates a new mm object and returns a handle to it.
116 i915_gem_create_ioctl(struct drm_device *dev, void *data,
117 struct drm_file *file_priv)
119 struct drm_i915_gem_create *args = data;
120 struct drm_gem_object *obj;
123 args->size = roundup(args->size, PAGE_SIZE);
125 /* Allocate the new object */
126 obj = drm_gem_object_alloc(dev, args->size);
130 ret = drm_gem_handle_create(file_priv, obj, &handle);
131 mutex_lock(&dev->struct_mutex);
132 drm_gem_object_handle_unreference(obj);
133 mutex_unlock(&dev->struct_mutex);
138 args->handle = handle;
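/*
 * Illustrative userspace sketch (not part of this file) of driving the
 * create ioctl above; it assumes the uapi definitions from
 * drm/i915_drm.h and omits error handling.  The kernel rounds the size
 * up to a page and returns a handle for later pread/pwrite/mmap calls.
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	handle = create.handle;
 */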
144 * Reads data from the object referenced by handle.
146 * On error, the contents of *data are undefined.
149 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
150 struct drm_file *file_priv)
152 struct drm_i915_gem_pread *args = data;
153 struct drm_gem_object *obj;
154 struct drm_i915_gem_object *obj_priv;
159 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
162 obj_priv = obj->driver_private;
164 /* Bounds check source.
166 * XXX: This could use review for overflow issues...
168 if (args->offset > obj->size || args->size > obj->size ||
169 args->offset + args->size > obj->size) {
170 drm_gem_object_unreference(obj);
174 mutex_lock(&dev->struct_mutex);
176 ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset,
179 drm_gem_object_unreference(obj);
180 mutex_unlock(&dev->struct_mutex);
184 offset = args->offset;
186 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
187 args->size, &offset);
188 if (read != args->size) {
189 drm_gem_object_unreference(obj);
190 mutex_unlock(&dev->struct_mutex);
197 drm_gem_object_unreference(obj);
198 mutex_unlock(&dev->struct_mutex);
203 /* This is the fast write path which cannot handle
204 * page faults in the source data
208 fast_user_write(struct io_mapping *mapping,
209 loff_t page_base, int page_offset,
210 char __user *user_data,
214 unsigned long unwritten;
216 vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
217 unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + page_offset,
219 io_mapping_unmap_atomic(vaddr_atomic);
225 /* Here's the write path which can sleep for
230 slow_user_write(struct io_mapping *mapping,
231 loff_t page_base, int page_offset,
232 char __user *user_data,
236 unsigned long unwritten;
238 vaddr = io_mapping_map_wc(mapping, page_base);
241 unwritten = __copy_from_user(vaddr + page_offset,
243 io_mapping_unmap(vaddr);
250 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
251 struct drm_i915_gem_pwrite *args,
252 struct drm_file *file_priv)
254 struct drm_i915_gem_object *obj_priv = obj->driver_private;
255 drm_i915_private_t *dev_priv = dev->dev_private;
257 loff_t offset, page_base;
258 char __user *user_data;
259 int page_offset, page_length;
262 user_data = (char __user *) (uintptr_t) args->data_ptr;
264 if (!access_ok(VERIFY_READ, user_data, remain))
268 mutex_lock(&dev->struct_mutex);
269 ret = i915_gem_object_pin(obj, 0);
271 mutex_unlock(&dev->struct_mutex);
274 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
278 obj_priv = obj->driver_private;
279 offset = obj_priv->gtt_offset + args->offset;
283 /* Operation in this page
285 * page_base = page offset within aperture
286 * page_offset = offset within page
287 * page_length = bytes to copy for this page
289 page_base = (offset & ~(PAGE_SIZE-1));
290 page_offset = offset & (PAGE_SIZE-1);
291 page_length = remain;
292 if ((page_offset + remain) > PAGE_SIZE)
293 page_length = PAGE_SIZE - page_offset;
295 ret = fast_user_write (dev_priv->mm.gtt_mapping, page_base,
296 page_offset, user_data, page_length);
298 /* If we get a fault while copying data, then (presumably) our
299 * source page isn't available. In this case, use the
300 * non-atomic function
303 ret = slow_user_write (dev_priv->mm.gtt_mapping,
304 page_base, page_offset,
305 user_data, page_length);
310 remain -= page_length;
311 user_data += page_length;
312 offset += page_length;
316 i915_gem_object_unpin(obj);
317 mutex_unlock(&dev->struct_mutex);
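/*
 * Worked example for the copy loop above (illustrative): with
 * remain = 100 and an aperture offset of 4090, the first pass uses
 * page_base = 0, page_offset = 4090 and page_length = 4096 - 4090 = 6;
 * the second pass then copies the remaining 94 bytes starting at
 * page_base = 4096, page_offset = 0.
 */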
323 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
324 struct drm_i915_gem_pwrite *args,
325 struct drm_file *file_priv)
331 mutex_lock(&dev->struct_mutex);
333 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
335 mutex_unlock(&dev->struct_mutex);
339 offset = args->offset;
341 written = vfs_write(obj->filp,
342 (char __user *)(uintptr_t) args->data_ptr,
343 args->size, &offset);
344 if (written != args->size) {
345 mutex_unlock(&dev->struct_mutex);
352 mutex_unlock(&dev->struct_mutex);
358 * Writes data to the object referenced by handle.
360 * On error, the contents of the buffer that were to be modified are undefined.
363 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
364 struct drm_file *file_priv)
366 struct drm_i915_gem_pwrite *args = data;
367 struct drm_gem_object *obj;
368 struct drm_i915_gem_object *obj_priv;
371 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
374 obj_priv = obj->driver_private;
376 /* Bounds check destination.
378 * XXX: This could use review for overflow issues...
380 if (args->offset > obj->size || args->size > obj->size ||
381 args->offset + args->size > obj->size) {
382 drm_gem_object_unreference(obj);
386 /* We can only do the GTT pwrite on untiled buffers, as otherwise
387 * it would end up going through the fenced access, and we'll get
388 * different detiling behavior between reading and writing.
389 * pread/pwrite currently are reading and writing from the CPU
390 * perspective, requiring manual detiling by the client.
392 if (obj_priv->phys_obj)
393 ret = i915_gem_phys_pwrite(dev, obj, args, file_priv);
394 else if (obj_priv->tiling_mode == I915_TILING_NONE &&
396 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
398 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
402 DRM_INFO("pwrite failed %d\n", ret);
405 drm_gem_object_unreference(obj);
411 * Called when user space prepares to use an object with the CPU, either
412 * through the mmap ioctl's mapping or a GTT mapping.
415 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
416 struct drm_file *file_priv)
418 struct drm_i915_gem_set_domain *args = data;
419 struct drm_gem_object *obj;
420 uint32_t read_domains = args->read_domains;
421 uint32_t write_domain = args->write_domain;
424 if (!(dev->driver->driver_features & DRIVER_GEM))
427 /* Only handle setting domains to types used by the CPU. */
428 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
431 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
434 /* Having something in the write domain implies it's in the read
435 * domain, and only that read domain. Enforce that in the request.
437 if (write_domain != 0 && read_domains != write_domain)
440 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
444 mutex_lock(&dev->struct_mutex);
446 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
447 obj, obj->size, read_domains, write_domain);
449 if (read_domains & I915_GEM_DOMAIN_GTT) {
450 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
452 /* Silently promote "you're not bound, there was nothing to do"
453 * to success, since the client was just asking us to
454 * make sure everything was done.
459 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
462 drm_gem_object_unreference(obj);
463 mutex_unlock(&dev->struct_mutex);
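/*
 * Illustrative userspace sketch (assumes the uapi definitions from
 * drm/i915_drm.h): before writing to the object through a CPU mapping,
 * move it to the CPU domain so the kernel performs the flushing and
 * invalidation described above.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */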
468 * Called when user space has done writes to this buffer
471 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
472 struct drm_file *file_priv)
474 struct drm_i915_gem_sw_finish *args = data;
475 struct drm_gem_object *obj;
476 struct drm_i915_gem_object *obj_priv;
479 if (!(dev->driver->driver_features & DRIVER_GEM))
482 mutex_lock(&dev->struct_mutex);
483 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
485 mutex_unlock(&dev->struct_mutex);
490 DRM_INFO("%s: sw_finish %d (%p %d)\n",
491 __func__, args->handle, obj, obj->size);
493 obj_priv = obj->driver_private;
495 /* Pinned buffers may be scanout, so flush the cache */
496 if (obj_priv->pin_count)
497 i915_gem_object_flush_cpu_write_domain(obj);
499 drm_gem_object_unreference(obj);
500 mutex_unlock(&dev->struct_mutex);
505 * Maps the contents of an object, returning the address it is mapped
508 * While the mapping holds a reference on the contents of the object, it doesn't
509 * imply a ref on the object itself.
512 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
513 struct drm_file *file_priv)
515 struct drm_i915_gem_mmap *args = data;
516 struct drm_gem_object *obj;
520 if (!(dev->driver->driver_features & DRIVER_GEM))
523 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
527 offset = args->offset;
529 down_write(¤t->mm->mmap_sem);
530 addr = do_mmap(obj->filp, 0, args->size,
531 PROT_READ | PROT_WRITE, MAP_SHARED,
533 up_write(¤t->mm->mmap_sem);
534 mutex_lock(&dev->struct_mutex);
535 drm_gem_object_unreference(obj);
536 mutex_unlock(&dev->struct_mutex);
537 if (IS_ERR((void *)addr))
540 args->addr_ptr = (uint64_t) addr;
546 * i915_gem_fault - fault a page into the GTT
547 * vma: VMA in question
550 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
551 * from userspace. The fault handler takes care of binding the object to
552 * the GTT (if needed), allocating and programming a fence register (again,
553 * only if needed based on whether the old reg is still valid or the object
554 * is tiled) and inserting a new PTE into the faulting process.
556 * Note that the faulting process may involve evicting existing objects
557 * from the GTT and/or fence registers to make room. So performance may
558 * suffer if the GTT working set is large or there are few fence registers
561 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
563 struct drm_gem_object *obj = vma->vm_private_data;
564 struct drm_device *dev = obj->dev;
565 struct drm_i915_private *dev_priv = dev->dev_private;
566 struct drm_i915_gem_object *obj_priv = obj->driver_private;
570 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
572 /* We don't use vmf->pgoff since that has the fake offset */
573 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
576 /* Now bind it into the GTT if needed */
577 mutex_lock(&dev->struct_mutex);
578 if (!obj_priv->gtt_space) {
579 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
581 mutex_unlock(&dev->struct_mutex);
582 return VM_FAULT_SIGBUS;
584 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
587 /* Need a new fence register? */
588 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
589 obj_priv->tiling_mode != I915_TILING_NONE) {
590 ret = i915_gem_object_get_fence_reg(obj, write);
592 return VM_FAULT_SIGBUS;
595 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
598 /* Finally, remap it using the new GTT offset */
599 ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
601 mutex_unlock(&dev->struct_mutex);
609 DRM_ERROR("can't insert pfn?? fault or busy...\n");
610 return VM_FAULT_SIGBUS;
612 return VM_FAULT_NOPAGE;
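/*
 * Illustrative userspace flow for the fault path above (assumes the
 * mmap_gtt ioctl defined later in this file and standard mmap(2)): the
 * ioctl hands back a fake offset, and mapping that offset through the
 * DRM fd creates a VMA whose faults land in i915_gem_fault(), which
 * binds the object and inserts the aperture PTE.
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mg.offset);
 */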
617 * i915_gem_create_mmap_offset - create a fake mmap offset for an object
618 * @obj: obj in question
620 * GEM memory mapping works by handing back to userspace a fake mmap offset
621 * it can use in a subsequent mmap(2) call. The DRM core code then looks
622 * up the object based on the offset and sets up the various memory mapping
625 * This routine allocates and attaches a fake offset for @obj.
628 i915_gem_create_mmap_offset(struct drm_gem_object *obj)
630 struct drm_device *dev = obj->dev;
631 struct drm_gem_mm *mm = dev->mm_private;
632 struct drm_i915_gem_object *obj_priv = obj->driver_private;
633 struct drm_map_list *list;
637 /* Set the object up for mmap'ing */
638 list = &obj->map_list;
639 list->map = drm_calloc(1, sizeof(struct drm_map_list),
645 map->type = _DRM_GEM;
646 map->size = obj->size;
649 /* Get a DRM GEM mmap offset allocated... */
650 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
651 obj->size / PAGE_SIZE, 0, 0);
652 if (!list->file_offset_node) {
653 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
658 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
659 obj->size / PAGE_SIZE, 0);
660 if (!list->file_offset_node) {
665 list->hash.key = list->file_offset_node->start;
666 if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
667 DRM_ERROR("failed to add to map hash\n");
671 /* By now we should be all set, any drm_mmap request on the offset
672 * below will get to our mmap & fault handler */
673 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
678 drm_mm_put_block(list->file_offset_node);
680 drm_free(list->map, sizeof(struct drm_map_list), DRM_MEM_DRIVER);
686 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
687 * @obj: object to check
689 * Return the required GTT alignment for an object, taking into account
690 * potential fence register mapping if needed.
693 i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
695 struct drm_device *dev = obj->dev;
696 struct drm_i915_gem_object *obj_priv = obj->driver_private;
700 * Minimum alignment is 4k (GTT page size), but might be greater
701 * if a fence register is needed for the object.
703 if (IS_I965G(dev) || obj_priv->tiling_mode == I915_TILING_NONE)
707 * Previous chips need to be aligned to the size of the smallest
708 * fence register that can contain the object.
715 for (i = start; i < obj->size; i <<= 1)
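/*
 * Worked example (illustrative): the loop above starts i at the chip's
 * minimum fence size (set in the elided lines) and doubles it until it
 * covers obj->size, so e.g. a 300KB tiled object with a 512KB minimum
 * stays at 512KB alignment, while a 3MB object rounds up to 4MB.
 */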
722 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
724 * @data: GTT mapping ioctl data
725 * @file_priv: GEM object info
727 * Simply returns the fake offset to userspace so it can mmap it.
728 * The mmap call will end up in drm_gem_mmap(), which will set things
729 * up so we can get faults in the handler above.
731 * The fault handler will take care of binding the object into the GTT
732 * (since it may have been evicted to make room for something), allocating
733 * a fence register, and mapping the appropriate aperture address into
737 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
738 struct drm_file *file_priv)
740 struct drm_i915_gem_mmap_gtt *args = data;
741 struct drm_i915_private *dev_priv = dev->dev_private;
742 struct drm_gem_object *obj;
743 struct drm_i915_gem_object *obj_priv;
746 if (!(dev->driver->driver_features & DRIVER_GEM))
749 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
753 mutex_lock(&dev->struct_mutex);
755 obj_priv = obj->driver_private;
757 if (!obj_priv->mmap_offset) {
758 ret = i915_gem_create_mmap_offset(obj);
763 args->offset = obj_priv->mmap_offset;
765 obj_priv->gtt_alignment = i915_gem_get_gtt_alignment(obj);
767 /* Make sure the alignment is correct for fence regs etc */
768 if (obj_priv->agp_mem &&
769 (obj_priv->gtt_offset & (obj_priv->gtt_alignment - 1))) {
770 drm_gem_object_unreference(obj);
771 mutex_unlock(&dev->struct_mutex);
776 * Pull it into the GTT so that we have a page list (makes the
777 * initial fault faster and any subsequent flushing possible).
779 if (!obj_priv->agp_mem) {
780 ret = i915_gem_object_bind_to_gtt(obj, obj_priv->gtt_alignment);
782 drm_gem_object_unreference(obj);
783 mutex_unlock(&dev->struct_mutex);
786 list_add(&obj_priv->list, &dev_priv->mm.inactive_list);
789 drm_gem_object_unreference(obj);
790 mutex_unlock(&dev->struct_mutex);
796 i915_gem_object_free_page_list(struct drm_gem_object *obj)
798 struct drm_i915_gem_object *obj_priv = obj->driver_private;
799 int page_count = obj->size / PAGE_SIZE;
802 if (obj_priv->page_list == NULL)
806 for (i = 0; i < page_count; i++)
807 if (obj_priv->page_list[i] != NULL) {
809 set_page_dirty(obj_priv->page_list[i]);
810 mark_page_accessed(obj_priv->page_list[i]);
811 page_cache_release(obj_priv->page_list[i]);
815 drm_free(obj_priv->page_list,
816 page_count * sizeof(struct page *),
818 obj_priv->page_list = NULL;
822 i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
824 struct drm_device *dev = obj->dev;
825 drm_i915_private_t *dev_priv = dev->dev_private;
826 struct drm_i915_gem_object *obj_priv = obj->driver_private;
828 /* Add a reference if we're newly entering the active list. */
829 if (!obj_priv->active) {
830 drm_gem_object_reference(obj);
831 obj_priv->active = 1;
833 /* Move from whatever list we were on to the tail of execution. */
834 list_move_tail(&obj_priv->list,
835 &dev_priv->mm.active_list);
836 obj_priv->last_rendering_seqno = seqno;
840 i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
842 struct drm_device *dev = obj->dev;
843 drm_i915_private_t *dev_priv = dev->dev_private;
844 struct drm_i915_gem_object *obj_priv = obj->driver_private;
846 BUG_ON(!obj_priv->active);
847 list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
848 obj_priv->last_rendering_seqno = 0;
852 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
854 struct drm_device *dev = obj->dev;
855 drm_i915_private_t *dev_priv = dev->dev_private;
856 struct drm_i915_gem_object *obj_priv = obj->driver_private;
858 i915_verify_inactive(dev, __FILE__, __LINE__);
859 if (obj_priv->pin_count != 0)
860 list_del_init(&obj_priv->list);
862 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
864 obj_priv->last_rendering_seqno = 0;
865 if (obj_priv->active) {
866 obj_priv->active = 0;
867 drm_gem_object_unreference(obj);
869 i915_verify_inactive(dev, __FILE__, __LINE__);
873 * Creates a new sequence number, emitting a write of it to the status page
874 * plus an interrupt, which will trigger i915_user_interrupt_handler.
876 * Must be called with struct_lock held.
878 * Returned sequence numbers are nonzero on success.
881 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
883 drm_i915_private_t *dev_priv = dev->dev_private;
884 struct drm_i915_gem_request *request;
889 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
893 /* Grab the seqno we're going to make this request be, and bump the
894 * next (skipping 0 so it can be the reserved no-seqno value).
896 seqno = dev_priv->mm.next_gem_seqno;
897 dev_priv->mm.next_gem_seqno++;
898 if (dev_priv->mm.next_gem_seqno == 0)
899 dev_priv->mm.next_gem_seqno++;
902 OUT_RING(MI_STORE_DWORD_INDEX);
903 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
906 OUT_RING(MI_USER_INTERRUPT);
909 DRM_DEBUG("%d\n", seqno);
911 request->seqno = seqno;
912 request->emitted_jiffies = jiffies;
913 was_empty = list_empty(&dev_priv->mm.request_list);
914 list_add_tail(&request->list, &dev_priv->mm.request_list);
916 /* Associate any objects on the flushing list matching the write
917 * domain we're flushing with our flush.
919 if (flush_domains != 0) {
920 struct drm_i915_gem_object *obj_priv, *next;
922 list_for_each_entry_safe(obj_priv, next,
923 &dev_priv->mm.flushing_list, list) {
924 struct drm_gem_object *obj = obj_priv->obj;
926 if ((obj->write_domain & flush_domains) ==
928 obj->write_domain = 0;
929 i915_gem_object_move_to_active(obj, seqno);
935 if (was_empty && !dev_priv->mm.suspended)
936 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
941 * Command execution barrier
943 * Ensures that all commands in the ring are finished
944 * before signalling the CPU
947 i915_retire_commands(struct drm_device *dev)
949 drm_i915_private_t *dev_priv = dev->dev_private;
950 uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
951 uint32_t flush_domains = 0;
954 /* The sampler always gets flushed on i965 (sigh) */
956 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
959 OUT_RING(0); /* noop */
961 return flush_domains;
965 * Moves buffers associated only with the given active seqno from the active
966 * to inactive list, potentially freeing them.
969 i915_gem_retire_request(struct drm_device *dev,
970 struct drm_i915_gem_request *request)
972 drm_i915_private_t *dev_priv = dev->dev_private;
974 /* Move any buffers on the active list that are no longer referenced
975 * by the ringbuffer to the flushing/inactive lists as appropriate.
977 while (!list_empty(&dev_priv->mm.active_list)) {
978 struct drm_gem_object *obj;
979 struct drm_i915_gem_object *obj_priv;
981 obj_priv = list_first_entry(&dev_priv->mm.active_list,
982 struct drm_i915_gem_object,
986 /* If the seqno being retired doesn't match the oldest in the
987 * list, then the oldest in the list must still be newer than
990 if (obj_priv->last_rendering_seqno != request->seqno)
994 DRM_INFO("%s: retire %d moves to inactive list %p\n",
995 __func__, request->seqno, obj);
998 if (obj->write_domain != 0)
999 i915_gem_object_move_to_flushing(obj);
1001 i915_gem_object_move_to_inactive(obj);
1006 * Returns true if seq1 is later than seq2.
1009 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1011 return (int32_t)(seq1 - seq2) >= 0;
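/*
 * Example (illustrative): the signed subtraction above is safe across
 * 32-bit wraparound.  With seq1 = 0x00000002 and seq2 = 0xfffffffe,
 * (int32_t)(seq1 - seq2) == 4 >= 0, so seq1 is correctly treated as
 * later even though it is numerically smaller.
 */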
1015 i915_get_gem_seqno(struct drm_device *dev)
1017 drm_i915_private_t *dev_priv = dev->dev_private;
1019 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
1023 * This function clears the request list as sequence numbers are passed.
1026 i915_gem_retire_requests(struct drm_device *dev)
1028 drm_i915_private_t *dev_priv = dev->dev_private;
1031 seqno = i915_get_gem_seqno(dev);
1033 while (!list_empty(&dev_priv->mm.request_list)) {
1034 struct drm_i915_gem_request *request;
1035 uint32_t retiring_seqno;
1037 request = list_first_entry(&dev_priv->mm.request_list,
1038 struct drm_i915_gem_request,
1040 retiring_seqno = request->seqno;
1042 if (i915_seqno_passed(seqno, retiring_seqno) ||
1043 dev_priv->mm.wedged) {
1044 i915_gem_retire_request(dev, request);
1046 list_del(&request->list);
1047 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1054 i915_gem_retire_work_handler(struct work_struct *work)
1056 drm_i915_private_t *dev_priv;
1057 struct drm_device *dev;
1059 dev_priv = container_of(work, drm_i915_private_t,
1060 mm.retire_work.work);
1061 dev = dev_priv->dev;
1063 mutex_lock(&dev->struct_mutex);
1064 i915_gem_retire_requests(dev);
1065 if (!dev_priv->mm.suspended &&
1066 !list_empty(&dev_priv->mm.request_list))
1067 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
1068 mutex_unlock(&dev->struct_mutex);
1072 * Waits for a sequence number to be signaled, and cleans up the
1073 * request and object lists appropriately for that event.
1076 i915_wait_request(struct drm_device *dev, uint32_t seqno)
1078 drm_i915_private_t *dev_priv = dev->dev_private;
1083 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1084 dev_priv->mm.waiting_gem_seqno = seqno;
1085 i915_user_irq_get(dev);
1086 ret = wait_event_interruptible(dev_priv->irq_queue,
1087 i915_seqno_passed(i915_get_gem_seqno(dev),
1089 dev_priv->mm.wedged);
1090 i915_user_irq_put(dev);
1091 dev_priv->mm.waiting_gem_seqno = 0;
1093 if (dev_priv->mm.wedged)
1096 if (ret && ret != -ERESTARTSYS)
1097 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
1098 __func__, ret, seqno, i915_get_gem_seqno(dev));
1100 /* Directly dispatch request retiring. While we have the work queue
1101 * to handle this, the waiter on a request often wants an associated
1102 * buffer to have made it to the inactive list, and we would need
1103 * a separate wait queue to handle that.
1106 i915_gem_retire_requests(dev);
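/*
 * Typical in-driver use of the request machinery above (sketch,
 * mirroring i915_gem_object_flush_gpu_write_domain() and
 * i915_gem_object_wait_rendering() later in this file):
 *
 *	i915_gem_flush(dev, 0, obj->write_domain);
 *	seqno = i915_add_request(dev, obj->write_domain);
 *	...
 *	ret = i915_wait_request(dev, seqno);
 */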
1112 i915_gem_flush(struct drm_device *dev,
1113 uint32_t invalidate_domains,
1114 uint32_t flush_domains)
1116 drm_i915_private_t *dev_priv = dev->dev_private;
1121 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
1122 invalidate_domains, flush_domains);
1125 if (flush_domains & I915_GEM_DOMAIN_CPU)
1126 drm_agp_chipset_flush(dev);
1128 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
1129 I915_GEM_DOMAIN_GTT)) {
1131 * read/write caches:
1133 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
1134 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
1135 * also flushed at 2d versus 3d pipeline switches.
1139 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
1140 * MI_READ_FLUSH is set, and is always flushed on 965.
1142 * I915_GEM_DOMAIN_COMMAND may not exist?
1144 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
1145 * invalidated when MI_EXE_FLUSH is set.
1147 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
1148 * invalidated with every MI_FLUSH.
1152 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
1153 * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
1154 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
1155 * are flushed at any MI_FLUSH.
1158 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
1159 if ((invalidate_domains|flush_domains) &
1160 I915_GEM_DOMAIN_RENDER)
1161 cmd &= ~MI_NO_WRITE_FLUSH;
1162 if (!IS_I965G(dev)) {
1164 * On the 965, the sampler cache always gets flushed
1165 * and this bit is reserved.
1167 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
1168 cmd |= MI_READ_FLUSH;
1170 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
1171 cmd |= MI_EXE_FLUSH;
1174 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
1178 OUT_RING(0); /* noop */
1184 * Ensures that all rendering to the object has completed and the object is
1185 * safe to unbind from the GTT or access from the CPU.
1188 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1190 struct drm_device *dev = obj->dev;
1191 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1194 /* This function only exists to support waiting for existing rendering,
1195 * not for emitting required flushes.
1197 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
1199 /* If there is rendering queued on the buffer being evicted, wait for
1202 if (obj_priv->active) {
1204 DRM_INFO("%s: object %p wait for seqno %08x\n",
1205 __func__, obj, obj_priv->last_rendering_seqno);
1207 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
1216 * Unbinds an object from the GTT aperture.
1219 i915_gem_object_unbind(struct drm_gem_object *obj)
1221 struct drm_device *dev = obj->dev;
1222 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1227 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
1228 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
1230 if (obj_priv->gtt_space == NULL)
1233 if (obj_priv->pin_count != 0) {
1234 DRM_ERROR("Attempting to unbind pinned buffer\n");
1238 /* Move the object to the CPU domain to ensure that
1239 * any possible CPU writes while it's not in the GTT
1240 * are flushed when we go to remap it. This will
1241 * also ensure that all pending GPU writes are finished
1244 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1246 if (ret != -ERESTARTSYS)
1247 DRM_ERROR("set_domain failed: %d\n", ret);
1251 if (obj_priv->agp_mem != NULL) {
1252 drm_unbind_agp(obj_priv->agp_mem);
1253 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
1254 obj_priv->agp_mem = NULL;
1257 BUG_ON(obj_priv->active);
1259 /* blow away mappings if mapped through GTT */
1260 offset = ((loff_t) obj->map_list.hash.key) << PAGE_SHIFT;
1261 if (dev->dev_mapping)
1262 unmap_mapping_range(dev->dev_mapping, offset, obj->size, 1);
1264 if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
1265 i915_gem_clear_fence_reg(obj);
1267 i915_gem_object_free_page_list(obj);
1269 if (obj_priv->gtt_space) {
1270 atomic_dec(&dev->gtt_count);
1271 atomic_sub(obj->size, &dev->gtt_memory);
1273 drm_mm_put_block(obj_priv->gtt_space);
1274 obj_priv->gtt_space = NULL;
1277 /* Remove ourselves from the LRU list if present. */
1278 if (!list_empty(&obj_priv->list))
1279 list_del_init(&obj_priv->list);
1285 i915_gem_evict_something(struct drm_device *dev)
1287 drm_i915_private_t *dev_priv = dev->dev_private;
1288 struct drm_gem_object *obj;
1289 struct drm_i915_gem_object *obj_priv;
1293 /* If there's an inactive buffer available now, grab it
1296 if (!list_empty(&dev_priv->mm.inactive_list)) {
1297 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
1298 struct drm_i915_gem_object,
1300 obj = obj_priv->obj;
1301 BUG_ON(obj_priv->pin_count != 0);
1303 DRM_INFO("%s: evicting %p\n", __func__, obj);
1305 BUG_ON(obj_priv->active);
1307 /* Wait on the rendering and unbind the buffer. */
1308 ret = i915_gem_object_unbind(obj);
1312 /* If we didn't get anything, but the ring is still processing
1313 * things, wait for one of those things to finish and hopefully
1314 * leave us a buffer to evict.
1316 if (!list_empty(&dev_priv->mm.request_list)) {
1317 struct drm_i915_gem_request *request;
1319 request = list_first_entry(&dev_priv->mm.request_list,
1320 struct drm_i915_gem_request,
1323 ret = i915_wait_request(dev, request->seqno);
1327 /* if waiting caused an object to become inactive,
1328 * then loop around and wait for it. Otherwise, we
1329 * assume that waiting freed and unbound something,
1330 * so there should now be some space in the GTT
1332 if (!list_empty(&dev_priv->mm.inactive_list))
1337 /* If we didn't have anything on the request list but there
1338 * are buffers awaiting a flush, emit one and try again.
1339 * When we wait on it, those buffers waiting for that flush
1340 * will get moved to inactive.
1342 if (!list_empty(&dev_priv->mm.flushing_list)) {
1343 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1344 struct drm_i915_gem_object,
1346 obj = obj_priv->obj;
1351 i915_add_request(dev, obj->write_domain);
1357 DRM_ERROR("inactive empty %d request empty %d "
1358 "flushing empty %d\n",
1359 list_empty(&dev_priv->mm.inactive_list),
1360 list_empty(&dev_priv->mm.request_list),
1361 list_empty(&dev_priv->mm.flushing_list));
1362 /* If we didn't do any of the above, there's nothing to be done
1363 * and we just can't fit it in.
1371 i915_gem_evict_everything(struct drm_device *dev)
1376 ret = i915_gem_evict_something(dev);
1386 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1388 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1390 struct address_space *mapping;
1391 struct inode *inode;
1395 if (obj_priv->page_list)
1398 /* Get the list of pages out of our struct file. They'll be pinned
1399 * at this point until we release them.
1401 page_count = obj->size / PAGE_SIZE;
1402 BUG_ON(obj_priv->page_list != NULL);
1403 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1405 if (obj_priv->page_list == NULL) {
1406 DRM_ERROR("Faled to allocate page list\n");
1410 inode = obj->filp->f_path.dentry->d_inode;
1411 mapping = inode->i_mapping;
1412 for (i = 0; i < page_count; i++) {
1413 page = read_mapping_page(mapping, i, NULL);
1415 ret = PTR_ERR(page);
1416 DRM_ERROR("read_mapping_page failed: %d\n", ret);
1417 i915_gem_object_free_page_list(obj);
1420 obj_priv->page_list[i] = page;
1425 static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
1427 struct drm_gem_object *obj = reg->obj;
1428 struct drm_device *dev = obj->dev;
1429 drm_i915_private_t *dev_priv = dev->dev_private;
1430 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1431 int regnum = obj_priv->fence_reg;
1434 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
1436 val |= obj_priv->gtt_offset & 0xfffff000;
1437 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
1438 if (obj_priv->tiling_mode == I915_TILING_Y)
1439 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
1440 val |= I965_FENCE_REG_VALID;
1442 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
1445 static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1447 struct drm_gem_object *obj = reg->obj;
1448 struct drm_device *dev = obj->dev;
1449 drm_i915_private_t *dev_priv = dev->dev_private;
1450 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1451 int regnum = obj_priv->fence_reg;
1456 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1457 (obj_priv->gtt_offset & (obj->size - 1))) {
1458 WARN(1, "%s: object 0x%08x not 1M or size (0x%x) aligned\n",
1459 __func__, obj_priv->gtt_offset, obj->size);
1463 if (obj_priv->tiling_mode == I915_TILING_Y &&
1464 HAS_128_BYTE_Y_TILING(dev))
1469 /* Note: pitch better be a power of two tile widths */
1470 pitch_val = obj_priv->stride / tile_width;
1471 pitch_val = ffs(pitch_val) - 1;
1473 val = obj_priv->gtt_offset;
1474 if (obj_priv->tiling_mode == I915_TILING_Y)
1475 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1476 val |= I915_FENCE_SIZE_BITS(obj->size);
1477 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1478 val |= I830_FENCE_REG_VALID;
1480 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
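/*
 * Worked example for the pitch encoding above (illustrative): an X-tiled
 * object with a 2048-byte stride and 512-byte tile width gives
 * pitch_val = 2048 / 512 = 4, then ffs(4) - 1 = 2, which is the value
 * shifted into the fence register's pitch field.
 */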
1483 static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1485 struct drm_gem_object *obj = reg->obj;
1486 struct drm_device *dev = obj->dev;
1487 drm_i915_private_t *dev_priv = dev->dev_private;
1488 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1489 int regnum = obj_priv->fence_reg;
1493 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1494 (obj_priv->gtt_offset & (obj->size - 1))) {
1495 WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
1496 __func__, obj_priv->gtt_offset);
1500 pitch_val = (obj_priv->stride / 128) - 1;
1502 val = obj_priv->gtt_offset;
1503 if (obj_priv->tiling_mode == I915_TILING_Y)
1504 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
1505 val |= I830_FENCE_SIZE_BITS(obj->size);
1506 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
1507 val |= I830_FENCE_REG_VALID;
1509 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
1514 * i915_gem_object_get_fence_reg - set up a fence reg for an object
1515 * @obj: object to map through a fence reg
1516 * @write: object is about to be written
1518 * When mapping objects through the GTT, userspace wants to be able to write
1519 * to them without having to worry about swizzling if the object is tiled.
1521 * This function walks the fence regs looking for a free one for @obj,
1522 * stealing one if it can't find any.
1524 * It then sets up the reg based on the object's properties: address, pitch
1525 * and tiling format.
1528 i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1530 struct drm_device *dev = obj->dev;
1531 struct drm_i915_private *dev_priv = dev->dev_private;
1532 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1533 struct drm_i915_fence_reg *reg = NULL;
1536 switch (obj_priv->tiling_mode) {
1537 case I915_TILING_NONE:
1538 WARN(1, "allocating a fence for non-tiled object?\n");
1541 if (!obj_priv->stride)
1543 WARN((obj_priv->stride & (512 - 1)),
1544 "object 0x%08x is X tiled but has non-512B pitch\n",
1545 obj_priv->gtt_offset);
1548 if (!obj_priv->stride)
1550 WARN((obj_priv->stride & (128 - 1)),
1551 "object 0x%08x is Y tiled but has non-128B pitch\n",
1552 obj_priv->gtt_offset);
1556 /* First try to find a free reg */
1557 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
1558 reg = &dev_priv->fence_regs[i];
1563 /* None available, try to steal one or wait for a user to finish */
1564 if (i == dev_priv->num_fence_regs) {
1565 struct drm_i915_gem_object *old_obj_priv = NULL;
1569 /* Could try to use LRU here instead... */
1570 for (i = dev_priv->fence_reg_start;
1571 i < dev_priv->num_fence_regs; i++) {
1572 reg = &dev_priv->fence_regs[i];
1573 old_obj_priv = reg->obj->driver_private;
1574 if (!old_obj_priv->pin_count)
1579 * Now things get ugly... we have to wait for one of the
1580 * objects to finish before trying again.
1582 if (i == dev_priv->num_fence_regs) {
1583 ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
1585 WARN(ret != -ERESTARTSYS,
1586 "switch to GTT domain failed: %d\n", ret);
1593 * Zap this virtual mapping so we can set up a fence again
1594 * for this object next time we need it.
1596 offset = ((loff_t) reg->obj->map_list.hash.key) << PAGE_SHIFT;
1597 if (dev->dev_mapping)
1598 unmap_mapping_range(dev->dev_mapping, offset,
1600 old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
1603 obj_priv->fence_reg = i;
1607 i965_write_fence_reg(reg);
1608 else if (IS_I9XX(dev))
1609 i915_write_fence_reg(reg);
1611 i830_write_fence_reg(reg);
1617 * i915_gem_clear_fence_reg - clear out fence register info
1618 * @obj: object to clear
1620 * Zeroes out the fence register itself and clears out the associated
1621 * data structures in dev_priv and obj_priv.
1624 i915_gem_clear_fence_reg(struct drm_gem_object *obj)
1626 struct drm_device *dev = obj->dev;
1627 drm_i915_private_t *dev_priv = dev->dev_private;
1628 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1631 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
1633 I915_WRITE(FENCE_REG_830_0 + (obj_priv->fence_reg * 4), 0);
1635 dev_priv->fence_regs[obj_priv->fence_reg].obj = NULL;
1636 obj_priv->fence_reg = I915_FENCE_REG_NONE;
1640 * Finds free space in the GTT aperture and binds the object there.
1643 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1645 struct drm_device *dev = obj->dev;
1646 drm_i915_private_t *dev_priv = dev->dev_private;
1647 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1648 struct drm_mm_node *free_space;
1649 int page_count, ret;
1651 if (dev_priv->mm.suspended)
1654 alignment = i915_gem_get_gtt_alignment(obj);
1655 if (alignment & (PAGE_SIZE - 1)) {
1656 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1661 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
1662 obj->size, alignment, 0);
1663 if (free_space != NULL) {
1664 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
1666 if (obj_priv->gtt_space != NULL) {
1667 obj_priv->gtt_space->private = obj;
1668 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1671 if (obj_priv->gtt_space == NULL) {
1672 /* If the gtt is empty and we're still having trouble
1673 * fitting our object in, we're out of memory.
1676 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1678 if (list_empty(&dev_priv->mm.inactive_list) &&
1679 list_empty(&dev_priv->mm.flushing_list) &&
1680 list_empty(&dev_priv->mm.active_list)) {
1681 DRM_ERROR("GTT full, but LRU list empty\n");
1685 ret = i915_gem_evict_something(dev);
1687 if (ret != -ERESTARTSYS)
1688 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1695 DRM_INFO("Binding object of size %d at 0x%08x\n",
1696 obj->size, obj_priv->gtt_offset);
1698 ret = i915_gem_object_get_page_list(obj);
1700 drm_mm_put_block(obj_priv->gtt_space);
1701 obj_priv->gtt_space = NULL;
1705 page_count = obj->size / PAGE_SIZE;
1706 /* Create an AGP memory structure pointing at our pages, and bind it
1709 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1710 obj_priv->page_list,
1712 obj_priv->gtt_offset,
1713 obj_priv->agp_type);
1714 if (obj_priv->agp_mem == NULL) {
1715 i915_gem_object_free_page_list(obj);
1716 drm_mm_put_block(obj_priv->gtt_space);
1717 obj_priv->gtt_space = NULL;
1720 atomic_inc(&dev->gtt_count);
1721 atomic_add(obj->size, &dev->gtt_memory);
1723 /* Assert that the object is not currently in any GPU domain. As it
1724 * wasn't in the GTT, there shouldn't be any way it could have been in
1727 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1728 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1734 i915_gem_clflush_object(struct drm_gem_object *obj)
1736 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1738 /* If we don't have a page list set up, then we're not pinned
1739 * to GPU, and we can ignore the cache flush because it'll happen
1740 * again at bind time.
1742 if (obj_priv->page_list == NULL)
1745 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
1748 /** Flushes any GPU write domain for the object if it's dirty. */
1750 i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
1752 struct drm_device *dev = obj->dev;
1755 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
1758 /* Queue the GPU write cache flushing we need. */
1759 i915_gem_flush(dev, 0, obj->write_domain);
1760 seqno = i915_add_request(dev, obj->write_domain);
1761 obj->write_domain = 0;
1762 i915_gem_object_move_to_active(obj, seqno);
1765 /** Flushes the GTT write domain for the object if it's dirty. */
1767 i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
1769 if (obj->write_domain != I915_GEM_DOMAIN_GTT)
1772 /* No actual flushing is required for the GTT write domain. Writes
1773 * to it immediately go to main memory as far as we know, so there's
1774 * no chipset flush. It also doesn't land in render cache.
1776 obj->write_domain = 0;
1779 /** Flushes the CPU write domain for the object if it's dirty. */
1781 i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
1783 struct drm_device *dev = obj->dev;
1785 if (obj->write_domain != I915_GEM_DOMAIN_CPU)
1788 i915_gem_clflush_object(obj);
1789 drm_agp_chipset_flush(dev);
1790 obj->write_domain = 0;
1794 * Moves a single object to the GTT read, and possibly write domain.
1796 * This function returns when the move is complete, including waiting on
1800 i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
1802 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1805 /* Not valid to be called on unbound objects. */
1806 if (obj_priv->gtt_space == NULL)
1809 i915_gem_object_flush_gpu_write_domain(obj);
1810 /* Wait on any GPU rendering and flushing to occur. */
1811 ret = i915_gem_object_wait_rendering(obj);
1815 /* If we're writing through the GTT domain, then CPU and GPU caches
1816 * will need to be invalidated at next use.
1819 obj->read_domains &= I915_GEM_DOMAIN_GTT;
1821 i915_gem_object_flush_cpu_write_domain(obj);
1823 /* It should now be out of any other write domains, and we can update
1824 * the domain values for our changes.
1826 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
1827 obj->read_domains |= I915_GEM_DOMAIN_GTT;
1829 obj->write_domain = I915_GEM_DOMAIN_GTT;
1830 obj_priv->dirty = 1;
1837 * Moves a single object to the CPU read, and possibly write domain.
1839 * This function returns when the move is complete, including waiting on
1843 i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1845 struct drm_device *dev = obj->dev;
1848 i915_gem_object_flush_gpu_write_domain(obj);
1849 /* Wait on any GPU rendering and flushing to occur. */
1850 ret = i915_gem_object_wait_rendering(obj);
1854 i915_gem_object_flush_gtt_write_domain(obj);
1856 /* If we have a partially-valid cache of the object in the CPU,
1857 * finish invalidating it and free the per-page flags.
1859 i915_gem_object_set_to_full_cpu_read_domain(obj);
1861 /* Flush the CPU cache if it's still invalid. */
1862 if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
1863 i915_gem_clflush_object(obj);
1864 drm_agp_chipset_flush(dev);
1866 obj->read_domains |= I915_GEM_DOMAIN_CPU;
1869 /* It should now be out of any other write domains, and we can update
1870 * the domain values for our changes.
1872 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
1874 /* If we're writing through the CPU, then the GPU read domains will
1875 * need to be invalidated at next use.
1878 obj->read_domains &= I915_GEM_DOMAIN_CPU;
1879 obj->write_domain = I915_GEM_DOMAIN_CPU;
1886 * Set the next domain for the specified object. This
1887 * may not actually perform the necessary flushing/invaliding though,
1888 * as that may want to be batched with other set_domain operations
1890 * This is (we hope) the only really tricky part of gem. The goal
1891 * is fairly simple -- track which caches hold bits of the object
1892 * and make sure they remain coherent. A few concrete examples may
1893 * help to explain how it works. For shorthand, we use the notation
1894 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
1895 * a pair of read and write domain masks.
1897 * Case 1: the batch buffer
1903 * 5. Unmapped from GTT
1906 * Let's take these a step at a time
1909 * Pages allocated from the kernel may still have
1910 * cache contents, so we set them to (CPU, CPU) always.
1911 * 2. Written by CPU (using pwrite)
1912 * The pwrite function calls set_domain (CPU, CPU) and
1913 * this function does nothing (as nothing changes)
1915 * This function asserts that the object is not
1916 * currently in any GPU-based read or write domains
1918 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1919 * As write_domain is zero, this function adds in the
1920 * current read domains (CPU+COMMAND, 0).
1921 * flush_domains is set to CPU.
1922 * invalidate_domains is set to COMMAND
1923 * clflush is run to get data out of the CPU caches
1924 * then i915_dev_set_domain calls i915_gem_flush to
1925 * emit an MI_FLUSH and drm_agp_chipset_flush
1926 * 5. Unmapped from GTT
1927 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1928 * flush_domains and invalidate_domains end up both zero
1929 * so no flushing/invalidating happens
1933 * Case 2: The shared render buffer
1937 * 3. Read/written by GPU
1938 * 4. set_domain to (CPU,CPU)
1939 * 5. Read/written by CPU
1940 * 6. Read/written by GPU
1943 * Same as last example, (CPU, CPU)
1945 * Nothing changes (assertions find that it is not in the GPU)
1946 * 3. Read/written by GPU
1947 * execbuffer calls set_domain (RENDER, RENDER)
1948 * flush_domains gets CPU
1949 * invalidate_domains gets GPU
1951 * MI_FLUSH and drm_agp_chipset_flush
1952 * 4. set_domain (CPU, CPU)
1953 * flush_domains gets GPU
1954 * invalidate_domains gets CPU
1955 * wait_rendering (obj) to make sure all drawing is complete.
1956 * This will include an MI_FLUSH to get the data from GPU
1958 * clflush (obj) to invalidate the CPU cache
1959 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1960 * 5. Read/written by CPU
1961 * cache lines are loaded and dirtied
1962 * 6. Read/written by GPU
1963 * Same as last GPU access
1965 * Case 3: The constant buffer
1970 * 4. Updated (written) by CPU again
1979 * flush_domains = CPU
1980 * invalidate_domains = RENDER
1983 * drm_agp_chipset_flush
1984 * 4. Updated (written) by CPU again
1986 * flush_domains = 0 (no previous write domain)
1987 * invalidate_domains = 0 (no new read domains)
1990 * flush_domains = CPU
1991 * invalidate_domains = RENDER
1994 * drm_agp_chipset_flush
1997 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
1998 uint32_t read_domains,
1999 uint32_t write_domain)
2001 struct drm_device *dev = obj->dev;
2002 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2003 uint32_t invalidate_domains = 0;
2004 uint32_t flush_domains = 0;
2006 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU);
2007 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU);
2010 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
2012 obj->read_domains, read_domains,
2013 obj->write_domain, write_domain);
2016 * If the object isn't moving to a new write domain,
2017 * let the object stay in multiple read domains
2019 if (write_domain == 0)
2020 read_domains |= obj->read_domains;
2022 obj_priv->dirty = 1;
2025 * Flush the current write domain if
2026 * the new read domains don't match. Invalidate
2027 * any read domains which differ from the old
2030 if (obj->write_domain && obj->write_domain != read_domains) {
2031 flush_domains |= obj->write_domain;
2032 invalidate_domains |= read_domains & ~obj->write_domain;
2035 * Invalidate any read caches which may have
2036 * stale data. That is, any new read domains.
2038 invalidate_domains |= read_domains & ~obj->read_domains;
2039 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2041 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
2042 __func__, flush_domains, invalidate_domains);
2044 i915_gem_clflush_object(obj);
2047 if ((write_domain | flush_domains) != 0)
2048 obj->write_domain = write_domain;
2049 obj->read_domains = read_domains;
2051 dev->invalidate_domains |= invalidate_domains;
2052 dev->flush_domains |= flush_domains;
2054 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
2056 obj->read_domains, obj->write_domain,
2057 dev->invalidate_domains, dev->flush_domains);
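/*
 * Concrete illustration of the bookkeeping above (sketch): a buffer last
 * written by the CPU, i.e. (CPU, CPU), that execbuffer now wants as a
 * render target, (RENDER, RENDER), picks up flush_domains |= CPU (so it
 * is clflushed here) and invalidate_domains |= RENDER; both are then
 * accumulated into dev->flush_domains / dev->invalidate_domains for a
 * single i915_gem_flush() later.
 */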
2062 * Moves the object from a partially CPU read to a full one.
2064 * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(),
2065 * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
2068 i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
2070 struct drm_device *dev = obj->dev;
2071 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2073 if (!obj_priv->page_cpu_valid)
2076 /* If we're partially in the CPU read domain, finish moving it in.
2078 if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
2081 for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
2082 if (obj_priv->page_cpu_valid[i])
2084 drm_clflush_pages(obj_priv->page_list + i, 1);
2086 drm_agp_chipset_flush(dev);
2089 /* Free the page_cpu_valid mappings which are now stale, whether
2090 * or not we've got I915_GEM_DOMAIN_CPU.
2092 drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
2094 obj_priv->page_cpu_valid = NULL;
2098 * Set the CPU read domain on a range of the object.
2100 * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's
2101 * not entirely valid. The page_cpu_valid member of the object flags which
2102 * pages have been flushed, and will be respected by
2103 * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping
2104 * of the whole object.
2106 * This function returns when the move is complete, including waiting on
2110 i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
2111 uint64_t offset, uint64_t size)
2113 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2116 if (offset == 0 && size == obj->size)
2117 return i915_gem_object_set_to_cpu_domain(obj, 0);
2119 i915_gem_object_flush_gpu_write_domain(obj);
2120 /* Wait on any GPU rendering and flushing to occur. */
2121 ret = i915_gem_object_wait_rendering(obj);
2124 i915_gem_object_flush_gtt_write_domain(obj);
2126 /* If we're already fully in the CPU read domain, we're done. */
2127 if (obj_priv->page_cpu_valid == NULL &&
2128 (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
2131 /* Otherwise, create/clear the per-page CPU read domain flag if we're
2132 * newly adding I915_GEM_DOMAIN_CPU
2134 if (obj_priv->page_cpu_valid == NULL) {
2135 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
2137 if (obj_priv->page_cpu_valid == NULL)
2139 } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
2140 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
2142 /* Flush the cache on any pages that are still invalid from the CPU's
2145 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
2147 if (obj_priv->page_cpu_valid[i])
2150 drm_clflush_pages(obj_priv->page_list + i, 1);
2152 obj_priv->page_cpu_valid[i] = 1;
2155 /* It should now be out of any other write domains, and we can update
2156 * the domain values for our changes.
2158 BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
2160 obj->read_domains |= I915_GEM_DOMAIN_CPU;
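/*
 * Worked example of the range variant above (illustrative): the pread
 * path earlier in this file calls this with the (offset, size) of the
 * read, so a 100-byte read at offset 8192 of a 64KB object clflushes
 * only page 2, and a later read of the same page skips the flush
 * because page_cpu_valid[2] is already set.
 */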
2166 * Pin an object to the GTT and evaluate the relocations landing in it.
2169 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2170 struct drm_file *file_priv,
2171 struct drm_i915_gem_exec_object *entry)
2173 struct drm_device *dev = obj->dev;
2174 drm_i915_private_t *dev_priv = dev->dev_private;
2175 struct drm_i915_gem_relocation_entry reloc;
2176 struct drm_i915_gem_relocation_entry __user *relocs;
2177 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2179 void __iomem *reloc_page;
2181 /* Choose the GTT offset for our buffer and put it there. */
2182 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
2186 entry->offset = obj_priv->gtt_offset;
2188 relocs = (struct drm_i915_gem_relocation_entry __user *)
2189 (uintptr_t) entry->relocs_ptr;
2190 /* Apply the relocations, using the GTT aperture to avoid cache
2191 * flushing requirements.
2193 for (i = 0; i < entry->relocation_count; i++) {
2194 struct drm_gem_object *target_obj;
2195 struct drm_i915_gem_object *target_obj_priv;
2196 uint32_t reloc_val, reloc_offset;
2197 uint32_t __iomem *reloc_entry;
2199 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
2201 i915_gem_object_unpin(obj);
2205 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
2206 reloc.target_handle);
2207 if (target_obj == NULL) {
2208 i915_gem_object_unpin(obj);
2211 target_obj_priv = target_obj->driver_private;
2213 /* The target buffer should have appeared before us in the
2214 * exec_object list, so it should have a GTT space bound by now.
2216 if (target_obj_priv->gtt_space == NULL) {
2217 DRM_ERROR("No GTT space found for object %d\n",
2218 reloc.target_handle);
2219 drm_gem_object_unreference(target_obj);
2220 i915_gem_object_unpin(obj);
2224 if (reloc.offset > obj->size - 4) {
2225 DRM_ERROR("Relocation beyond object bounds: "
2226 "obj %p target %d offset %d size %d.\n",
2227 obj, reloc.target_handle,
2228 (int) reloc.offset, (int) obj->size);
2229 drm_gem_object_unreference(target_obj);
2230 i915_gem_object_unpin(obj);
2233 if (reloc.offset & 3) {
2234 DRM_ERROR("Relocation not 4-byte aligned: "
2235 "obj %p target %d offset %d.\n",
2236 obj, reloc.target_handle,
2237 (int) reloc.offset);
2238 drm_gem_object_unreference(target_obj);
2239 i915_gem_object_unpin(obj);
2243 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
2244 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
2245 DRM_ERROR("reloc with read/write CPU domains: "
2246 "obj %p target %d offset %d "
2247 "read %08x write %08x",
2248 obj, reloc.target_handle,
2251 reloc.write_domain);
2255 if (reloc.write_domain && target_obj->pending_write_domain &&
2256 reloc.write_domain != target_obj->pending_write_domain) {
2257 DRM_ERROR("Write domain conflict: "
2258 "obj %p target %d offset %d "
2259 "new %08x old %08x\n",
2260 obj, reloc.target_handle,
2263 target_obj->pending_write_domain);
2264 drm_gem_object_unreference(target_obj);
2265 i915_gem_object_unpin(obj);
2270 DRM_INFO("%s: obj %p offset %08x target %d "
2271 "read %08x write %08x gtt %08x "
2272 "presumed %08x delta %08x\n",
2276 (int) reloc.target_handle,
2277 (int) reloc.read_domains,
2278 (int) reloc.write_domain,
2279 (int) target_obj_priv->gtt_offset,
2280 (int) reloc.presumed_offset,
2284 target_obj->pending_read_domains |= reloc.read_domains;
2285 target_obj->pending_write_domain |= reloc.write_domain;
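/* Accumulate the requested domains on the target object; they are applied
 * in a single pass by i915_gem_object_set_to_gpu_domain() during execbuffer,
 * once every relocation has been seen.
 */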
2287 /* If the relocation already has the right value in it, no
2288 * more work needs to be done.
2290 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
2291 drm_gem_object_unreference(target_obj);
2295 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
2297 drm_gem_object_unreference(target_obj);
2298 i915_gem_object_unpin(obj);
2302 /* Map the page containing the relocation we're going to perform. */
2305 reloc_offset = obj_priv->gtt_offset + reloc.offset;
2306 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
2309 reloc_entry = (uint32_t __iomem *)(reloc_page +
2310 (reloc_offset & (PAGE_SIZE - 1)));
2311 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
2314 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
2315 obj, (unsigned int) reloc.offset,
2316 readl(reloc_entry), reloc_val);
2318 writel(reloc_val, reloc_entry);
2319 io_mapping_unmap_atomic(reloc_page);
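/* The relocation is patched through an atomic write-combining mapping of
 * the GTT aperture, bypassing the CPU cache, so no clflush of the page is
 * needed after the writel().
 */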
2321 /* Write the updated presumed offset for this entry back out to the user's relocation list. */
2324 reloc.presumed_offset = target_obj_priv->gtt_offset;
2325 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
2327 drm_gem_object_unreference(target_obj);
2328 i915_gem_object_unpin(obj);
2332 drm_gem_object_unreference(target_obj);
2337 i915_gem_dump_object(obj, 128, __func__, ~0);
2342 /** Dispatch a batchbuffer to the ring
2345 i915_dispatch_gem_execbuffer(struct drm_device *dev,
2346 struct drm_i915_gem_execbuffer *exec,
2347 uint64_t exec_offset)
2349 drm_i915_private_t *dev_priv = dev->dev_private;
2350 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
2351 (uintptr_t) exec->cliprects_ptr;
2352 int nbox = exec->num_cliprects;
2354 uint32_t exec_start, exec_len;
2357 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
2358 exec_len = (uint32_t) exec->batch_len;
2360 if ((exec_start | exec_len) & 0x7) {
2361 DRM_ERROR("alignment\n");
2368 count = nbox ? nbox : 1;
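/* Dispatch once if no cliprects were supplied, otherwise once per cliprect
 * with an MI box emitted first.  830/845 use the inline MI_BATCH_BUFFER
 * command with explicit start and end addresses, while later chips chain to
 * the batch with MI_BATCH_BUFFER_START.
 */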
2370 for (i = 0; i < count; i++) {
2372 int ret = i915_emit_box(dev, boxes, i,
2373 exec->DR1, exec->DR4);
2378 if (IS_I830(dev) || IS_845G(dev)) {
2380 OUT_RING(MI_BATCH_BUFFER);
2381 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2382 OUT_RING(exec_start + exec_len - 4);
2387 if (IS_I965G(dev)) {
2388 OUT_RING(MI_BATCH_BUFFER_START |
2390 MI_BATCH_NON_SECURE_I965);
2391 OUT_RING(exec_start);
2393 OUT_RING(MI_BATCH_BUFFER_START |
2395 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
2401 /* XXX breadcrumb */
2405 /* Throttle our rendering by waiting until the ring has completed the requests
2406 * we emitted more than 20 msec ago.
2408 * This should get us reasonable parallelism between CPU and GPU but also
2409 * relatively low latency when blocking on a particular request to finish.
2412 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
2414 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2418 mutex_lock(&dev->struct_mutex);
2419 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
2420 i915_file_priv->mm.last_gem_throttle_seqno =
2421 i915_file_priv->mm.last_gem_seqno;
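/* seqno now holds the value recorded on the previous throttle call, and the
 * latest request seqno has been saved for next time; waiting on the older
 * value keeps each client roughly one throttle interval ahead of the GPU.
 */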
2423 ret = i915_wait_request(dev, seqno);
2424 mutex_unlock(&dev->struct_mutex);
2429 i915_gem_execbuffer(struct drm_device *dev, void *data,
2430 struct drm_file *file_priv)
2432 drm_i915_private_t *dev_priv = dev->dev_private;
2433 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
2434 struct drm_i915_gem_execbuffer *args = data;
2435 struct drm_i915_gem_exec_object *exec_list = NULL;
2436 struct drm_gem_object **object_list = NULL;
2437 struct drm_gem_object *batch_obj;
2438 int ret, i, pinned = 0;
2439 uint64_t exec_offset;
2440 uint32_t seqno, flush_domains;
2444 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
2445 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
2448 if (args->buffer_count < 1) {
2449 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
2452 /* Copy in the exec list from userland */
2453 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
2455 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
2457 if (exec_list == NULL || object_list == NULL) {
2458 DRM_ERROR("Failed to allocate exec or object list "
2460 args->buffer_count);
2464 ret = copy_from_user(exec_list,
2465 (struct drm_i915_relocation_entry __user *)
2466 (uintptr_t) args->buffers_ptr,
2467 sizeof(*exec_list) * args->buffer_count);
2469 DRM_ERROR("copy %d exec entries failed %d\n",
2470 args->buffer_count, ret);
2474 mutex_lock(&dev->struct_mutex);
2476 i915_verify_inactive(dev, __FILE__, __LINE__);
2478 if (dev_priv->mm.wedged) {
2479 DRM_ERROR("Execbuf while wedged\n");
2480 mutex_unlock(&dev->struct_mutex);
2484 if (dev_priv->mm.suspended) {
2485 DRM_ERROR("Execbuf while VT-switched.\n");
2486 mutex_unlock(&dev->struct_mutex);
2490 /* Look up object handles */
2491 for (i = 0; i < args->buffer_count; i++) {
2492 object_list[i] = drm_gem_object_lookup(dev, file_priv,
2493 exec_list[i].handle);
2494 if (object_list[i] == NULL) {
2495 DRM_ERROR("Invalid object handle %d at index %d\n",
2496 exec_list[i].handle, i);
2502 /* Pin and relocate */
2503 for (pin_tries = 0; ; pin_tries++) {
2505 for (i = 0; i < args->buffer_count; i++) {
2506 object_list[i]->pending_read_domains = 0;
2507 object_list[i]->pending_write_domain = 0;
2508 ret = i915_gem_object_pin_and_relocate(object_list[i],
2519 /* error other than GTT full, or we've already tried again */
2520 if (ret != -ENOMEM || pin_tries >= 1) {
2521 if (ret != -ERESTARTSYS)
2522 DRM_ERROR("Failed to pin buffers %d\n", ret);
2526 /* unpin all of our buffers */
2527 for (i = 0; i < pinned; i++)
2528 i915_gem_object_unpin(object_list[i]);
2531 /* evict everyone we can from the aperture */
2532 ret = i915_gem_evict_everything(dev);
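/* With the aperture emptied, the outer loop retries the pin pass once
 * before giving up (the pin_tries >= 1 check above ends the retry).
 */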
2537 /* Set the pending read domains for the batch buffer to COMMAND */
2538 batch_obj = object_list[args->buffer_count-1];
2539 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
2540 batch_obj->pending_write_domain = 0;
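/* The batch buffer is defined to be the last entry in the exec list; it is
 * only ever read by the command streamer, so it gets the COMMAND read
 * domain and no write domain.
 */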
2542 i915_verify_inactive(dev, __FILE__, __LINE__);
2544 /* Zero the global flush/invalidate flags. These
2545 * will be modified as new domains are computed
2548 dev->invalidate_domains = 0;
2549 dev->flush_domains = 0;
2551 for (i = 0; i < args->buffer_count; i++) {
2552 struct drm_gem_object *obj = object_list[i];
2554 /* Compute new gpu domains and update invalidate/flush */
2555 i915_gem_object_set_to_gpu_domain(obj,
2556 obj->pending_read_domains,
2557 obj->pending_write_domain);
2560 i915_verify_inactive(dev, __FILE__, __LINE__);
2562 if (dev->invalidate_domains | dev->flush_domains) {
2564 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
2566 dev->invalidate_domains,
2567 dev->flush_domains);
2570 dev->invalidate_domains,
2571 dev->flush_domains);
2572 if (dev->flush_domains)
2573 (void)i915_add_request(dev, dev->flush_domains);
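/* Recording a request alongside the flush gives the flushed write domains a
 * seqno to retire against, so objects sitting on the flushing list can move
 * to inactive once that flush has completed.
 */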
2576 i915_verify_inactive(dev, __FILE__, __LINE__);
2579 for (i = 0; i < args->buffer_count; i++) {
2580 i915_gem_object_check_coherency(object_list[i],
2581 exec_list[i].handle);
2585 exec_offset = exec_list[args->buffer_count - 1].offset;
2588 i915_gem_dump_object(object_list[args->buffer_count - 1],
2594 /* Exec the batchbuffer */
2595 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
2597 DRM_ERROR("dispatch failed %d\n", ret);
2602 * Ensure that the commands in the batch buffer are
2603 * finished before the interrupt fires
2605 flush_domains = i915_retire_commands(dev);
2607 i915_verify_inactive(dev, __FILE__, __LINE__);
2610 * Get a seqno representing the execution of the current buffer,
2611 * which we can wait on. We would like to mitigate these interrupts,
2612 * likely by only creating seqnos occasionally (so that we have
2613 * *some* interrupts representing completion of buffers that we can
2614 * wait on when trying to clear up gtt space).
2616 seqno = i915_add_request(dev, flush_domains);
2618 i915_file_priv->mm.last_gem_seqno = seqno;
2619 for (i = 0; i < args->buffer_count; i++) {
2620 struct drm_gem_object *obj = object_list[i];
2622 i915_gem_object_move_to_active(obj, seqno);
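/* Tag each referenced object with the new request's seqno; the object stays
 * on the active list, and is not a candidate for eviction, until the GPU
 * has passed that seqno.
 */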
2624 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
2628 i915_dump_lru(dev, __func__);
2631 i915_verify_inactive(dev, __FILE__, __LINE__);
2633 /* Copy the new buffer offsets back to the user's exec list. */
2634 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2635 (uintptr_t) args->buffers_ptr,
2637 sizeof(*exec_list) * args->buffer_count);
2639 DRM_ERROR("failed to copy %d exec entries "
2640 "back to user (%d)\n",
2641 args->buffer_count, ret);
2643 for (i = 0; i < pinned; i++)
2644 i915_gem_object_unpin(object_list[i]);
2646 for (i = 0; i < args->buffer_count; i++)
2647 drm_gem_object_unreference(object_list[i]);
2649 mutex_unlock(&dev->struct_mutex);
2652 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2654 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
2661 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2663 struct drm_device *dev = obj->dev;
2664 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2667 i915_verify_inactive(dev, __FILE__, __LINE__);
2668 if (obj_priv->gtt_space == NULL) {
2669 ret = i915_gem_object_bind_to_gtt(obj, alignment);
2671 if (ret != -EBUSY && ret != -ERESTARTSYS)
2672 DRM_ERROR("Failure to bind: %d", ret);
2676 * Pre-965 chips need a fence register set up in order to
2677 * properly handle tiled surfaces.
2679 if (!IS_I965G(dev) &&
2680 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2681 obj_priv->tiling_mode != I915_TILING_NONE)
2682 i915_gem_object_get_fence_reg(obj, true);
2684 obj_priv->pin_count++;
2686 /* If the object is not active and not pending a flush,
2687 * remove it from the inactive list
2689 if (obj_priv->pin_count == 1) {
2690 atomic_inc(&dev->pin_count);
2691 atomic_add(obj->size, &dev->pin_memory);
2692 if (!obj_priv->active &&
2693 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2694 I915_GEM_DOMAIN_GTT)) == 0 &&
2695 !list_empty(&obj_priv->list))
2696 list_del_init(&obj_priv->list);
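/* A pinned object is taken off the inactive list entirely, so the eviction
 * code never considers it while the pin is held.
 */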
2698 i915_verify_inactive(dev, __FILE__, __LINE__);
2704 i915_gem_object_unpin(struct drm_gem_object *obj)
2706 struct drm_device *dev = obj->dev;
2707 drm_i915_private_t *dev_priv = dev->dev_private;
2708 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2710 i915_verify_inactive(dev, __FILE__, __LINE__);
2711 obj_priv->pin_count--;
2712 BUG_ON(obj_priv->pin_count < 0);
2713 BUG_ON(obj_priv->gtt_space == NULL);
2715 /* If the object is no longer pinned, and is
2716 * neither active nor being flushed, then stick it on the inactive list.
2719 if (obj_priv->pin_count == 0) {
2720 if (!obj_priv->active &&
2721 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2722 I915_GEM_DOMAIN_GTT)) == 0)
2723 list_move_tail(&obj_priv->list,
2724 &dev_priv->mm.inactive_list);
2725 atomic_dec(&dev->pin_count);
2726 atomic_sub(obj->size, &dev->pin_memory);
2728 i915_verify_inactive(dev, __FILE__, __LINE__);
2732 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2733 struct drm_file *file_priv)
2735 struct drm_i915_gem_pin *args = data;
2736 struct drm_gem_object *obj;
2737 struct drm_i915_gem_object *obj_priv;
2740 mutex_lock(&dev->struct_mutex);
2742 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2744 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2746 mutex_unlock(&dev->struct_mutex);
2749 obj_priv = obj->driver_private;
2751 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
2752 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2754 mutex_unlock(&dev->struct_mutex);
2758 obj_priv->user_pin_count++;
2759 obj_priv->pin_filp = file_priv;
2760 if (obj_priv->user_pin_count == 1) {
2761 ret = i915_gem_object_pin(obj, args->alignment);
2763 drm_gem_object_unreference(obj);
2764 mutex_unlock(&dev->struct_mutex);
2769 /* XXX - flush the CPU caches for pinned objects
2770 * as the X server doesn't manage domains yet
2772 i915_gem_object_flush_cpu_write_domain(obj);
2773 args->offset = obj_priv->gtt_offset;
2774 drm_gem_object_unreference(obj);
2775 mutex_unlock(&dev->struct_mutex);
2781 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2782 struct drm_file *file_priv)
2784 struct drm_i915_gem_pin *args = data;
2785 struct drm_gem_object *obj;
2786 struct drm_i915_gem_object *obj_priv;
2788 mutex_lock(&dev->struct_mutex);
2790 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2792 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2794 mutex_unlock(&dev->struct_mutex);
2798 obj_priv = obj->driver_private;
2799 if (obj_priv->pin_filp != file_priv) {
2800 DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
2802 drm_gem_object_unreference(obj);
2803 mutex_unlock(&dev->struct_mutex);
2806 obj_priv->user_pin_count--;
2807 if (obj_priv->user_pin_count == 0) {
2808 obj_priv->pin_filp = NULL;
2809 i915_gem_object_unpin(obj);
2812 drm_gem_object_unreference(obj);
2813 mutex_unlock(&dev->struct_mutex);
2818 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2819 struct drm_file *file_priv)
2821 struct drm_i915_gem_busy *args = data;
2822 struct drm_gem_object *obj;
2823 struct drm_i915_gem_object *obj_priv;
2825 mutex_lock(&dev->struct_mutex);
2826 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2828 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2830 mutex_unlock(&dev->struct_mutex);
2834 obj_priv = obj->driver_private;
2835 /* Don't count being on the flushing list against the object being
2836 * done. Otherwise, a buffer left on the flushing list but not getting
2837 * flushed (because nobody's flushing that domain) won't ever return
2838 * unbusy and get reused by libdrm's bo cache. The other expected
2839 * consumer of this interface, OpenGL's occlusion queries, also specs
2840 * that the objects get unbusy "eventually" without any interference.
2842 args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
2844 drm_gem_object_unreference(obj);
2845 mutex_unlock(&dev->struct_mutex);
2850 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2851 struct drm_file *file_priv)
2853 return i915_gem_ring_throttle(dev, file_priv);
2856 int i915_gem_init_object(struct drm_gem_object *obj)
2858 struct drm_i915_gem_object *obj_priv;
2860 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2861 if (obj_priv == NULL)
2865 * We've just allocated pages from the kernel,
2866 * so they've just been written by the CPU with
2867 * zeros. They'll need to be clflushed before we
2868 * use them with the GPU.
2870 obj->write_domain = I915_GEM_DOMAIN_CPU;
2871 obj->read_domains = I915_GEM_DOMAIN_CPU;
2873 obj_priv->agp_type = AGP_USER_MEMORY;
2875 obj->driver_private = obj_priv;
2876 obj_priv->obj = obj;
2877 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2878 INIT_LIST_HEAD(&obj_priv->list);
2883 void i915_gem_free_object(struct drm_gem_object *obj)
2885 struct drm_device *dev = obj->dev;
2886 struct drm_gem_mm *mm = dev->mm_private;
2887 struct drm_map_list *list;
2888 struct drm_map *map;
2889 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2891 while (obj_priv->pin_count > 0)
2892 i915_gem_object_unpin(obj);
2894 if (obj_priv->phys_obj)
2895 i915_gem_detach_phys_object(dev, obj);
2897 i915_gem_object_unbind(obj);
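/* Unbinding has released the object's GTT space and backing page list; all
 * that remains is the mmap offset bookkeeping and the driver-private state
 * freed below.
 */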
2899 list = &obj->map_list;
2900 drm_ht_remove_item(&mm->offset_hash, &list->hash);
2902 if (list->file_offset_node) {
2903 drm_mm_put_block(list->file_offset_node);
2904 list->file_offset_node = NULL;
2909 drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
2913 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2914 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2917 /** Unbinds all objects that are on the given buffer list. */
2919 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2921 struct drm_gem_object *obj;
2922 struct drm_i915_gem_object *obj_priv;
2925 while (!list_empty(head)) {
2926 obj_priv = list_first_entry(head,
2927 struct drm_i915_gem_object,
2929 obj = obj_priv->obj;
2931 if (obj_priv->pin_count != 0) {
2932 DRM_ERROR("Pinned object in unbind list\n");
2933 mutex_unlock(&dev->struct_mutex);
2937 ret = i915_gem_object_unbind(obj);
2939 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2941 mutex_unlock(&dev->struct_mutex);
2951 i915_gem_idle(struct drm_device *dev)
2953 drm_i915_private_t *dev_priv = dev->dev_private;
2954 uint32_t seqno, cur_seqno, last_seqno;
2957 mutex_lock(&dev->struct_mutex);
2959 if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
2960 mutex_unlock(&dev->struct_mutex);
2964 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2965 * We need to replace this with a semaphore, or something.
2967 dev_priv->mm.suspended = 1;
2969 /* Cancel the retire work handler, wait for it to finish if running
2971 mutex_unlock(&dev->struct_mutex);
2972 cancel_delayed_work_sync(&dev_priv->mm.retire_work);
2973 mutex_lock(&dev->struct_mutex);
2975 i915_kernel_lost_context(dev);
2977 /* Flush the GPU along with all non-CPU write domains
2979 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2980 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2981 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
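/* Queue one final request behind the flush, then poll below until it
 * retires; if the seqno stops advancing for too long, the GPU is declared
 * wedged.
 */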
2984 mutex_unlock(&dev->struct_mutex);
2988 dev_priv->mm.waiting_gem_seqno = seqno;
2992 cur_seqno = i915_get_gem_seqno(dev);
2993 if (i915_seqno_passed(cur_seqno, seqno))
2995 if (last_seqno == cur_seqno) {
2996 if (stuck++ > 100) {
2997 DRM_ERROR("hardware wedged\n");
2998 dev_priv->mm.wedged = 1;
2999 DRM_WAKEUP(&dev_priv->irq_queue);
3004 last_seqno = cur_seqno;
3006 dev_priv->mm.waiting_gem_seqno = 0;
3008 i915_gem_retire_requests(dev);
3010 if (!dev_priv->mm.wedged) {
3011 /* Active and flushing should now be empty as we've
3012 * waited for a sequence higher than any pending execbuffer
3014 WARN_ON(!list_empty(&dev_priv->mm.active_list));
3015 WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
3016 /* The request list should now be empty as well, since we've also waited
3017 * for the last request in the list.
3019 WARN_ON(!list_empty(&dev_priv->mm.request_list));
3022 /* Empty the active and flushing lists to inactive. If there's
3023 * anything left at this point, it means that we're wedged and
3024 * nothing good's going to happen by leaving them there. So strip
3025 * the GPU domains and just stuff them onto inactive.
3027 while (!list_empty(&dev_priv->mm.active_list)) {
3028 struct drm_i915_gem_object *obj_priv;
3030 obj_priv = list_first_entry(&dev_priv->mm.active_list,
3031 struct drm_i915_gem_object,
3033 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3034 i915_gem_object_move_to_inactive(obj_priv->obj);
3037 while (!list_empty(&dev_priv->mm.flushing_list)) {
3038 struct drm_i915_gem_object *obj_priv;
3040 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
3041 struct drm_i915_gem_object,
3043 obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
3044 i915_gem_object_move_to_inactive(obj_priv->obj);
3048 /* Move all inactive buffers out of the GTT. */
3049 ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
3050 WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
3052 mutex_unlock(&dev->struct_mutex);
3056 i915_gem_cleanup_ringbuffer(dev);
3057 mutex_unlock(&dev->struct_mutex);
3063 i915_gem_init_hws(struct drm_device *dev)
3065 drm_i915_private_t *dev_priv = dev->dev_private;
3066 struct drm_gem_object *obj;
3067 struct drm_i915_gem_object *obj_priv;
3070 /* If we need a physical address for the status page, it's already
3071 * initialized at driver load time.
3073 if (!I915_NEED_GFX_HWS(dev))
3076 obj = drm_gem_object_alloc(dev, 4096);
3078 DRM_ERROR("Failed to allocate status page\n");
3081 obj_priv = obj->driver_private;
3082 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
3084 ret = i915_gem_object_pin(obj, 4096);
3086 drm_gem_object_unreference(obj);
3090 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
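/* Chips that need a GTT-based status page get a kernel mapping of its
 * single page here, and the GTT address is programmed into HWS_PGA below.
 */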
3092 dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
3093 if (dev_priv->hw_status_page == NULL) {
3094 DRM_ERROR("Failed to map status page.\n");
3095 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3096 drm_gem_object_unreference(obj);
3099 dev_priv->hws_obj = obj;
3100 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
3101 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
3102 I915_READ(HWS_PGA); /* posting read */
3103 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
3109 i915_gem_init_ringbuffer(struct drm_device *dev)
3111 drm_i915_private_t *dev_priv = dev->dev_private;
3112 struct drm_gem_object *obj;
3113 struct drm_i915_gem_object *obj_priv;
3114 drm_i915_ring_buffer_t *ring = &dev_priv->ring;
3118 ret = i915_gem_init_hws(dev);
3122 obj = drm_gem_object_alloc(dev, 128 * 1024);
3124 DRM_ERROR("Failed to allocate ringbuffer\n");
3127 obj_priv = obj->driver_private;
3129 ret = i915_gem_object_pin(obj, 4096);
3131 drm_gem_object_unreference(obj);
3135 /* Set up the kernel mapping for the ring. */
3136 ring->Size = obj->size;
3137 ring->tail_mask = obj->size - 1;
3139 ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
3140 ring->map.size = obj->size;
3142 ring->map.flags = 0;
3145 drm_core_ioremap_wc(&ring->map, dev);
3146 if (ring->map.handle == NULL) {
3147 DRM_ERROR("Failed to map ringbuffer.\n");
3148 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3149 drm_gem_object_unreference(obj);
3152 ring->ring_obj = obj;
3153 ring->virtual_start = ring->map.handle;
3155 /* Stop the ring if it's running. */
3156 I915_WRITE(PRB0_CTL, 0);
3157 I915_WRITE(PRB0_TAIL, 0);
3158 I915_WRITE(PRB0_HEAD, 0);
3160 /* Initialize the ring. */
3161 I915_WRITE(PRB0_START, obj_priv->gtt_offset);
3162 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3164 /* G45 ring initialization fails to reset head to zero */
3166 DRM_ERROR("Ring head not reset to zero "
3167 "ctl %08x head %08x tail %08x start %08x\n",
3168 I915_READ(PRB0_CTL),
3169 I915_READ(PRB0_HEAD),
3170 I915_READ(PRB0_TAIL),
3171 I915_READ(PRB0_START));
3172 I915_WRITE(PRB0_HEAD, 0);
3174 DRM_ERROR("Ring head forced to zero "
3175 "ctl %08x head %08x tail %08x start %08x\n",
3176 I915_READ(PRB0_CTL),
3177 I915_READ(PRB0_HEAD),
3178 I915_READ(PRB0_TAIL),
3179 I915_READ(PRB0_START));
3182 I915_WRITE(PRB0_CTL,
3183 ((obj->size - 4096) & RING_NR_PAGES) |
3187 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3189 /* If the head is still not zero, the ring is dead */
3191 DRM_ERROR("Ring initialization failed "
3192 "ctl %08x head %08x tail %08x start %08x\n",
3193 I915_READ(PRB0_CTL),
3194 I915_READ(PRB0_HEAD),
3195 I915_READ(PRB0_TAIL),
3196 I915_READ(PRB0_START));
3200 /* Update our cache of the ring state */
3201 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3202 i915_kernel_lost_context(dev);
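/* Derive the free-space estimate from the hardware head and tail pointers;
 * eight bytes are held in reserve so the tail never quite catches the head,
 * and a negative result wraps around the ring size.
 */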
3204 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
3205 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
3206 ring->space = ring->head - (ring->tail + 8);
3207 if (ring->space < 0)
3208 ring->space += ring->Size;
3215 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3217 drm_i915_private_t *dev_priv = dev->dev_private;
3219 if (dev_priv->ring.ring_obj == NULL)
3222 drm_core_ioremapfree(&dev_priv->ring.map, dev);
3224 i915_gem_object_unpin(dev_priv->ring.ring_obj);
3225 drm_gem_object_unreference(dev_priv->ring.ring_obj);
3226 dev_priv->ring.ring_obj = NULL;
3227 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3229 if (dev_priv->hws_obj != NULL) {
3230 struct drm_gem_object *obj = dev_priv->hws_obj;
3231 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3233 kunmap(obj_priv->page_list[0]);
3234 i915_gem_object_unpin(obj);
3235 drm_gem_object_unreference(obj);
3236 dev_priv->hws_obj = NULL;
3237 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3238 dev_priv->hw_status_page = NULL;
3240 /* Write high address into HWS_PGA when disabling. */
3241 I915_WRITE(HWS_PGA, 0x1ffff000);
3246 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3247 struct drm_file *file_priv)
3249 drm_i915_private_t *dev_priv = dev->dev_private;
3252 if (drm_core_check_feature(dev, DRIVER_MODESET))
3255 if (dev_priv->mm.wedged) {
3256 DRM_ERROR("Reenabling wedged hardware, good luck\n");
3257 dev_priv->mm.wedged = 0;
3260 mutex_lock(&dev->struct_mutex);
3261 dev_priv->mm.suspended = 0;
3263 ret = i915_gem_init_ringbuffer(dev);
3267 BUG_ON(!list_empty(&dev_priv->mm.active_list));
3268 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
3269 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
3270 BUG_ON(!list_empty(&dev_priv->mm.request_list));
3271 mutex_unlock(&dev->struct_mutex);
3273 drm_irq_install(dev);
3279 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3280 struct drm_file *file_priv)
3284 if (drm_core_check_feature(dev, DRIVER_MODESET))
3287 ret = i915_gem_idle(dev);
3288 drm_irq_uninstall(dev);
3294 i915_gem_lastclose(struct drm_device *dev)
3298 if (drm_core_check_feature(dev, DRIVER_MODESET))
3301 ret = i915_gem_idle(dev);
3303 DRM_ERROR("failed to idle hardware: %d\n", ret);
3307 i915_gem_load(struct drm_device *dev)
3309 drm_i915_private_t *dev_priv = dev->dev_private;
3311 INIT_LIST_HEAD(&dev_priv->mm.active_list);
3312 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
3313 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
3314 INIT_LIST_HEAD(&dev_priv->mm.request_list);
3315 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3316 i915_gem_retire_work_handler);
3317 dev_priv->mm.next_gem_seqno = 1;
3319 /* Old X drivers will take 0-2 for front, back, depth buffers */
3320 dev_priv->fence_reg_start = 3;
3322 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3323 dev_priv->num_fence_regs = 16;
3325 dev_priv->num_fence_regs = 8;
3327 i915_gem_detect_bit_6_swizzle(dev);
3331 * Create a physically contiguous memory object for this object
3332 * e.g. for cursor + overlay regs
3334 int i915_gem_init_phys_object(struct drm_device *dev,
3337 drm_i915_private_t *dev_priv = dev->dev_private;
3338 struct drm_i915_gem_phys_object *phys_obj;
3341 if (dev_priv->mm.phys_objs[id - 1] || !size)
3344 phys_obj = drm_calloc(1, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3350 phys_obj->handle = drm_pci_alloc(dev, size, 0, 0xffffffff);
3351 if (!phys_obj->handle) {
3356 set_memory_wc((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3359 dev_priv->mm.phys_objs[id - 1] = phys_obj;
3363 drm_free(phys_obj, sizeof(struct drm_i915_gem_phys_object), DRM_MEM_DRIVER);
3367 void i915_gem_free_phys_object(struct drm_device *dev, int id)
3369 drm_i915_private_t *dev_priv = dev->dev_private;
3370 struct drm_i915_gem_phys_object *phys_obj;
3372 if (!dev_priv->mm.phys_objs[id - 1])
3375 phys_obj = dev_priv->mm.phys_objs[id - 1];
3376 if (phys_obj->cur_obj) {
3377 i915_gem_detach_phys_object(dev, phys_obj->cur_obj);
3381 set_memory_wb((unsigned long)phys_obj->handle->vaddr, phys_obj->handle->size / PAGE_SIZE);
3383 drm_pci_free(dev, phys_obj->handle);
3385 dev_priv->mm.phys_objs[id - 1] = NULL;
3388 void i915_gem_free_all_phys_object(struct drm_device *dev)
3392 for (i = I915_GEM_PHYS_CURSOR_0; i <= I915_MAX_PHYS_OBJECT; i++)
3393 i915_gem_free_phys_object(dev, i);
3396 void i915_gem_detach_phys_object(struct drm_device *dev,
3397 struct drm_gem_object *obj)
3399 struct drm_i915_gem_object *obj_priv;
3404 obj_priv = obj->driver_private;
3405 if (!obj_priv->phys_obj)
3408 ret = i915_gem_object_get_page_list(obj);
3412 page_count = obj->size / PAGE_SIZE;
3414 for (i = 0; i < page_count; i++) {
3415 char *dst = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3416 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3418 memcpy(dst, src, PAGE_SIZE);
3419 kunmap_atomic(dst, KM_USER0);
3421 drm_clflush_pages(obj_priv->page_list, page_count);
3422 drm_agp_chipset_flush(dev);
3424 obj_priv->phys_obj->cur_obj = NULL;
3425 obj_priv->phys_obj = NULL;
3429 i915_gem_attach_phys_object(struct drm_device *dev,
3430 struct drm_gem_object *obj, int id)
3432 drm_i915_private_t *dev_priv = dev->dev_private;
3433 struct drm_i915_gem_object *obj_priv;
3438 if (id > I915_MAX_PHYS_OBJECT)
3441 obj_priv = obj->driver_private;
3443 if (obj_priv->phys_obj) {
3444 if (obj_priv->phys_obj->id == id)
3446 i915_gem_detach_phys_object(dev, obj);
3450 /* create a new object */
3451 if (!dev_priv->mm.phys_objs[id - 1]) {
3452 ret = i915_gem_init_phys_object(dev, id,
3455 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
3460 /* bind to the object */
3461 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
3462 obj_priv->phys_obj->cur_obj = obj;
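/* Preserve the object's current contents by copying each shmem-backed page
 * into the contiguous allocation; subsequent pwrites on this object are
 * expected to land there via i915_gem_phys_pwrite().
 */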
3464 ret = i915_gem_object_get_page_list(obj);
3466 DRM_ERROR("failed to get page list\n");
3470 page_count = obj->size / PAGE_SIZE;
3472 for (i = 0; i < page_count; i++) {
3473 char *src = kmap_atomic(obj_priv->page_list[i], KM_USER0);
3474 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
3476 memcpy(dst, src, PAGE_SIZE);
3477 kunmap_atomic(src, KM_USER0);
3486 i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3487 struct drm_i915_gem_pwrite *args,
3488 struct drm_file *file_priv)
3490 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3493 char __user *user_data;
3495 user_data = (char __user *) (uintptr_t) args->data_ptr;
3496 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3498 DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size);
3499 ret = copy_from_user(obj_addr, user_data, args->size);
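/* The user data lands directly in the contiguous kernel buffer; the chipset
 * flush below makes the write visible to the hardware (cursor/overlay)
 * scanning that memory.
 */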
3503 drm_agp_chipset_flush(dev);