/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)
#define  __EXEC_OBJECT_NEEDS_BIAS (1<<28)

#define BATCH_OFFSET_BIAS (256*1024)
struct eb_vmas {
	struct list_head vmas;
	int and;
	union {
		struct i915_vma *lut[0];
		struct hlist_head buckets[0];
	};
};
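/*
 * eb->and doubles as the lookup-mode selector: a negative value means the
 * execbuffer was submitted with I915_EXEC_HANDLE_LUT and lut[] is indexed
 * directly by buffer index, while a non-negative value is the hash mask for
 * the buckets[] table keyed by object handle (see eb_create and eb_get_vma).
 */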
static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
	struct eb_vmas *eb = NULL;

	if (args->flags & I915_EXEC_HANDLE_LUT) {
		unsigned size = args->buffer_count;
		size *= sizeof(struct i915_vma *);
		size += sizeof(struct eb_vmas);
		eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	}

	if (eb == NULL) {
		unsigned size = args->buffer_count;
		unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
		BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
		while (count > 2*size)
			count >>= 1;
		eb = kzalloc(count*sizeof(struct hlist_head) +
			     sizeof(struct eb_vmas),
			     GFP_TEMPORARY);
		if (eb == NULL)
			return eb;

		eb->and = count - 1;
	} else
		eb->and = -args->buffer_count;

	INIT_LIST_HEAD(&eb->vmas);
	return eb;
}
static void
eb_reset(struct eb_vmas *eb)
{
	if (eb->and >= 0)
		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static int
eb_lookup_vmas(struct eb_vmas *eb,
	       struct drm_i915_gem_exec_object2 *exec,
	       const struct drm_i915_gem_execbuffer2 *args,
	       struct i915_address_space *vm,
	       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct list_head objects;
	int i, ret;

	INIT_LIST_HEAD(&objects);
	spin_lock(&file->table_lock);
	/* Grab a reference to the object and release the lock so we can lookup
	 * or create the VMA without using GFP_ATOMIC */
	for (i = 0; i < args->buffer_count; i++) {
		obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
		if (obj == NULL) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Invalid object handle %d at index %d\n",
				   exec[i].handle, i);
			ret = -ENOENT;
			goto err;
		}

		if (!list_empty(&obj->obj_exec_link)) {
			spin_unlock(&file->table_lock);
			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
				   obj, exec[i].handle, i);
			ret = -EINVAL;
			goto err;
		}

		drm_gem_object_reference(&obj->base);
		list_add_tail(&obj->obj_exec_link, &objects);
	}
	spin_unlock(&file->table_lock);

	i = 0;
	while (!list_empty(&objects)) {
		struct i915_vma *vma;
		struct i915_address_space *bind_vm = vm;

		if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
		    USES_FULL_PPGTT(vm->dev)) {
			ret = -EINVAL;
			goto err;
		}

		/* If we have secure dispatch, or the userspace assures us that
		 * they know what they're doing, use the GGTT VM.
		 */
		if (((args->flags & I915_EXEC_SECURE) &&
		    (i == (args->buffer_count - 1))))
			bind_vm = &dev_priv->gtt.base;

		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);

		/*
		 * NOTE: We can leak any vmas created here when something fails
		 * later on. But that's no issue since vma_unbind can deal with
		 * vmas which are not actually bound. And since only
		 * lookup_or_create exists as an interface to get at the vma
		 * from the (obj, vm) we don't run the risk of creating
		 * duplicated vmas for the same vm.
		 */
		vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
		if (IS_ERR(vma)) {
			DRM_DEBUG("Failed to lookup VMA\n");
			ret = PTR_ERR(vma);
			goto err;
		}

		/* Transfer ownership from the objects list to the vmas list. */
		list_add_tail(&vma->exec_list, &eb->vmas);
		list_del_init(&obj->obj_exec_link);

		vma->exec_entry = &exec[i];
		if (eb->and < 0) {
			eb->lut[i] = vma;
		} else {
			uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
			vma->exec_handle = handle;
			hlist_add_head(&vma->exec_node,
				       &eb->buckets[handle & eb->and]);
		}
		++i;
	}

	return 0;


err:
	while (!list_empty(&objects)) {
		obj = list_first_entry(&objects,
				       struct drm_i915_gem_object,
				       obj_exec_link);
		list_del_init(&obj->obj_exec_link);
		drm_gem_object_unreference(&obj->base);
	}
	/*
	 * Objects already transferred to the vmas list will be unreferenced by
	 * eb_destroy.
	 */

	return ret;
}
static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
	if (eb->and < 0) {
		if (handle >= -eb->and)
			return NULL;
		return eb->lut[handle];
	} else {
		struct hlist_head *head;
		struct hlist_node *node;

		head = &eb->buckets[handle & eb->and];
		hlist_for_each(node, head) {
			struct i915_vma *vma;

			vma = hlist_entry(node, struct i915_vma, exec_node);
			if (vma->exec_handle == handle)
				return vma;
		}
		return NULL;
	}
}
static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry;
	struct drm_i915_gem_object *obj = vma->obj;

	if (!drm_mm_node_allocated(&vma->node))
		return;

	entry = vma->exec_entry;

	if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
		i915_gem_object_unpin_fence(obj);

	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
		vma->pin_count--;

	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}
static void eb_destroy(struct eb_vmas *eb)
{
	while (!list_empty(&eb->vmas)) {
		struct i915_vma *vma;

		vma = list_first_entry(&eb->vmas,
				       struct i915_vma,
				       exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}
	kfree(eb);
}
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
	return (HAS_LLC(obj->base.dev) ||
		obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
		!obj->map_and_fenceable ||
		obj->cache_level != I915_CACHE_NONE);
}
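/*
 * Two writers for the relocation value follow: relocate_entry_cpu patches the
 * dword(s) through a kmap of the backing page when the object is CPU-coherent
 * (see use_cpu_reloc above), while relocate_entry_gtt writes through an atomic
 * WC mapping of the GGTT aperture. On gen8+ both must also write the upper
 * 32 bits of the 64-bit address, possibly crossing a page boundary.
 */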
static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	uint32_t page_offset = offset_in_page(reloc->offset);
	uint64_t delta = reloc->delta + target_offset;
	char *vaddr;
	int ret;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		return ret;

	vaddr = kmap_atomic(i915_gem_object_get_page(obj,
				reloc->offset >> PAGE_SHIFT));
	*(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);

	if (INTEL_INFO(dev)->gen >= 8) {
		page_offset = offset_in_page(page_offset + sizeof(uint32_t));

		if (page_offset == 0) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic(i915_gem_object_get_page(obj,
			    (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
		}

		*(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
	}

	kunmap_atomic(vaddr);

	return 0;
}
static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
		   struct drm_i915_gem_relocation_entry *reloc,
		   uint64_t target_offset)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint64_t delta = reloc->delta + target_offset;
	uint32_t __iomem *reloc_entry;
	void __iomem *reloc_page;
	int ret;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		return ret;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		return ret;

	/* Map the page containing the relocation we're going to perform.  */
	reloc->offset += i915_gem_obj_ggtt_offset(obj);
	reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
			reloc->offset & PAGE_MASK);
	reloc_entry = (uint32_t __iomem *)
		(reloc_page + offset_in_page(reloc->offset));
	iowrite32(lower_32_bits(delta), reloc_entry);

	if (INTEL_INFO(dev)->gen >= 8) {
		reloc_entry += 1;

		if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
			io_mapping_unmap_atomic(reloc_page);
			reloc_page = io_mapping_map_atomic_wc(
					dev_priv->gtt.mappable,
					reloc->offset + sizeof(uint32_t));
			reloc_entry = reloc_page;
		}

		iowrite32(upper_32_bits(delta), reloc_entry);
	}

	io_mapping_unmap_atomic(reloc_page);

	return 0;
}
static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
				   struct eb_vmas *eb,
				   struct drm_i915_gem_relocation_entry *reloc)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_gem_object *target_obj;
	struct drm_i915_gem_object *target_i915_obj;
	struct i915_vma *target_vma;
	uint64_t target_offset;
	int ret;

	/* we already hold a reference to all valid objects */
	target_vma = eb_get_vma(eb, reloc->target_handle);
	if (unlikely(target_vma == NULL))
		return -ENOENT;
	target_i915_obj = target_vma->obj;
	target_obj = &target_vma->obj->base;

	target_offset = target_vma->node.start;

	/* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
	 * pipe_control writes because the gpu doesn't properly redirect them
	 * through the ppgtt for non_secure batchbuffers. */
	if (unlikely(IS_GEN6(dev) &&
	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
	    !target_i915_obj->has_global_gtt_mapping)) {
		struct i915_vma *vma =
			list_first_entry(&target_i915_obj->vma_list,
					 typeof(*vma), vma_link);
		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
	}

	/* Validate that the target is in a valid r/w GPU domain */
	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
		DRM_DEBUG("reloc with multiple write domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}
	if (unlikely((reloc->write_domain | reloc->read_domains)
		     & ~I915_GEM_GPU_DOMAINS)) {
		DRM_DEBUG("reloc with read/write non-GPU domains: "
			  "obj %p target %d offset %d "
			  "read %08x write %08x",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  reloc->read_domains,
			  reloc->write_domain);
		return -EINVAL;
	}

	target_obj->pending_read_domains |= reloc->read_domains;
	target_obj->pending_write_domain |= reloc->write_domain;

	/* If the relocation already has the right value in it, no
	 * more work needs to be done.
	 */
	if (target_offset == reloc->presumed_offset)
		return 0;

	/* Check that the relocation address is valid... */
	if (unlikely(reloc->offset >
		obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
		DRM_DEBUG("Relocation beyond object bounds: "
			  "obj %p target %d offset %d size %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset,
			  (int) obj->base.size);
		return -EINVAL;
	}
	if (unlikely(reloc->offset & 3)) {
		DRM_DEBUG("Relocation not 4-byte aligned: "
			  "obj %p target %d offset %d.\n",
			  obj, reloc->target_handle,
			  (int) reloc->offset);
		return -EINVAL;
	}

	/* We can't wait for rendering with pagefaults disabled */
	if (obj->active && in_atomic())
		return -EFAULT;

	if (use_cpu_reloc(obj))
		ret = relocate_entry_cpu(obj, reloc, target_offset);
	else
		ret = relocate_entry_gtt(obj, reloc, target_offset);

	if (ret)
		return ret;

	/* and update the user's relocation entry */
	reloc->presumed_offset = target_offset;

	return 0;
}
static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
				 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
	struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
	struct drm_i915_gem_relocation_entry __user *user_relocs;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int remain, ret;

	user_relocs = to_user_ptr(entry->relocs_ptr);

	remain = entry->relocation_count;
	while (remain) {
		struct drm_i915_gem_relocation_entry *r = stack_reloc;
		int count = remain;
		if (count > ARRAY_SIZE(stack_reloc))
			count = ARRAY_SIZE(stack_reloc);
		remain -= count;

		if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
			return -EFAULT;

		do {
			u64 offset = r->presumed_offset;

			ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
			if (ret)
				return ret;

			if (r->presumed_offset != offset &&
			    __copy_to_user_inatomic(&user_relocs->presumed_offset,
						    &r->presumed_offset,
						    sizeof(r->presumed_offset))) {
				return -EFAULT;
			}

			user_relocs++;
			r++;
		} while (--count);
	}

	return 0;
#undef N_RELOC
}
static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
				      struct eb_vmas *eb,
				      struct drm_i915_gem_relocation_entry *relocs)
{
	const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	int i, ret;

	for (i = 0; i < entry->relocation_count; i++) {
		ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
		if (ret)
			return ret;
	}

	return 0;
}
static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
	struct i915_vma *vma;
	int ret = 0;

	/* This is the fast path and we cannot handle a pagefault whilst
	 * holding the struct mutex lest the user pass in the relocations
	 * contained within a mmaped bo. For in such a case, the page
	 * fault handler would call i915_gem_fault() and we would try to
	 * acquire the struct mutex again. Obviously this is bad and so
	 * lockdep complains vehemently.
	 */
	pagefault_disable();
	list_for_each_entry(vma, &eb->vmas, exec_list) {
		ret = i915_gem_execbuffer_relocate_vma(vma, eb);
		if (ret)
			break;
	}
	pagefault_enable();

	return ret;
}
static bool
need_reloc_mappable(struct i915_vma *vma)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
		i915_is_ggtt(vma->vm);
}
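/*
 * i915_gem_execbuffer_reserve_vma translates the exec-object flags into pin
 * flags: PIN_MAPPABLE when a fence or a GTT relocation is required,
 * PIN_GLOBAL for EXEC_OBJECT_NEEDS_GTT, and a minimum offset of
 * BATCH_OFFSET_BIAS for the batch buffer itself.
 */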
static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
				struct intel_engine_cs *ring,
				bool *need_reloc)
{
	struct drm_i915_gem_object *obj = vma->obj;
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	bool need_fence;
	uint64_t flags;
	int ret;

	flags = 0;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	if (need_fence || need_reloc_mappable(vma))
		flags |= PIN_MAPPABLE;

	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
		flags |= PIN_GLOBAL;
	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
		flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;

	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
	if (ret)
		return ret;

	entry->flags |= __EXEC_OBJECT_HAS_PIN;

	if (has_fenced_gpu_access) {
		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
			ret = i915_gem_object_get_fence(obj);
			if (ret)
				return ret;

			if (i915_gem_object_pin_fence(obj))
				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
		}

		obj->pending_fenced_gpu_access = true;
	}

	if (entry->offset != vma->node.start) {
		entry->offset = vma->node.start;
		*need_reloc = true;
	}

	if (entry->flags & EXEC_OBJECT_WRITE) {
		obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
	}

	return 0;
}
static bool
eb_vma_misplaced(struct i915_vma *vma, bool has_fenced_gpu_access)
{
	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
	struct drm_i915_gem_object *obj = vma->obj;
	bool need_fence, need_mappable;

	need_fence =
		has_fenced_gpu_access &&
		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
		obj->tiling_mode != I915_TILING_NONE;
	need_mappable = need_fence || need_reloc_mappable(vma);

	WARN_ON((need_mappable || need_fence) &&
		!i915_is_ggtt(vma->vm));

	if (entry->alignment &&
	    vma->node.start & (entry->alignment - 1))
		return true;

	if (need_mappable && !obj->map_and_fenceable)
		return true;

	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
	    vma->node.start < BATCH_OFFSET_BIAS)
		return true;

	return false;
}
static int
i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
			    struct list_head *vmas,
			    bool *need_relocs)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	struct i915_address_space *vm;
	struct list_head ordered_vmas;
	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
	int retry;

	if (list_empty(vmas))
		return 0;

	i915_gem_retire_requests_ring(ring);

	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

	INIT_LIST_HEAD(&ordered_vmas);
	while (!list_empty(vmas)) {
		struct drm_i915_gem_exec_object2 *entry;
		bool need_fence, need_mappable;

		vma = list_first_entry(vmas, struct i915_vma, exec_list);
		obj = vma->obj;
		entry = vma->exec_entry;

		need_fence =
			has_fenced_gpu_access &&
			entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
			obj->tiling_mode != I915_TILING_NONE;
		need_mappable = need_fence || need_reloc_mappable(vma);

		if (need_mappable)
			list_move(&vma->exec_list, &ordered_vmas);
		else
			list_move_tail(&vma->exec_list, &ordered_vmas);

		obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
		obj->base.pending_write_domain = 0;
		obj->pending_fenced_gpu_access = false;
	}
	list_splice(&ordered_vmas, vmas);

	/* Attempt to pin all of the buffers into the GTT.
	 * This is done in 3 phases:
	 *
	 * 1a. Unbind all objects that do not match the GTT constraints for
	 *     the execbuffer (fenceable, mappable, alignment etc).
	 * 1b. Increment pin count for already bound objects.
	 * 2.  Bind new objects.
	 * 3.  Decrement pin count.
	 *
	 * This avoids unnecessary unbinding of later objects in order to make
	 * room for the earlier objects *unless* we need to defragment.
	 */
	retry = 0;
	do {
		int ret = 0;

		/* Unbind any ill-fitting objects or pin. */
		list_for_each_entry(vma, vmas, exec_list) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			if (eb_vma_misplaced(vma, has_fenced_gpu_access))
				ret = i915_vma_unbind(vma);
			else
				ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

		/* Bind fresh objects */
		list_for_each_entry(vma, vmas, exec_list) {
			if (drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
			if (ret)
				goto err;
		}

err:
		if (ret != -ENOSPC || retry++)
			return ret;

		/* Decrement pin count for bound objects */
		list_for_each_entry(vma, vmas, exec_list)
			i915_gem_execbuffer_unreserve_vma(vma);

		ret = i915_gem_evict_vm(vm, true);
		if (ret)
			return ret;
	} while (1);
}
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
				  struct drm_i915_gem_execbuffer2 *args,
				  struct drm_file *file,
				  struct intel_engine_cs *ring,
				  struct eb_vmas *eb,
				  struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_gem_relocation_entry *reloc;
	struct i915_address_space *vm;
	struct i915_vma *vma;
	bool need_relocs;
	int *reloc_offset;
	int i, total, ret;
	unsigned count = args->buffer_count;

	if (WARN_ON(list_empty(&eb->vmas)))
		return 0;

	vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

	/* We may process another execbuffer during the unlock... */
	while (!list_empty(&eb->vmas)) {
		vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
		list_del_init(&vma->exec_list);
		i915_gem_execbuffer_unreserve_vma(vma);
		drm_gem_object_unreference(&vma->obj->base);
	}

	mutex_unlock(&dev->struct_mutex);

	total = 0;
	for (i = 0; i < count; i++)
		total += exec[i].relocation_count;

	reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
	reloc = drm_malloc_ab(total, sizeof(*reloc));
	if (reloc == NULL || reloc_offset == NULL) {
		drm_free_large(reloc);
		drm_free_large(reloc_offset);
		mutex_lock(&dev->struct_mutex);
		return -ENOMEM;
	}

	total = 0;
	for (i = 0; i < count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
		u64 invalid_offset = (u64)-1;
		int j;

		user_relocs = to_user_ptr(exec[i].relocs_ptr);

		if (copy_from_user(reloc+total, user_relocs,
				   exec[i].relocation_count * sizeof(*reloc))) {
			ret = -EFAULT;
			mutex_lock(&dev->struct_mutex);
			goto err;
		}

		/* As we do not update the known relocation offsets after
		 * relocating (due to the complexities in lock handling),
		 * we need to mark them as invalid now so that we force the
		 * relocation processing next time. Just in case the target
		 * object is evicted and then rebound into its old
		 * presumed_offset before the next execbuffer - if that
		 * happened we would make the mistake of assuming that the
		 * relocations were valid.
		 */
		for (j = 0; j < exec[i].relocation_count; j++) {
			if (__copy_to_user(&user_relocs[j].presumed_offset,
					   &invalid_offset,
					   sizeof(invalid_offset))) {
				ret = -EFAULT;
				mutex_lock(&dev->struct_mutex);
				goto err;
			}
		}

		reloc_offset[i] = total;
		total += exec[i].relocation_count;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret) {
		mutex_lock(&dev->struct_mutex);
		goto err;
	}

	/* reacquire the objects */
	eb_reset(eb);
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	list_for_each_entry(vma, &eb->vmas, exec_list) {
		int offset = vma->exec_entry - exec;
		ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
							    reloc + reloc_offset[offset]);
		if (ret)
			goto err;
	}

	/* Leave the user relocations as are, this is the painfully slow path,
	 * and we want to avoid the complication of dropping the lock whilst
	 * having buffers reserved in the aperture and so causing spurious
	 * ENOSPC for random operations.
	 */

err:
	drm_free_large(reloc);
	drm_free_large(reloc_offset);
	return ret;
}
static int
i915_gem_execbuffer_move_to_gpu(struct intel_engine_cs *ring,
				struct list_head *vmas)
{
	struct i915_vma *vma;
	uint32_t flush_domains = 0;
	bool flush_chipset = false;
	int ret;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		ret = i915_gem_object_sync(obj, ring);
		if (ret)
			return ret;

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			flush_chipset |= i915_gem_clflush_object(obj, false);

		flush_domains |= obj->base.write_domain;
	}

	if (flush_chipset)
		i915_gem_chipset_flush(ring->dev);

	if (flush_domains & I915_GEM_DOMAIN_GTT)
		wmb();

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	return intel_ring_invalidate_all_caches(ring);
}
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
	if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
		return false;

	return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
		   int count)
{
	int i;
	unsigned relocs_total = 0;
	unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

	for (i = 0; i < count; i++) {
		char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
		int length; /* limited by fault_in_pages_readable() */

		if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
			return -EINVAL;

		/* First check for malicious input causing overflow in
		 * the worst case where we need to allocate the entire
		 * relocation tree as a single array.
		 */
		if (exec[i].relocation_count > relocs_max - relocs_total)
			return -EINVAL;
		relocs_total += exec[i].relocation_count;

		length = exec[i].relocation_count *
			sizeof(struct drm_i915_gem_relocation_entry);
		/*
		 * We must check that the entire relocation array is safe
		 * to read, but since we may need to update the presumed
		 * offsets during execution, check for full write access.
		 */
		if (!access_ok(VERIFY_WRITE, ptr, length))
			return -EFAULT;

		if (likely(!i915.prefault_disable)) {
			if (fault_in_multipages_readable(ptr, length))
				return -EFAULT;
		}
	}

	return 0;
}
static struct intel_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
			  struct intel_engine_cs *ring, const u32 ctx_id)
{
	struct intel_context *ctx = NULL;
	struct i915_ctx_hang_stats *hs;

	if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
		return ERR_PTR(-EINVAL);

	ctx = i915_gem_context_get(file->driver_priv, ctx_id);
	if (IS_ERR(ctx))
		return ctx;

	hs = &ctx->hang_stats;
	if (hs->banned) {
		DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
		return ERR_PTR(-EIO);
	}

	return ctx;
}
static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
				   struct intel_engine_cs *ring)
{
	struct i915_vma *vma;

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;
		u32 old_read = obj->base.read_domains;
		u32 old_write = obj->base.write_domain;

		obj->base.write_domain = obj->base.pending_write_domain;
		if (obj->base.write_domain == 0)
			obj->base.pending_read_domains |= obj->base.read_domains;
		obj->base.read_domains = obj->base.pending_read_domains;
		obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

		i915_vma_move_to_active(vma, ring);
		if (obj->base.write_domain) {
			obj->dirty = 1;
			obj->last_write_seqno = intel_ring_get_seqno(ring);

			intel_fb_obj_invalidate(obj, ring);

			/* update for the implicit flush after a batch */
			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
		}

		trace_i915_gem_object_change_domain(obj, old_read, old_write);
	}
}
static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
				    struct drm_file *file,
				    struct intel_engine_cs *ring,
				    struct drm_i915_gem_object *obj)
{
	/* Unconditionally force add_request to emit a full flush. */
	ring->gpu_caches_dirty = true;

	/* Add a breadcrumb for the completion of the batch buffer */
	(void)__i915_add_request(ring, file, obj, NULL);
}
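/*
 * I915_EXEC_GEN7_SOL_RESET asks the kernel to zero the four
 * GEN7_SO_WRITE_OFFSET registers via MI_LOAD_REGISTER_IMM before the batch
 * runs; it is only honoured for gen7 render-ring submissions.
 */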
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
			    struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret, i;

	if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
		DRM_DEBUG("sol reset is gen7/rcs only\n");
		return -EINVAL;
	}

	ret = intel_ring_begin(ring, 4 * 3);
	if (ret)
		return ret;

	for (i = 0; i < 4; i++) {
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
		intel_ring_emit(ring, 0);
	}

	intel_ring_advance(ring);

	return 0;
}
static int
legacy_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
			     struct intel_engine_cs *ring,
			     struct intel_context *ctx,
			     struct drm_i915_gem_execbuffer2 *args,
			     struct list_head *vmas,
			     struct drm_i915_gem_object *batch_obj,
			     u64 exec_start, u32 flags)
{
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u64 exec_len;
	int instp_mode;
	u32 instp_mask;
	int i, ret = 0;

	if (args->num_cliprects != 0) {
		if (ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("clip rectangles are only valid with the render ring\n");
			return -EINVAL;
		}
		if (INTEL_INFO(dev)->gen >= 5) {
			DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
			return -EINVAL;
		}
		if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
			DRM_DEBUG("execbuf with %u cliprects\n",
				  args->num_cliprects);
			return -EINVAL;
		}
		cliprects = kcalloc(args->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto error;
		}
		if (copy_from_user(cliprects,
				   to_user_ptr(args->cliprects_ptr),
				   sizeof(*cliprects)*args->num_cliprects)) {
			ret = -EFAULT;
			goto error;
		}
	} else {
		if (args->DR4 == 0xffffffff) {
			DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
			args->DR4 = 0;
		}
		if (args->DR1 || args->DR4 || args->cliprects_ptr) {
			DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
			return -EINVAL;
		}
	}

	ret = i915_gem_execbuffer_move_to_gpu(ring, vmas);
	if (ret)
		goto error;

	ret = i915_switch_context(ring, ctx);
	if (ret)
		goto error;

	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
	instp_mask = I915_EXEC_CONSTANTS_MASK;
	switch (instp_mode) {
	case I915_EXEC_CONSTANTS_REL_GENERAL:
	case I915_EXEC_CONSTANTS_ABSOLUTE:
	case I915_EXEC_CONSTANTS_REL_SURFACE:
		if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
			ret = -EINVAL;
			goto error;
		}
		if (instp_mode != dev_priv->relative_constants_mode) {
			if (INTEL_INFO(dev)->gen < 4) {
				DRM_DEBUG("no rel constants on pre-gen4\n");
				ret = -EINVAL;
				goto error;
			}
			if (INTEL_INFO(dev)->gen > 5 &&
			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
				ret = -EINVAL;
				goto error;
			}
			/* The HW changed the meaning on this bit on gen6 */
			if (INTEL_INFO(dev)->gen >= 6)
				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
		}
		break;
	default:
		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
		ret = -EINVAL;
		goto error;
	}

	if (ring == &dev_priv->ring[RCS] &&
	    instp_mode != dev_priv->relative_constants_mode) {
		ret = intel_ring_begin(ring, 4);
		if (ret)
			goto error;
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, INSTPM);
		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
		intel_ring_advance(ring);

		dev_priv->relative_constants_mode = instp_mode;
	}

	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
		ret = i915_reset_gen7_sol_offsets(dev, ring);
		if (ret)
			goto error;
	}

	exec_len = args->batch_len;
	if (cliprects) {
		for (i = 0; i < args->num_cliprects; i++) {
			ret = i915_emit_box(dev, &cliprects[i],
					    args->DR1, args->DR4);
			if (ret)
				goto error;

			ret = ring->dispatch_execbuffer(ring,
							exec_start, exec_len,
							flags);
			if (ret)
				goto error;
		}
	} else {
		ret = ring->dispatch_execbuffer(ring,
						exec_start, exec_len,
						flags);
		if (ret)
			goto error;
	}

	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

	i915_gem_execbuffer_move_to_active(vmas, ring);
	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

error:
	kfree(cliprects);
	return ret;
}
/**
 * Find one BSD ring to dispatch the corresponding BSD command.
 * The Ring ID is returned.
 */
static int gen8_dispatch_bsd_ring(struct drm_device *dev,
				  struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_file_private *file_priv = file->driver_priv;

	/* Check whether the file_priv is using one ring */
	if (file_priv->bsd_ring)
		return file_priv->bsd_ring->id;
	else {
		/* If no, use the ping-pong mechanism to select one ring */
		int ring_id;

		mutex_lock(&dev->struct_mutex);
		if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
			ring_id = VCS;
			dev_priv->mm.bsd_ring_dispatch_index = 1;
		} else {
			ring_id = VCS2;
			dev_priv->mm.bsd_ring_dispatch_index = 0;
		}
		file_priv->bsd_ring = &dev_priv->ring[ring_id];
		mutex_unlock(&dev->struct_mutex);
		return ring_id;
	}
}
static struct drm_i915_gem_object *
eb_get_batch(struct eb_vmas *eb)
{
	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);

	/*
	 * SNA is doing fancy tricks with compressing batch buffers, which leads
	 * to negative relocation deltas. Usually that works out ok since the
	 * relocate address is still positive, except when the batch is placed
	 * very low in the GTT. Ensure this doesn't happen.
	 *
	 * Note that actual hangs have only been observed on gen7, but for
	 * paranoia do it everywhere.
	 */
	vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;

	return vma->obj;
}
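/*
 * i915_gem_do_execbuffer is the common backend for both execbuffer ioctls:
 * it validates the arguments, looks up and reserves the objects (evicting
 * and relocating as needed), optionally runs the command parser over the
 * batch, and finally hands the workload to legacy_ringbuffer_submission.
 */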
static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
		       struct drm_file *file,
		       struct drm_i915_gem_execbuffer2 *args,
		       struct drm_i915_gem_exec_object2 *exec)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct eb_vmas *eb;
	struct drm_i915_gem_object *batch_obj;
	struct intel_engine_cs *ring;
	struct intel_context *ctx;
	struct i915_address_space *vm;
	const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
	u64 exec_start = args->batch_start_offset;
	u32 flags;
	int ret;
	bool need_relocs;

	if (!i915_gem_check_execbuffer(args))
		return -EINVAL;

	ret = validate_exec_list(exec, args->buffer_count);
	if (ret)
		return ret;

	flags = 0;
	if (args->flags & I915_EXEC_SECURE) {
		if (!file->is_master || !capable(CAP_SYS_ADMIN))
			return -EPERM;

		flags |= I915_DISPATCH_SECURE;
	}
	if (args->flags & I915_EXEC_IS_PINNED)
		flags |= I915_DISPATCH_PINNED;

	if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
		DRM_DEBUG("execbuf with unknown ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
		ring = &dev_priv->ring[RCS];
	else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
		if (HAS_BSD2(dev)) {
			int ring_id;
			ring_id = gen8_dispatch_bsd_ring(dev, file);
			ring = &dev_priv->ring[ring_id];
		} else
			ring = &dev_priv->ring[VCS];
	} else
		ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

	if (!intel_ring_initialized(ring)) {
		DRM_DEBUG("execbuf with invalid ring: %d\n",
			  (int)(args->flags & I915_EXEC_RING_MASK));
		return -EINVAL;
	}

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto pre_mutex_err;

	if (dev_priv->ums.mm_suspended) {
		mutex_unlock(&dev->struct_mutex);
		ret = -EBUSY;
		goto pre_mutex_err;
	}

	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
	if (IS_ERR(ctx)) {
		mutex_unlock(&dev->struct_mutex);
		ret = PTR_ERR(ctx);
		goto pre_mutex_err;
	}

	i915_gem_context_reference(ctx);

	vm = ctx->vm;
	if (!USES_FULL_PPGTT(dev))
		vm = &dev_priv->gtt.base;

	eb = eb_create(args);
	if (eb == NULL) {
		i915_gem_context_unreference(ctx);
		mutex_unlock(&dev->struct_mutex);
		ret = -ENOMEM;
		goto pre_mutex_err;
	}

	/* Look up object handles */
	ret = eb_lookup_vmas(eb, exec, args, vm, file);
	if (ret)
		goto err;

	/* take note of the batch buffer before we might reorder the lists */
	batch_obj = eb_get_batch(eb);

	/* Move the objects en-masse into the GTT, evicting if necessary. */
	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
	ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
	if (ret)
		goto err;

	/* The objects are in their final locations, apply the relocations. */
	if (need_relocs)
		ret = i915_gem_execbuffer_relocate(eb);
	if (ret) {
		if (ret == -EFAULT) {
			ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
								eb, exec);
			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
		}
		if (ret)
			goto err;
	}

	/* Set the pending read domains for the batch buffer to COMMAND */
	if (batch_obj->base.pending_write_domain) {
		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
		ret = -EINVAL;
		goto err;
	}
	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

	if (i915_needs_cmd_parser(ring)) {
		ret = i915_parse_cmds(ring,
				      batch_obj,
				      args->batch_start_offset,
				      file->is_master);
		if (ret)
			goto err;

		/*
		 * XXX: Actually do this when enabling batch copy...
		 *
		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
		 * from MI_BATCH_BUFFER_START commands issued in the
		 * dispatch_execbuffer implementations. We specifically don't
		 * want that set when the command parser is enabled.
		 */
	}

	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
	 * batch" bit. Hence we need to pin secure batches into the global gtt.
	 * hsw should have this fixed, but bdw mucks it up again. */
	if (flags & I915_DISPATCH_SECURE &&
	    !batch_obj->has_global_gtt_mapping) {
		/* When we have multiple VMs, we'll need to make sure that we
		 * allocate space first */
		struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
		BUG_ON(!vma);
		vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
	}

	if (flags & I915_DISPATCH_SECURE)
		exec_start += i915_gem_obj_ggtt_offset(batch_obj);
	else
		exec_start += i915_gem_obj_offset(batch_obj, vm);

	ret = legacy_ringbuffer_submission(dev, file, ring, ctx,
			args, &eb->vmas, batch_obj, exec_start, flags);

err:
	/* the request owns the ref now */
	i915_gem_context_unreference(ctx);
	eb_destroy(eb);

	mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
	/* intel_gpu_busy should also get a ref, so it will free when the device
	 * is really idle. */
	intel_runtime_pm_put(dev_priv);
	return ret;
}
/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_execbuffer *args = data;
	struct drm_i915_gem_execbuffer2 exec2;
	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret, i;

	if (args->buffer_count < 1) {
		DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	/* Copy in the exec list from userland */
	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
	if (exec_list == NULL || exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -ENOMEM;
	}
	ret = copy_from_user(exec_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec_list);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	for (i = 0; i < args->buffer_count; i++) {
		exec2_list[i].handle = exec_list[i].handle;
		exec2_list[i].relocation_count = exec_list[i].relocation_count;
		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
		exec2_list[i].alignment = exec_list[i].alignment;
		exec2_list[i].offset = exec_list[i].offset;
		if (INTEL_INFO(dev)->gen < 4)
			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
		else
			exec2_list[i].flags = 0;
	}

	exec2.buffers_ptr = args->buffers_ptr;
	exec2.buffer_count = args->buffer_count;
	exec2.batch_start_offset = args->batch_start_offset;
	exec2.batch_len = args->batch_len;
	exec2.DR1 = args->DR1;
	exec2.DR4 = args->DR4;
	exec2.num_cliprects = args->num_cliprects;
	exec2.cliprects_ptr = args->cliprects_ptr;
	exec2.flags = I915_EXEC_RENDER;
	i915_execbuffer2_set_context_id(exec2, 0);

	ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
	if (!ret) {
		struct drm_i915_gem_exec_object __user *user_exec_list =
			to_user_ptr(args->buffers_ptr);

		/* Copy the new buffer offsets back to the user's exec list. */
		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user (%d)\n",
					  args->buffer_count, ret);
				break;
			}
		}
	}

	drm_free_large(exec_list);
	drm_free_large(exec2_list);
	return ret;
}
int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_execbuffer2 *args = data;
	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
	int ret;

	if (args->buffer_count < 1 ||
	    args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
		DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}

	if (args->rsvd2 != 0) {
		DRM_DEBUG("dirty rsvd2 field\n");
		return -EINVAL;
	}

	exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
			     GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
	if (exec2_list == NULL)
		exec2_list = drm_malloc_ab(sizeof(*exec2_list),
					   args->buffer_count);
	if (exec2_list == NULL) {
		DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
			  args->buffer_count);
		return -ENOMEM;
	}
	ret = copy_from_user(exec2_list,
			     to_user_ptr(args->buffers_ptr),
			     sizeof(*exec2_list) * args->buffer_count);
	if (ret != 0) {
		DRM_DEBUG("copy %d exec entries failed %d\n",
			  args->buffer_count, ret);
		drm_free_large(exec2_list);
		return -EFAULT;
	}

	ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
	if (!ret) {
		/* Copy the new buffer offsets back to the user's exec list. */
		struct drm_i915_gem_exec_object2 __user *user_exec_list =
				   to_user_ptr(args->buffers_ptr);
		int i;

		for (i = 0; i < args->buffer_count; i++) {
			ret = __copy_to_user(&user_exec_list[i].offset,
					     &exec2_list[i].offset,
					     sizeof(user_exec_list[i].offset));
			if (ret) {
				ret = -EFAULT;
				DRM_DEBUG("failed to copy %d exec entries "
					  "back to user\n",
					  args->buffer_count);
				break;
			}
		}
	}

	drm_free_large(exec2_list);
	return ret;
}