drm/i915: Catch dirt in unused execbuffer fields
drivers/gpu/drm/i915/i915_gem_execbuffer.c
/*
 * Copyright © 2008,2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

#define  __EXEC_OBJECT_HAS_PIN (1<<31)
#define  __EXEC_OBJECT_HAS_FENCE (1<<30)

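/*
 * eb_vmas provides two handle-lookup modes, selected by the sign of 'and':
 * with I915_EXEC_HANDLE_LUT the handles are simply the list indices, so the
 * flat lut[] array is used and 'and' holds -buffer_count for bounds checks;
 * otherwise handles are hashed into the power-of-two 'buckets' array, with
 * 'and' acting as the hash mask (see eb_create() and eb_get_vma() below).
 */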
struct eb_vmas {
        struct list_head vmas;
        int and;
        union {
                struct i915_vma *lut[0];
                struct hlist_head buckets[0];
        };
};

static struct eb_vmas *
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
        struct eb_vmas *eb = NULL;

        if (args->flags & I915_EXEC_HANDLE_LUT) {
                unsigned size = args->buffer_count;
                size *= sizeof(struct i915_vma *);
                size += sizeof(struct eb_vmas);
                eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        }

        if (eb == NULL) {
                unsigned size = args->buffer_count;
                unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
                BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
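                /* Halve the power-of-two bucket count until it is no more
                 * than twice the number of buffers, so small batches don't
                 * waste a page's worth of empty hash buckets. */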
                while (count > 2*size)
                        count >>= 1;
                eb = kzalloc(count*sizeof(struct hlist_head) +
                             sizeof(struct eb_vmas),
                             GFP_TEMPORARY);
                if (eb == NULL)
                        return eb;

                eb->and = count - 1;
        } else
                eb->and = -args->buffer_count;

        INIT_LIST_HEAD(&eb->vmas);
        return eb;
}

static void
eb_reset(struct eb_vmas *eb)
{
        if (eb->and >= 0)
                memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}

static int
eb_lookup_vmas(struct eb_vmas *eb,
               struct drm_i915_gem_exec_object2 *exec,
               const struct drm_i915_gem_execbuffer2 *args,
               struct i915_address_space *vm,
               struct drm_file *file)
{
        struct drm_i915_private *dev_priv = vm->dev->dev_private;
        struct drm_i915_gem_object *obj;
        struct list_head objects;
        int i, ret;

        INIT_LIST_HEAD(&objects);
        spin_lock(&file->table_lock);
        /* Grab a reference to the object and release the lock so we can
         * look up or create the VMA without using GFP_ATOMIC */
        for (i = 0; i < args->buffer_count; i++) {
                obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
                if (obj == NULL) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Invalid object handle %d at index %d\n",
                                   exec[i].handle, i);
                        ret = -ENOENT;
                        goto err;
                }

                if (!list_empty(&obj->obj_exec_link)) {
                        spin_unlock(&file->table_lock);
                        DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
                                   obj, exec[i].handle, i);
                        ret = -EINVAL;
                        goto err;
                }

                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
        spin_unlock(&file->table_lock);

        i = 0;
        while (!list_empty(&objects)) {
                struct i915_vma *vma;
                struct i915_address_space *bind_vm = vm;

                if (exec[i].flags & EXEC_OBJECT_NEEDS_GTT &&
                    USES_FULL_PPGTT(vm->dev)) {
                        ret = -EINVAL;
                        goto err;
                }

                /* With secure dispatch the kernel executes the batch from
                 * the global GTT, so bind the batch buffer (the last object
                 * in the list) into the GGTT VM.
                 */
                if ((args->flags & I915_EXEC_SECURE) &&
                    (i == (args->buffer_count - 1)))
                        bind_vm = &dev_priv->gtt.base;

                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);

                /*
                 * NOTE: We can leak any vmas created here when something fails
                 * later on. But that's no issue since vma_unbind can deal with
                 * vmas which are not actually bound. And since only
                 * lookup_or_create exists as an interface to get at the vma
                 * from the (obj, vm) we don't run the risk of creating
                 * duplicated vmas for the same vm.
                 */
                vma = i915_gem_obj_lookup_or_create_vma(obj, bind_vm);
                if (IS_ERR(vma)) {
                        DRM_DEBUG("Failed to lookup VMA\n");
                        ret = PTR_ERR(vma);
                        goto err;
                }

                /* Transfer ownership from the objects list to the vmas list. */
                list_add_tail(&vma->exec_list, &eb->vmas);
                list_del_init(&obj->obj_exec_link);

                vma->exec_entry = &exec[i];
                if (eb->and < 0) {
                        eb->lut[i] = vma;
                } else {
                        uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
                        vma->exec_handle = handle;
                        hlist_add_head(&vma->exec_node,
                                       &eb->buckets[handle & eb->and]);
                }
                ++i;
        }

        return 0;

err:
        while (!list_empty(&objects)) {
                obj = list_first_entry(&objects,
                                       struct drm_i915_gem_object,
                                       obj_exec_link);
                list_del_init(&obj->obj_exec_link);
                drm_gem_object_unreference(&obj->base);
        }
        /*
         * Objects already transferred to the vmas list will be unreferenced by
         * eb_destroy.
         */

        return ret;
}

static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
{
        if (eb->and < 0) {
                if (handle >= -eb->and)
                        return NULL;
                return eb->lut[handle];
        } else {
                struct hlist_head *head;
                struct hlist_node *node;

                head = &eb->buckets[handle & eb->and];
                hlist_for_each(node, head) {
                        struct i915_vma *vma;

                        vma = hlist_entry(node, struct i915_vma, exec_node);
                        if (vma->exec_handle == handle)
                                return vma;
                }
                return NULL;
        }
}

static void
i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry;
        struct drm_i915_gem_object *obj = vma->obj;

        if (!drm_mm_node_allocated(&vma->node))
                return;

        entry = vma->exec_entry;

        if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
                i915_gem_object_unpin_fence(obj);

        if (entry->flags & __EXEC_OBJECT_HAS_PIN)
                vma->pin_count--;

        entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
}

static void eb_destroy(struct eb_vmas *eb)
{
        while (!list_empty(&eb->vmas)) {
                struct i915_vma *vma;

                vma = list_first_entry(&eb->vmas,
                                       struct i915_vma,
                                       exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }
        kfree(eb);
}

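/* Relocate through a CPU mapping when that is coherent (LLC platforms,
 * snooped/cached objects, or objects already in the CPU write domain) or
 * when a GTT mapping is not an option (object not mappable and fenceable).
 */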
static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
{
        return (HAS_LLC(obj->base.dev) ||
                obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
                !obj->map_and_fenceable ||
                obj->cache_level != I915_CACHE_NONE);
}

static int
relocate_entry_cpu(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        uint32_t page_offset = offset_in_page(reloc->offset);
        char *vaddr;
        int ret;

        ret = i915_gem_object_set_to_cpu_domain(obj, true);
        if (ret)
                return ret;

        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                                reloc->offset >> PAGE_SHIFT));
        *(uint32_t *)(vaddr + page_offset) = reloc->delta;

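        /* Gen8+ uses 64-bit addresses: also clear the upper dword of the
         * relocation, remapping first if it spills onto the next page. */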
        if (INTEL_INFO(dev)->gen >= 8) {
                page_offset = offset_in_page(page_offset + sizeof(uint32_t));

                if (page_offset == 0) {
                        kunmap_atomic(vaddr);
                        vaddr = kmap_atomic(i915_gem_object_get_page(obj,
                            (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
                }

                *(uint32_t *)(vaddr + page_offset) = 0;
        }

        kunmap_atomic(vaddr);

        return 0;
}

static int
relocate_entry_gtt(struct drm_i915_gem_object *obj,
                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        uint32_t __iomem *reloc_entry;
        void __iomem *reloc_page;
        int ret;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                return ret;

        ret = i915_gem_object_put_fence(obj);
        if (ret)
                return ret;

        /* Map the page containing the relocation we're going to perform.  */
        reloc->offset += i915_gem_obj_ggtt_offset(obj);
        reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
                        reloc->offset & PAGE_MASK);
        reloc_entry = (uint32_t __iomem *)
                (reloc_page + offset_in_page(reloc->offset));
        iowrite32(reloc->delta, reloc_entry);

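        /* As in relocate_entry_cpu(): clear the upper dword on gen8+,
         * remapping the iomap if the second dword crosses a page. */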
        if (INTEL_INFO(dev)->gen >= 8) {
                reloc_entry += 1;

                if (offset_in_page(reloc->offset + sizeof(uint32_t)) == 0) {
                        io_mapping_unmap_atomic(reloc_page);
                        reloc_page = io_mapping_map_atomic_wc(
                                        dev_priv->gtt.mappable,
                                        reloc->offset + sizeof(uint32_t));
                        reloc_entry = reloc_page;
                }

                iowrite32(0, reloc_entry);
        }

        io_mapping_unmap_atomic(reloc_page);

        return 0;
}

static int
i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                                   struct eb_vmas *eb,
                                   struct drm_i915_gem_relocation_entry *reloc)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_gem_object *target_obj;
        struct drm_i915_gem_object *target_i915_obj;
        struct i915_vma *target_vma;
        uint32_t target_offset;
        int ret;

        /* we already hold a reference to all valid objects */
        target_vma = eb_get_vma(eb, reloc->target_handle);
        if (unlikely(target_vma == NULL))
                return -ENOENT;
        target_i915_obj = target_vma->obj;
        target_obj = &target_vma->obj->base;

        target_offset = target_vma->node.start;

        /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and
         * pipe_control writes because the gpu doesn't properly redirect them
         * through the ppgtt for non-secure batchbuffers. */
        if (unlikely(IS_GEN6(dev) &&
            reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
            !target_i915_obj->has_global_gtt_mapping)) {
                struct i915_vma *vma =
                        list_first_entry(&target_i915_obj->vma_list,
                                         typeof(*vma), vma_link);
                vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
        }

        /* Validate that the target is in a valid r/w GPU domain */
        if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
                DRM_DEBUG("reloc with multiple write domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return -EINVAL;
        }
        if (unlikely((reloc->write_domain | reloc->read_domains)
                     & ~I915_GEM_GPU_DOMAINS)) {
                DRM_DEBUG("reloc with read/write non-GPU domains: "
                          "obj %p target %d offset %d "
                          "read %08x write %08x",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          reloc->read_domains,
                          reloc->write_domain);
                return -EINVAL;
        }

        target_obj->pending_read_domains |= reloc->read_domains;
        target_obj->pending_write_domain |= reloc->write_domain;

        /* If the relocation already has the right value in it, no
         * more work needs to be done.
         */
        if (target_offset == reloc->presumed_offset)
                return 0;

        /* Check that the relocation address is valid... */
        if (unlikely(reloc->offset >
                obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
                DRM_DEBUG("Relocation beyond object bounds: "
                          "obj %p target %d offset %d size %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset,
                          (int) obj->base.size);
                return -EINVAL;
        }
        if (unlikely(reloc->offset & 3)) {
                DRM_DEBUG("Relocation not 4-byte aligned: "
                          "obj %p target %d offset %d.\n",
                          obj, reloc->target_handle,
                          (int) reloc->offset);
                return -EINVAL;
        }

        /* We can't wait for rendering with pagefaults disabled */
        if (obj->active && in_atomic())
                return -EFAULT;

        reloc->delta += target_offset;
        if (use_cpu_reloc(obj))
                ret = relocate_entry_cpu(obj, reloc);
        else
                ret = relocate_entry_gtt(obj, reloc);

        if (ret)
                return ret;

        /* and update the user's relocation entry */
        reloc->presumed_offset = target_offset;

        return 0;
}

static int
i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
                                 struct eb_vmas *eb)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
        struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
        struct drm_i915_gem_relocation_entry __user *user_relocs;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int remain, ret;

        user_relocs = to_user_ptr(entry->relocs_ptr);

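        /* Copy the relocations in chunks of a small on-stack buffer. The
         * caller has disabled pagefaults, so the inatomic user copies fail
         * with -EFAULT rather than sleep, pushing us to the slow path. */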
        remain = entry->relocation_count;
        while (remain) {
                struct drm_i915_gem_relocation_entry *r = stack_reloc;
                int count = remain;
                if (count > ARRAY_SIZE(stack_reloc))
                        count = ARRAY_SIZE(stack_reloc);
                remain -= count;

                if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
                        return -EFAULT;

                do {
                        u64 offset = r->presumed_offset;

                        ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
                        if (ret)
                                return ret;

                        if (r->presumed_offset != offset &&
                            __copy_to_user_inatomic(&user_relocs->presumed_offset,
                                                    &r->presumed_offset,
                                                    sizeof(r->presumed_offset))) {
                                return -EFAULT;
                        }

                        user_relocs++;
                        r++;
                } while (--count);
        }

        return 0;
#undef N_RELOC
}

static int
i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
                                      struct eb_vmas *eb,
                                      struct drm_i915_gem_relocation_entry *relocs)
{
        const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        int i, ret;

        for (i = 0; i < entry->relocation_count; i++) {
                ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int
i915_gem_execbuffer_relocate(struct eb_vmas *eb)
{
        struct i915_vma *vma;
        int ret = 0;

        /* This is the fast path and we cannot handle a pagefault whilst
         * holding the struct mutex lest the user pass in the relocations
         * contained within a mmapped bo. In such a case the page fault
         * handler would call i915_gem_fault() and we would try to acquire
         * the struct mutex again. Obviously this is bad and so lockdep
         * complains vehemently.
         */
        pagefault_disable();
        list_for_each_entry(vma, &eb->vmas, exec_list) {
                ret = i915_gem_execbuffer_relocate_vma(vma, eb);
                if (ret)
                        break;
        }
        pagefault_enable();

        return ret;
}

static int
need_reloc_mappable(struct i915_vma *vma)
{
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        return entry->relocation_count && !use_cpu_reloc(vma->obj) &&
                i915_is_ggtt(vma->vm);
}

static int
i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
                                struct intel_ring_buffer *ring,
                                bool *need_reloc)
{
        struct drm_i915_gem_object *obj = vma->obj;
        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        bool need_fence;
        unsigned flags;
        int ret;

        flags = 0;

        need_fence =
                has_fenced_gpu_access &&
                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                obj->tiling_mode != I915_TILING_NONE;
        if (need_fence || need_reloc_mappable(vma))
                flags |= PIN_MAPPABLE;

        if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
                flags |= PIN_GLOBAL;

        ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
        if (ret)
                return ret;

        entry->flags |= __EXEC_OBJECT_HAS_PIN;

        if (has_fenced_gpu_access) {
                if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
                        ret = i915_gem_object_get_fence(obj);
                        if (ret)
                                return ret;

                        if (i915_gem_object_pin_fence(obj))
                                entry->flags |= __EXEC_OBJECT_HAS_FENCE;

                        obj->pending_fenced_gpu_access = true;
                }
        }

        if (entry->offset != vma->node.start) {
                entry->offset = vma->node.start;
                *need_reloc = true;
        }

        if (entry->flags & EXEC_OBJECT_WRITE) {
                obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
                obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
        }

        return 0;
}

static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
                            struct list_head *vmas,
                            bool *need_relocs)
{
        struct drm_i915_gem_object *obj;
        struct i915_vma *vma;
        struct i915_address_space *vm;
        struct list_head ordered_vmas;
        bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
        int retry;

        if (list_empty(vmas))
                return 0;

        vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;

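        /* Sort the list so that objects needing a mappable or fenceable
         * placement come first; they are the hardest to fit, so give them
         * first pick of the aperture. */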
        INIT_LIST_HEAD(&ordered_vmas);
        while (!list_empty(vmas)) {
                struct drm_i915_gem_exec_object2 *entry;
                bool need_fence, need_mappable;

                vma = list_first_entry(vmas, struct i915_vma, exec_list);
                obj = vma->obj;
                entry = vma->exec_entry;

                need_fence =
                        has_fenced_gpu_access &&
                        entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                        obj->tiling_mode != I915_TILING_NONE;
                need_mappable = need_fence || need_reloc_mappable(vma);

                if (need_mappable)
                        list_move(&vma->exec_list, &ordered_vmas);
                else
                        list_move_tail(&vma->exec_list, &ordered_vmas);

                obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
                obj->base.pending_write_domain = 0;
                obj->pending_fenced_gpu_access = false;
        }
        list_splice(&ordered_vmas, vmas);

        /* Attempt to pin all of the buffers into the GTT.
         * This is done in 3 phases:
         *
         * 1a. Unbind all objects that do not match the GTT constraints for
         *     the execbuffer (fenceable, mappable, alignment etc).
         * 1b. Increment pin count for already bound objects.
         * 2.  Bind new objects.
         * 3.  Decrement pin count.
         *
         * This avoids unnecessary unbinding of later objects in order to make
         * room for the earlier objects *unless* we need to defragment.
         */
        retry = 0;
        do {
                int ret = 0;

                /* Unbind any ill-fitting objects or pin. */
                list_for_each_entry(vma, vmas, exec_list) {
                        struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
                        bool need_fence, need_mappable;

                        obj = vma->obj;

                        if (!drm_mm_node_allocated(&vma->node))
                                continue;

                        need_fence =
                                has_fenced_gpu_access &&
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
                        need_mappable = need_fence || need_reloc_mappable(vma);

                        WARN_ON((need_mappable || need_fence) &&
                               !i915_is_ggtt(vma->vm));

                        if ((entry->alignment &&
                             vma->node.start & (entry->alignment - 1)) ||
                            (need_mappable && !obj->map_and_fenceable))
                                ret = i915_vma_unbind(vma);
                        else
                                ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

                /* Bind fresh objects */
                list_for_each_entry(vma, vmas, exec_list) {
                        if (drm_mm_node_allocated(&vma->node))
                                continue;

                        ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
                        if (ret)
                                goto err;
                }

err:
                if (ret != -ENOSPC || retry++)
                        return ret;

                /* Decrement pin count for bound objects */
                list_for_each_entry(vma, vmas, exec_list)
                        i915_gem_execbuffer_unreserve_vma(vma);

                ret = i915_gem_evict_vm(vm, true);
                if (ret)
                        return ret;
        } while (1);
}

static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_i915_gem_execbuffer2 *args,
                                  struct drm_file *file,
                                  struct intel_ring_buffer *ring,
                                  struct eb_vmas *eb,
                                  struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_gem_relocation_entry *reloc;
        struct i915_address_space *vm;
        struct i915_vma *vma;
        bool need_relocs;
        int *reloc_offset;
        int i, total, ret;
        unsigned count = args->buffer_count;

        if (WARN_ON(list_empty(&eb->vmas)))
                return 0;

        vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;

        /* We may process another execbuffer during the unlock... */
        while (!list_empty(&eb->vmas)) {
                vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
                list_del_init(&vma->exec_list);
                i915_gem_execbuffer_unreserve_vma(vma);
                drm_gem_object_unreference(&vma->obj->base);
        }

        mutex_unlock(&dev->struct_mutex);

        total = 0;
        for (i = 0; i < count; i++)
                total += exec[i].relocation_count;

        reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
        reloc = drm_malloc_ab(total, sizeof(*reloc));
        if (reloc == NULL || reloc_offset == NULL) {
                drm_free_large(reloc);
                drm_free_large(reloc_offset);
                mutex_lock(&dev->struct_mutex);
                return -ENOMEM;
        }

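        /* Copy all relocation lists into one flat array; reloc_offset[i]
         * remembers where buffer i's entries start within it. */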
        total = 0;
        for (i = 0; i < count; i++) {
                struct drm_i915_gem_relocation_entry __user *user_relocs;
                u64 invalid_offset = (u64)-1;
                int j;

                user_relocs = to_user_ptr(exec[i].relocs_ptr);

                if (copy_from_user(reloc+total, user_relocs,
                                   exec[i].relocation_count * sizeof(*reloc))) {
                        ret = -EFAULT;
                        mutex_lock(&dev->struct_mutex);
                        goto err;
                }

                /* As we do not update the known relocation offsets after
                 * relocating (due to the complexities in lock handling),
                 * we need to mark them as invalid now so that we force the
                 * relocation processing next time. Just in case the target
                 * object is evicted and then rebound into its old
                 * presumed_offset before the next execbuffer - if that
                 * happened we would make the mistake of assuming that the
                 * relocations were valid.
                 */
                for (j = 0; j < exec[i].relocation_count; j++) {
                        if (copy_to_user(&user_relocs[j].presumed_offset,
                                         &invalid_offset,
                                         sizeof(invalid_offset))) {
                                ret = -EFAULT;
                                mutex_lock(&dev->struct_mutex);
                                goto err;
                        }
                }

                reloc_offset[i] = total;
                total += exec[i].relocation_count;
        }

        ret = i915_mutex_lock_interruptible(dev);
        if (ret) {
                mutex_lock(&dev->struct_mutex);
                goto err;
        }

        /* reacquire the objects */
        eb_reset(eb);
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        list_for_each_entry(vma, &eb->vmas, exec_list) {
                int offset = vma->exec_entry - exec;
                ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
                                                            reloc + reloc_offset[offset]);
                if (ret)
                        goto err;
        }

        /* Leave the user relocations as they are, this is the painfully slow
         * path, and we want to avoid the complication of dropping the lock
         * whilst having buffers reserved in the aperture and so causing
         * spurious ENOSPC for random operations.
         */

err:
        drm_free_large(reloc);
        drm_free_large(reloc_offset);
        return ret;
}

static int
i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
                                struct list_head *vmas)
{
        struct i915_vma *vma;
        uint32_t flush_domains = 0;
        bool flush_chipset = false;
        int ret;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                ret = i915_gem_object_sync(obj, ring);
                if (ret)
                        return ret;

                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
                        flush_chipset |= i915_gem_clflush_object(obj, false);

                flush_domains |= obj->base.write_domain;
        }

        if (flush_chipset)
                i915_gem_chipset_flush(ring->dev);

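        /* Make any pending write-combined GTT writes globally visible
         * before the GPU starts reading. */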
        if (flush_domains & I915_GEM_DOMAIN_GTT)
                wmb();

        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
         */
        return intel_ring_invalidate_all_caches(ring);
}

static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
        if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
                return false;

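        /* Both the batch start and length must be 8-byte aligned. */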
        return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}

static int
validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
                   int count)
{
        int i;
        unsigned relocs_total = 0;
        unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);

        for (i = 0; i < count; i++) {
                char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
                int length; /* limited by fault_in_pages_readable() */

                if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
                        return -EINVAL;

                /* First check for malicious input causing overflow in
                 * the worst case where we need to allocate the entire
                 * relocation tree as a single array.
                 */
                if (exec[i].relocation_count > relocs_max - relocs_total)
                        return -EINVAL;
                relocs_total += exec[i].relocation_count;

                length = exec[i].relocation_count *
                        sizeof(struct drm_i915_gem_relocation_entry);
                /*
                 * We must check that the entire relocation array is safe
                 * to read, but since we may need to update the presumed
                 * offsets during execution, check for full write access.
                 */
                if (!access_ok(VERIFY_WRITE, ptr, length))
                        return -EFAULT;

                if (likely(!i915.prefault_disable)) {
                        if (fault_in_multipages_readable(ptr, length))
                                return -EFAULT;
                }
        }

        return 0;
}

static struct i915_hw_context *
i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
                          struct intel_ring_buffer *ring, const u32 ctx_id)
{
        struct i915_hw_context *ctx = NULL;
        struct i915_ctx_hang_stats *hs;

        if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_ID)
                return ERR_PTR(-EINVAL);

        ctx = i915_gem_context_get(file->driver_priv, ctx_id);
        if (IS_ERR(ctx))
                return ctx;

        hs = &ctx->hang_stats;
        if (hs->banned) {
                DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
                return ERR_PTR(-EIO);
        }

        return ctx;
}

static void
i915_gem_execbuffer_move_to_active(struct list_head *vmas,
                                   struct intel_ring_buffer *ring)
{
        struct i915_vma *vma;

        list_for_each_entry(vma, vmas, exec_list) {
                struct drm_i915_gem_object *obj = vma->obj;
                u32 old_read = obj->base.read_domains;
                u32 old_write = obj->base.write_domain;

                obj->base.write_domain = obj->base.pending_write_domain;
                if (obj->base.write_domain == 0)
                        obj->base.pending_read_domains |= obj->base.read_domains;
                obj->base.read_domains = obj->base.pending_read_domains;
                obj->fenced_gpu_access = obj->pending_fenced_gpu_access;

                i915_vma_move_to_active(vma, ring);
                if (obj->base.write_domain) {
                        obj->dirty = 1;
                        obj->last_write_seqno = intel_ring_get_seqno(ring);
                        /* check for potential scanout */
                        if (i915_gem_obj_ggtt_bound(obj) &&
                            i915_gem_obj_to_ggtt(obj)->pin_count)
                                intel_mark_fb_busy(obj, ring);
                }

                trace_i915_gem_object_change_domain(obj, old_read, old_write);
        }
}

static void
i915_gem_execbuffer_retire_commands(struct drm_device *dev,
                                    struct drm_file *file,
                                    struct intel_ring_buffer *ring,
                                    struct drm_i915_gem_object *obj)
{
        /* Unconditionally force add_request to emit a full flush. */
        ring->gpu_caches_dirty = true;

        /* Add a breadcrumb for the completion of the batch buffer */
        (void)__i915_add_request(ring, file, obj, NULL);
}

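/* Zero the four GEN7_SO_WRITE_OFFSET (streamout) registers before the batch
 * runs; userspace requests this via I915_EXEC_GEN7_SOL_RESET since it cannot
 * reset these registers from within a batch on gen7. */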
static int
i915_reset_gen7_sol_offsets(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret, i;

        if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }

        ret = intel_ring_begin(ring, 4 * 3);
        if (ret)
                return ret;

        for (i = 0; i < 4; i++) {
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
                intel_ring_emit(ring, 0);
        }

        intel_ring_advance(ring);

        return 0;
}

static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_file *file,
                       struct drm_i915_gem_execbuffer2 *args,
                       struct drm_i915_gem_exec_object2 *exec)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct eb_vmas *eb;
        struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct intel_ring_buffer *ring;
        struct i915_hw_context *ctx;
        struct i915_address_space *vm;
        const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
        u32 exec_start = args->batch_start_offset, exec_len;
        u32 mask, flags;
        int ret, mode, i;
        bool need_relocs;

        if (!i915_gem_check_execbuffer(args))
                return -EINVAL;

        ret = validate_exec_list(exec, args->buffer_count);
        if (ret)
                return ret;

        flags = 0;
        if (args->flags & I915_EXEC_SECURE) {
                if (!file->is_master || !capable(CAP_SYS_ADMIN))
                        return -EPERM;

                flags |= I915_DISPATCH_SECURE;
        }
        if (args->flags & I915_EXEC_IS_PINNED)
                flags |= I915_DISPATCH_PINNED;

        if ((args->flags & I915_EXEC_RING_MASK) > I915_NUM_RINGS) {
                DRM_DEBUG("execbuf with unknown ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
                ring = &dev_priv->ring[RCS];
        else
                ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];

        if (!intel_ring_initialized(ring)) {
                DRM_DEBUG("execbuf with invalid ring: %d\n",
                          (int)(args->flags & I915_EXEC_RING_MASK));
                return -EINVAL;
        }

        mode = args->flags & I915_EXEC_CONSTANTS_MASK;
        mask = I915_EXEC_CONSTANTS_MASK;
        switch (mode) {
        case I915_EXEC_CONSTANTS_REL_GENERAL:
        case I915_EXEC_CONSTANTS_ABSOLUTE:
        case I915_EXEC_CONSTANTS_REL_SURFACE:
                if (mode != 0 && ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
                        return -EINVAL;
                }

                if (mode != dev_priv->relative_constants_mode) {
                        if (INTEL_INFO(dev)->gen < 4) {
                                DRM_DEBUG("no rel constants on pre-gen4\n");
                                return -EINVAL;
                        }

                        if (INTEL_INFO(dev)->gen > 5 &&
                            mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
                                DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
                                return -EINVAL;
                        }

                        /* The HW changed the meaning of this bit on gen6 */
                        if (INTEL_INFO(dev)->gen >= 6)
                                mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
                }
                break;
        default:
                DRM_DEBUG("execbuf with unknown constants: %d\n", mode);
                return -EINVAL;
        }

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        if (args->num_cliprects != 0) {
                if (ring != &dev_priv->ring[RCS]) {
                        DRM_DEBUG("clip rectangles are only valid with the render ring\n");
                        return -EINVAL;
                }

                if (INTEL_INFO(dev)->gen >= 5) {
                        DRM_DEBUG("clip rectangles are only valid on pre-gen5\n");
                        return -EINVAL;
                }

                if (args->num_cliprects > UINT_MAX / sizeof(*cliprects)) {
                        DRM_DEBUG("execbuf with %u cliprects\n",
                                  args->num_cliprects);
                        return -EINVAL;
                }

                cliprects = kcalloc(args->num_cliprects,
                                    sizeof(*cliprects),
                                    GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto pre_mutex_err;
                }

                if (copy_from_user(cliprects,
                                   to_user_ptr(args->cliprects_ptr),
                                   sizeof(*cliprects)*args->num_cliprects)) {
                        ret = -EFAULT;
                        goto pre_mutex_err;
                }
        } else {
                if (args->DR1 || args->DR4 || args->cliprects_ptr) {
                        DRM_DEBUG("0 cliprects but dirt in cliprects fields\n");
                        return -EINVAL;
                }
        }

        intel_runtime_pm_get(dev_priv);

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                goto pre_mutex_err;

        if (dev_priv->ums.mm_suspended) {
                mutex_unlock(&dev->struct_mutex);
                ret = -EBUSY;
                goto pre_mutex_err;
        }

        ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
        if (IS_ERR(ctx)) {
                mutex_unlock(&dev->struct_mutex);
                ret = PTR_ERR(ctx);
                goto pre_mutex_err;
        }

        i915_gem_context_reference(ctx);

        vm = ctx->vm;
        if (!USES_FULL_PPGTT(dev))
                vm = &dev_priv->gtt.base;

        eb = eb_create(args);
        if (eb == NULL) {
                i915_gem_context_unreference(ctx);
                mutex_unlock(&dev->struct_mutex);
                ret = -ENOMEM;
                goto pre_mutex_err;
        }

        /* Look up object handles */
        ret = eb_lookup_vmas(eb, exec, args, vm, file);
        if (ret)
                goto err;

        /* take note of the batch buffer before we might reorder the lists */
        batch_obj = list_entry(eb->vmas.prev, struct i915_vma, exec_list)->obj;

        /* Move the objects en-masse into the GTT, evicting if necessary. */
        need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
        ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, &need_relocs);
        if (ret)
                goto err;

        /* The objects are in their final locations, apply the relocations. */
        if (need_relocs)
                ret = i915_gem_execbuffer_relocate(eb);
        if (ret) {
                if (ret == -EFAULT) {
                        ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
                                                                eb, exec);
                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
                }
                if (ret)
                        goto err;
        }

        /* Set the pending read domains for the batch buffer to COMMAND */
        if (batch_obj->base.pending_write_domain) {
                DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
        batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;

        if (i915_needs_cmd_parser(ring)) {
                ret = i915_parse_cmds(ring,
                                      batch_obj,
                                      args->batch_start_offset,
                                      file->is_master);
                if (ret)
                        goto err;

                /*
                 * XXX: Actually do this when enabling batch copy...
                 *
                 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
                 * from MI_BATCH_BUFFER_START commands issued in the
                 * dispatch_execbuffer implementations. We specifically don't
                 * want that set when the command parser is enabled.
                 */
        }

        /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
         * batch" bit. Hence we need to pin secure batches into the global gtt.
         * hsw should have this fixed, but bdw mucks it up again. */
        if (flags & I915_DISPATCH_SECURE &&
            !batch_obj->has_global_gtt_mapping) {
                /* When we have multiple VMs, we'll need to make sure that we
                 * allocate space first */
                struct i915_vma *vma = i915_gem_obj_to_ggtt(batch_obj);
                BUG_ON(!vma);
                vma->bind_vma(vma, batch_obj->cache_level, GLOBAL_BIND);
        }

        if (flags & I915_DISPATCH_SECURE)
                exec_start += i915_gem_obj_ggtt_offset(batch_obj);
        else
                exec_start += i915_gem_obj_offset(batch_obj, vm);

        ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->vmas);
        if (ret)
                goto err;

        ret = i915_switch_context(ring, ctx);
        if (ret)
                goto err;

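        /* INSTPM is a masked register: the high 16 bits select which of the
         * low 16 bits the write actually updates, hence mask << 16 | mode. */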
        if (ring == &dev_priv->ring[RCS] &&
            mode != dev_priv->relative_constants_mode) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        goto err;

                intel_ring_emit(ring, MI_NOOP);
                intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
                intel_ring_emit(ring, INSTPM);
                intel_ring_emit(ring, mask << 16 | mode);
                intel_ring_advance(ring);

                dev_priv->relative_constants_mode = mode;
        }

        if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
                ret = i915_reset_gen7_sol_offsets(dev, ring);
                if (ret)
                        goto err;
        }

        exec_len = args->batch_len;
        if (cliprects) {
                for (i = 0; i < args->num_cliprects; i++) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            args->DR1, args->DR4);
                        if (ret)
                                goto err;

                        ret = ring->dispatch_execbuffer(ring,
                                                        exec_start, exec_len,
                                                        flags);
                        if (ret)
                                goto err;
                }
        } else {
                ret = ring->dispatch_execbuffer(ring,
                                                exec_start, exec_len,
                                                flags);
                if (ret)
                        goto err;
        }

        trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);

        i915_gem_execbuffer_move_to_active(&eb->vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);

err:
        /* the request owns the ref now */
        i915_gem_context_unreference(ctx);
        eb_destroy(eb);

        mutex_unlock(&dev->struct_mutex);

pre_mutex_err:
        kfree(cliprects);

        /* intel_gpu_busy should also get a ref, so it will free when the device
         * is really idle. */
        intel_runtime_pm_put(dev_priv);
        return ret;
}

/*
 * Legacy execbuffer just creates an exec2 list from the original exec object
 * list array and passes it to the real function.
 */
int
i915_gem_execbuffer(struct drm_device *dev, void *data,
                    struct drm_file *file)
{
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
        struct drm_i915_gem_exec_object *exec_list = NULL;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret, i;

        if (args->buffer_count < 1) {
                DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

        /* Copy in the exec list from userland */
        exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
        exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
        if (exec_list == NULL || exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -ENOMEM;
        }
        ret = copy_from_user(exec_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec_list);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        for (i = 0; i < args->buffer_count; i++) {
                exec2_list[i].handle = exec_list[i].handle;
                exec2_list[i].relocation_count = exec_list[i].relocation_count;
                exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
                exec2_list[i].alignment = exec_list[i].alignment;
                exec2_list[i].offset = exec_list[i].offset;
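                /* Pre-gen4 hardware needs a fence register for GPU access
                 * to tiled buffers, and the old ABI has no flags field to
                 * say so, so conservatively request one for every object. */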
                if (INTEL_INFO(dev)->gen < 4)
                        exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
                else
                        exec2_list[i].flags = 0;
        }

        exec2.buffers_ptr = args->buffers_ptr;
        exec2.buffer_count = args->buffer_count;
        exec2.batch_start_offset = args->batch_start_offset;
        exec2.batch_len = args->batch_len;
        exec2.DR1 = args->DR1;
        exec2.DR4 = args->DR4;
        exec2.num_cliprects = args->num_cliprects;
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;
        i915_execbuffer2_set_context_id(exec2, 0);

        ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
                        exec_list[i].offset = exec2_list[i].offset;
                /* ... and back out to userspace */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec_list,
                                   sizeof(*exec_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec_list);
        drm_free_large(exec2_list);
        return ret;
}

int
i915_gem_execbuffer2(struct drm_device *dev, void *data,
                     struct drm_file *file)
{
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
        int ret;

        if (args->buffer_count < 1 ||
            args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
                DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
                return -EINVAL;
        }

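        /* Reject dirt in the unused rsvd2 field so it can be reused for
         * future ABI extensions without breaking old userspace. */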
        if (args->rsvd2 != 0) {
                DRM_DEBUG("dirty rsvd2 field\n");
                return -EINVAL;
        }

        exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
                             GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (exec2_list == NULL)
                exec2_list = drm_malloc_ab(sizeof(*exec2_list),
                                           args->buffer_count);
        if (exec2_list == NULL) {
                DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
                          args->buffer_count);
                return -ENOMEM;
        }
        ret = copy_from_user(exec2_list,
                             to_user_ptr(args->buffers_ptr),
                             sizeof(*exec2_list) * args->buffer_count);
        if (ret != 0) {
                DRM_DEBUG("copy %d exec entries failed %d\n",
                          args->buffer_count, ret);
                drm_free_large(exec2_list);
                return -EFAULT;
        }

        ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user(to_user_ptr(args->buffers_ptr),
                                   exec2_list,
                                   sizeof(*exec2_list) * args->buffer_count);
                if (ret) {
                        ret = -EFAULT;
                        DRM_DEBUG("failed to copy %d exec entries "
                                  "back to user (%d)\n",
                                  args->buffer_count, ret);
                }
        }

        drm_free_large(exec2_list);
        return ret;
}