drm/i915: No-Op enter/leave vt gem ioctl
[firefly-linux-kernel-4.4.55.git] drivers/gpu/drm/i915/i915_gem.c
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/oom.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
						   bool force);
static __must_check int
i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
			       bool readonly);
static void
i915_gem_object_retire(struct drm_i915_gem_object *obj);

static void i915_gem_write_fence(struct drm_device *dev, int reg,
				 struct drm_i915_gem_object *obj);
static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
					 struct drm_i915_fence_reg *fence,
					 bool enable);

static unsigned long i915_gem_shrinker_count(struct shrinker *shrinker,
					     struct shrink_control *sc);
static unsigned long i915_gem_shrinker_scan(struct shrinker *shrinker,
					    struct shrink_control *sc);
static int i915_gem_shrinker_oom(struct notifier_block *nb,
				 unsigned long event,
				 void *ptr);
static unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv);

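/* Reads are coherent with the CPU cache if the platform shares its
 * last-level cache with the GPU, or if the object is snooped (i.e.
 * any cache level other than I915_CACHE_NONE).
 */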
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

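/* CPU writes need a clflush if the object is not cache coherent, or
 * if it may be scanned out: the display engine does not snoop the
 * CPU caches, so even otherwise-coherent objects must be flushed
 * while pinned for display.
 */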
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
{
	if (obj->tiling_mode)
		i915_gem_release_mmap(obj);

	/* As we do not have an associated fence register, we will force
	 * a tiling change if we ever need to acquire one.
	 */
	obj->fence_dirty = false;
	obj->fence_reg = I915_FENCE_REG_NONE;
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     size_t size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

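/* Block until any in-progress GPU reset has completed, returning
 * immediately if the GPU is terminally wedged (in which case the
 * reset handler will never signal completion).
 */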
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
		   i915_terminally_wedged(error))
	if (EXIT_COND)
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       EXIT_COND,
					       10*HZ);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	}
#undef EXIT_COND

	return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	WARN_ON(i915_verify_lists(dev));
	return 0;
}

static inline bool
i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound_any(obj) && !obj->active;
}

int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_init *args = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (args->gtt_start >= args->gtt_end ||
	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
		return -EINVAL;

	/* GEM with user mode setting was never supported on ilk and later. */
	if (INTEL_INFO(dev)->gen >= 5)
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);
	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
				  args->gtt_end);
	dev_priv->gtt.mappable_end = args->gtt_end;
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_get_aperture *args = data;
	struct drm_i915_gem_object *obj;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
		if (i915_gem_obj_is_pinned(obj))
			pinned += i915_gem_obj_ggtt_size(obj);
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = dev_priv->gtt.base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

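/* Snapshot the current shmem backing store into the contiguous
 * physical allocation, then publish a single-entry sg_table pointing
 * at the phys handle's bus address.
 */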
static int
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return PTR_ERR(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		page_cache_release(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(obj->base.dev);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->pages = st;
	obj->has_dma_mapping = true;
	return 0;
}

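/* Write any dirty contents of the physical allocation back to the
 * shmem pages (unless the object was marked DONTNEED) and release
 * the single-entry sg_table built by the phys get_pages.
 */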
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
{
	int ret;

	BUG_ON(obj->madv == __I915_MADV_PURGED);

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret) {
		/* In the event of a disaster, abandon all caches and
		 * hope for the best.
		 */
		WARN_ON(ret != -EIO);
		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	if (obj->madv == I915_MADV_DONTNEED)
		obj->dirty = 0;

	if (obj->dirty) {
		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			page_cache_release(page);
			vaddr += PAGE_SIZE;
		}
		obj->dirty = 0;
	}

	sg_free_table(obj->pages);
	kfree(obj->pages);

	obj->has_dma_mapping = false;
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

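/* Unbind the object from every address space and drop its backing
 * pages; a temporary reference keeps the object alive while its
 * VMAs are unbound.
 */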
static int
drop_pages(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma, *next;
	int ret;

	drm_gem_object_reference(&obj->base);
	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
		if (i915_vma_unbind(vma))
			break;

	ret = i915_gem_object_put_pages(obj);
	drm_gem_object_unreference(&obj->base);

	return ret;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align - 1))
			return -EBUSY;

		return 0;
	}

	if (obj->madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = drop_pages(obj);
	if (ret)
		return ret;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_get_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file_priv)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	ret = i915_gem_object_wait_rendering(obj, false);
	if (ret)
		return ret;

	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten)
			return -EFAULT;
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(dev);
	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
	kmem_cache_free(dev_priv->slab, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_alloc_object(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&obj->base);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    int *needs_clflush)
{
	int ret;

	*needs_clflush = 0;

	if (!obj->base.filp)
		return -EINVAL;

	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
		/* If we're not in the cpu read domain, set ourself into the gtt
		 * read domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will dirty the data
		 * anyway again before the next pread happens. */
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
		ret = i915_gem_object_wait_rendering(obj, true);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	return ret;
}

/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading the target if
 * needs_clflush is set. */
static int
shmem_pread_fast(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_to_user_inatomic(user_data,
				      vaddr + shmem_page_offset,
				      page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data,
					      vaddr, shmem_page_offset,
					      page_length);
	else
		ret = __copy_to_user(user_data,
				     vaddr + shmem_page_offset,
				     page_length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_device *dev,
		     struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args,
		     struct drm_file *file)
{
	char __user *user_data;
	ssize_t remain;
	loff_t offset;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int prefaulted = 0;
	int needs_clflush = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	if (ret)
		return ret;

	offset = args->offset;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);
		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pread_fast(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);
		if (ret == 0)
			goto next_page;

		mutex_unlock(&dev->struct_mutex);

		if (likely(!i915.prefault_disable) && !prefaulted) {
			ret = fault_in_multipages_writeable(user_data, remain);
			/* Userspace is tricking us, but we've already clobbered
			 * its pages with the prefault and promised to write the
			 * data up to the first fault. Hence ignore any errors
			 * and just continue. */
			(void)ret;
			prefaulted = 1;
		}

		ret = shmem_pread_slow(page, shmem_page_offset, page_length,
				       user_data, page_do_bit17_swizzling,
				       needs_clflush);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_shmem_pread(dev, obj, args, file);

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline int
fast_user_write(struct io_mapping *mapping,
		loff_t page_base, int page_offset,
		char __user *user_data,
		int length)
{
	void __iomem *vaddr_atomic;
	void *vaddr;
	unsigned long unwritten;

	vaddr_atomic = io_mapping_map_atomic_wc(mapping, page_base);
	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force*)vaddr_atomic + page_offset;
	unwritten = __copy_from_user_inatomic_nocache(vaddr,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr_atomic);
	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_device *dev,
			 struct drm_i915_gem_object *obj,
			 struct drm_i915_gem_pwrite *args,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	ssize_t remain;
	loff_t offset, page_base;
	char __user *user_data;
	int page_offset, page_length, ret;

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
	if (ret)
		goto out;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_put_fence(obj);
	if (ret)
		goto out_unpin;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	offset = i915_gem_obj_ggtt_offset(obj) + args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 */
		if (fast_user_write(dev_priv->gtt.mappable, page_base,
				    page_offset, user_data, page_length)) {
			ret = -EFAULT;
			goto out_unpin;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out_unpin:
	i915_gem_object_ggtt_unpin(obj);
out:
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set. */
static int
shmem_pwrite_fast(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	if (unlikely(page_do_bit17_swizzling))
		return -EINVAL;

	vaddr = kmap_atomic(page);
	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	ret = __copy_from_user_inatomic(vaddr + shmem_page_offset,
					user_data, page_length);
	if (needs_clflush_after)
		drm_clflush_virt_range(vaddr + shmem_page_offset,
				       page_length);
	kunmap_atomic(vaddr);

	return ret ? -EFAULT : 0;
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pwrite_slow(struct page *page, int shmem_page_offset, int page_length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, shmem_page_offset,
						user_data,
						page_length);
	else
		ret = __copy_from_user(vaddr + shmem_page_offset,
				       user_data,
				       page_length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + shmem_page_offset,
					     page_length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_device *dev,
		      struct drm_i915_gem_object *obj,
		      struct drm_i915_gem_pwrite *args,
		      struct drm_file *file)
{
	ssize_t remain;
	loff_t offset;
	char __user *user_data;
	int shmem_page_offset, page_length, ret = 0;
	int obj_do_bit17_swizzling, page_do_bit17_swizzling;
	int hit_slowpath = 0;
	int needs_clflush_after = 0;
	int needs_clflush_before = 0;
	struct sg_page_iter sg_iter;

	user_data = to_user_ptr(args->data_ptr);
	remain = args->size;

	obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);

	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
		/* If we're not in the cpu write domain, set ourself into the gtt
		 * write domain and manually flush cachelines (if required). This
		 * optimizes for the case when the gpu will use the data
		 * right away and we therefore have to clflush anyway. */
		needs_clflush_after = cpu_write_needs_clflush(obj);
		ret = i915_gem_object_wait_rendering(obj, false);
		if (ret)
			return ret;

		i915_gem_object_retire(obj);
	}
	/* Same trick applies to invalidate partially written cachelines read
	 * before writing. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		needs_clflush_before =
			!cpu_cache_is_coherent(dev, obj->cache_level);

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_pin_pages(obj);

	offset = args->offset;
	obj->dirty = 1;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents,
			 offset >> PAGE_SHIFT) {
		struct page *page = sg_page_iter_page(&sg_iter);
		int partial_cacheline_write;

		if (remain <= 0)
			break;

		/* Operation in this page
		 *
		 * shmem_page_offset = offset within page in shmem file
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset_in_page(offset);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)
			page_length = PAGE_SIZE - shmem_page_offset;
		/* If we don't overwrite a cacheline completely we need to be
		 * careful to have up-to-date data by first clflushing. Don't
		 * overcomplicate things and flush the entire page. */
		partial_cacheline_write = needs_clflush_before &&
			((shmem_page_offset | page_length)
				& (boot_cpu_data.x86_clflush_size - 1));

		page_do_bit17_swizzling = obj_do_bit17_swizzling &&
			(page_to_phys(page) & (1 << 17)) != 0;

		ret = shmem_pwrite_fast(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);
		if (ret == 0)
			goto next_page;

		hit_slowpath = 1;
		mutex_unlock(&dev->struct_mutex);
		ret = shmem_pwrite_slow(page, shmem_page_offset, page_length,
					user_data, page_do_bit17_swizzling,
					partial_cacheline_write,
					needs_clflush_after);

		mutex_lock(&dev->struct_mutex);

		if (ret)
			goto out;

next_page:
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

out:
	i915_gem_object_unpin_pages(obj);

	if (hit_slowpath) {
		/*
		 * Fixup: Flush cpu caches in case we didn't flush the dirty
		 * cachelines in-line while writing and the object moved
		 * out of the cpu write domain while we've dropped the lock.
		 */
		if (!needs_clflush_after &&
		    obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
			if (i915_gem_clflush_object(obj, obj->pin_display))
				i915_gem_chipset_flush(dev);
		}
	}

	if (needs_clflush_after)
		i915_gem_chipset_flush(dev);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	if (likely(!i915.prefault_disable)) {
		ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
						   args->size);
		if (ret)
			return -EFAULT;
	}

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
	if (&obj->base == NULL) {
		ret = -ENOENT;
		goto unlock;
	}

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	/* prime objects have no backing filp to GEM pread/pwrite
	 * pages from.
	 */
	if (!obj->base.filp) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (obj->tiling_mode == I915_TILING_NONE &&
	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
	    cpu_write_needs_clflush(obj)) {
		ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fall back to the shmem path in that case. */
	}

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
	}

out:
	drm_gem_object_unreference(&obj->base);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
		     bool interruptible)
{
	if (i915_reset_in_progress(error)) {
		/* Non-interruptible callers can't handle -EAGAIN, hence return
		 * -EIO unconditionally for these. */
		if (!interruptible)
			return -EIO;

		/* Recovery complete, but the reset failed ... */
		if (i915_terminally_wedged(error))
			return -EIO;

		/*
		 * Check if GPU Reset is in progress - we need intel_ring_begin
		 * to work properly to reinit the hw state while the gpu is
		 * still marked as reset-in-progress. Handle this with a flag.
		 */
		if (!error->reload_in_reset)
			return -EAGAIN;
	}

	return 0;
}

/*
 * Compare seqno against outstanding lazy request. Emit a request if they are
 * equal.
 */
int
i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
{
	int ret;

	BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));

	ret = 0;
	if (seqno == ring->outstanding_lazy_seqno)
		ret = i915_add_request(ring, NULL);

	return ret;
}

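/* Timer callback used to simulate a missing interrupt: wake the
 * waiting task so the wait loop re-samples the ring's seqno.
 */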
static void fake_irq(unsigned long data)
{
	wake_up_process((struct task_struct *)data);
}

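/* Check whether this ring has previously been seen to miss seqno
 * interrupts; if so, waiters fall back to polling with a timer.
 */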
static bool missed_irq(struct drm_i915_private *dev_priv,
		       struct intel_engine_cs *ring)
{
	return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
}

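/* Only the first waiter for a given file gets to boost the GPU
 * clocks (RPS); the flag is cleared again once the file's idle work
 * runs.
 */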
static bool can_wait_boost(struct drm_i915_file_private *file_priv)
{
	if (file_priv == NULL)
		return true;

	return !atomic_xchg(&file_priv->rps_wait_boost, true);
}

/**
 * __i915_wait_seqno - wait until execution of seqno has finished
 * @ring: the ring expected to report seqno
 * @seqno: the sequence number to wait for
 * @reset_counter: reset sequence associated with the given seqno
 * @interruptible: do an interruptible wait (normally yes)
 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
 *
 * Note: It is of utmost importance that the passed in seqno and reset_counter
 * values have been read by the caller in an smp safe manner. Where read-side
 * locks are involved, it is sufficient to read the reset_counter before
 * unlocking the lock that protects the seqno. For lockless tricks, the
 * reset_counter _must_ be read before, and an appropriate smp_rmb must be
 * inserted.
 *
 * Returns 0 if the seqno was found within the allotted time. Else returns the
 * errno with remaining time filled in timeout argument.
 */
int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
			unsigned reset_counter,
			bool interruptible,
			s64 *timeout,
			struct drm_i915_file_private *file_priv)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const bool irq_test_in_progress =
		ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
	DEFINE_WAIT(wait);
	unsigned long timeout_expire;
	s64 before, now;
	int ret;

	WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");

	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
		return 0;

	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;

	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
		gen6_rps_boost(dev_priv);
		if (file_priv)
			mod_delayed_work(dev_priv->wq,
					 &file_priv->mm.idle_work,
					 msecs_to_jiffies(100));
	}

	if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring)))
		return -ENODEV;

	/* Record current time in case interrupted by signal, or wedged */
	trace_i915_gem_request_wait_begin(ring, seqno);
	before = ktime_get_raw_ns();
	for (;;) {
		struct timer_list timer;

		prepare_to_wait(&ring->irq_queue, &wait,
				interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);

		/* We need to check whether any gpu reset happened in between
		 * the caller grabbing the seqno and now ... */
		if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
			/* ... but upgrade the -EAGAIN to an -EIO if the gpu
			 * is truly gone. */
			ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
			if (ret == 0)
				ret = -EAGAIN;
			break;
		}

		if (i915_seqno_passed(ring->get_seqno(ring, false), seqno)) {
			ret = 0;
			break;
		}

		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		if (timeout && time_after_eq(jiffies, timeout_expire)) {
			ret = -ETIME;
			break;
		}

		timer.function = NULL;
		if (timeout || missed_irq(dev_priv, ring)) {
			unsigned long expire;

			setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
			expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
			mod_timer(&timer, expire);
		}

		io_schedule();

		if (timer.function) {
			del_singleshot_timer_sync(&timer);
			destroy_timer_on_stack(&timer);
		}
	}
	now = ktime_get_raw_ns();
	trace_i915_gem_request_wait_end(ring, seqno);

	if (!irq_test_in_progress)
		ring->irq_put(ring);

	finish_wait(&ring->irq_queue, &wait);

	if (timeout) {
		s64 tres = *timeout - (now - before);

		*timeout = tres < 0 ? 0 : tres;
	}

	return ret;
}

/**
 * Waits for a sequence number to be signaled, and cleans up the
 * request and object lists appropriately for that event.
 */
int
i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible = dev_priv->mm.interruptible;
	unsigned reset_counter;
	int ret;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
	BUG_ON(seqno == 0);

	ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
	if (ret)
		return ret;

	ret = i915_gem_check_olr(ring, seqno);
	if (ret)
		return ret;

	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
				 NULL, NULL);
}

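/* Common tail for the wait_rendering variants below: after a
 * successful wait we know the last write has completed, so
 * last_write_seqno can be cleared even though the buffer has not
 * yet been retired.
 */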
static int
i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
{
	if (!obj->active)
		return 0;

	/* Manually manage the write flush as we may have not yet
	 * retired the buffer.
	 *
	 * Note that the last_write_seqno is always the earlier of
	 * the two (read/write) seqno, so if we have successfully waited,
1376          * we know we have passed the last write.
1377          */
1378         obj->last_write_seqno = 0;
1379
1380         return 0;
1381 }
1382
1383 /**
1384  * Ensures that all rendering to the object has completed and the object is
1385  * safe to unbind from the GTT or access from the CPU.
1386  */
1387 static __must_check int
1388 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1389                                bool readonly)
1390 {
1391         struct intel_engine_cs *ring = obj->ring;
1392         u32 seqno;
1393         int ret;
1394
1395         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1396         if (seqno == 0)
1397                 return 0;
1398
1399         ret = i915_wait_seqno(ring, seqno);
1400         if (ret)
1401                 return ret;
1402
1403         return i915_gem_object_wait_rendering__tail(obj);
1404 }
1405
1406 /* A nonblocking variant of the above wait. This is a highly dangerous routine
1407  * as the object state may change during this call.
1408  */
1409 static __must_check int
1410 i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1411                                             struct drm_i915_file_private *file_priv,
1412                                             bool readonly)
1413 {
1414         struct drm_device *dev = obj->base.dev;
1415         struct drm_i915_private *dev_priv = dev->dev_private;
1416         struct intel_engine_cs *ring = obj->ring;
1417         unsigned reset_counter;
1418         u32 seqno;
1419         int ret;
1420
1421         BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1422         BUG_ON(!dev_priv->mm.interruptible);
1423
1424         seqno = readonly ? obj->last_write_seqno : obj->last_read_seqno;
1425         if (seqno == 0)
1426                 return 0;
1427
1428         ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
1429         if (ret)
1430                 return ret;
1431
1432         ret = i915_gem_check_olr(ring, seqno);
1433         if (ret)
1434                 return ret;
1435
1436         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
1437         mutex_unlock(&dev->struct_mutex);
1438         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
1439                                 file_priv);
1440         mutex_lock(&dev->struct_mutex);
1441         if (ret)
1442                 return ret;
1443
1444         return i915_gem_object_wait_rendering__tail(obj);
1445 }
1446
1447 /**
1448  * Called when user space prepares to use an object with the CPU, either
1449  * through the mmap ioctl's mapping or a GTT mapping.
1450  */
1451 int
1452 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1453                           struct drm_file *file)
1454 {
1455         struct drm_i915_gem_set_domain *args = data;
1456         struct drm_i915_gem_object *obj;
1457         uint32_t read_domains = args->read_domains;
1458         uint32_t write_domain = args->write_domain;
1459         int ret;
1460
1461         /* Only handle setting domains to types used by the CPU. */
1462         if (write_domain & I915_GEM_GPU_DOMAINS)
1463                 return -EINVAL;
1464
1465         if (read_domains & I915_GEM_GPU_DOMAINS)
1466                 return -EINVAL;
1467
1468         /* Having something in the write domain implies it's in the read
1469          * domain, and only that read domain.  Enforce that in the request.
1470          */
1471         if (write_domain != 0 && read_domains != write_domain)
1472                 return -EINVAL;
1473
1474         ret = i915_mutex_lock_interruptible(dev);
1475         if (ret)
1476                 return ret;
1477
1478         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1479         if (&obj->base == NULL) {
1480                 ret = -ENOENT;
1481                 goto unlock;
1482         }
1483
1484         /* Try to flush the object off the GPU without holding the lock.
1485          * We will repeat the flush holding the lock in the normal manner
1486          * to catch cases where we are gazumped.
1487          */
1488         ret = i915_gem_object_wait_rendering__nonblocking(obj,
1489                                                           file->driver_priv,
1490                                                           !write_domain);
1491         if (ret)
1492                 goto unref;
1493
1494         if (read_domains & I915_GEM_DOMAIN_GTT) {
1495                 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1496
1497                 /* Silently promote "you're not bound, there was nothing to do"
1498                  * to success, since the client was just asking us to
1499                  * make sure everything was done.
1500                  */
1501                 if (ret == -EINVAL)
1502                         ret = 0;
1503         } else {
1504                 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1505         }
1506
1507 unref:
1508         drm_gem_object_unreference(&obj->base);
1509 unlock:
1510         mutex_unlock(&dev->struct_mutex);
1511         return ret;
1512 }
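/*
 * Illustrative only (editor's sketch, not part of this file): how userspace
 * typically drives the ioctl above through libdrm before touching a buffer
 * with the CPU via a GTT mapping. `fd` (an open DRM fd) and `handle` (a
 * valid GEM handle) are assumed.
 */
#if 0
	struct drm_i915_gem_set_domain sd = {
		.handle = handle,
		.read_domains = I915_GEM_DOMAIN_GTT,
		.write_domain = I915_GEM_DOMAIN_GTT,	/* 0 for read-only use */
	};

	/* drmIoctl() restarts on EINTR/EAGAIN and returns -1 with errno set */
	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd))
		perror("set_domain");
#endif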
1513
1514 /**
1515  * Called when user space has done writes to this buffer
1516  */
1517 int
1518 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1519                          struct drm_file *file)
1520 {
1521         struct drm_i915_gem_sw_finish *args = data;
1522         struct drm_i915_gem_object *obj;
1523         int ret = 0;
1524
1525         ret = i915_mutex_lock_interruptible(dev);
1526         if (ret)
1527                 return ret;
1528
1529         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1530         if (&obj->base == NULL) {
1531                 ret = -ENOENT;
1532                 goto unlock;
1533         }
1534
1535         /* Pinned buffers may be scanout, so flush the cache */
1536         if (obj->pin_display)
1537                 i915_gem_object_flush_cpu_write_domain(obj, true);
1538
1539         drm_gem_object_unreference(&obj->base);
1540 unlock:
1541         mutex_unlock(&dev->struct_mutex);
1542         return ret;
1543 }
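/*
 * Illustrative only (editor's sketch, not part of this file): SW_FINISH is
 * issued after CPU writes so a pinned scanout buffer gets its cache flushed,
 * matching the pin_display check above. `fd` and `handle` are assumed.
 */
#if 0
	struct drm_i915_gem_sw_finish fin = { .handle = handle };

	/* ... CPU writes through an existing CPU mapping ... */
	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &fin);
#endif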
1544
1545 /**
1546  * Maps the contents of an object, returning the address it is mapped
1547  * into.
1548  *
1549  * While the mapping holds a reference on the contents of the object, it doesn't
1550  * imply a ref on the object itself.
1551  *
1552  * IMPORTANT:
1553  *
1554  * DRM driver writers who look at this function as an example of how to do GEM
1555  * mmap support, please don't implement mmap support like this. The modern way
1556  * to implement DRM mmap support is with an mmap offset ioctl (like
1557  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1558  * That way debug tooling like valgrind will understand what's going on; hiding
1559  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1560  * does cpu mmaps this way because we didn't know better.
1561  */
1562 int
1563 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1564                     struct drm_file *file)
1565 {
1566         struct drm_i915_gem_mmap *args = data;
1567         struct drm_gem_object *obj;
1568         unsigned long addr;
1569
1570         obj = drm_gem_object_lookup(dev, file, args->handle);
1571         if (obj == NULL)
1572                 return -ENOENT;
1573
1574         /* prime objects have no backing filp to GEM mmap
1575          * pages from.
1576          */
1577         if (!obj->filp) {
1578                 drm_gem_object_unreference_unlocked(obj);
1579                 return -EINVAL;
1580         }
1581
1582         addr = vm_mmap(obj->filp, 0, args->size,
1583                        PROT_READ | PROT_WRITE, MAP_SHARED,
1584                        args->offset);
1585         drm_gem_object_unreference_unlocked(obj);
1586         if (IS_ERR((void *)addr))
1587                 return addr;
1588
1589         args->addr_ptr = (uint64_t) addr;
1590
1591         return 0;
1592 }
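/*
 * Illustrative only (editor's sketch, not part of this file): using the
 * legacy CPU mmap ioctl that the comment above warns against copying.
 * Assumes `fd`, `handle` and the object's `size` in bytes.
 */
#if 0
	struct drm_i915_gem_mmap arg = {
		.handle = handle,
		.offset = 0,
		.size = size,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &arg) == 0) {
		void *ptr = (void *)(uintptr_t)arg.addr_ptr;
		/* CPU mapping of the shmem pages; munmap(ptr, size) when done */
	}
#endif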
1593
1594 /**
1595  * i915_gem_fault - fault a page into the GTT
1596  * @vma: VMA in question
1597  * @vmf: fault info
1598  *
1599  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1600  * from userspace.  The fault handler takes care of binding the object to
1601  * the GTT (if needed), allocating and programming a fence register (again,
1602  * only if needed based on whether the old reg is still valid or the object
1603  * is tiled) and inserting a new PTE into the faulting process.
1604  *
1605  * Note that the faulting process may involve evicting existing objects
1606  * from the GTT and/or fence registers to make room.  So performance may
1607  * suffer if the GTT working set is large or there are few fence registers
1608  * left.
1609  */
1610 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1611 {
1612         struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1613         struct drm_device *dev = obj->base.dev;
1614         struct drm_i915_private *dev_priv = dev->dev_private;
1615         pgoff_t page_offset;
1616         unsigned long pfn;
1617         int ret = 0;
1618         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1619
1620         intel_runtime_pm_get(dev_priv);
1621
1622         /* We don't use vmf->pgoff since that has the fake offset */
1623         page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
1624                 PAGE_SHIFT;
1625
1626         ret = i915_mutex_lock_interruptible(dev);
1627         if (ret)
1628                 goto out;
1629
1630         trace_i915_gem_object_fault(obj, page_offset, true, write);
1631
1632         /* Try to flush the object off the GPU first without holding the lock.
1633          * Upon reacquiring the lock, we will perform our sanity checks and then
1634          * repeat the flush holding the lock in the normal manner to catch cases
1635          * where we are gazumped.
1636          */
1637         ret = i915_gem_object_wait_rendering__nonblocking(obj, NULL, !write);
1638         if (ret)
1639                 goto unlock;
1640
1641         /* Access to snoopable pages through the GTT is incoherent. */
1642         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1643                 ret = -EFAULT;
1644                 goto unlock;
1645         }
1646
1647         /* Now bind it into the GTT if needed */
1648         ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
1649         if (ret)
1650                 goto unlock;
1651
1652         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1653         if (ret)
1654                 goto unpin;
1655
1656         ret = i915_gem_object_get_fence(obj);
1657         if (ret)
1658                 goto unpin;
1659
1660         /* Finally, remap it using the new GTT offset */
1661         pfn = dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj);
1662         pfn >>= PAGE_SHIFT;
1663
1664         if (!obj->fault_mappable) {
1665                 unsigned long size = min_t(unsigned long,
1666                                            vma->vm_end - vma->vm_start,
1667                                            obj->base.size);
1668                 int i;
1669
1670                 for (i = 0; i < size >> PAGE_SHIFT; i++) {
1671                         ret = vm_insert_pfn(vma,
1672                                             (unsigned long)vma->vm_start + i * PAGE_SIZE,
1673                                             pfn + i);
1674                         if (ret)
1675                                 break;
1676                 }
1677
1678                 obj->fault_mappable = true;
1679         } else
1680                 ret = vm_insert_pfn(vma,
1681                                     (unsigned long)vmf->virtual_address,
1682                                     pfn + page_offset);
1683 unpin:
1684         i915_gem_object_ggtt_unpin(obj);
1685 unlock:
1686         mutex_unlock(&dev->struct_mutex);
1687 out:
1688         switch (ret) {
1689         case -EIO:
1690                 /*
1691                  * We eat errors when the gpu is terminally wedged to avoid
1692                  * userspace unduly crashing (gl has no provisions for mmaps to
1693                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
1694                  * and so needs to be reported.
1695                  */
1696                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1697                         ret = VM_FAULT_SIGBUS;
1698                         break;
1699                 }
1700         case -EAGAIN:
1701                 /*
1702                  * EAGAIN means the gpu is hung and we'll wait for the error
1703                  * handler to reset everything when re-faulting in
1704                  * i915_mutex_lock_interruptible.
1705                  */
1706         case 0:
1707         case -ERESTARTSYS:
1708         case -EINTR:
1709         case -EBUSY:
1710                 /*
1711                  * EBUSY is ok: this just means that another thread
1712                  * already did the job.
1713                  */
1714                 ret = VM_FAULT_NOPAGE;
1715                 break;
1716         case -ENOMEM:
1717                 ret = VM_FAULT_OOM;
1718                 break;
1719         case -ENOSPC:
1720         case -EFAULT:
1721                 ret = VM_FAULT_SIGBUS;
1722                 break;
1723         default:
1724                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1725                 ret = VM_FAULT_SIGBUS;
1726                 break;
1727         }
1728
1729         intel_runtime_pm_put(dev_priv);
1730         return ret;
1731 }
1732
1733 /**
1734  * i915_gem_release_mmap - remove physical page mappings
1735  * @obj: obj in question
1736  *
1737  * Preserve the reservation of the mmapping with the DRM core code, but
1738  * relinquish ownership of the pages back to the system.
1739  *
1740  * It is vital that we remove the page mapping if we have mapped a tiled
1741  * object through the GTT and then lose the fence register due to
1742  * resource pressure. Similarly if the object has been moved out of the
1743  * aperture, than pages mapped into userspace must be revoked. Removing the
1744  * aperture, then pages mapped into userspace must be revoked. Removing the
1745  * fixup by i915_gem_fault().
1746  */
1747 void
1748 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1749 {
1750         if (!obj->fault_mappable)
1751                 return;
1752
1753         drm_vma_node_unmap(&obj->base.vma_node,
1754                            obj->base.dev->anon_inode->i_mapping);
1755         obj->fault_mappable = false;
1756 }
1757
1758 void
1759 i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
1760 {
1761         struct drm_i915_gem_object *obj;
1762
1763         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1764                 i915_gem_release_mmap(obj);
1765 }
1766
1767 uint32_t
1768 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1769 {
1770         uint32_t gtt_size;
1771
1772         if (INTEL_INFO(dev)->gen >= 4 ||
1773             tiling_mode == I915_TILING_NONE)
1774                 return size;
1775
1776         /* Previous chips need a power-of-two fence region when tiling */
1777         if (INTEL_INFO(dev)->gen == 3)
1778                 gtt_size = 1024*1024;
1779         else
1780                 gtt_size = 512*1024;
1781
1782         while (gtt_size < size)
1783                 gtt_size <<= 1;
1784
1785         return gtt_size;
1786 }
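/*
 * Editor's worked example of the rounding above (assumed sizes): a 700KiB
 * tiled object on gen3 starts from the 1MiB minimum and stays at 1MiB; on
 * gen2 it starts from 512KiB and is doubled once to 1MiB, since pre-gen4
 * fence regions must be power-of-two sized.
 */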
1787
1788 /**
1789  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1790  * @obj: object to check
1791  *
1792  * Return the required GTT alignment for an object, taking into account
1793  * potential fence register mapping.
1794  */
1795 uint32_t
1796 i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
1797                            int tiling_mode, bool fenced)
1798 {
1799         /*
1800          * Minimum alignment is 4k (GTT page size), but might be greater
1801          * if a fence register is needed for the object.
1802          */
1803         if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
1804             tiling_mode == I915_TILING_NONE)
1805                 return 4096;
1806
1807         /*
1808          * Previous chips need to be aligned to the size of the smallest
1809          * fence register that can contain the object.
1810          */
1811         return i915_gem_get_gtt_size(dev, size, tiling_mode);
1812 }
1813
1814 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
1815 {
1816         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1817         int ret;
1818
1819         if (drm_vma_node_has_offset(&obj->base.vma_node))
1820                 return 0;
1821
1822         dev_priv->mm.shrinker_no_lock_stealing = true;
1823
1824         ret = drm_gem_create_mmap_offset(&obj->base);
1825         if (ret != -ENOSPC)
1826                 goto out;
1827
1828         /* Badly fragmented mmap space? The only way we can recover
1829          * space is by destroying unwanted objects. We can't randomly release
1830          * mmap_offsets as userspace expects them to be persistent for the
1831          * lifetime of the objects. The closest we can get is to release the
1832          * offsets on purgeable objects by truncating them and marking them
1833          * purged, which prevents userspace from ever using those objects again.
1834          */
1835         i915_gem_shrink(dev_priv,
1836                         obj->base.size >> PAGE_SHIFT,
1837                         I915_SHRINK_BOUND |
1838                         I915_SHRINK_UNBOUND |
1839                         I915_SHRINK_PURGEABLE);
1840         ret = drm_gem_create_mmap_offset(&obj->base);
1841         if (ret != -ENOSPC)
1842                 goto out;
1843
1844         i915_gem_shrink_all(dev_priv);
1845         ret = drm_gem_create_mmap_offset(&obj->base);
1846 out:
1847         dev_priv->mm.shrinker_no_lock_stealing = false;
1848
1849         return ret;
1850 }
1851
1852 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
1853 {
1854         drm_gem_free_mmap_offset(&obj->base);
1855 }
1856
1857 int
1858 i915_gem_mmap_gtt(struct drm_file *file,
1859                   struct drm_device *dev,
1860                   uint32_t handle,
1861                   uint64_t *offset)
1862 {
1863         struct drm_i915_private *dev_priv = dev->dev_private;
1864         struct drm_i915_gem_object *obj;
1865         int ret;
1866
1867         ret = i915_mutex_lock_interruptible(dev);
1868         if (ret)
1869                 return ret;
1870
1871         obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
1872         if (&obj->base == NULL) {
1873                 ret = -ENOENT;
1874                 goto unlock;
1875         }
1876
1877         if (obj->base.size > dev_priv->gtt.mappable_end) {
1878                 ret = -E2BIG;
1879                 goto out;
1880         }
1881
1882         if (obj->madv != I915_MADV_WILLNEED) {
1883                 DRM_DEBUG("Attempting to mmap a purgeable buffer\n");
1884                 ret = -EFAULT;
1885                 goto out;
1886         }
1887
1888         ret = i915_gem_object_create_mmap_offset(obj);
1889         if (ret)
1890                 goto out;
1891
1892         *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
1893
1894 out:
1895         drm_gem_object_unreference(&obj->base);
1896 unlock:
1897         mutex_unlock(&dev->struct_mutex);
1898         return ret;
1899 }
1900
1901 /**
1902  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1903  * @dev: DRM device
1904  * @data: GTT mapping ioctl data
1905  * @file: GEM object info
1906  *
1907  * Simply returns the fake offset to userspace so it can mmap it.
1908  * The mmap call will end up in drm_gem_mmap(), which will set things
1909  * up so we can get faults in the handler above.
1910  *
1911  * The fault handler will take care of binding the object into the GTT
1912  * (since it may have been evicted to make room for something), allocating
1913  * a fence register, and mapping the appropriate aperture address into
1914  * userspace.
1915  */
1916 int
1917 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1918                         struct drm_file *file)
1919 {
1920         struct drm_i915_gem_mmap_gtt *args = data;
1921
1922         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
1923 }
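/*
 * Illustrative only (editor's sketch, not part of this file): the two-step
 * GTT mapping flow this ioctl enables. Assumes `fd`, `handle` and `size`.
 */
#if 0
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
	void *ptr;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return -1;

	/* the fake offset selects the object; faults land in i915_gem_fault() */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, arg.offset);
	if (ptr == MAP_FAILED)
		return -1;
#endif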
1924
1925 static inline int
1926 i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1927 {
1928         return obj->madv == I915_MADV_DONTNEED;
1929 }
1930
1931 /* Immediately discard the backing storage */
1932 static void
1933 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1934 {
1935         i915_gem_object_free_mmap_offset(obj);
1936
1937         if (obj->base.filp == NULL)
1938                 return;
1939
1940         /* Our goal here is to return as much of the memory as
1941          * is possible back to the system as we are called from OOM.
1942          * To do this we must instruct the shmfs to drop all of its
1943          * backing pages, *now*.
1944          */
1945         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
1946         obj->madv = __I915_MADV_PURGED;
1947 }
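/*
 * Illustrative only (editor's sketch, not part of this file): userspace
 * marks idle buffers purgeable via the madvise ioctl; the truncate path
 * above is what eventually discards them. `fd` and `handle` are assumed.
 */
#if 0
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,	/* I915_MADV_WILLNEED to reclaim */
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
	/* madv.retained == 0 means the backing pages were already discarded */
#endif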
1948
1949 /* Try to discard unwanted pages */
1950 static void
1951 i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
1952 {
1953         struct address_space *mapping;
1954
1955         switch (obj->madv) {
1956         case I915_MADV_DONTNEED:
1957                 i915_gem_object_truncate(obj); /* fall through */
1958         case __I915_MADV_PURGED:
1959                 return;
1960         }
1961
1962         if (obj->base.filp == NULL)
1963                 return;
1964
1965         mapping = file_inode(obj->base.filp)->i_mapping;
1966         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
1967 }
1968
1969 static void
1970 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1971 {
1972         struct sg_page_iter sg_iter;
1973         int ret;
1974
1975         BUG_ON(obj->madv == __I915_MADV_PURGED);
1976
1977         ret = i915_gem_object_set_to_cpu_domain(obj, true);
1978         if (ret) {
1979                 /* In the event of a disaster, abandon all caches and
1980                  * hope for the best.
1981                  */
1982                 WARN_ON(ret != -EIO);
1983                 i915_gem_clflush_object(obj, true);
1984                 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
1985         }
1986
1987         if (i915_gem_object_needs_bit17_swizzle(obj))
1988                 i915_gem_object_save_bit_17_swizzle(obj);
1989
1990         if (obj->madv == I915_MADV_DONTNEED)
1991                 obj->dirty = 0;
1992
1993         for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
1994                 struct page *page = sg_page_iter_page(&sg_iter);
1995
1996                 if (obj->dirty)
1997                         set_page_dirty(page);
1998
1999                 if (obj->madv == I915_MADV_WILLNEED)
2000                         mark_page_accessed(page);
2001
2002                 page_cache_release(page);
2003         }
2004         obj->dirty = 0;
2005
2006         sg_free_table(obj->pages);
2007         kfree(obj->pages);
2008 }
2009
2010 int
2011 i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2012 {
2013         const struct drm_i915_gem_object_ops *ops = obj->ops;
2014
2015         if (obj->pages == NULL)
2016                 return 0;
2017
2018         if (obj->pages_pin_count)
2019                 return -EBUSY;
2020
2021         BUG_ON(i915_gem_obj_bound_any(obj));
2022
2023         /* ->put_pages might need to allocate memory for the bit17 swizzle
2024          * array, hence protect them from being reaped by removing them from gtt
2025          * lists early. */
2026         list_del(&obj->global_list);
2027
2028         ops->put_pages(obj);
2029         obj->pages = NULL;
2030
2031         i915_gem_object_invalidate(obj);
2032
2033         return 0;
2034 }
2035
2036 unsigned long
2037 i915_gem_shrink(struct drm_i915_private *dev_priv,
2038                 long target, unsigned flags)
2039 {
2040         const struct {
2041                 struct list_head *list;
2042                 unsigned int bit;
2043         } phases[] = {
2044                 { &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
2045                 { &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
2046                 { NULL, 0 },
2047         }, *phase;
2048         unsigned long count = 0;
2049
2050         /*
2051          * As we may completely rewrite the (un)bound list whilst unbinding
2052          * (due to retiring requests) we have to strictly process only
2053          * one element of the list at a time, and recheck the list
2054          * on every iteration.
2055          *
2056          * In particular, we must hold a reference whilst removing the
2057          * object as we may end up waiting for and/or retiring the objects.
2058          * This might release the final reference (held by the active list)
2059          * and result in the object being freed from under us. This is
2060          * similar to the precautions the eviction code must take whilst
2061          * removing objects.
2062          *
2063          * Also note that although these lists do not hold a reference to
2064          * the object we can safely grab one here: The final object
2065          * unreferencing and the bound_list are both protected by the
2066          * dev->struct_mutex and so we won't ever be able to observe an
2067          * object on the bound_list with a reference count of 0.
2068          */
2069         for (phase = phases; phase->list; phase++) {
2070                 struct list_head still_in_list;
2071
2072                 if ((flags & phase->bit) == 0)
2073                         continue;
2074
2075                 INIT_LIST_HEAD(&still_in_list);
2076                 while (count < target && !list_empty(phase->list)) {
2077                         struct drm_i915_gem_object *obj;
2078                         struct i915_vma *vma, *v;
2079
2080                         obj = list_first_entry(phase->list,
2081                                                typeof(*obj), global_list);
2082                         list_move_tail(&obj->global_list, &still_in_list);
2083
2084                         if (flags & I915_SHRINK_PURGEABLE &&
2085                             !i915_gem_object_is_purgeable(obj))
2086                                 continue;
2087
2088                         drm_gem_object_reference(&obj->base);
2089
2090                         /* For the unbound phase, this should be a no-op! */
2091                         list_for_each_entry_safe(vma, v,
2092                                                  &obj->vma_list, vma_link)
2093                                 if (i915_vma_unbind(vma))
2094                                         break;
2095
2096                         if (i915_gem_object_put_pages(obj) == 0)
2097                                 count += obj->base.size >> PAGE_SHIFT;
2098
2099                         drm_gem_object_unreference(&obj->base);
2100                 }
2101                 list_splice(&still_in_list, phase->list);
2102         }
2103
2104         return count;
2105 }
2106
2107 static unsigned long
2108 i915_gem_shrink_all(struct drm_i915_private *dev_priv)
2109 {
2110         i915_gem_evict_everything(dev_priv->dev);
2111         return i915_gem_shrink(dev_priv, LONG_MAX,
2112                                I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
2113 }
2114
2115 static int
2116 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2117 {
2118         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2119         int page_count, i;
2120         struct address_space *mapping;
2121         struct sg_table *st;
2122         struct scatterlist *sg;
2123         struct sg_page_iter sg_iter;
2124         struct page *page;
2125         unsigned long last_pfn = 0;     /* suppress gcc warning */
2126         gfp_t gfp;
2127
2128         /* Assert that the object is not currently in any GPU domain. As it
2129          * wasn't in the GTT, there shouldn't be any way it could have been in
2130          * a GPU cache
2131          */
2132         BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2133         BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2134
2135         st = kmalloc(sizeof(*st), GFP_KERNEL);
2136         if (st == NULL)
2137                 return -ENOMEM;
2138
2139         page_count = obj->base.size / PAGE_SIZE;
2140         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2141                 kfree(st);
2142                 return -ENOMEM;
2143         }
2144
2145         /* Get the list of pages out of our struct file.  They'll be pinned
2146          * at this point until we release them.
2147          *
2148          * Fail silently without starting the shrinker
2149          */
2150         mapping = file_inode(obj->base.filp)->i_mapping;
2151         gfp = mapping_gfp_mask(mapping);
2152         gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
2153         gfp &= ~(__GFP_IO | __GFP_WAIT);
2154         sg = st->sgl;
2155         st->nents = 0;
2156         for (i = 0; i < page_count; i++) {
2157                 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2158                 if (IS_ERR(page)) {
2159                         i915_gem_shrink(dev_priv,
2160                                         page_count,
2161                                         I915_SHRINK_BOUND |
2162                                         I915_SHRINK_UNBOUND |
2163                                         I915_SHRINK_PURGEABLE);
2164                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2165                 }
2166                 if (IS_ERR(page)) {
2167                         /* We've tried hard to allocate the memory by reaping
2168                          * our own buffer, now let the real VM do its job and
2169                          * go down in flames if truly OOM.
2170                          */
2171                         i915_gem_shrink_all(dev_priv);
2172                         page = shmem_read_mapping_page(mapping, i);
2173                         if (IS_ERR(page))
2174                                 goto err_pages;
2175                 }
2176 #ifdef CONFIG_SWIOTLB
2177                 if (swiotlb_nr_tbl()) {
2178                         st->nents++;
2179                         sg_set_page(sg, page, PAGE_SIZE, 0);
2180                         sg = sg_next(sg);
2181                         continue;
2182                 }
2183 #endif
2184                 if (!i || page_to_pfn(page) != last_pfn + 1) {
2185                         if (i)
2186                                 sg = sg_next(sg);
2187                         st->nents++;
2188                         sg_set_page(sg, page, PAGE_SIZE, 0);
2189                 } else {
2190                         sg->length += PAGE_SIZE;
2191                 }
2192                 last_pfn = page_to_pfn(page);
2193
2194                 /* Check that the i965g/gm workaround works. */
2195                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2196         }
2197 #ifdef CONFIG_SWIOTLB
2198         if (!swiotlb_nr_tbl())
2199 #endif
2200                 sg_mark_end(sg);
2201         obj->pages = st;
2202
2203         if (i915_gem_object_needs_bit17_swizzle(obj))
2204                 i915_gem_object_do_bit_17_swizzle(obj);
2205
2206         return 0;
2207
2208 err_pages:
2209         sg_mark_end(sg);
2210         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
2211                 page_cache_release(sg_page_iter_page(&sg_iter));
2212         sg_free_table(st);
2213         kfree(st);
2214
2215         /* shmemfs first checks if there is enough memory to allocate the page
2216          * and reports ENOSPC should there be insufficient, along with the usual
2217          * ENOMEM for a genuine allocation failure.
2218          *
2219          * We use ENOSPC in our driver to mean that we have run out of aperture
2220          * space and so want to translate the error from shmemfs back to our
2221          * usual understanding of ENOMEM.
2222          */
2223         if (PTR_ERR(page) == -ENOSPC)
2224                 return -ENOMEM;
2225         else
2226                 return PTR_ERR(page);
2227 }
2228
2229 /* Ensure that the associated pages are gathered from the backing storage
2230  * and pinned into our object. i915_gem_object_get_pages() may be called
2231  * multiple times before they are released by a single call to
2232  * i915_gem_object_put_pages() - once the pages are no longer referenced
2233  * either as a result of memory pressure (reaping pages under the shrinker)
2234  * or as the object is itself released.
2235  */
2236 int
2237 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2238 {
2239         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2240         const struct drm_i915_gem_object_ops *ops = obj->ops;
2241         int ret;
2242
2243         if (obj->pages)
2244                 return 0;
2245
2246         if (obj->madv != I915_MADV_WILLNEED) {
2247                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2248                 return -EFAULT;
2249         }
2250
2251         BUG_ON(obj->pages_pin_count);
2252
2253         ret = ops->get_pages(obj);
2254         if (ret)
2255                 return ret;
2256
2257         list_add_tail(&obj->global_list, &dev_priv->mm.unbound_list);
2258         return 0;
2259 }
2260
2261 static void
2262 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
2263                                struct intel_engine_cs *ring)
2264 {
2265         u32 seqno = intel_ring_get_seqno(ring);
2266
2267         BUG_ON(ring == NULL);
2268         if (obj->ring != ring && obj->last_write_seqno) {
2269                 /* Keep the seqno relative to the current ring */
2270                 obj->last_write_seqno = seqno;
2271         }
2272         obj->ring = ring;
2273
2274         /* Add a reference if we're newly entering the active list. */
2275         if (!obj->active) {
2276                 drm_gem_object_reference(&obj->base);
2277                 obj->active = 1;
2278         }
2279
2280         list_move_tail(&obj->ring_list, &ring->active_list);
2281
2282         obj->last_read_seqno = seqno;
2283 }
2284
2285 void i915_vma_move_to_active(struct i915_vma *vma,
2286                              struct intel_engine_cs *ring)
2287 {
2288         list_move_tail(&vma->mm_list, &vma->vm->active_list);
2289         return i915_gem_object_move_to_active(vma->obj, ring);
2290 }
2291
2292 static void
2293 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
2294 {
2295         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2296         struct i915_address_space *vm;
2297         struct i915_vma *vma;
2298
2299         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
2300         BUG_ON(!obj->active);
2301
2302         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
2303                 vma = i915_gem_obj_to_vma(obj, vm);
2304                 if (vma && !list_empty(&vma->mm_list))
2305                         list_move_tail(&vma->mm_list, &vm->inactive_list);
2306         }
2307
2308         intel_fb_obj_flush(obj, true);
2309
2310         list_del_init(&obj->ring_list);
2311         obj->ring = NULL;
2312
2313         obj->last_read_seqno = 0;
2314         obj->last_write_seqno = 0;
2315         obj->base.write_domain = 0;
2316
2317         obj->last_fenced_seqno = 0;
2318
2319         obj->active = 0;
2320         drm_gem_object_unreference(&obj->base);
2321
2322         WARN_ON(i915_verify_lists(dev));
2323 }
2324
2325 static void
2326 i915_gem_object_retire(struct drm_i915_gem_object *obj)
2327 {
2328         struct intel_engine_cs *ring = obj->ring;
2329
2330         if (ring == NULL)
2331                 return;
2332
2333         if (i915_seqno_passed(ring->get_seqno(ring, true),
2334                               obj->last_read_seqno))
2335                 i915_gem_object_move_to_inactive(obj);
2336 }
2337
2338 static int
2339 i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2340 {
2341         struct drm_i915_private *dev_priv = dev->dev_private;
2342         struct intel_engine_cs *ring;
2343         int ret, i, j;
2344
2345         /* Carefully retire all requests without writing to the rings */
2346         for_each_ring(ring, dev_priv, i) {
2347                 ret = intel_ring_idle(ring);
2348                 if (ret)
2349                         return ret;
2350         }
2351         i915_gem_retire_requests(dev);
2352
2353         /* Finally reset hw state */
2354         for_each_ring(ring, dev_priv, i) {
2355                 intel_ring_init_seqno(ring, seqno);
2356
2357                 for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
2358                         ring->semaphore.sync_seqno[j] = 0;
2359         }
2360
2361         return 0;
2362 }
2363
2364 int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2365 {
2366         struct drm_i915_private *dev_priv = dev->dev_private;
2367         int ret;
2368
2369         if (seqno == 0)
2370                 return -EINVAL;
2371
2372         /* The seqno in the HWS page needs to be one less than what
2373          * we will inject into the ring
2374          */
2375         ret = i915_gem_init_seqno(dev, seqno - 1);
2376         if (ret)
2377                 return ret;
2378
2379         /* Carefully set the last_seqno value so that wrap
2380          * detection still works
2381          */
2382         dev_priv->next_seqno = seqno;
2383         dev_priv->last_seqno = seqno - 1;
2384         if (dev_priv->last_seqno == 0)
2385                 dev_priv->last_seqno--;
2386
2387         return 0;
2388 }
2389
2390 int
2391 i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2392 {
2393         struct drm_i915_private *dev_priv = dev->dev_private;
2394
2395         /* reserve 0 for non-seqno */
2396         if (dev_priv->next_seqno == 0) {
2397                 int ret = i915_gem_init_seqno(dev, 0);
2398                 if (ret)
2399                         return ret;
2400
2401                 dev_priv->next_seqno = 1;
2402         }
2403
2404         *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
2405         return 0;
2406 }
2407
2408 int __i915_add_request(struct intel_engine_cs *ring,
2409                        struct drm_file *file,
2410                        struct drm_i915_gem_object *obj,
2411                        u32 *out_seqno)
2412 {
2413         struct drm_i915_private *dev_priv = ring->dev->dev_private;
2414         struct drm_i915_gem_request *request;
2415         struct intel_ringbuffer *ringbuf;
2416         u32 request_ring_position, request_start;
2417         int ret;
2418
2419         request = ring->preallocated_lazy_request;
2420         if (WARN_ON(request == NULL))
2421                 return -ENOMEM;
2422
2423         if (i915.enable_execlists) {
2424                 struct intel_context *ctx = request->ctx;
2425                 ringbuf = ctx->engine[ring->id].ringbuf;
2426         } else
2427                 ringbuf = ring->buffer;
2428
2429         request_start = intel_ring_get_tail(ringbuf);
2430         /*
2431          * Emit any outstanding flushes - execbuf can fail to emit the flush
2432          * after having emitted the batchbuffer command. Hence we need to fix
2433          * things up similar to emitting the lazy request. The difference here
2434          * is that the flush _must_ happen before the next request, no matter
2435          * what.
2436          */
2437         if (i915.enable_execlists) {
2438                 ret = logical_ring_flush_all_caches(ringbuf);
2439                 if (ret)
2440                         return ret;
2441         } else {
2442                 ret = intel_ring_flush_all_caches(ring);
2443                 if (ret)
2444                         return ret;
2445         }
2446
2447         /* Record the position of the start of the request so that
2448          * should we detect the updated seqno part-way through the
2449          * GPU processing the request, we never over-estimate the
2450          * position of the head.
2451          */
2452         request_ring_position = intel_ring_get_tail(ringbuf);
2453
2454         if (i915.enable_execlists) {
2455                 ret = ring->emit_request(ringbuf);
2456                 if (ret)
2457                         return ret;
2458         } else {
2459                 ret = ring->add_request(ring);
2460                 if (ret)
2461                         return ret;
2462         }
2463
2464         request->seqno = intel_ring_get_seqno(ring);
2465         request->ring = ring;
2466         request->head = request_start;
2467         request->tail = request_ring_position;
2468
2469         /* Whilst this request exists, batch_obj will be on the
2470          * active_list, and so will hold the active reference. Only when this
2471          * request is retired will the batch_obj be moved onto the
2472          * inactive_list and lose its active reference. Hence we do not need
2473          * to explicitly hold another reference here.
2474          */
2475         request->batch_obj = obj;
2476
2477         if (!i915.enable_execlists) {
2478                 /* Hold a reference to the current context so that we can inspect
2479                  * it later in case a hangcheck error event fires.
2480                  */
2481                 request->ctx = ring->last_context;
2482                 if (request->ctx)
2483                         i915_gem_context_reference(request->ctx);
2484         }
2485
2486         request->emitted_jiffies = jiffies;
2487         list_add_tail(&request->list, &ring->request_list);
2488         request->file_priv = NULL;
2489
2490         if (file) {
2491                 struct drm_i915_file_private *file_priv = file->driver_priv;
2492
2493                 spin_lock(&file_priv->mm.lock);
2494                 request->file_priv = file_priv;
2495                 list_add_tail(&request->client_list,
2496                               &file_priv->mm.request_list);
2497                 spin_unlock(&file_priv->mm.lock);
2498         }
2499
2500         trace_i915_gem_request_add(ring, request->seqno);
2501         ring->outstanding_lazy_seqno = 0;
2502         ring->preallocated_lazy_request = NULL;
2503
2504         if (!dev_priv->ums.mm_suspended) {
2505                 i915_queue_hangcheck(ring->dev);
2506
2507                 cancel_delayed_work_sync(&dev_priv->mm.idle_work);
2508                 queue_delayed_work(dev_priv->wq,
2509                                    &dev_priv->mm.retire_work,
2510                                    round_jiffies_up_relative(HZ));
2511                 intel_mark_busy(dev_priv->dev);
2512         }
2513
2514         if (out_seqno)
2515                 *out_seqno = request->seqno;
2516         return 0;
2517 }
2518
2519 static inline void
2520 i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
2521 {
2522         struct drm_i915_file_private *file_priv = request->file_priv;
2523
2524         if (!file_priv)
2525                 return;
2526
2527         spin_lock(&file_priv->mm.lock);
2528         list_del(&request->client_list);
2529         request->file_priv = NULL;
2530         spin_unlock(&file_priv->mm.lock);
2531 }
2532
2533 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
2534                                    const struct intel_context *ctx)
2535 {
2536         unsigned long elapsed;
2537
2538         elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2539
2540         if (ctx->hang_stats.banned)
2541                 return true;
2542
2543         if (elapsed <= DRM_I915_CTX_BAN_PERIOD) {
2544                 if (!i915_gem_context_is_default(ctx)) {
2545                         DRM_DEBUG("context hanging too fast, banning!\n");
2546                         return true;
2547                 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2548                         if (i915_stop_ring_allow_warn(dev_priv))
2549                                 DRM_ERROR("gpu hanging too fast, banning!\n");
2550                         return true;
2551                 }
2552         }
2553
2554         return false;
2555 }
2556
2557 static void i915_set_reset_status(struct drm_i915_private *dev_priv,
2558                                   struct intel_context *ctx,
2559                                   const bool guilty)
2560 {
2561         struct i915_ctx_hang_stats *hs;
2562
2563         if (WARN_ON(!ctx))
2564                 return;
2565
2566         hs = &ctx->hang_stats;
2567
2568         if (guilty) {
2569                 hs->banned = i915_context_is_banned(dev_priv, ctx);
2570                 hs->batch_active++;
2571                 hs->guilty_ts = get_seconds();
2572         } else {
2573                 hs->batch_pending++;
2574         }
2575 }
2576
2577 static void i915_gem_free_request(struct drm_i915_gem_request *request)
2578 {
2579         struct intel_context *ctx = request->ctx;
2580
2581         list_del(&request->list);
2582         i915_gem_request_remove_from_client(request);
2583
2584         if (i915.enable_execlists && ctx) {
2585                 struct intel_engine_cs *ring = request->ring;
2586
2587                 if (ctx != ring->default_context)
2588                         intel_lr_context_unpin(ring, ctx);
2589                 i915_gem_context_unreference(ctx);
2590         }
2591         kfree(request);
2592 }
2593
2594 struct drm_i915_gem_request *
2595 i915_gem_find_active_request(struct intel_engine_cs *ring)
2596 {
2597         struct drm_i915_gem_request *request;
2598         u32 completed_seqno;
2599
2600         completed_seqno = ring->get_seqno(ring, false);
2601
2602         list_for_each_entry(request, &ring->request_list, list) {
2603                 if (i915_seqno_passed(completed_seqno, request->seqno))
2604                         continue;
2605
2606                 return request;
2607         }
2608
2609         return NULL;
2610 }
2611
2612 static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
2613                                        struct intel_engine_cs *ring)
2614 {
2615         struct drm_i915_gem_request *request;
2616         bool ring_hung;
2617
2618         request = i915_gem_find_active_request(ring);
2619
2620         if (request == NULL)
2621                 return;
2622
2623         ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2624
2625         i915_set_reset_status(dev_priv, request->ctx, ring_hung);
2626
2627         list_for_each_entry_continue(request, &ring->request_list, list)
2628                 i915_set_reset_status(dev_priv, request->ctx, false);
2629 }
2630
2631 static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
2632                                         struct intel_engine_cs *ring)
2633 {
2634         while (!list_empty(&ring->active_list)) {
2635                 struct drm_i915_gem_object *obj;
2636
2637                 obj = list_first_entry(&ring->active_list,
2638                                        struct drm_i915_gem_object,
2639                                        ring_list);
2640
2641                 i915_gem_object_move_to_inactive(obj);
2642         }
2643
2644         /*
2645          * Clear the execlists queue up before freeing the requests, as those
2646          * are the ones that keep the context and ringbuffer backing objects
2647          * pinned in place.
2648          */
2649         while (!list_empty(&ring->execlist_queue)) {
2650                 struct intel_ctx_submit_request *submit_req;
2651
2652                 submit_req = list_first_entry(&ring->execlist_queue,
2653                                 struct intel_ctx_submit_request,
2654                                 execlist_link);
2655                 list_del(&submit_req->execlist_link);
2656                 intel_runtime_pm_put(dev_priv);
2657                 i915_gem_context_unreference(submit_req->ctx);
2658                 kfree(submit_req);
2659         }
2660
2661         /*
2662          * We must free the requests after all the corresponding objects have
2663          * been moved off active lists. Which is the same order as the normal
2664          * retire_requests function does. This is important if objects hold
2665          * implicit references on things like e.g. ppgtt address spaces through
2666          * the request.
2667          */
2668         while (!list_empty(&ring->request_list)) {
2669                 struct drm_i915_gem_request *request;
2670
2671                 request = list_first_entry(&ring->request_list,
2672                                            struct drm_i915_gem_request,
2673                                            list);
2674
2675                 i915_gem_free_request(request);
2676         }
2677
2678         /* These may not have been flushed before the reset, do so now */
2679         kfree(ring->preallocated_lazy_request);
2680         ring->preallocated_lazy_request = NULL;
2681         ring->outstanding_lazy_seqno = 0;
2682 }
2683
2684 void i915_gem_restore_fences(struct drm_device *dev)
2685 {
2686         struct drm_i915_private *dev_priv = dev->dev_private;
2687         int i;
2688
2689         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2690                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2691
2692                 /*
2693                  * Commit delayed tiling changes if we have an object still
2694                  * attached to the fence, otherwise just clear the fence.
2695                  */
2696                 if (reg->obj) {
2697                         i915_gem_object_update_fence(reg->obj, reg,
2698                                                      reg->obj->tiling_mode);
2699                 } else {
2700                         i915_gem_write_fence(dev, i, NULL);
2701                 }
2702         }
2703 }
2704
2705 void i915_gem_reset(struct drm_device *dev)
2706 {
2707         struct drm_i915_private *dev_priv = dev->dev_private;
2708         struct intel_engine_cs *ring;
2709         int i;
2710
2711         /*
2712          * Before we free the objects from the requests, we need to inspect
2713          * them for finding the guilty party. As the requests only borrow
2714          * their reference to the objects, the inspection must be done first.
2715          */
2716         for_each_ring(ring, dev_priv, i)
2717                 i915_gem_reset_ring_status(dev_priv, ring);
2718
2719         for_each_ring(ring, dev_priv, i)
2720                 i915_gem_reset_ring_cleanup(dev_priv, ring);
2721
2722         i915_gem_context_reset(dev);
2723
2724         i915_gem_restore_fences(dev);
2725 }
2726
2727 /**
2728  * This function clears the request list as sequence numbers are passed.
2729  */
2730 void
2731 i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
2732 {
2733         uint32_t seqno;
2734
2735         if (list_empty(&ring->request_list))
2736                 return;
2737
2738         WARN_ON(i915_verify_lists(ring->dev));
2739
2740         seqno = ring->get_seqno(ring, true);
2741
2742         /* Move any buffers on the active list that are no longer referenced
2743          * by the ringbuffer to the flushing/inactive lists as appropriate,
2744          * before we free the context associated with the requests.
2745          */
2746         while (!list_empty(&ring->active_list)) {
2747                 struct drm_i915_gem_object *obj;
2748
2749                 obj = list_first_entry(&ring->active_list,
2750                                       struct drm_i915_gem_object,
2751                                       ring_list);
2752
2753                 if (!i915_seqno_passed(seqno, obj->last_read_seqno))
2754                         break;
2755
2756                 i915_gem_object_move_to_inactive(obj);
2757         }
2758
2759
2760         while (!list_empty(&ring->request_list)) {
2761                 struct drm_i915_gem_request *request;
2762                 struct intel_ringbuffer *ringbuf;
2763
2764                 request = list_first_entry(&ring->request_list,
2765                                            struct drm_i915_gem_request,
2766                                            list);
2767
2768                 if (!i915_seqno_passed(seqno, request->seqno))
2769                         break;
2770
2771                 trace_i915_gem_request_retire(ring, request->seqno);
2772
2773                 /* This is one of the few common intersection points
2774                  * between legacy ringbuffer submission and execlists:
2775                  * we need to tell them apart in order to find the correct
2776                  * ringbuffer to which the request belongs to.
2777                  */
2778                 if (i915.enable_execlists) {
2779                         struct intel_context *ctx = request->ctx;
2780                         ringbuf = ctx->engine[ring->id].ringbuf;
2781                 } else
2782                         ringbuf = ring->buffer;
2783
2784                 /* We know the GPU must have read the request to have
2785                  * sent us the seqno + interrupt, so use the position
2786                  * of tail of the request to update the last known position
2787                  * of the GPU head.
2788                  */
2789                 ringbuf->last_retired_head = request->tail;
2790
2791                 i915_gem_free_request(request);
2792         }
2793
2794         if (unlikely(ring->trace_irq_seqno &&
2795                      i915_seqno_passed(seqno, ring->trace_irq_seqno))) {
2796                 ring->irq_put(ring);
2797                 ring->trace_irq_seqno = 0;
2798         }
2799
2800         WARN_ON(i915_verify_lists(ring->dev));
2801 }
2802
2803 bool
2804 i915_gem_retire_requests(struct drm_device *dev)
2805 {
2806         struct drm_i915_private *dev_priv = dev->dev_private;
2807         struct intel_engine_cs *ring;
2808         bool idle = true;
2809         int i;
2810
2811         for_each_ring(ring, dev_priv, i) {
2812                 i915_gem_retire_requests_ring(ring);
2813                 idle &= list_empty(&ring->request_list);
2814                 if (i915.enable_execlists) {
2815                         unsigned long flags;
2816
2817                         spin_lock_irqsave(&ring->execlist_lock, flags);
2818                         idle &= list_empty(&ring->execlist_queue);
2819                         spin_unlock_irqrestore(&ring->execlist_lock, flags);
2820
2821                         intel_execlists_retire_requests(ring);
2822                 }
2823         }
2824
2825         if (idle)
2826                 mod_delayed_work(dev_priv->wq,
2827                                    &dev_priv->mm.idle_work,
2828                                    msecs_to_jiffies(100));
2829
2830         return idle;
2831 }
2832
2833 static void
2834 i915_gem_retire_work_handler(struct work_struct *work)
2835 {
2836         struct drm_i915_private *dev_priv =
2837                 container_of(work, typeof(*dev_priv), mm.retire_work.work);
2838         struct drm_device *dev = dev_priv->dev;
2839         bool idle;
2840
2841         /* Come back later if the device is busy... */
2842         idle = false;
2843         if (mutex_trylock(&dev->struct_mutex)) {
2844                 idle = i915_gem_retire_requests(dev);
2845                 mutex_unlock(&dev->struct_mutex);
2846         }
2847         if (!idle)
2848                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
2849                                    round_jiffies_up_relative(HZ));
2850 }
2851
2852 static void
2853 i915_gem_idle_work_handler(struct work_struct *work)
2854 {
2855         struct drm_i915_private *dev_priv =
2856                 container_of(work, typeof(*dev_priv), mm.idle_work.work);
2857
2858         intel_mark_idle(dev_priv->dev);
2859 }
2860
2861 /**
2862  * Ensures that an object will eventually get non-busy by flushing any required
2863  * write domains, emitting any outstanding lazy request and retiring any
2864  * completed requests.
2865  */
2866 static int
2867 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
2868 {
2869         int ret;
2870
2871         if (obj->active) {
2872                 ret = i915_gem_check_olr(obj->ring, obj->last_read_seqno);
2873                 if (ret)
2874                         return ret;
2875
2876                 i915_gem_retire_requests_ring(obj->ring);
2877         }
2878
2879         return 0;
2880 }
2881
2882 /**
2883  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2884  * @DRM_IOCTL_ARGS: standard ioctl arguments
2885  *
2886  * Returns 0 if successful, else an error is returned with the remaining time in
2887  * the timeout parameter.
2888  *  -ETIME: object is still busy after timeout
2889  *  -ERESTARTSYS: signal interrupted the wait
2890  *  -ENOENT: object doesn't exist
2891  * Also possible, but rare:
2892  *  -EAGAIN: GPU wedged
2893  *  -ENOMEM: damn
2894  *  -ENODEV: Internal IRQ fail
2895  *  -E?: The add request failed
2896  *
2897  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
2898  * non-zero timeout parameter the wait ioctl will wait for the given number of
2899  * nanoseconds on an object becoming unbusy. Since the wait itself does so
2900  * without holding struct_mutex the object may become re-busied before this
2901  * function completes. A similar but shorter * race condition exists in the busy
2902  * function completes. A similar but shorter race condition exists in the busy
2903  * ioctl.
2904 int
2905 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2906 {
2907         struct drm_i915_private *dev_priv = dev->dev_private;
2908         struct drm_i915_gem_wait *args = data;
2909         struct drm_i915_gem_object *obj;
2910         struct intel_engine_cs *ring = NULL;
2911         unsigned reset_counter;
2912         u32 seqno = 0;
2913         int ret = 0;
2914
2915         if (args->flags != 0)
2916                 return -EINVAL;
2917
2918         ret = i915_mutex_lock_interruptible(dev);
2919         if (ret)
2920                 return ret;
2921
2922         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
2923         if (&obj->base == NULL) {
2924                 mutex_unlock(&dev->struct_mutex);
2925                 return -ENOENT;
2926         }
2927
2928         /* Need to make sure the object gets inactive eventually. */
2929         ret = i915_gem_object_flush_active(obj);
2930         if (ret)
2931                 goto out;
2932
2933         if (obj->active) {
2934                 seqno = obj->last_read_seqno;
2935                 ring = obj->ring;
2936         }
2937
2938         if (seqno == 0)
2939                  goto out;
2940
2941         /* Do this after OLR check to make sure we make forward progress polling
2942          * on this IOCTL with a timeout <=0 (like busy ioctl)
2943          */
2944         if (args->timeout_ns <= 0) {
2945                 ret = -ETIME;
2946                 goto out;
2947         }
2948
2949         drm_gem_object_unreference(&obj->base);
2950         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
2951         mutex_unlock(&dev->struct_mutex);
2952
2953         return __i915_wait_seqno(ring, seqno, reset_counter, true,
2954                                  &args->timeout_ns, file->driver_priv);
2955
2956 out:
2957         drm_gem_object_unreference(&obj->base);
2958         mutex_unlock(&dev->struct_mutex);
2959         return ret;
2960 }
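/*
 * Illustrative only (editor's sketch, not part of this file): the busy-poll
 * versus bounded-wait behaviour described in the comment above. Assumes
 * `fd` and `handle`.
 */
#if 0
	struct drm_i915_gem_wait wait = {
		.bo_handle = handle,
		.timeout_ns = 0,		/* 0: poll, fails with ETIME if busy */
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME) {
		wait.timeout_ns = 1000000;	/* then block for up to 1ms */
		drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	}
#endif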
2961
2962 /**
2963  * i915_gem_object_sync - sync an object to a ring.
2964  *
2965  * @obj: object which may be in use on another ring.
2966  * @to: ring we wish to use the object on. May be NULL.
2967  *
2968  * This code is meant to abstract object synchronization with the GPU.
2969  * Calling with NULL implies synchronizing the object with the CPU
2970  * rather than a particular GPU ring.
2971  *
2972  * Returns 0 if successful, else propagates up the lower layer error.
2973  */
2974 int
2975 i915_gem_object_sync(struct drm_i915_gem_object *obj,
2976                      struct intel_engine_cs *to)
2977 {
2978         struct intel_engine_cs *from = obj->ring;
2979         u32 seqno;
2980         int ret, idx;
2981
2982         if (from == NULL || to == from)
2983                 return 0;
2984
2985         if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
2986                 return i915_gem_object_wait_rendering(obj, false);
2987
2988         idx = intel_ring_sync_index(from, to);
2989
2990         seqno = obj->last_read_seqno;
2991         /* Optimization: Avoid semaphore sync when we are sure we already
2992          * waited for an object with higher seqno */
2993         if (seqno <= from->semaphore.sync_seqno[idx])
2994                 return 0;
2995
2996         ret = i915_gem_check_olr(obj->ring, seqno);
2997         if (ret)
2998                 return ret;
2999
3000         trace_i915_gem_ring_sync_to(from, to, seqno);
3001         ret = to->semaphore.sync_to(to, from, seqno);
3002         if (!ret)
3003                 /* We use last_read_seqno because sync_to()
3004                  * might have just caused seqno wrap under
3005                  * the radar.
3006                  */
3007                 from->semaphore.sync_seqno[idx] = obj->last_read_seqno;
3008
3009         return ret;
3010 }
3011
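/* Worked example of the short-circuit above: if @to has already synced
 * to seqno 100 from @from (semaphore.sync_seqno[idx] == 100), syncing an
 * object whose last_read_seqno is 90 returns immediately; only an object
 * with a newer seqno (say 110) emits a fresh semaphore wait and then
 * bumps sync_seqno[idx].
 */
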
3012 static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3013 {
3014         u32 old_write_domain, old_read_domains;
3015
3016         /* Force a pagefault for domain tracking on next user access */
3017         i915_gem_release_mmap(obj);
3018
3019         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3020                 return;
3021
3022         /* Wait for any direct GTT access to complete */
3023         mb();
3024
3025         old_read_domains = obj->base.read_domains;
3026         old_write_domain = obj->base.write_domain;
3027
3028         obj->base.read_domains &= ~I915_GEM_DOMAIN_GTT;
3029         obj->base.write_domain &= ~I915_GEM_DOMAIN_GTT;
3030
3031         trace_i915_gem_object_change_domain(obj,
3032                                             old_read_domains,
3033                                             old_write_domain);
3034 }
3035
3036 int i915_vma_unbind(struct i915_vma *vma)
3037 {
3038         struct drm_i915_gem_object *obj = vma->obj;
3039         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3040         int ret;
3041
3042         if (list_empty(&vma->vma_link))
3043                 return 0;
3044
3045         if (!drm_mm_node_allocated(&vma->node)) {
3046                 i915_gem_vma_destroy(vma);
3047                 return 0;
3048         }
3049
3050         if (vma->pin_count)
3051                 return -EBUSY;
3052
3053         BUG_ON(obj->pages == NULL);
3054
3055         ret = i915_gem_object_finish_gpu(obj);
3056         if (ret)
3057                 return ret;
3058         /* Continue even if we fail due to EIO: the GPU is hung, so
3059          * proceeding should be safe, and we must clean up or else we
3060          * might cause memory corruption through use-after-free.
3061          */
3062
3063         /* Throw away the active reference before moving to the unbound list */
3064         i915_gem_object_retire(obj);
3065
3066         if (i915_is_ggtt(vma->vm)) {
3067                 i915_gem_object_finish_gtt(obj);
3068
3069                 /* release the fence reg _after_ flushing */
3070                 ret = i915_gem_object_put_fence(obj);
3071                 if (ret)
3072                         return ret;
3073         }
3074
3075         trace_i915_vma_unbind(vma);
3076
3077         vma->unbind_vma(vma);
3078
3079         list_del_init(&vma->mm_list);
3080         if (i915_is_ggtt(vma->vm))
3081                 obj->map_and_fenceable = false;
3082
3083         drm_mm_remove_node(&vma->node);
3084         i915_gem_vma_destroy(vma);
3085
3086         /* Since the unbound list is global, only move to that list if
3087          * no more VMAs exist. */
3088         if (list_empty(&obj->vma_list)) {
3089                 i915_gem_gtt_finish_object(obj);
3090                 list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
3091         }
3092
3093         /* And finally now the object is completely decoupled from this vma,
3094          * we can drop its hold on the backing storage and allow it to be
3095          * reaped by the shrinker.
3096          */
3097         i915_gem_object_unpin_pages(obj);
3098
3099         return 0;
3100 }
3101
3102 int i915_gpu_idle(struct drm_device *dev)
3103 {
3104         struct drm_i915_private *dev_priv = dev->dev_private;
3105         struct intel_engine_cs *ring;
3106         int ret, i;
3107
3108         /* Flush everything onto the inactive list. */
3109         for_each_ring(ring, dev_priv, i) {
3110                 if (!i915.enable_execlists) {
3111                         ret = i915_switch_context(ring, ring->default_context);
3112                         if (ret)
3113                                 return ret;
3114                 }
3115
3116                 ret = intel_ring_idle(ring);
3117                 if (ret)
3118                         return ret;
3119         }
3120
3121         return 0;
3122 }
3123
3124 static void i965_write_fence_reg(struct drm_device *dev, int reg,
3125                                  struct drm_i915_gem_object *obj)
3126 {
3127         struct drm_i915_private *dev_priv = dev->dev_private;
3128         int fence_reg;
3129         int fence_pitch_shift;
3130
3131         if (INTEL_INFO(dev)->gen >= 6) {
3132                 fence_reg = FENCE_REG_SANDYBRIDGE_0;
3133                 fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
3134         } else {
3135                 fence_reg = FENCE_REG_965_0;
3136                 fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
3137         }
3138
3139         fence_reg += reg * 8;
3140
3141         /* To work around incoherency with non-atomic 64-bit register
3142          * updates, we split the 64-bit update into two 32-bit writes. In
3143          * order for a partial fence not to be evaluated between writes,
3144          * we precede the update with a write to turn off the fence
3145          * register, and only enable the fence as the last step.
3146          *
3147          * For extra levels of paranoia, we make sure each step lands
3148          * before applying the next step.
3149          */
3150         I915_WRITE(fence_reg, 0);
3151         POSTING_READ(fence_reg);
3152
3153         if (obj) {
3154                 u32 size = i915_gem_obj_ggtt_size(obj);
3155                 uint64_t val;
3156
3157                 val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
3158                                  0xfffff000) << 32;
3159                 val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
3160                 val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
3161                 if (obj->tiling_mode == I915_TILING_Y)
3162                         val |= 1 << I965_FENCE_TILING_Y_SHIFT;
3163                 val |= I965_FENCE_REG_VALID;
3164
3165                 I915_WRITE(fence_reg + 4, val >> 32);
3166                 POSTING_READ(fence_reg + 4);
3167
3168                 I915_WRITE(fence_reg + 0, val);
3169                 POSTING_READ(fence_reg);
3170         } else {
3171                 I915_WRITE(fence_reg + 4, 0);
3172                 POSTING_READ(fence_reg + 4);
3173         }
3174 }
3175
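/* Worked example of the value packed above (illustrative numbers, gen6):
 * an object at GGTT offset 0x00100000, of size 0x00100000, with stride
 * 512 and Y tiling, yields
 *
 *	val = ((0x00100000 + 0x00100000 - 4096) & 0xfffff000) << 32
 *	    | (0x00100000 & 0xfffff000)
 *	    | ((512 / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT
 *	    | 1 << I965_FENCE_TILING_Y_SHIFT
 *	    | I965_FENCE_REG_VALID;
 */
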
3176 static void i915_write_fence_reg(struct drm_device *dev, int reg,
3177                                  struct drm_i915_gem_object *obj)
3178 {
3179         struct drm_i915_private *dev_priv = dev->dev_private;
3180         u32 val;
3181
3182         if (obj) {
3183                 u32 size = i915_gem_obj_ggtt_size(obj);
3184                 int pitch_val;
3185                 int tile_width;
3186
3187                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) ||
3188                      (size & -size) != size ||
3189                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3190                      "object 0x%08lx [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
3191                      i915_gem_obj_ggtt_offset(obj), obj->map_and_fenceable, size);
3192
3193                 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
3194                         tile_width = 128;
3195                 else
3196                         tile_width = 512;
3197
3198                 /* Note: the pitch must be a power-of-two number of tile widths */
3199                 pitch_val = obj->stride / tile_width;
3200                 pitch_val = ffs(pitch_val) - 1;
3201
3202                 val = i915_gem_obj_ggtt_offset(obj);
3203                 if (obj->tiling_mode == I915_TILING_Y)
3204                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3205                 val |= I915_FENCE_SIZE_BITS(size);
3206                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3207                 val |= I830_FENCE_REG_VALID;
3208         } else
3209                 val = 0;
3210
3211         if (reg < 8)
3212                 reg = FENCE_REG_830_0 + reg * 4;
3213         else
3214                 reg = FENCE_REG_945_8 + (reg - 8) * 4;
3215
3216         I915_WRITE(reg, val);
3217         POSTING_READ(reg);
3218 }
3219
3220 static void i830_write_fence_reg(struct drm_device *dev, int reg,
3221                                 struct drm_i915_gem_object *obj)
3222 {
3223         struct drm_i915_private *dev_priv = dev->dev_private;
3224         uint32_t val;
3225
3226         if (obj) {
3227                 u32 size = i915_gem_obj_ggtt_size(obj);
3228                 uint32_t pitch_val;
3229
3230                 WARN((i915_gem_obj_ggtt_offset(obj) & ~I830_FENCE_START_MASK) ||
3231                      (size & -size) != size ||
3232                      (i915_gem_obj_ggtt_offset(obj) & (size - 1)),
3233                      "object 0x%08lx not 512K or pot-size 0x%08x aligned\n",
3234                      i915_gem_obj_ggtt_offset(obj), size);
3235
3236                 pitch_val = obj->stride / 128;
3237                 pitch_val = ffs(pitch_val) - 1;
3238
3239                 val = i915_gem_obj_ggtt_offset(obj);
3240                 if (obj->tiling_mode == I915_TILING_Y)
3241                         val |= 1 << I830_FENCE_TILING_Y_SHIFT;
3242                 val |= I830_FENCE_SIZE_BITS(size);
3243                 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
3244                 val |= I830_FENCE_REG_VALID;
3245         } else
3246                 val = 0;
3247
3248         I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
3249         POSTING_READ(FENCE_REG_830_0 + reg * 4);
3250 }
3251
3252 static inline bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
3253 {
3254         return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
3255 }
3256
3257 static void i915_gem_write_fence(struct drm_device *dev, int reg,
3258                                  struct drm_i915_gem_object *obj)
3259 {
3260         struct drm_i915_private *dev_priv = dev->dev_private;
3261
3262         /* Ensure that all CPU reads are completed before installing a fence
3263          * and all writes before removing the fence.
3264          */
3265         if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
3266                 mb();
3267
3268         WARN(obj && (!obj->stride || !obj->tiling_mode),
3269              "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
3270              obj->stride, obj->tiling_mode);
3271
3272         switch (INTEL_INFO(dev)->gen) {
3273         case 9:
3274         case 8:
3275         case 7:
3276         case 6:
3277         case 5:
3278         case 4: i965_write_fence_reg(dev, reg, obj); break;
3279         case 3: i915_write_fence_reg(dev, reg, obj); break;
3280         case 2: i830_write_fence_reg(dev, reg, obj); break;
3281         default: BUG();
3282         }
3283
3284         /* And similarly be paranoid that no direct access to this region
3285          * is reordered to before the fence is installed.
3286          */
3287         if (i915_gem_object_needs_mb(obj))
3288                 mb();
3289 }
3290
3291 static inline int fence_number(struct drm_i915_private *dev_priv,
3292                                struct drm_i915_fence_reg *fence)
3293 {
3294         return fence - dev_priv->fence_regs;
3295 }
3296
3297 static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
3298                                          struct drm_i915_fence_reg *fence,
3299                                          bool enable)
3300 {
3301         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3302         int reg = fence_number(dev_priv, fence);
3303
3304         i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
3305
3306         if (enable) {
3307                 obj->fence_reg = reg;
3308                 fence->obj = obj;
3309                 list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
3310         } else {
3311                 obj->fence_reg = I915_FENCE_REG_NONE;
3312                 fence->obj = NULL;
3313                 list_del_init(&fence->lru_list);
3314         }
3315         obj->fence_dirty = false;
3316 }
3317
3318 static int
3319 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
3320 {
3321         if (obj->last_fenced_seqno) {
3322                 int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
3323                 if (ret)
3324                         return ret;
3325
3326                 obj->last_fenced_seqno = 0;
3327         }
3328
3329         return 0;
3330 }
3331
3332 int
3333 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
3334 {
3335         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3336         struct drm_i915_fence_reg *fence;
3337         int ret;
3338
3339         ret = i915_gem_object_wait_fence(obj);
3340         if (ret)
3341                 return ret;
3342
3343         if (obj->fence_reg == I915_FENCE_REG_NONE)
3344                 return 0;
3345
3346         fence = &dev_priv->fence_regs[obj->fence_reg];
3347
3348         if (WARN_ON(fence->pin_count))
3349                 return -EBUSY;
3350
3351         i915_gem_object_fence_lost(obj);
3352         i915_gem_object_update_fence(obj, fence, false);
3353
3354         return 0;
3355 }
3356
3357 static struct drm_i915_fence_reg *
3358 i915_find_fence_reg(struct drm_device *dev)
3359 {
3360         struct drm_i915_private *dev_priv = dev->dev_private;
3361         struct drm_i915_fence_reg *reg, *avail;
3362         int i;
3363
3364         /* First try to find a free reg */
3365         avail = NULL;
3366         for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
3367                 reg = &dev_priv->fence_regs[i];
3368                 if (!reg->obj)
3369                         return reg;
3370
3371                 if (!reg->pin_count)
3372                         avail = reg;
3373         }
3374
3375         if (avail == NULL)
3376                 goto deadlock;
3377
3378         /* None available, try to steal one or wait for a user to finish */
3379         list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
3380                 if (reg->pin_count)
3381                         continue;
3382
3383                 return reg;
3384         }
3385
3386 deadlock:
3387         /* Wait for completion of pending flips which consume fences */
3388         if (intel_has_pending_fb_unpin(dev))
3389                 return ERR_PTR(-EAGAIN);
3390
3391         return ERR_PTR(-EDEADLK);
3392 }
3393
3394 /**
3395  * i915_gem_object_get_fence - set up fencing for an object
3396  * @obj: object to map through a fence reg
3397  *
3398  * When mapping objects through the GTT, userspace wants to be able to write
3399  * to them without having to worry about swizzling if the object is tiled.
3400  * This function walks the fence regs looking for a free one for @obj,
3401  * stealing one if it can't find any.
3402  *
3403  * It then sets up the reg based on the object's properties: address, pitch
3404  * and tiling format.
3405  *
3406  * For an untiled surface, this removes any existing fence.
3407  */
3408 int
3409 i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
3410 {
3411         struct drm_device *dev = obj->base.dev;
3412         struct drm_i915_private *dev_priv = dev->dev_private;
3413         bool enable = obj->tiling_mode != I915_TILING_NONE;
3414         struct drm_i915_fence_reg *reg;
3415         int ret;
3416
3417         /* Have we updated the tiling parameters on the object, and so
3418          * need to serialise the write to the associated fence register?
3419          */
3420         if (obj->fence_dirty) {
3421                 ret = i915_gem_object_wait_fence(obj);
3422                 if (ret)
3423                         return ret;
3424         }
3425
3426         /* Just update our place in the LRU if our fence is getting reused. */
3427         if (obj->fence_reg != I915_FENCE_REG_NONE) {
3428                 reg = &dev_priv->fence_regs[obj->fence_reg];
3429                 if (!obj->fence_dirty) {
3430                         list_move_tail(&reg->lru_list,
3431                                        &dev_priv->mm.fence_list);
3432                         return 0;
3433                 }
3434         } else if (enable) {
3435                 if (WARN_ON(!obj->map_and_fenceable))
3436                         return -EINVAL;
3437
3438                 reg = i915_find_fence_reg(dev);
3439                 if (IS_ERR(reg))
3440                         return PTR_ERR(reg);
3441
3442                 if (reg->obj) {
3443                         struct drm_i915_gem_object *old = reg->obj;
3444
3445                         ret = i915_gem_object_wait_fence(old);
3446                         if (ret)
3447                                 return ret;
3448
3449                         i915_gem_object_fence_lost(old);
3450                 }
3451         } else
3452                 return 0;
3453
3454         i915_gem_object_update_fence(obj, reg, enable);
3455
3456         return 0;
3457 }
3458
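/* Typical caller pattern for the fencing helpers above (sketch; assumes
 * struct_mutex is held and the object is bound map-and-fenceable):
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... detiled CPU access through the GTT aperture ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */
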
3459 static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3460                                      unsigned long cache_level)
3461 {
3462         struct drm_mm_node *gtt_space = &vma->node;
3463         struct drm_mm_node *other;
3464
3465         /*
3466          * On some machines we have to be careful when putting differing types
3467          * of snoopable memory together to avoid the prefetcher crossing memory
3468          * domains and dying. During vm initialisation, we decide whether or not
3469          * these constraints apply and set the drm_mm.color_adjust
3470          * appropriately.
3471          */
3472         if (vma->vm->mm.color_adjust == NULL)
3473                 return true;
3474
3475         if (!drm_mm_node_allocated(gtt_space))
3476                 return true;
3477
3478         if (list_empty(&gtt_space->node_list))
3479                 return true;
3480
3481         other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
3482         if (other->allocated && !other->hole_follows && other->color != cache_level)
3483                 return false;
3484
3485         other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
3486         if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
3487                 return false;
3488
3489         return true;
3490 }
3491
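/* Example of the colouring rule above: with color_adjust set, a snooped
 * (LLC) node placed flush against an uncached neighbour is invalid; the
 * allocator must leave a guard hole between the two so the prefetcher
 * never crosses from one snoop type into the other.
 */
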
3492 /**
3493  * Finds free space in the GTT aperture and binds the object there.
3494  */
3495 static struct i915_vma *
3496 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
3497                            struct i915_address_space *vm,
3498                            unsigned alignment,
3499                            uint64_t flags)
3500 {
3501         struct drm_device *dev = obj->base.dev;
3502         struct drm_i915_private *dev_priv = dev->dev_private;
3503         u32 size, fence_size, fence_alignment, unfenced_alignment;
3504         unsigned long start =
3505                 flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3506         unsigned long end =
3507                 flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
3508         struct i915_vma *vma;
3509         int ret;
3510
3511         fence_size = i915_gem_get_gtt_size(dev,
3512                                            obj->base.size,
3513                                            obj->tiling_mode);
3514         fence_alignment = i915_gem_get_gtt_alignment(dev,
3515                                                      obj->base.size,
3516                                                      obj->tiling_mode, true);
3517         unfenced_alignment =
3518                 i915_gem_get_gtt_alignment(dev,
3519                                            obj->base.size,
3520                                            obj->tiling_mode, false);
3521
3522         if (alignment == 0)
3523                 alignment = flags & PIN_MAPPABLE ? fence_alignment :
3524                                                 unfenced_alignment;
3525         if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
3526                 DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
3527                 return ERR_PTR(-EINVAL);
3528         }
3529
3530         size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
3531
3532         /* If the object is bigger than the entire aperture, reject it early
3533          * before evicting everything in a vain attempt to find space.
3534          */
3535         if (obj->base.size > end) {
3536                 DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%lu\n",
3537                           obj->base.size,
3538                           flags & PIN_MAPPABLE ? "mappable" : "total",
3539                           end);
3540                 return ERR_PTR(-E2BIG);
3541         }
3542
3543         ret = i915_gem_object_get_pages(obj);
3544         if (ret)
3545                 return ERR_PTR(ret);
3546
3547         i915_gem_object_pin_pages(obj);
3548
3549         vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
3550         if (IS_ERR(vma))
3551                 goto err_unpin;
3552
3553 search_free:
3554         ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
3555                                                   size, alignment,
3556                                                   obj->cache_level,
3557                                                   start, end,
3558                                                   DRM_MM_SEARCH_DEFAULT,
3559                                                   DRM_MM_CREATE_DEFAULT);
3560         if (ret) {
3561                 ret = i915_gem_evict_something(dev, vm, size, alignment,
3562                                                obj->cache_level,
3563                                                start, end,
3564                                                flags);
3565                 if (ret == 0)
3566                         goto search_free;
3567
3568                 goto err_free_vma;
3569         }
3570         if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
3571                 ret = -EINVAL;
3572                 goto err_remove_node;
3573         }
3574
3575         ret = i915_gem_gtt_prepare_object(obj);
3576         if (ret)
3577                 goto err_remove_node;
3578
3579         list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
3580         list_add_tail(&vma->mm_list, &vm->inactive_list);
3581
3582         trace_i915_vma_bind(vma, flags);
3583         vma->bind_vma(vma, obj->cache_level,
3584                       flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
3585
3586         return vma;
3587
3588 err_remove_node:
3589         drm_mm_remove_node(&vma->node);
3590 err_free_vma:
3591         i915_gem_vma_destroy(vma);
3592         vma = ERR_PTR(ret);
3593 err_unpin:
3594         i915_gem_object_unpin_pages(obj);
3595         return vma;
3596 }
3597
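/* Sketch of the flag handling above: pinning with
 * PIN_MAPPABLE | PIN_OFFSET_BIAS and the PIN_OFFSET_MASK bits set to
 * 0x100000 restricts the search range to [0x100000, gtt.mappable_end)
 * and, absent an explicit alignment, uses the fenceable size and
 * alignment for the node.
 */
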
3598 bool
3599 i915_gem_clflush_object(struct drm_i915_gem_object *obj,
3600                         bool force)
3601 {
3602         /* If we don't have a page list set up, then we're not pinned
3603          * to the GPU, and we can ignore the cache flush because it'll happen
3604          * again at bind time.
3605          */
3606         if (obj->pages == NULL)
3607                 return false;
3608
3609         /*
3610          * Stolen memory is always coherent with the GPU: it is either
3611          * explicitly marked write-combined by the system, or the system is cache-coherent.
3612          */
3613         if (obj->stolen || obj->phys_handle)
3614                 return false;
3615
3616         /* If the GPU is snooping the contents of the CPU cache,
3617          * we do not need to manually clear the CPU cache lines.  However,
3618          * the caches are only snooped when the render cache is
3619          * flushed/invalidated.  As we always have to emit invalidations
3620          * and flushes when moving into and out of the RENDER domain, correct
3621          * snooping behaviour occurs naturally as the result of our domain
3622          * tracking.
3623          */
3624         if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
3625                 return false;
3626
3627         trace_i915_gem_object_clflush(obj);
3628         drm_clflush_sg(obj->pages);
3629
3630         return true;
3631 }
3632
3633 /** Flushes the GTT write domain for the object if it's dirty. */
3634 static void
3635 i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3636 {
3637         uint32_t old_write_domain;
3638
3639         if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3640                 return;
3641
3642         /* No actual flushing is required for the GTT write domain.  Writes
3643          * to it immediately go to main memory as far as we know, so there's
3644          * no chipset flush.  It also doesn't land in render cache.
3645          *
3646          * However, we do have to enforce the order so that all writes through
3647          * the GTT land before any writes to the device, such as updates to
3648          * the GATT itself.
3649          */
3650         wmb();
3651
3652         old_write_domain = obj->base.write_domain;
3653         obj->base.write_domain = 0;
3654
3655         intel_fb_obj_flush(obj, false);
3656
3657         trace_i915_gem_object_change_domain(obj,
3658                                             obj->base.read_domains,
3659                                             old_write_domain);
3660 }
3661
3662 /** Flushes the CPU write domain for the object if it's dirty. */
3663 static void
3664 i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj,
3665                                        bool force)
3666 {
3667         uint32_t old_write_domain;
3668
3669         if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3670                 return;
3671
3672         if (i915_gem_clflush_object(obj, force))
3673                 i915_gem_chipset_flush(obj->base.dev);
3674
3675         old_write_domain = obj->base.write_domain;
3676         obj->base.write_domain = 0;
3677
3678         intel_fb_obj_flush(obj, false);
3679
3680         trace_i915_gem_object_change_domain(obj,
3681                                             obj->base.read_domains,
3682                                             old_write_domain);
3683 }
3684
3685 /**
3686  * Moves a single object to the GTT read, and possibly write domain.
3687  *
3688  * This function returns when the move is complete, including waiting on
3689  * flushes to occur.
3690  */
3691 int
3692 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3693 {
3694         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
3695         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
3696         uint32_t old_write_domain, old_read_domains;
3697         int ret;
3698
3699         /* Not valid to be called on unbound objects. */
3700         if (vma == NULL)
3701                 return -EINVAL;
3702
3703         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3704                 return 0;
3705
3706         ret = i915_gem_object_wait_rendering(obj, !write);
3707         if (ret)
3708                 return ret;
3709
3710         i915_gem_object_retire(obj);
3711         i915_gem_object_flush_cpu_write_domain(obj, false);
3712
3713         /* Serialise direct access to this object with the barriers for
3714          * coherent writes from the GPU, by effectively invalidating the
3715          * GTT domain upon first access.
3716          */
3717         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3718                 mb();
3719
3720         old_write_domain = obj->base.write_domain;
3721         old_read_domains = obj->base.read_domains;
3722
3723         /* It should now be out of any other write domains, and we can update
3724          * the domain values for our changes.
3725          */
3726         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3727         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3728         if (write) {
3729                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3730                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3731                 obj->dirty = 1;
3732         }
3733
3734         if (write)
3735                 intel_fb_obj_invalidate(obj, NULL);
3736
3737         trace_i915_gem_object_change_domain(obj,
3738                                             old_read_domains,
3739                                             old_write_domain);
3740
3741         /* And bump the LRU for this access */
3742         if (i915_gem_object_is_inactive(obj))
3743                 list_move_tail(&vma->mm_list,
3744                                &dev_priv->gtt.base.inactive_list);
3745
3746         return 0;
3747 }
3748
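/* Userspace reaches i915_gem_object_set_to_gtt_domain() through the
 * set_domain ioctl; an illustrative libdrm call (not part of this file):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_GTT,
 *		.write_domain = I915_GEM_DOMAIN_GTT,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */
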
3749 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3750                                     enum i915_cache_level cache_level)
3751 {
3752         struct drm_device *dev = obj->base.dev;
3753         struct i915_vma *vma, *next;
3754         int ret;
3755
3756         if (obj->cache_level == cache_level)
3757                 return 0;
3758
3759         if (i915_gem_obj_is_pinned(obj)) {
3760                 DRM_DEBUG("cannot change the cache level of pinned objects\n");
3761                 return -EBUSY;
3762         }
3763
3764         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
3765                 if (!i915_gem_valid_gtt_space(vma, cache_level)) {
3766                         ret = i915_vma_unbind(vma);
3767                         if (ret)
3768                                 return ret;
3769                 }
3770         }
3771
3772         if (i915_gem_obj_bound_any(obj)) {
3773                 ret = i915_gem_object_finish_gpu(obj);
3774                 if (ret)
3775                         return ret;
3776
3777                 i915_gem_object_finish_gtt(obj);
3778
3779                 /* Before SandyBridge, you could not use tiling or fence
3780                  * registers with snooped memory, so relinquish any fences
3781                  * currently pointing to our region in the aperture.
3782                  */
3783                 if (INTEL_INFO(dev)->gen < 6) {
3784                         ret = i915_gem_object_put_fence(obj);
3785                         if (ret)
3786                                 return ret;
3787                 }
3788
3789                 list_for_each_entry(vma, &obj->vma_list, vma_link)
3790                         if (drm_mm_node_allocated(&vma->node))
3791                                 vma->bind_vma(vma, cache_level,
3792                                                 vma->bound & GLOBAL_BIND);
3793         }
3794
3795         list_for_each_entry(vma, &obj->vma_list, vma_link)
3796                 vma->node.color = cache_level;
3797         obj->cache_level = cache_level;
3798
3799         if (cpu_write_needs_clflush(obj)) {
3800                 u32 old_read_domains, old_write_domain;
3801
3802                 /* If we're coming from LLC cached, then we haven't
3803                  * actually been tracking whether the data is in the
3804                  * CPU cache or not, since we only allow one bit set
3805                  * in obj->write_domain and have been skipping the clflushes.
3806                  * Just set it to the CPU cache for now.
3807                  */
3808                 i915_gem_object_retire(obj);
3809                 WARN_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
3810
3811                 old_read_domains = obj->base.read_domains;
3812                 old_write_domain = obj->base.write_domain;
3813
3814                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3815                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3816
3817                 trace_i915_gem_object_change_domain(obj,
3818                                                     old_read_domains,
3819                                                     old_write_domain);
3820         }
3821
3822         return 0;
3823 }
3824
3825 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3826                                struct drm_file *file)
3827 {
3828         struct drm_i915_gem_caching *args = data;
3829         struct drm_i915_gem_object *obj;
3830         int ret;
3831
3832         ret = i915_mutex_lock_interruptible(dev);
3833         if (ret)
3834                 return ret;
3835
3836         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3837         if (&obj->base == NULL) {
3838                 ret = -ENOENT;
3839                 goto unlock;
3840         }
3841
3842         switch (obj->cache_level) {
3843         case I915_CACHE_LLC:
3844         case I915_CACHE_L3_LLC:
3845                 args->caching = I915_CACHING_CACHED;
3846                 break;
3847
3848         case I915_CACHE_WT:
3849                 args->caching = I915_CACHING_DISPLAY;
3850                 break;
3851
3852         default:
3853                 args->caching = I915_CACHING_NONE;
3854                 break;
3855         }
3856
3857         drm_gem_object_unreference(&obj->base);
3858 unlock:
3859         mutex_unlock(&dev->struct_mutex);
3860         return ret;
3861 }
3862
3863 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3864                                struct drm_file *file)
3865 {
3866         struct drm_i915_gem_caching *args = data;
3867         struct drm_i915_gem_object *obj;
3868         enum i915_cache_level level;
3869         int ret;
3870
3871         switch (args->caching) {
3872         case I915_CACHING_NONE:
3873                 level = I915_CACHE_NONE;
3874                 break;
3875         case I915_CACHING_CACHED:
3876                 level = I915_CACHE_LLC;
3877                 break;
3878         case I915_CACHING_DISPLAY:
3879                 level = HAS_WT(dev) ? I915_CACHE_WT : I915_CACHE_NONE;
3880                 break;
3881         default:
3882                 return -EINVAL;
3883         }
3884
3885         ret = i915_mutex_lock_interruptible(dev);
3886         if (ret)
3887                 return ret;
3888
3889         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
3890         if (&obj->base == NULL) {
3891                 ret = -ENOENT;
3892                 goto unlock;
3893         }
3894
3895         ret = i915_gem_object_set_cache_level(obj, level);
3896
3897         drm_gem_object_unreference(&obj->base);
3898 unlock:
3899         mutex_unlock(&dev->struct_mutex);
3900         return ret;
3901 }
3902
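/* Illustrative userspace use of the caching ioctls above (sketch; fd and
 * handle are assumed valid):
 *
 *	struct drm_i915_gem_caching caching = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &caching);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &caching);
 *	// caching.caching now reports the level actually in effect
 */
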
3903 static bool is_pin_display(struct drm_i915_gem_object *obj)
3904 {
3905         struct i915_vma *vma;
3906
3907         vma = i915_gem_obj_to_ggtt(obj);
3908         if (!vma)
3909                 return false;
3910
3911         /* There are 3 sources that pin objects:
3912          *   1. The display engine (scanouts, sprites, cursors);
3913          *   2. Reservations for execbuffer;
3914          *   3. The user.
3915          *
3916          * We can ignore reservations as we hold the struct_mutex and
3917          * are only called outside of the reservation path.  The user
3918          * can only increment pin_count once, and so if after
3919          * subtracting the potential reference by the user, any pin_count
3920          * remains, it must be due to another use by the display engine.
3921          */
3922         return vma->pin_count - !!obj->user_pin_count;
3923 }
3924
3925 /*
3926  * Prepare buffer for display plane (scanout, cursors, etc.).
3927  * Can be called from an uninterruptible phase (modesetting) and allows
3928  * any flushes to be pipelined (for pageflips).
3929  */
3930 int
3931 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3932                                      u32 alignment,
3933                                      struct intel_engine_cs *pipelined)
3934 {
3935         u32 old_read_domains, old_write_domain;
3936         bool was_pin_display;
3937         int ret;
3938
3939         if (pipelined != obj->ring) {
3940                 ret = i915_gem_object_sync(obj, pipelined);
3941                 if (ret)
3942                         return ret;
3943         }
3944
3945         /* Mark the pin_display early so that we account for the
3946          * display coherency whilst setting up the cache domains.
3947          */
3948         was_pin_display = obj->pin_display;
3949         obj->pin_display = true;
3950
3951         /* The display engine is not coherent with the LLC cache on gen6.  As
3952          * a result, we make sure that the pinning that is about to occur is
3953          * done with uncached PTEs. This is lowest common denominator for all
3954          * chipsets.
3955          *
3956          * However for gen6+, we could do better by using the GFDT bit instead
3957          * of uncaching, which would allow us to flush all the LLC-cached data
3958          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
3959          */
3960         ret = i915_gem_object_set_cache_level(obj,
3961                                               HAS_WT(obj->base.dev) ? I915_CACHE_WT : I915_CACHE_NONE);
3962         if (ret)
3963                 goto err_unpin_display;
3964
3965         /* As the user may map the buffer once pinned in the display plane
3966          * (e.g. libkms for the bootup splash), we have to ensure that we
3967          * always use map_and_fenceable for all scanout buffers.
3968          */
3969         ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
3970         if (ret)
3971                 goto err_unpin_display;
3972
3973         i915_gem_object_flush_cpu_write_domain(obj, true);
3974
3975         old_write_domain = obj->base.write_domain;
3976         old_read_domains = obj->base.read_domains;
3977
3978         /* It should now be out of any other write domains, and we can update
3979          * the domain values for our changes.
3980          */
3981         obj->base.write_domain = 0;
3982         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3983
3984         trace_i915_gem_object_change_domain(obj,
3985                                             old_read_domains,
3986                                             old_write_domain);
3987
3988         return 0;
3989
3990 err_unpin_display:
3991         WARN_ON(was_pin_display != is_pin_display(obj));
3992         obj->pin_display = was_pin_display;
3993         return ret;
3994 }
3995
3996 void
3997 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
3998 {
3999         i915_gem_object_ggtt_unpin(obj);
4000         obj->pin_display = is_pin_display(obj);
4001 }
4002
4003 int
4004 i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj)
4005 {
4006         int ret;
4007
4008         if ((obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0)
4009                 return 0;
4010
4011         ret = i915_gem_object_wait_rendering(obj, false);
4012         if (ret)
4013                 return ret;
4014
4015         /* Ensure that we invalidate the GPU's caches and TLBs. */
4016         obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
4017         return 0;
4018 }
4019
4020 /**
4021  * Moves a single object to the CPU read, and possibly write domain.
4022  *
4023  * This function returns when the move is complete, including waiting on
4024  * flushes to occur.
4025  */
4026 int
4027 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4028 {
4029         uint32_t old_write_domain, old_read_domains;
4030         int ret;
4031
4032         if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
4033                 return 0;
4034
4035         ret = i915_gem_object_wait_rendering(obj, !write);
4036         if (ret)
4037                 return ret;
4038
4039         i915_gem_object_retire(obj);
4040         i915_gem_object_flush_gtt_write_domain(obj);
4041
4042         old_write_domain = obj->base.write_domain;
4043         old_read_domains = obj->base.read_domains;
4044
4045         /* Flush the CPU cache if it's still invalid. */
4046         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4047                 i915_gem_clflush_object(obj, false);
4048
4049                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4050         }
4051
4052         /* It should now be out of any other write domains, and we can update
4053          * the domain values for our changes.
4054          */
4055         BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
4056
4057         /* If we're writing through the CPU, then the GPU read domains will
4058          * need to be invalidated at next use.
4059          */
4060         if (write) {
4061                 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4062                 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4063         }
4064
4065         if (write)
4066                 intel_fb_obj_invalidate(obj, NULL);
4067
4068         trace_i915_gem_object_change_domain(obj,
4069                                             old_read_domains,
4070                                             old_write_domain);
4071
4072         return 0;
4073 }
4074
4075 /* Throttle our rendering by waiting until the ring has completed our requests
4076  * emitted over 20 msec ago.
4077  *
4078  * Note that if we were to use the current jiffies each time around the loop,
4079  * we wouldn't escape the function with any frames outstanding if the time to
4080  * render a frame was over 20ms.
4081  *
4082  * This should get us reasonable parallelism between CPU and GPU but also
4083  * relatively low latency when blocking on a particular request to finish.
4084  */
4085 static int
4086 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4087 {
4088         struct drm_i915_private *dev_priv = dev->dev_private;
4089         struct drm_i915_file_private *file_priv = file->driver_priv;
4090         unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
4091         struct drm_i915_gem_request *request;
4092         struct intel_engine_cs *ring = NULL;
4093         unsigned reset_counter;
4094         u32 seqno = 0;
4095         int ret;
4096
4097         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
4098         if (ret)
4099                 return ret;
4100
4101         ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
4102         if (ret)
4103                 return ret;
4104
4105         spin_lock(&file_priv->mm.lock);
4106         list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
4107                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4108                         break;
4109
4110                 ring = request->ring;
4111                 seqno = request->seqno;
4112         }
4113         reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
4114         spin_unlock(&file_priv->mm.lock);
4115
4116         if (seqno == 0)
4117                 return 0;
4118
4119         ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
4120         if (ret == 0)
4121                 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4122
4123         return ret;
4124 }
4125
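/* Worked example of the 20ms window above: with requests emitted 30ms,
 * 25ms and 10ms ago, the loop records the request from 25ms ago (the
 * newest one outside the window) and waits on it, leaving the 10ms-old
 * request outstanding for CPU/GPU parallelism.
 */
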
4126 static bool
4127 i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
4128 {
4129         struct drm_i915_gem_object *obj = vma->obj;
4130
4131         if (alignment &&
4132             vma->node.start & (alignment - 1))
4133                 return true;
4134
4135         if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
4136                 return true;
4137
4138         if (flags & PIN_OFFSET_BIAS &&
4139             vma->node.start < (flags & PIN_OFFSET_MASK))
4140                 return true;
4141
4142         return false;
4143 }
4144
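/* Example of the misplacement checks above: a vma whose node.start is
 * 0x3000 is misplaced for a requested alignment of 0x4000, as is any vma
 * below the PIN_OFFSET_BIAS offset, or any vma that is not
 * map-and-fenceable when PIN_MAPPABLE is requested.
 */
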
4145 int
4146 i915_gem_object_pin(struct drm_i915_gem_object *obj,
4147                     struct i915_address_space *vm,
4148                     uint32_t alignment,
4149                     uint64_t flags)
4150 {
4151         struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4152         struct i915_vma *vma;
4153         unsigned bound;
4154         int ret;
4155
4156         if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
4157                 return -ENODEV;
4158
4159         if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
4160                 return -EINVAL;
4161
4162         if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
4163                 return -EINVAL;
4164
4165         vma = i915_gem_obj_to_vma(obj, vm);
4166         if (vma) {
4167                 if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
4168                         return -EBUSY;
4169
4170                 if (i915_vma_misplaced(vma, alignment, flags)) {
4171                         WARN(vma->pin_count,
4172                              "bo is already pinned with incorrect alignment:"
4173                              " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
4174                              " obj->map_and_fenceable=%d\n",
4175                              i915_gem_obj_offset(obj, vm), alignment,
4176                              !!(flags & PIN_MAPPABLE),
4177                              obj->map_and_fenceable);
4178                         ret = i915_vma_unbind(vma);
4179                         if (ret)
4180                                 return ret;
4181
4182                         vma = NULL;
4183                 }
4184         }
4185
4186         bound = vma ? vma->bound : 0;
4187         if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
4188                 vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
4189                 if (IS_ERR(vma))
4190                         return PTR_ERR(vma);
4191         }
4192
4193         if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
4194                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
4195
4196         if ((bound ^ vma->bound) & GLOBAL_BIND) {
4197                 bool mappable, fenceable;
4198                 u32 fence_size, fence_alignment;
4199
4200                 fence_size = i915_gem_get_gtt_size(obj->base.dev,
4201                                                    obj->base.size,
4202                                                    obj->tiling_mode);
4203                 fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
4204                                                              obj->base.size,
4205                                                              obj->tiling_mode,
4206                                                              true);
4207
4208                 fenceable = (vma->node.size == fence_size &&
4209                              (vma->node.start & (fence_alignment - 1)) == 0);
4210
4211                 mappable = (vma->node.start + obj->base.size <=
4212                             dev_priv->gtt.mappable_end);
4213
4214                 obj->map_and_fenceable = mappable && fenceable;
4215         }
4216
4217         WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
4218
4219         vma->pin_count++;
4220         if (flags & PIN_MAPPABLE)
4221                 obj->pin_mappable = true;
4222
4223         return 0;
4224 }
4225
4226 void
4227 i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
4228 {
4229         struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
4230
4231         BUG_ON(!vma);
4232         BUG_ON(vma->pin_count == 0);
4233         BUG_ON(!i915_gem_obj_ggtt_bound(obj));
4234
4235         if (--vma->pin_count == 0)
4236                 obj->pin_mappable = false;
4237 }
4238
4239 bool
4240 i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
4241 {
4242         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4243                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4244                 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
4245
4246                 WARN_ON(!ggtt_vma ||
4247                         dev_priv->fence_regs[obj->fence_reg].pin_count >
4248                         ggtt_vma->pin_count);
4249                 dev_priv->fence_regs[obj->fence_reg].pin_count++;
4250                 return true;
4251         } else
4252                 return false;
4253 }
4254
4255 void
4256 i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
4257 {
4258         if (obj->fence_reg != I915_FENCE_REG_NONE) {
4259                 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
4260                 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
4261                 dev_priv->fence_regs[obj->fence_reg].pin_count--;
4262         }
4263 }
4264
4265 int
4266 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
4267                    struct drm_file *file)
4268 {
4269         struct drm_i915_gem_pin *args = data;
4270         struct drm_i915_gem_object *obj;
4271         int ret;
4272
4273         if (INTEL_INFO(dev)->gen >= 6)
4274                 return -ENODEV;
4275
4276         ret = i915_mutex_lock_interruptible(dev);
4277         if (ret)
4278                 return ret;
4279
4280         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4281         if (&obj->base == NULL) {
4282                 ret = -ENOENT;
4283                 goto unlock;
4284         }
4285
4286         if (obj->madv != I915_MADV_WILLNEED) {
4287                 DRM_DEBUG("Attempting to pin a purgeable buffer\n");
4288                 ret = -EFAULT;
4289                 goto out;
4290         }
4291
4292         if (obj->pin_filp != NULL && obj->pin_filp != file) {
4293                 DRM_DEBUG("Already pinned in i915_gem_pin_ioctl(): %d\n",
4294                           args->handle);
4295                 ret = -EINVAL;
4296                 goto out;
4297         }
4298
4299         if (obj->user_pin_count == ULONG_MAX) {
4300                 ret = -EBUSY;
4301                 goto out;
4302         }
4303
4304         if (obj->user_pin_count == 0) {
4305                 ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
4306                 if (ret)
4307                         goto out;
4308         }
4309
4310         obj->user_pin_count++;
4311         obj->pin_filp = file;
4312
4313         args->offset = i915_gem_obj_ggtt_offset(obj);
4314 out:
4315         drm_gem_object_unreference(&obj->base);
4316 unlock:
4317         mutex_unlock(&dev->struct_mutex);
4318         return ret;
4319 }
4320
4321 int
4322 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
4323                      struct drm_file *file)
4324 {
4325         struct drm_i915_gem_pin *args = data;
4326         struct drm_i915_gem_object *obj;
4327         int ret;
4328
4329         ret = i915_mutex_lock_interruptible(dev);
4330         if (ret)
4331                 return ret;
4332
4333         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4334         if (&obj->base == NULL) {
4335                 ret = -ENOENT;
4336                 goto unlock;
4337         }
4338
4339         if (obj->pin_filp != file) {
4340                 DRM_DEBUG("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
4341                           args->handle);
4342                 ret = -EINVAL;
4343                 goto out;
4344         }
4345         obj->user_pin_count--;
4346         if (obj->user_pin_count == 0) {
4347                 obj->pin_filp = NULL;
4348                 i915_gem_object_ggtt_unpin(obj);
4349         }
4350
4351 out:
4352         drm_gem_object_unreference(&obj->base);
4353 unlock:
4354         mutex_unlock(&dev->struct_mutex);
4355         return ret;
4356 }
4357
4358 int
4359 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4360                     struct drm_file *file)
4361 {
4362         struct drm_i915_gem_busy *args = data;
4363         struct drm_i915_gem_object *obj;
4364         int ret;
4365
4366         ret = i915_mutex_lock_interruptible(dev);
4367         if (ret)
4368                 return ret;
4369
4370         obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
4371         if (&obj->base == NULL) {
4372                 ret = -ENOENT;
4373                 goto unlock;
4374         }
4375
4376         /* Count all active objects as busy, even if they are not currently
4377          * in use by the GPU. Users of this interface expect objects to
4378          * eventually become non-busy without any further action, so emit
4379          * any necessary flushes here.
4380          */
4381         ret = i915_gem_object_flush_active(obj);
4382
4383         args->busy = obj->active;
4384         if (obj->ring) {
4385                 BUILD_BUG_ON(I915_NUM_RINGS > 16);
4386                 args->busy |= intel_ring_flag(obj->ring) << 16;
4387         }
4388
4389         drm_gem_object_unreference(&obj->base);
4390 unlock:
4391         mutex_unlock(&dev->struct_mutex);
4392         return ret;
4393 }
4394
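/* Sketch of decoding args->busy in userspace: bit 0 reports whether the
 * bo is still active, and bits 16 and up carry intel_ring_flag() of the
 * last ring to use it, e.g.
 *
 *	if (busy & 1)
 *		ring_id = ffs(busy >> 16) - 1;
 */
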
4395 int
4396 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4397                         struct drm_file *file_priv)
4398 {
4399         return i915_gem_ring_throttle(dev, file_priv);
4400 }
4401
4402 int
4403 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4404                        struct drm_file *file_priv)
4405 {
4406         struct drm_i915_gem_madvise *args = data;
4407         struct drm_i915_gem_object *obj;
4408         int ret;
4409
4410         switch (args->madv) {
4411         case I915_MADV_DONTNEED:
4412         case I915_MADV_WILLNEED:
4413             break;
4414         default:
4415             return -EINVAL;
4416         }
4417
4418         ret = i915_mutex_lock_interruptible(dev);
4419         if (ret)
4420                 return ret;
4421
4422         obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4423         if (&obj->base == NULL) {
4424                 ret = -ENOENT;
4425                 goto unlock;
4426         }
4427
4428         if (i915_gem_obj_is_pinned(obj)) {
4429                 ret = -EINVAL;
4430                 goto out;
4431         }
4432
4433         if (obj->madv != __I915_MADV_PURGED)
4434                 obj->madv = args->madv;
4435
4436         /* if the object is no longer attached, discard its backing storage */
4437         if (i915_gem_object_is_purgeable(obj) && obj->pages == NULL)
4438                 i915_gem_object_truncate(obj);
4439
4440         args->retained = obj->madv != __I915_MADV_PURGED;
4441
4442 out:
4443         drm_gem_object_unreference(&obj->base);
4444 unlock:
4445         mutex_unlock(&dev->struct_mutex);
4446         return ret;
4447 }
4448
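/* Illustrative madvise usage from userspace (sketch; fd and handle are
 * assumed valid):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// madv.retained == 0 means the backing store was already purged
 */
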
4449 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4450                           const struct drm_i915_gem_object_ops *ops)
4451 {
4452         INIT_LIST_HEAD(&obj->global_list);
4453         INIT_LIST_HEAD(&obj->ring_list);
4454         INIT_LIST_HEAD(&obj->obj_exec_link);
4455         INIT_LIST_HEAD(&obj->vma_list);
4456
4457         obj->ops = ops;
4458
4459         obj->fence_reg = I915_FENCE_REG_NONE;
4460         obj->madv = I915_MADV_WILLNEED;
4461
4462         i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
4463 }
4464
4465 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4466         .get_pages = i915_gem_object_get_pages_gtt,
4467         .put_pages = i915_gem_object_put_pages_gtt,
4468 };
4469
4470 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4471                                                   size_t size)
4472 {
4473         struct drm_i915_gem_object *obj;
4474         struct address_space *mapping;
4475         gfp_t mask;
4476
4477         obj = i915_gem_object_alloc(dev);
4478         if (obj == NULL)
4479                 return NULL;
4480
4481         if (drm_gem_object_init(dev, &obj->base, size) != 0) {
4482                 i915_gem_object_free(obj);
4483                 return NULL;
4484         }
4485
4486         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4487         if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
4488                 /* 965gm cannot relocate objects above 4GiB. */
4489                 mask &= ~__GFP_HIGHMEM;
4490                 mask |= __GFP_DMA32;
4491         }
4492
4493         mapping = file_inode(obj->base.filp)->i_mapping;
4494         mapping_set_gfp_mask(mapping, mask);
4495
4496         i915_gem_object_init(obj, &i915_gem_object_ops);
4497
4498         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4499         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4500
4501         if (HAS_LLC(dev)) {
4502                 /* On some devices, we can have the GPU use the LLC (the CPU
4503                  * cache) for about a 10% performance improvement
4504                  * compared to uncached.  Graphics requests other than
4505                  * display scanout are coherent with the CPU in
4506                  * accessing this cache.  This means in this mode we
4507                  * don't need to clflush on the CPU side, and on the
4508                  * GPU side we only need to flush internal caches to
4509                  * get data visible to the CPU.
4510                  *
4511                  * However, we maintain the display planes as UC, and so
4512                  * need to rebind when first used as such.
4513                  */
4514                 obj->cache_level = I915_CACHE_LLC;
4515         } else
4516                 obj->cache_level = I915_CACHE_NONE;
4517
4518         trace_i915_gem_object_create(obj);
4519
4520         return obj;
4521 }
4522
4523 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4524 {
4525         /* If we are the last user of the backing storage (be it shmemfs
4526          * pages or stolen etc), we know that the pages are going to be
4527          * immediately released. In this case, we can then skip copying
4528          * back the contents from the GPU.
4529          */
4530
4531         if (obj->madv != I915_MADV_WILLNEED)
4532                 return false;
4533
4534         if (obj->base.filp == NULL)
4535                 return true;
4536
4537         /* At first glance, this looks racy, but then again so would be
4538          * userspace racing mmap against close. However, the first external
4539          * reference to the filp can only be obtained through the
4540          * i915_gem_mmap_ioctl() which safeguards us against the user
4541          * acquiring such a reference whilst we are in the middle of
4542          * freeing the object.
4543          */
4544         return atomic_long_read(&obj->base.filp->f_count) == 1;
4545 }
4546
4547 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4548 {
4549         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4550         struct drm_device *dev = obj->base.dev;
4551         struct drm_i915_private *dev_priv = dev->dev_private;
4552         struct i915_vma *vma, *next;
4553
4554         intel_runtime_pm_get(dev_priv);
4555
4556         trace_i915_gem_object_destroy(obj);
4557
4558         list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
4559                 int ret;
4560
4561                 vma->pin_count = 0;
4562                 ret = i915_vma_unbind(vma);
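                /* The unbind may have been interrupted by a signal; since
                 * object destruction cannot be backed out of, retry the
                 * unbind non-interruptibly.
                 */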
4563                 if (WARN_ON(ret == -ERESTARTSYS)) {
4564                         bool was_interruptible;
4565
4566                         was_interruptible = dev_priv->mm.interruptible;
4567                         dev_priv->mm.interruptible = false;
4568
4569                         WARN_ON(i915_vma_unbind(vma));
4570
4571                         dev_priv->mm.interruptible = was_interruptible;
4572                 }
4573         }
4574
4575         /* Stolen objects don't hold a ref, but do hold a pages pin count.
4576          * Fix that up before progressing. */
4577         if (obj->stolen)
4578                 i915_gem_object_unpin_pages(obj);
4579
4580         WARN_ON(obj->frontbuffer_bits);
4581
4582         if (WARN_ON(obj->pages_pin_count))
4583                 obj->pages_pin_count = 0;
4584         if (discard_backing_storage(obj))
4585                 obj->madv = I915_MADV_DONTNEED;
4586         i915_gem_object_put_pages(obj);
4587         i915_gem_object_free_mmap_offset(obj);
4588
4589         BUG_ON(obj->pages);
4590
4591         if (obj->base.import_attach)
4592                 drm_prime_gem_destroy(&obj->base, NULL);
4593
4594         if (obj->ops->release)
4595                 obj->ops->release(obj);
4596
4597         drm_gem_object_release(&obj->base);
4598         i915_gem_info_remove_obj(dev_priv, obj->base.size);
4599
4600         kfree(obj->bit_17);
4601         i915_gem_object_free(obj);
4602
4603         intel_runtime_pm_put(dev_priv);
4604 }
4605
4606 struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4607                                      struct i915_address_space *vm)
4608 {
4609         struct i915_vma *vma;
4610         list_for_each_entry(vma, &obj->vma_list, vma_link)
4611                 if (vma->vm == vm)
4612                         return vma;
4613
4614         return NULL;
4615 }
4616
4617 void i915_gem_vma_destroy(struct i915_vma *vma)
4618 {
4619         struct i915_address_space *vm = NULL;
4620         WARN_ON(vma->node.allocated);
4621
4622         /* Keep the vma as a placeholder in the execbuffer reservation lists */
4623         if (!list_empty(&vma->exec_list))
4624                 return;
4625
4626         vm = vma->vm;
4627
4628         if (!i915_is_ggtt(vm))
4629                 i915_ppgtt_put(i915_vm_to_ppgtt(vm));
4630
4631         list_del(&vma->vma_link);
4632
4633         kfree(vma);
4634 }
4635
4636 static void
4637 i915_gem_stop_ringbuffers(struct drm_device *dev)
4638 {
4639         struct drm_i915_private *dev_priv = dev->dev_private;
4640         struct intel_engine_cs *ring;
4641         int i;
4642
4643         for_each_ring(ring, dev_priv, i)
4644                 dev_priv->gt.stop_ring(ring);
4645 }
4646
4647 int
4648 i915_gem_suspend(struct drm_device *dev)
4649 {
4650         struct drm_i915_private *dev_priv = dev->dev_private;
4651         int ret = 0;
4652
4653         mutex_lock(&dev->struct_mutex);
4654         if (dev_priv->ums.mm_suspended)
4655                 goto err;
4656
4657         ret = i915_gpu_idle(dev);
4658         if (ret)
4659                 goto err;
4660
4661         i915_gem_retire_requests(dev);
4662
4663         /* Under UMS, be paranoid and evict. */
4664         if (!drm_core_check_feature(dev, DRIVER_MODESET))
4665                 i915_gem_evict_everything(dev);
4666
4667         i915_gem_stop_ringbuffers(dev);
4668
4669         /* Hack!  Don't let anybody do execbuf while we don't control the chip.
4670          * We need to replace this with a semaphore, or something.
4671          * And not confound ums.mm_suspended!
4672          */
4673         dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
4674                                                              DRIVER_MODESET);
4675         mutex_unlock(&dev->struct_mutex);
4676
4677         del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
4678         cancel_delayed_work_sync(&dev_priv->mm.retire_work);
4679         flush_delayed_work(&dev_priv->mm.idle_work);
4680
4681         return 0;
4682
4683 err:
4684         mutex_unlock(&dev->struct_mutex);
4685         return ret;
4686 }
4687
4688 int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice)
4689 {
4690         struct drm_device *dev = ring->dev;
4691         struct drm_i915_private *dev_priv = dev->dev_private;
4692         u32 reg_base = GEN7_L3LOG_BASE + (slice * 0x200);
4693         u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4694         int i, ret;
4695
4696         if (!HAS_L3_DPF(dev) || !remap_info)
4697                 return 0;
4698
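        /*
         * Each register is restored with its own MI_LOAD_REGISTER_IMM,
         * which costs 3 dwords (opcode, register offset, value); with
         * GEN7_L3LOG_SIZE / 4 registers per slice, that is the amount of
         * ring space reserved below.
         */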
4699         ret = intel_ring_begin(ring, GEN7_L3LOG_SIZE / 4 * 3);
4700         if (ret)
4701                 return ret;
4702
4703         /*
4704          * Note: We do not worry about the concurrent register cacheline hang
4705          * here because no other code should access these registers other than
4706          * at initialization time.
4707          */
4708         for (i = 0; i < GEN7_L3LOG_SIZE; i += 4) {
4709                 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
4710                 intel_ring_emit(ring, reg_base + i);
4711                 intel_ring_emit(ring, remap_info[i/4]);
4712         }
4713
4714         intel_ring_advance(ring);
4715
4716         return ret;
4717 }
4718
4719 void i915_gem_init_swizzling(struct drm_device *dev)
4720 {
4721         struct drm_i915_private *dev_priv = dev->dev_private;
4722
4723         if (INTEL_INFO(dev)->gen < 5 ||
4724             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4725                 return;
4726
4727         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4728                                  DISP_TILE_SURFACE_SWIZZLING);
4729
4730         if (IS_GEN5(dev))
4731                 return;
4732
4733         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4734         if (IS_GEN6(dev))
4735                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4736         else if (IS_GEN7(dev))
4737                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4738         else if (IS_GEN8(dev))
4739                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4740         else
4741                 BUG();
4742 }
4743
4744 static bool
4745 intel_enable_blt(struct drm_device *dev)
4746 {
4747         if (!HAS_BLT(dev))
4748                 return false;
4749
4750         /* The blitter was dysfunctional on early prototypes */
4751         if (IS_GEN6(dev) && dev->pdev->revision < 8) {
4752                 DRM_INFO("BLT not supported on this pre-production hardware;"
4753                          " graphics performance will be degraded.\n");
4754                 return false;
4755         }
4756
4757         return true;
4758 }
4759
4760 static void init_unused_ring(struct drm_device *dev, u32 base)
4761 {
4762         struct drm_i915_private *dev_priv = dev->dev_private;
4763
4764         I915_WRITE(RING_CTL(base), 0);
4765         I915_WRITE(RING_HEAD(base), 0);
4766         I915_WRITE(RING_TAIL(base), 0);
4767         I915_WRITE(RING_START(base), 0);
4768 }
4769
4770 static void init_unused_rings(struct drm_device *dev)
4771 {
4772         if (IS_I830(dev)) {
4773                 init_unused_ring(dev, PRB1_BASE);
4774                 init_unused_ring(dev, SRB0_BASE);
4775                 init_unused_ring(dev, SRB1_BASE);
4776                 init_unused_ring(dev, SRB2_BASE);
4777                 init_unused_ring(dev, SRB3_BASE);
4778         } else if (IS_GEN2(dev)) {
4779                 init_unused_ring(dev, SRB0_BASE);
4780                 init_unused_ring(dev, SRB1_BASE);
4781         } else if (IS_GEN3(dev)) {
4782                 init_unused_ring(dev, PRB1_BASE);
4783                 init_unused_ring(dev, PRB2_BASE);
4784         }
4785 }
4786
4787 int i915_gem_init_rings(struct drm_device *dev)
4788 {
4789         struct drm_i915_private *dev_priv = dev->dev_private;
4790         int ret;
4791
4792         /*
4793          * At least 830 can leave some of the unused rings
4794          * "active" (ie. head != tail) after resume which
4795          * will prevent c3 entry. Makes sure all unused rings
4796          * are totally idle.
4797          */
4798         init_unused_rings(dev);
4799
4800         ret = intel_init_render_ring_buffer(dev);
4801         if (ret)
4802                 return ret;
4803
4804         if (HAS_BSD(dev)) {
4805                 ret = intel_init_bsd_ring_buffer(dev);
4806                 if (ret)
4807                         goto cleanup_render_ring;
4808         }
4809
4810         if (intel_enable_blt(dev)) {
4811                 ret = intel_init_blt_ring_buffer(dev);
4812                 if (ret)
4813                         goto cleanup_bsd_ring;
4814         }
4815
4816         if (HAS_VEBOX(dev)) {
4817                 ret = intel_init_vebox_ring_buffer(dev);
4818                 if (ret)
4819                         goto cleanup_blt_ring;
4820         }
4821
4822         if (HAS_BSD2(dev)) {
4823                 ret = intel_init_bsd2_ring_buffer(dev);
4824                 if (ret)
4825                         goto cleanup_vebox_ring;
4826         }
4827
4828         ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
4829         if (ret)
4830                 goto cleanup_bsd2_ring;
4831
4832         return 0;
4833
4834 cleanup_bsd2_ring:
4835         intel_cleanup_ring_buffer(&dev_priv->ring[VCS2]);
4836 cleanup_vebox_ring:
4837         intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
4838 cleanup_blt_ring:
4839         intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
4840 cleanup_bsd_ring:
4841         intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4842 cleanup_render_ring:
4843         intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4844
4845         return ret;
4846 }
4847
4848 int
4849 i915_gem_init_hw(struct drm_device *dev)
4850 {
4851         struct drm_i915_private *dev_priv = dev->dev_private;
4852         int ret, i;
4853
4854         if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
4855                 return -EIO;
4856
4857         if (dev_priv->ellc_size)
4858                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4859
4860         if (IS_HASWELL(dev))
4861                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev) ?
4862                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4863
4864         if (HAS_PCH_NOP(dev)) {
4865                 if (IS_IVYBRIDGE(dev)) {
4866                         u32 temp = I915_READ(GEN7_MSG_CTL);
4867                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
4868                         I915_WRITE(GEN7_MSG_CTL, temp);
4869                 } else if (INTEL_INFO(dev)->gen >= 7) {
4870                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
4871                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
4872                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
4873                 }
4874         }
4875
4876         i915_gem_init_swizzling(dev);
4877
4878         ret = dev_priv->gt.init_rings(dev);
4879         if (ret)
4880                 return ret;
4881
4882         for (i = 0; i < NUM_L3_SLICES(dev); i++)
4883                 i915_gem_l3_remap(&dev_priv->ring[RCS], i);
4884
4885         /*
4886          * XXX: Contexts should only be initialized once. Doing a switch to the
4887          * default context, however, is something we'd like to do after
4888          * reset or thaw (the latter may not actually be necessary for HW, but
4889          * goes with our code better). Context switching requires rings (for
4890          * do_switch), but must happen before enabling PPGTT. So don't move this.
4891          */
4892         ret = i915_gem_context_enable(dev_priv);
4893         if (ret && ret != -EIO) {
4894                 DRM_ERROR("Context enable failed %d\n", ret);
4895                 i915_gem_cleanup_ringbuffer(dev);
4896
4897                 return ret;
4898         }
4899
4900         ret = i915_ppgtt_init_hw(dev);
4901         if (ret && ret != -EIO) {
4902                 DRM_ERROR("PPGTT enable failed %d\n", ret);
4903                 i915_gem_cleanup_ringbuffer(dev);
4904         }
4905
4906         return ret;
4907 }
4908
4909 int i915_gem_init(struct drm_device *dev)
4910 {
4911         struct drm_i915_private *dev_priv = dev->dev_private;
4912         int ret;
4913
4914         i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4915                         i915.enable_execlists);
4916
4917         mutex_lock(&dev->struct_mutex);
4918
4919         if (IS_VALLEYVIEW(dev)) {
4920                 /* VLV A0 (potential hack): the BIOS isn't actually waking us */
4921                 I915_WRITE(VLV_GTLC_WAKE_CTRL, VLV_GTLC_ALLOWWAKEREQ);
4922                 if (wait_for((I915_READ(VLV_GTLC_PW_STATUS) &
4923                               VLV_GTLC_ALLOWWAKEACK), 10))
4924                         DRM_DEBUG_DRIVER("allow wake ack timed out\n");
4925         }
4926
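        /* Select the submission backend up front: either the legacy
         * ringbuffer path or the execlists path fills in the gt vtable,
         * and everything downstream dispatches through these hooks.
         */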
4927         if (!i915.enable_execlists) {
4928                 dev_priv->gt.do_execbuf = i915_gem_ringbuffer_submission;
4929                 dev_priv->gt.init_rings = i915_gem_init_rings;
4930                 dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
4931                 dev_priv->gt.stop_ring = intel_stop_ring_buffer;
4932         } else {
4933                 dev_priv->gt.do_execbuf = intel_execlists_submission;
4934                 dev_priv->gt.init_rings = intel_logical_rings_init;
4935                 dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
4936                 dev_priv->gt.stop_ring = intel_logical_ring_stop;
4937         }
4938
4939         ret = i915_gem_init_userptr(dev);
4940         if (ret) {
4941                 mutex_unlock(&dev->struct_mutex);
4942                 return ret;
4943         }
4944
4945         i915_gem_init_global_gtt(dev);
4946
4947         ret = i915_gem_context_init(dev);
4948         if (ret) {
4949                 mutex_unlock(&dev->struct_mutex);
4950                 return ret;
4951         }
4952
4953         ret = i915_gem_init_hw(dev);
4954         if (ret == -EIO) {
4955                 /* Allow ring initialisation to fail by marking the GPU as
4956                  * wedged. But we only want to do this where the GPU is angry,
4957                  * for all other failure, such as an allocation failure, bail.
4958                  */
4959                 DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4960                 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
4961                 ret = 0;
4962         }
4963         mutex_unlock(&dev->struct_mutex);
4964
4965         return ret;
4966 }
4967
4968 void
4969 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4970 {
4971         struct drm_i915_private *dev_priv = dev->dev_private;
4972         struct intel_engine_cs *ring;
4973         int i;
4974
4975         for_each_ring(ring, dev_priv, i)
4976                 dev_priv->gt.cleanup_ring(ring);
4977 }
4978
4979 void
4980 i915_gem_lastclose(struct drm_device *dev)
4981 {
4982         int ret;
4983
4984         if (drm_core_check_feature(dev, DRIVER_MODESET))
4985                 return;
4986
4987         ret = i915_gem_suspend(dev);
4988         if (ret)
4989                 DRM_ERROR("failed to idle hardware: %d\n", ret);
4990 }
4991
4992 static void
4993 init_ring_lists(struct intel_engine_cs *ring)
4994 {
4995         INIT_LIST_HEAD(&ring->active_list);
4996         INIT_LIST_HEAD(&ring->request_list);
4997 }
4998
4999 void i915_init_vm(struct drm_i915_private *dev_priv,
5000                   struct i915_address_space *vm)
5001 {
5002         if (!i915_is_ggtt(vm))
5003                 drm_mm_init(&vm->mm, vm->start, vm->total);
5004         vm->dev = dev_priv->dev;
5005         INIT_LIST_HEAD(&vm->active_list);
5006         INIT_LIST_HEAD(&vm->inactive_list);
5007         INIT_LIST_HEAD(&vm->global_link);
5008         list_add_tail(&vm->global_link, &dev_priv->vm_list);
5009 }
5010
5011 void
5012 i915_gem_load(struct drm_device *dev)
5013 {
5014         struct drm_i915_private *dev_priv = dev->dev_private;
5015         int i;
5016
5017         dev_priv->slab =
5018                 kmem_cache_create("i915_gem_object",
5019                                   sizeof(struct drm_i915_gem_object), 0,
5020                                   SLAB_HWCACHE_ALIGN,
5021                                   NULL);
5022
5023         INIT_LIST_HEAD(&dev_priv->vm_list);
5024         i915_init_vm(dev_priv, &dev_priv->gtt.base);
5025
5026         INIT_LIST_HEAD(&dev_priv->context_list);
5027         INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
5028         INIT_LIST_HEAD(&dev_priv->mm.bound_list);
5029         INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5030         for (i = 0; i < I915_NUM_RINGS; i++)
5031                 init_ring_lists(&dev_priv->ring[i]);
5032         for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5033                 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5034         INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
5035                           i915_gem_retire_work_handler);
5036         INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
5037                           i915_gem_idle_work_handler);
5038         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5039
5040         /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
5041         if (!drm_core_check_feature(dev, DRIVER_MODESET) && IS_GEN3(dev)) {
5042                 I915_WRITE(MI_ARB_STATE,
5043                            _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
5044         }
5045
5046         dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5047
5048         /* Old X drivers will take 0-2 for front, back, depth buffers */
5049         if (!drm_core_check_feature(dev, DRIVER_MODESET))
5050                 dev_priv->fence_reg_start = 3;
5051
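        /* The number of hardware fence registers is platform dependent:
         * 32 on gen7+ (except Valleyview), 16 on gen4+ and the
         * i945/G33 class, and 8 on everything older.
         */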
5052         if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev))
5053                 dev_priv->num_fence_regs = 32;
5054         else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
5055                 dev_priv->num_fence_regs = 16;
5056         else
5057                 dev_priv->num_fence_regs = 8;
5058
5059         /* Initialize fence registers to zero */
5061         i915_gem_restore_fences(dev);
5062
5063         i915_gem_detect_bit_6_swizzle(dev);
5064         init_waitqueue_head(&dev_priv->pending_flip_queue);
5065
5066         dev_priv->mm.interruptible = true;
5067
5068         dev_priv->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
5069         dev_priv->mm.shrinker.count_objects = i915_gem_shrinker_count;
5070         dev_priv->mm.shrinker.seeks = DEFAULT_SEEKS;
5071         register_shrinker(&dev_priv->mm.shrinker);
5072
5073         dev_priv->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
5074         register_oom_notifier(&dev_priv->mm.oom_notifier);
5075
5076         mutex_init(&dev_priv->fb_tracking.lock);
5077 }
5078
5079 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5080 {
5081         struct drm_i915_file_private *file_priv = file->driver_priv;
5082
5083         cancel_delayed_work_sync(&file_priv->mm.idle_work);
5084
5085         /* Clean up our request list when the client is going away, so that
5086          * later retire_requests won't dereference our soon-to-be-gone
5087          * file_priv.
5088          */
5089         spin_lock(&file_priv->mm.lock);
5090         while (!list_empty(&file_priv->mm.request_list)) {
5091                 struct drm_i915_gem_request *request;
5092
5093                 request = list_first_entry(&file_priv->mm.request_list,
5094                                            struct drm_i915_gem_request,
5095                                            client_list);
5096                 list_del(&request->client_list);
5097                 request->file_priv = NULL;
5098         }
5099         spin_unlock(&file_priv->mm.lock);
5100 }
5101
5102 static void
5103 i915_gem_file_idle_work_handler(struct work_struct *work)
5104 {
5105         struct drm_i915_file_private *file_priv =
5106                 container_of(work, typeof(*file_priv), mm.idle_work.work);
5107
5108         atomic_set(&file_priv->rps_wait_boost, false);
5109 }
5110
5111 int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5112 {
5113         struct drm_i915_file_private *file_priv;
5114         int ret;
5115
5116         DRM_DEBUG_DRIVER("\n");
5117
5118         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5119         if (!file_priv)
5120                 return -ENOMEM;
5121
5122         file->driver_priv = file_priv;
5123         file_priv->dev_priv = dev->dev_private;
5124         file_priv->file = file;
5125
5126         spin_lock_init(&file_priv->mm.lock);
5127         INIT_LIST_HEAD(&file_priv->mm.request_list);
5128         INIT_DELAYED_WORK(&file_priv->mm.idle_work,
5129                           i915_gem_file_idle_work_handler);
5130
5131         ret = i915_gem_context_open(dev, file);
5132         if (ret)
5133                 kfree(file_priv);
5134
5135         return ret;
5136 }
5137
5138 /**
5139  * i915_gem_track_fb - update frontbuffer tracking
5140  * @old: current GEM buffer for the frontbuffer slots
5141  * @new: new GEM buffer for the frontbuffer slots
5142  * @frontbuffer_bits: bitmask of frontbuffer slots
5143  *
5144  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5145  * from @old and setting them in @new. Both @old and @new can be NULL.
5146  */
5147 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5148                        struct drm_i915_gem_object *new,
5149                        unsigned frontbuffer_bits)
5150 {
5151         if (old) {
5152                 WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
5153                 WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
5154                 old->frontbuffer_bits &= ~frontbuffer_bits;
5155         }
5156
5157         if (new) {
5158                 WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
5159                 WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
5160                 new->frontbuffer_bits |= frontbuffer_bits;
5161         }
5162 }
5163
5164 static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
5165 {
5166         if (!mutex_is_locked(mutex))
5167                 return false;
5168
5169 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
5170         return mutex->owner == task;
5171 #else
5172         /* Since UP may be pre-empted, we cannot assume that we own the lock */
5173         return false;
5174 #endif
5175 }
5176
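/*
 * The shrinker may run from direct reclaim while this driver is itself
 * allocating and already holds struct_mutex. Rather than deadlock, steal
 * the lock when the current task is the owner, and report via *unlock
 * whether the caller should drop the mutex when done.
 */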
5177 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
5178 {
5179         if (!mutex_trylock(&dev->struct_mutex)) {
5180                 if (!mutex_is_locked_by(&dev->struct_mutex, current))
5181                         return false;
5182
5183                 if (to_i915(dev)->mm.shrinker_no_lock_stealing)
5184                         return false;
5185
5186                 *unlock = false;
5187         } else
5188                 *unlock = true;
5189
5190         return true;
5191 }
5192
5193 static int num_vma_bound(struct drm_i915_gem_object *obj)
5194 {
5195         struct i915_vma *vma;
5196         int count = 0;
5197
5198         list_for_each_entry(vma, &obj->vma_list, vma_link)
5199                 if (drm_mm_node_allocated(&vma->node))
5200                         count++;
5201
5202         return count;
5203 }
5204
5205 static unsigned long
5206 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
5207 {
5208         struct drm_i915_private *dev_priv =
5209                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5210         struct drm_device *dev = dev_priv->dev;
5211         struct drm_i915_gem_object *obj;
5212         unsigned long count;
5213         bool unlock;
5214
5215         if (!i915_gem_shrinker_lock(dev, &unlock))
5216                 return 0;
5217
5218         count = 0;
5219         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
5220                 if (obj->pages_pin_count == 0)
5221                         count += obj->base.size >> PAGE_SHIFT;
5222
5223         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5224                 if (!i915_gem_obj_is_pinned(obj) &&
5225                     obj->pages_pin_count == num_vma_bound(obj))
5226                         count += obj->base.size >> PAGE_SHIFT;
5227         }
5228
5229         if (unlock)
5230                 mutex_unlock(&dev->struct_mutex);
5231
5232         return count;
5233 }
5234
5235 /* Helpers for querying an object's bindings in each address space (VM) */
5236 unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
5237                                   struct i915_address_space *vm)
5238 {
5239         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5240         struct i915_vma *vma;
5241
5242         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5243
5244         list_for_each_entry(vma, &o->vma_list, vma_link) {
5245                 if (vma->vm == vm)
5246                         return vma->node.start;
5247         }
5248
5249         WARN(1, "%s vma for this object not found.\n",
5250              i915_is_ggtt(vm) ? "global" : "ppgtt");
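        /* Note: -1 is returned via an unsigned long, i.e. as ULONG_MAX,
         * which no valid (page-aligned) GTT offset can equal. */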
5251         return -1;
5252 }
5253
5254 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5255                         struct i915_address_space *vm)
5256 {
5257         struct i915_vma *vma;
5258
5259         list_for_each_entry(vma, &o->vma_list, vma_link)
5260                 if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
5261                         return true;
5262
5263         return false;
5264 }
5265
5266 bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5267 {
5268         struct i915_vma *vma;
5269
5270         list_for_each_entry(vma, &o->vma_list, vma_link)
5271                 if (drm_mm_node_allocated(&vma->node))
5272                         return true;
5273
5274         return false;
5275 }
5276
5277 unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
5278                                 struct i915_address_space *vm)
5279 {
5280         struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5281         struct i915_vma *vma;
5282
5283         WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
5284
5285         BUG_ON(list_empty(&o->vma_list));
5286
5287         list_for_each_entry(vma, &o->vma_list, vma_link)
5288                 if (vma->vm == vm)
5289                         return vma->node.size;
5290
5291         return 0;
5292 }
5293
5294 static unsigned long
5295 i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
5296 {
5297         struct drm_i915_private *dev_priv =
5298                 container_of(shrinker, struct drm_i915_private, mm.shrinker);
5299         struct drm_device *dev = dev_priv->dev;
5300         unsigned long freed;
5301         bool unlock;
5302
5303         if (!i915_gem_shrinker_lock(dev, &unlock))
5304                 return SHRINK_STOP;
5305
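        /* Prefer dropping purgeable (madvised DONTNEED) objects first;
         * only if that does not satisfy the request are ordinary bound
         * and unbound objects evicted as well.
         */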
5306         freed = i915_gem_shrink(dev_priv,
5307                                 sc->nr_to_scan,
5308                                 I915_SHRINK_BOUND |
5309                                 I915_SHRINK_UNBOUND |
5310                                 I915_SHRINK_PURGEABLE);
5311         if (freed < sc->nr_to_scan)
5312                 freed += i915_gem_shrink(dev_priv,
5313                                          sc->nr_to_scan - freed,
5314                                          I915_SHRINK_BOUND |
5315                                          I915_SHRINK_UNBOUND);
5316         if (unlock)
5317                 mutex_unlock(&dev->struct_mutex);
5318
5319         return freed;
5320 }
5321
5322 static int
5323 i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
5324 {
5325         struct drm_i915_private *dev_priv =
5326                 container_of(nb, struct drm_i915_private, mm.oom_notifier);
5327         struct drm_device *dev = dev_priv->dev;
5328         struct drm_i915_gem_object *obj;
5329         unsigned long timeout = msecs_to_jiffies(5000) + 1;
5330         unsigned long pinned, bound, unbound, freed_pages;
5331         bool was_interruptible;
5332         bool unlock;
5333
5334         while (!i915_gem_shrinker_lock(dev, &unlock) && --timeout) {
5335                 schedule_timeout_killable(1);
5336                 if (fatal_signal_pending(current))
5337                         return NOTIFY_DONE;
5338         }
5339         if (timeout == 0) {
5340                 pr_err("Unable to purge GPU memory due lock contention.\n");
5341                 return NOTIFY_DONE;
5342         }
5343
5344         was_interruptible = dev_priv->mm.interruptible;
5345         dev_priv->mm.interruptible = false;
5346
5347         freed_pages = i915_gem_shrink_all(dev_priv);
5348
5349         dev_priv->mm.interruptible = was_interruptible;
5350
5351         /* Because we may be allocating inside our own driver, we cannot
5352          * assert that there are no objects with pinned pages that are not
5353          * being pointed to by hardware.
5354          */
5355         unbound = bound = pinned = 0;
5356         list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5357                 if (!obj->base.filp) /* not backed by a freeable object */
5358                         continue;
5359
5360                 if (obj->pages_pin_count)
5361                         pinned += obj->base.size;
5362                 else
5363                         unbound += obj->base.size;
5364         }
5365         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5366                 if (!obj->base.filp)
5367                         continue;
5368
5369                 if (obj->pages_pin_count)
5370                         pinned += obj->base.size;
5371                 else
5372                         bound += obj->base.size;
5373         }
5374
5375         if (unlock)
5376                 mutex_unlock(&dev->struct_mutex);
5377
5378         if (freed_pages || unbound || bound)
5379                 pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
5380                         freed_pages << PAGE_SHIFT, pinned);
5381         if (unbound || bound)
5382                 pr_err("%lu and %lu bytes still available in the "
5383                        "bound and unbound GPU page lists.\n",
5384                        bound, unbound);
5385
5386         *(unsigned long *)ptr += freed_pages;
5387         return NOTIFY_DONE;
5388 }
5389
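/*
 * This relies on the invariant that an object's global GTT vma, when it
 * exists, is kept at the head of the vma_list; a NULL return therefore
 * means the object currently has no GGTT binding.
 */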
5390 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj)
5391 {
5392         struct i915_vma *vma;
5393
5394         vma = list_first_entry(&obj->vma_list, typeof(*vma), vma_link);
5395         if (vma->vm != i915_obj_to_ggtt(obj))
5396                 return NULL;
5397
5398         return vma;
5399 }