/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/mmu_context.h>
#include <linux/mmu_notifier.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>

struct i915_mm_struct {
        struct mm_struct *mm;
        struct drm_device *dev;
        struct i915_mmu_notifier *mn;
        struct hlist_node node;
        struct kref kref;
        struct work_struct work;
};

#if defined(CONFIG_MMU_NOTIFIER)
#include <linux/interval_tree.h>

struct i915_mmu_notifier {
        spinlock_t lock;
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
        struct list_head linear;
        unsigned long serial;
        bool has_linear;
};

struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
        struct interval_tree_node it;
        struct list_head link;
        struct drm_i915_gem_object *obj;
        bool is_linear;
};

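/*
 * Flush the object from the GPU: cancel any pending get_user_pages worker,
 * unbind all VMA and release the backing pages, all under struct_mutex.
 * Returns the end of the userptr range so that an invalidation walk can
 * resume beyond this object.
 */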
static unsigned long cancel_userptr(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        unsigned long end;

        mutex_lock(&dev->struct_mutex);
        /* Cancel any active worker and force us to re-evaluate gup */
        obj->userptr.work = NULL;

        if (obj->pages != NULL) {
                struct drm_i915_private *dev_priv = to_i915(dev);
                struct i915_vma *vma, *tmp;
                bool was_interruptible;

                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;

                list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
                WARN_ON(i915_gem_object_put_pages(obj));

                dev_priv->mm.interruptible = was_interruptible;
        }

        end = obj->userptr.ptr + obj->base.size;

        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        return end;
}

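/*
 * Slow path used once an overlapping ("linear") object exists: walk the
 * plain list of all tracked objects and cancel every one that intersects
 * [start, end]. Called with mn->lock held; the lock is dropped around
 * cancel_userptr() and the walk restarts if the serial changed meanwhile.
 */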
static void *invalidate_range__linear(struct i915_mmu_notifier *mn,
                                      struct mm_struct *mm,
                                      unsigned long start,
                                      unsigned long end)
{
        struct i915_mmu_object *mo;
        unsigned long serial;

restart:
        serial = mn->serial;
        list_for_each_entry(mo, &mn->linear, link) {
                struct drm_i915_gem_object *obj;

                if (mo->it.last < start || mo->it.start > end)
                        continue;

                obj = mo->obj;
                drm_gem_object_reference(&obj->base);
                spin_unlock(&mn->lock);

                cancel_userptr(obj);

                spin_lock(&mn->lock);
                if (serial != mn->serial)
                        goto restart;
        }

        return NULL;
}

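/*
 * mmu_notifier callback: the CPU page tables for [start, end) are about to
 * change, so drop our page references for every userptr object overlapping
 * that range before the pages disappear beneath us.
 */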
static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
                                                       struct mm_struct *mm,
                                                       unsigned long start,
                                                       unsigned long end)
{
        struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn);
        struct interval_tree_node *it = NULL;
        unsigned long next = start;
        unsigned long serial = 0;

        end--; /* interval ranges are inclusive, but invalidate range is exclusive */
        while (next < end) {
                struct drm_i915_gem_object *obj = NULL;

                spin_lock(&mn->lock);
                if (mn->has_linear)
                        it = invalidate_range__linear(mn, mm, start, end);
                else if (serial == mn->serial)
                        it = interval_tree_iter_next(it, next, end);
                else
                        it = interval_tree_iter_first(&mn->objects, start, end);
                if (it != NULL) {
                        obj = container_of(it, struct i915_mmu_object, it)->obj;
                        drm_gem_object_reference(&obj->base);
                        serial = mn->serial;
                }
                spin_unlock(&mn->lock);
                if (obj == NULL)
                        return;

                next = cancel_userptr(obj);
        }
}

static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
        .invalidate_range_start = i915_gem_userptr_mn_invalidate_range_start,
};

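/*
 * Allocate and register a notifier for this mm. The caller must hold
 * mmap_sem for writing so that registration is serialised.
 */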
static struct i915_mmu_notifier *
i915_mmu_notifier_create(struct mm_struct *mm)
{
        struct i915_mmu_notifier *mn;
        int ret;

        mn = kmalloc(sizeof(*mn), GFP_KERNEL);
        if (mn == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
        mn->serial = 1;
        INIT_LIST_HEAD(&mn->linear);
        mn->has_linear = false;

        /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        if (ret) {
                kfree(mn);
                return ERR_PTR(ret);
        }

        return mn;
}

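/*
 * The serial is sampled by the invalidation paths to detect concurrent
 * changes to the tracked objects; it is bumped on every add/del and never
 * takes the value 0, which callers use to mean "not yet sampled".
 */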
static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn)
{
        if (++mn->serial == 0)
                mn->serial = 1;
}

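/*
 * Track a new userptr object in the notifier. Non-overlapping objects go
 * into the interval tree for fast lookup; an overlap with an idle object
 * switches the notifier onto the linear (slow) path, while an overlap with
 * an object that still has gup workers in flight returns -EAGAIN.
 */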
static int
i915_mmu_notifier_add(struct drm_device *dev,
                      struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        struct interval_tree_node *it;
        int ret;

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;

        /* Make sure we drop the final active reference (and thereby
         * remove the objects from the interval tree) before we do
         * the check for overlapping objects.
         */
        i915_gem_retire_requests(dev);

        spin_lock(&mn->lock);
        it = interval_tree_iter_first(&mn->objects,
                                      mo->it.start, mo->it.last);
        if (it) {
                struct drm_i915_gem_object *obj;

                /* We only need to check the first object in the range as it
                 * either has cancelled gup work queued and we need to
                 * return back to the user to give time for the gup-workers
                 * to flush their object references upon which the object will
                 * be removed from the interval-tree, or the range is
                 * still in use by another client and the overlap is invalid.
                 *
                 * If we do have an overlap, we cannot use the interval tree
                 * for fast range invalidation.
                 */

                obj = container_of(it, struct i915_mmu_object, it)->obj;
                if (!obj->userptr.workers)
                        mn->has_linear = mo->is_linear = true;
                else
                        ret = -EAGAIN;
        } else
                interval_tree_insert(&mo->it, &mn->objects);

        if (ret == 0) {
                list_add(&mo->link, &mn->linear);
                __i915_mmu_notifier_update_serial(mn);
        }
        spin_unlock(&mn->lock);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
{
        struct i915_mmu_object *mo;

        list_for_each_entry(mo, &mn->linear, link)
                if (mo->is_linear)
                        return true;

        return false;
}

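/* Drop a userptr object from the notifier, undoing i915_mmu_notifier_add(). */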
static void
i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
                      struct i915_mmu_object *mo)
{
        spin_lock(&mn->lock);
        list_del(&mo->link);
        if (mo->is_linear)
                mn->has_linear = i915_mmu_notifier_has_linear(mn);
        else
                interval_tree_remove(&mo->it, &mn->objects);
        __i915_mmu_notifier_update_serial(mn);
        spin_unlock(&mn->lock);
}

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
        struct i915_mmu_object *mo;

        mo = obj->userptr.mmu_object;
        if (mo == NULL)
                return;

        i915_mmu_notifier_del(mo->mn, mo);
        kfree(mo);

        obj->userptr.mmu_object = NULL;
}

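/*
 * Lazily create the mmu_notifier for this mm the first time a userptr
 * object wants synchronisation. Registration is done under mmap_sem
 * (write) and the device mm_lock, and the result is cached in mm->mn.
 */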
static struct i915_mmu_notifier *
i915_mmu_notifier_find(struct i915_mm_struct *mm)
{
        if (mm->mn == NULL) {
                down_write(&mm->mm->mmap_sem);
                mutex_lock(&to_i915(mm->dev)->mm_lock);
                if (mm->mn == NULL)
                        mm->mn = i915_mmu_notifier_create(mm->mm);
                mutex_unlock(&to_i915(mm->dev)->mm_lock);
                up_write(&mm->mm->mmap_sem);
        }

        return mm->mn;
}

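/*
 * Hook the object up to the mmu_notifier machinery, unless userspace asked
 * for (and is privileged enough to use) unsynchronized mode.
 */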
static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
        int ret;

        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;

        if (WARN_ON(obj->userptr.mm == NULL))
                return -EINVAL;

        mn = i915_mmu_notifier_find(obj->userptr.mm);
        if (IS_ERR(mn))
                return PTR_ERR(mn);

        mo = kzalloc(sizeof(*mo), GFP_KERNEL);
        if (mo == NULL)
                return -ENOMEM;

        mo->mn = mn;
        mo->it.start = obj->userptr.ptr;
        mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;

        ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
        if (ret) {
                kfree(mo);
                return ret;
        }

        obj->userptr.mmu_object = mo;
        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
        if (mn == NULL)
                return;

        mmu_notifier_unregister(&mn->mn, mm);
        kfree(mn);
}

#else

static void
i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
{
}

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj,
                                    unsigned flags)
{
        if ((flags & I915_USERPTR_UNSYNCHRONIZED) == 0)
                return -ENODEV;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        return 0;
}

static void
i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
                       struct mm_struct *mm)
{
}

#endif

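/* Look up an existing i915_mm_struct wrapper for the given mm_struct, if any. */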
static struct i915_mm_struct *
__i915_mm_struct_find(struct drm_i915_private *dev_priv, struct mm_struct *real)
{
        struct i915_mm_struct *mm;

        /* Protected by dev_priv->mm_lock */
        hash_for_each_possible(dev_priv->mm_structs, mm, node, (unsigned long)real)
                if (mm->mm == real)
                        return mm;

        return NULL;
}

static int
i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
{
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_mm_struct *mm;
        int ret = 0;

        /* During release of the GEM object we hold the struct_mutex. This
         * precludes us from calling mmput() at that time as that may be
         * the last reference and so call exit_mmap(). exit_mmap() will
         * attempt to reap the vma, and if we were holding a GTT mmap
         * would then call drm_gem_vm_close() and attempt to reacquire
         * the struct mutex. So in order to avoid that recursion, we have
         * to defer releasing the mm reference until after we drop the
         * struct_mutex, i.e. we need to schedule a worker to do the clean
         * up.
         */
        mutex_lock(&dev_priv->mm_lock);
        mm = __i915_mm_struct_find(dev_priv, current->mm);
        if (mm == NULL) {
                mm = kmalloc(sizeof(*mm), GFP_KERNEL);
                if (mm == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                kref_init(&mm->kref);
                mm->dev = obj->base.dev;

                mm->mm = current->mm;
                atomic_inc(&current->mm->mm_count);

                mm->mn = NULL;

                /* Protected by dev_priv->mm_lock */
                hash_add(dev_priv->mm_structs,
                         &mm->node, (unsigned long)mm->mm);
        } else
                kref_get(&mm->kref);

        obj->userptr.mm = mm;
out:
        mutex_unlock(&dev_priv->mm_lock);
        return ret;
}

static void
__i915_mm_struct_free__worker(struct work_struct *work)
{
        struct i915_mm_struct *mm = container_of(work, typeof(*mm), work);
        i915_mmu_notifier_free(mm->mn, mm->mm);
        mmdrop(mm->mm);
        kfree(mm);
}

static void
__i915_mm_struct_free(struct kref *kref)
{
        struct i915_mm_struct *mm = container_of(kref, typeof(*mm), kref);

        /* Protected by dev_priv->mm_lock */
        hash_del(&mm->node);
        mutex_unlock(&to_i915(mm->dev)->mm_lock);

        INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
        schedule_work(&mm->work);
}

static void
i915_gem_userptr_release__mm_struct(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mm == NULL)
                return;

        kref_put_mutex(&obj->userptr.mm->kref,
                       __i915_mm_struct_free,
                       &to_i915(obj->base.dev)->mm_lock);
        obj->userptr.mm = NULL;
}

struct get_pages_work {
        struct work_struct work;
        struct drm_i915_gem_object *obj;
        struct task_struct *task;
};

#if IS_ENABLED(CONFIG_SWIOTLB)
#define swiotlb_active() swiotlb_nr_tbl()
#else
#define swiotlb_active() 0
#endif

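/*
 * Build the sg_table for the pinned pages. With swiotlb enabled we use one
 * scatterlist entry per page; otherwise we let sg_alloc_table_from_pages()
 * coalesce contiguous pages into longer entries.
 */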
static int
st_set_pages(struct sg_table **st, struct page **pvec, int num_pages)
{
        struct scatterlist *sg;
        int ret, n;

        *st = kmalloc(sizeof(**st), GFP_KERNEL);
        if (*st == NULL)
                return -ENOMEM;

        if (swiotlb_active()) {
                ret = sg_alloc_table(*st, num_pages, GFP_KERNEL);
                if (ret)
                        goto err;

                for_each_sg((*st)->sgl, sg, num_pages, n)
                        sg_set_page(sg, pvec[n], PAGE_SIZE, 0);
        } else {
                ret = sg_alloc_table_from_pages(*st, pvec, num_pages,
                                                0, num_pages << PAGE_SHIFT,
                                                GFP_KERNEL);
                if (ret)
                        goto err;
        }

        return 0;

err:
        kfree(*st);
        *st = NULL;
        return ret;
}

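/*
 * Worker that pins the user pages on behalf of i915_gem_userptr_get_pages()
 * when the fast path cannot be used. It takes mmap_sem and calls
 * get_user_pages() outside of struct_mutex, then publishes the result (or
 * error) through obj->userptr.work.
 */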
static void
__i915_gem_userptr_get_pages_worker(struct work_struct *_work)
{
        struct get_pages_work *work = container_of(_work, typeof(*work), work);
        struct drm_i915_gem_object *obj = work->obj;
        struct drm_device *dev = obj->base.dev;
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        ret = -ENOMEM;
        pinned = 0;

        pvec = kmalloc(num_pages*sizeof(struct page *),
                       GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
        if (pvec == NULL)
                pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
        if (pvec != NULL) {
                struct mm_struct *mm = obj->userptr.mm->mm;

                down_read(&mm->mmap_sem);
                while (pinned < num_pages) {
                        ret = get_user_pages(work->task, mm,
                                             obj->userptr.ptr + pinned * PAGE_SIZE,
                                             num_pages - pinned,
                                             !obj->userptr.read_only, 0,
                                             pvec + pinned, NULL);
                        if (ret < 0)
                                break;

                        pinned += ret;
                }
                up_read(&mm->mmap_sem);
        }

        mutex_lock(&dev->struct_mutex);
        if (obj->userptr.work != &work->work) {
                ret = 0;
        } else if (pinned == num_pages) {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list);
                        pinned = 0;
                }
        }

        obj->userptr.work = ERR_PTR(ret);
        obj->userptr.workers--;
        drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);

        put_task_struct(work->task);
        kfree(work);
}

static int
i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{
        const int num_pages = obj->base.size >> PAGE_SHIFT;
        struct page **pvec;
        int pinned, ret;

        /* If userspace should engineer that these pages are replaced in
         * the vma between us binding this page into the GTT and completion
         * of rendering... Their loss. If they change the mapping of their
         * pages they need to create a new bo to point to the new vma.
         *
         * However, that still leaves open the possibility of the vma
         * being copied upon fork. Which falls under the same userspace
         * synchronisation issue as a regular bo, except that this time
         * the process may not be expecting that a particular piece of
         * memory is tied to the GPU.
         *
         * Fortunately, we can hook into the mmu_notifier in order to
         * discard the page references prior to anything nasty happening
         * to the vma (discard or cloning) which should prevent the more
         * egregious cases from causing harm.
         */

        pvec = NULL;
        pinned = 0;
        if (obj->userptr.mm->mm == current->mm) {
                pvec = kmalloc(num_pages*sizeof(struct page *),
                               GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
                if (pvec == NULL) {
                        pvec = drm_malloc_ab(num_pages, sizeof(struct page *));
                        if (pvec == NULL)
                                return -ENOMEM;
                }

                pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
                                               !obj->userptr.read_only, pvec);
        }
        if (pinned < num_pages) {
                if (pinned < 0) {
                        ret = pinned;
                        pinned = 0;
                } else {
                        /* Spawn a worker so that we can acquire the
                         * user pages without holding our mutex. Access
                         * to the user pages requires mmap_sem, and we have
                         * a strict lock ordering of mmap_sem, struct_mutex -
                         * we already hold struct_mutex here and so cannot
                         * call gup without encountering a lock inversion.
                         *
                         * Userspace will keep on repeating the operation
                         * (thanks to EAGAIN) until either we hit the fast
                         * path or the worker completes. If the worker is
                         * cancelled or superseded, the task is still run
                         * but the results ignored. (This leads to
                         * complications that we may have a stray object
                         * refcount that we need to be wary of when
                         * checking for existing objects during creation.)
                         * If the worker encounters an error, it reports
                         * that error back to this function through
                         * obj->userptr.work = ERR_PTR.
                         */
                        ret = -EAGAIN;
                        if (obj->userptr.work == NULL &&
                            obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) {
                                struct get_pages_work *work;

                                work = kmalloc(sizeof(*work), GFP_KERNEL);
                                if (work != NULL) {
                                        obj->userptr.work = &work->work;
                                        obj->userptr.workers++;

                                        work->obj = obj;
                                        drm_gem_object_reference(&obj->base);

                                        work->task = current;
                                        get_task_struct(work->task);

                                        INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
                                        schedule_work(&work->work);
                                } else
                                        ret = -ENOMEM;
                        } else {
                                if (IS_ERR(obj->userptr.work)) {
                                        ret = PTR_ERR(obj->userptr.work);
                                        obj->userptr.work = NULL;
                                }
                        }
                }
        } else {
                ret = st_set_pages(&obj->pages, pvec, num_pages);
                if (ret == 0) {
                        obj->userptr.work = NULL;
                        pinned = 0;
                }
        }

        release_pages(pvec, pinned, 0);
        drm_free_large(pvec);
        return ret;
}

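/*
 * Release the pages previously pinned for this object, marking them dirty
 * and accessed where appropriate, and free the sg_table.
 */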
static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(obj->userptr.work != NULL);

        if (obj->madv != I915_MADV_WILLNEED)
                obj->dirty = 0;

        for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i) {
                struct page *page = sg_page(sg);

                if (obj->dirty)
                        set_page_dirty(page);

                mark_page_accessed(page);
                page_cache_release(page);
        }
        obj->dirty = 0;

        sg_free_table(obj->pages);
        kfree(obj->pages);
}

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{
        i915_gem_userptr_release__mmu_notifier(obj);
        i915_gem_userptr_release__mm_struct(obj);
}

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{
        if (obj->userptr.mmu_object)
                return 0;

        return i915_gem_userptr_init__mmu_notifier(obj, 0);
}

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
        .dmabuf_export = i915_gem_userptr_dmabuf_export,
        .get_pages = i915_gem_userptr_get_pages,
        .put_pages = i915_gem_userptr_put_pages,
        .release = i915_gem_userptr_release,
};

/**
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_userptr *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;

        if (args->flags & ~(I915_USERPTR_READ_ONLY |
                            I915_USERPTR_UNSYNCHRONIZED))
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size))
                return -EINVAL;

        if (args->user_size > dev_priv->gtt.base.total)
                return -E2BIG;

        if (!access_ok(args->flags & I915_USERPTR_READ_ONLY ? VERIFY_READ : VERIFY_WRITE,
                       (char __user *)(unsigned long)args->user_ptr, args->user_size))
                return -EFAULT;

        if (args->flags & I915_USERPTR_READ_ONLY) {
                /* On almost all of the current hw, we cannot tell the GPU that a
                 * page is readonly, so this is just a placeholder in the uAPI.
                 */
                return -ENODEV;
        }

        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
                return -ENOMEM;

        drm_gem_private_object_init(dev, &obj->base, args->user_size);
        i915_gem_object_init(obj, &i915_gem_userptr_ops);
        obj->cache_level = I915_CACHE_LLC;
        obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        obj->base.read_domains = I915_GEM_DOMAIN_CPU;

        obj->userptr.ptr = args->user_ptr;
        obj->userptr.read_only = !!(args->flags & I915_USERPTR_READ_ONLY);

        /* And keep a pointer to the current->mm for resolving the user pages
         * at binding. This means that we need to hook into the mmu_notifier
         * in order to detect if the mmu is destroyed.
         */
        ret = i915_gem_userptr_init__mm_struct(obj);
        if (ret == 0)
                ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
        if (ret == 0)
                ret = drm_gem_handle_create(file, &obj->base, &handle);

        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
        if (ret)
                return ret;

        args->handle = handle;
        return 0;
}

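/* One-time setup of the per-device state (mm_lock, mm_structs) used above. */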
int
i915_gem_init_userptr(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = to_i915(dev);
        mutex_init(&dev_priv->mm_lock);
        hash_init(dev_priv->mm_structs);
        return 0;
}
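
/*
 * Illustrative userspace sketch (not part of the driver): creating a userptr
 * object through this uAPI might look roughly like the following, assuming a
 * page-aligned allocation whose size is a multiple of the page size and an
 * open DRM fd. Error handling is omitted for brevity.
 *
 *      struct drm_i915_gem_userptr arg = { 0 };
 *      void *ptr;
 *
 *      posix_memalign(&ptr, 4096, size);
 *      arg.user_ptr = (uintptr_t)ptr;
 *      arg.user_size = size;
 *      arg.flags = 0;
 *      ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg);
 *      // arg.handle now names a GEM object backed by the user allocation
 */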