/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
        /* constant after initialisation */
        struct radeon_device    *rdev;
        struct mm_struct        *mm;
        struct mmu_notifier     mn;

        /* only used on destruction */
        struct work_struct      work;

        /* protected by rdev->mn_lock */
        struct hlist_node       node;

        /* objects protected by lock */
        struct mutex            lock;
        struct rb_root          objects;
};

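/*
 * A single radeon_mn context is shared by all userptr BOs that belong to
 * the same mm: contexts live in rdev->mn_hash keyed by the mm pointer, and
 * every registered BO is tracked in the per-context interval tree (objects)
 * under the CPU address range it mirrors.
 */
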
/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void radeon_mn_destroy(struct work_struct *work)
{
        struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
        struct radeon_device *rdev = rmn->rdev;
        struct radeon_bo *bo, *next;

        mutex_lock(&rdev->mn_lock);
        mutex_lock(&rmn->lock);
        hash_del(&rmn->node);
        rbtree_postorder_for_each_entry_safe(bo, next, &rmn->objects,
                                             mn_it.rb) {
                interval_tree_remove(&bo->mn_it, &rmn->objects);
                bo->mn = NULL;
        }
        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
        mmu_notifier_unregister(&rmn->mn, rmn->mm);
        kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
{
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);

        INIT_WORK(&rmn->work, radeon_mn_destroy);
        schedule_work(&rmn->work);
}

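/*
 * Note that the context is not torn down here directly; radeon_mn_destroy()
 * runs from a work item instead, most likely because it takes rdev->mn_lock
 * and calls mmu_notifier_unregister(), neither of which is safe to do from
 * inside the notifier callback itself.
 */
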
/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * Block for all BOs between start and end to be idle and
 * unmap them by moving them into the system domain again.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end)
{
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
        struct interval_tree_node *it;

        /* notification is exclusive, but interval is inclusive */
        end -= 1;

        mutex_lock(&rmn->lock);

        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct radeon_bo *bo;
                struct fence *fence;
                int r;

                bo = container_of(it, struct radeon_bo, mn_it);
                it = interval_tree_iter_next(it, start, end);

                r = radeon_bo_reserve(bo, true);
                if (r) {
                        DRM_ERROR("(%d) failed to reserve user bo\n", r);
                        continue;
                }

                fence = reservation_object_get_excl(bo->tbo.resv);
                if (fence) {
                        r = radeon_fence_wait((struct radeon_fence *)fence, false);
                        if (r)
                                DRM_ERROR("(%d) failed to wait for user bo\n", r);
                }

                radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
                r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
                if (r)
                        DRM_ERROR("(%d) failed to validate user bo\n", r);

                radeon_bo_unreserve(bo);
        }

        mutex_unlock(&rmn->lock);
}

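/*
 * The net effect of the loop above: every userptr BO overlapping the
 * invalidated range is waited on (via its exclusive fence) and then
 * validated into the CPU domain, so TTM unbinds it from GTT and the GPU
 * stops using the pages the kernel is about to change or unmap.
 */
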
static const struct mmu_notifier_ops radeon_mn_ops = {
        .release = radeon_mn_release,
        .invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
        struct mm_struct *mm = current->mm;
        struct radeon_mn *rmn;
        int r;

        down_write(&mm->mmap_sem);
        mutex_lock(&rdev->mn_lock);

        hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
                if (rmn->mm == mm)
                        goto release_locks;

        rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
        if (!rmn) {
                rmn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }

        rmn->rdev = rdev;
        rmn->mm = mm;
        rmn->mn.ops = &radeon_mn_ops;
        mutex_init(&rmn->lock);
        rmn->objects = RB_ROOT;

        r = __mmu_notifier_register(&rmn->mn, mm);
        if (r)
                goto free_rmn;

        hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
        mutex_unlock(&rdev->mn_lock);
        up_write(&mm->mmap_sem);

        return rmn;

free_rmn:
        mutex_unlock(&rdev->mn_lock);
        up_write(&mm->mmap_sem);
        kfree(rmn);

        return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
        unsigned long end = addr + radeon_bo_size(bo) - 1;
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;
        struct interval_tree_node *it;

        rmn = radeon_mn_get(rdev);
        if (IS_ERR(rmn))
                return PTR_ERR(rmn);

        mutex_lock(&rmn->lock);

        /* refuse to register the same address range twice */
        it = interval_tree_iter_first(&rmn->objects, addr, end);
        if (it) {
                mutex_unlock(&rmn->lock);
                return -EEXIST;
        }

        bo->mn = rmn;
        bo->mn_it.start = addr;
        bo->mn_it.last = end;
        interval_tree_insert(&bo->mn_it, &rmn->objects);

        mutex_unlock(&rmn->lock);

        return 0;
}

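/*
 * Illustrative sketch (not part of this file): a userptr ioctl handler is
 * expected to call radeon_mn_register() once the BO for the user address
 * has been created; the handler flow, flag and label names below are
 * assumptions based on the radeon userptr interface, not definitions made
 * here.
 *
 *      if (args->flags & RADEON_GEM_USERPTR_REGISTER) {
 *              r = radeon_mn_register(bo, args->addr);
 *              if (r)
 *                      goto release_object;
 *      }
 */
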
/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;

        mutex_lock(&rdev->mn_lock);
        rmn = bo->mn;
        if (rmn == NULL) {
                mutex_unlock(&rdev->mn_lock);
                return;
        }

        mutex_lock(&rmn->lock);
        interval_tree_remove(&bo->mn_it, &rmn->objects);
        bo->mn = NULL;
        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
}