/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/notifier.h>

#include "kfd_priv.h"

/*
 * Initial size for the array of queues.
 * The allocated size is doubled each time
 * it is exceeded up to MAX_PROCESS_QUEUES.
 */
#define INITIAL_QUEUE_ARRAY_SIZE 16

/*
 * List of struct kfd_process (field kfd_process).
 * Unique/indexed by mm_struct*
 */
#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
static DEFINE_MUTEX(kfd_processes_mutex);

DEFINE_STATIC_SRCU(kfd_processes_srcu);

static struct workqueue_struct *kfd_process_wq;

struct kfd_process_release_work {
	struct work_struct kfd_work;
	struct kfd_process *p;
};

static struct kfd_process *find_process(const struct task_struct *thread);
static struct kfd_process *create_process(const struct task_struct *thread);

void kfd_process_create_wq(void)
{
	if (!kfd_process_wq)
		kfd_process_wq = create_workqueue("kfd_process_wq");
}

void kfd_process_destroy_wq(void)
{
	if (kfd_process_wq) {
		flush_workqueue(kfd_process_wq);
		destroy_workqueue(kfd_process_wq);
		kfd_process_wq = NULL;
	}
}
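
/*
 * Find the kfd_process for the calling task or create a new one.
 * Creation is serialized by kfd_processes_mutex so two threads of the
 * same process cannot each create a kfd_process.
 */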
struct kfd_process *kfd_create_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	BUG_ON(!kfd_process_wq);

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	/* Take mmap_sem because we call __mmu_notifier_register inside */
	down_write(&thread->mm->mmap_sem);

	/*
	 * Take the kfd processes mutex before starting process creation
	 * so there won't be a case where two threads of the same process
	 * create two kfd_process structures.
	 */
	mutex_lock(&kfd_processes_mutex);

	/* A prior open of /dev/kfd could have already created the process. */
	process = find_process(thread);
	if (process)
		pr_debug("kfd: process already found\n");

	if (!process)
		process = create_process(thread);

	mutex_unlock(&kfd_processes_mutex);

	up_write(&thread->mm->mmap_sem);

	return process;
}
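
/* Look up the kfd_process associated with this task, without creating one. */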
struct kfd_process *kfd_get_process(const struct task_struct *thread)
{
	struct kfd_process *process;

	if (thread->mm == NULL)
		return ERR_PTR(-EINVAL);

	/* Only the pthreads threading model is supported. */
	if (thread->group_leader->mm != thread->mm)
		return ERR_PTR(-EINVAL);

	process = find_process(thread);

	return process;
}

static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
{
	struct kfd_process *process;

	hash_for_each_possible_rcu(kfd_processes_table, process,
					kfd_processes, (uintptr_t)mm)
		if (process->mm == mm)
			return process;
	return NULL;
}

static struct kfd_process *find_process(const struct task_struct *thread)
{
	struct kfd_process *p;
	int idx;

	idx = srcu_read_lock(&kfd_processes_srcu);
	p = find_process_by_mm(thread->mm);
	srcu_read_unlock(&kfd_processes_srcu, idx);
	return p;
}
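
/*
 * Deferred teardown of a kfd_process, run from kfd_process_wq: releases
 * the per-device data, the PASID and the process structure itself.
 */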
static void kfd_process_wq_release(struct work_struct *work)
{
	struct kfd_process_release_work *my_work;
	struct kfd_process_device *pdd, *temp;
	struct kfd_process *p;

	my_work = (struct kfd_process_release_work *) work;
	p = my_work->p;

	mutex_lock(&p->mutex);
	list_for_each_entry_safe(pdd, temp, &p->per_device_data,
							per_device_list) {
		list_del(&pdd->per_device_list);
		kfree(pdd);
	}
	kfd_pasid_free(p->pasid);
	mutex_unlock(&p->mutex);
	mutex_destroy(&p->mutex);

	kfree(p->queues);
	kfree(p);
	/* The work item was allocated in kfd_process_destroy_delayed(). */
	kfree(my_work);
}
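
/*
 * RCU callback scheduled from the mmu_notifier release path. It drops
 * the extra mm_count reference and defers the remaining cleanup to the
 * process workqueue, since sleeping is not allowed in this context.
 */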
static void kfd_process_destroy_delayed(struct rcu_head *rcu)
{
	struct kfd_process_release_work *work;
	struct kfd_process *p;

	BUG_ON(!kfd_process_wq);

	p = container_of(rcu, struct kfd_process, rcu);
	BUG_ON(atomic_read(&p->mm->mm_count) <= 0);

	mmdrop(p->mm);

	/* RCU callbacks run in atomic context, so the allocation must not sleep. */
	work = (struct kfd_process_release_work *)
		kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
	if (work) {
		INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
		work->p = p;
		queue_work(kfd_process_wq, (struct work_struct *) work);
	}
}
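
/*
 * mmu_notifier release callback: the process address space is going
 * away, so remove the kfd_process from the hash table and schedule its
 * delayed destruction.
 */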
static void kfd_process_notifier_release(struct mmu_notifier *mn,
					struct mm_struct *mm)
{
	struct kfd_process *p;

	/*
	 * The kfd_process structure cannot be freed because the
	 * mmu_notifier srcu is read-locked.
	 */
	p = container_of(mn, struct kfd_process, mmu_notifier);
	BUG_ON(p->mm != mm);

	mutex_lock(&kfd_processes_mutex);
	hash_del_rcu(&p->kfd_processes);
	mutex_unlock(&kfd_processes_mutex);
	synchronize_srcu(&kfd_processes_srcu);

	/*
	 * Because we drop mm_count inside kfd_process_destroy_delayed
	 * and because the mmu_notifier_unregister function also drops
	 * mm_count, we need to take an extra count here.
	 */
	atomic_inc(&p->mm->mm_count);
	mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
	mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
}

static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
	.release = kfd_process_notifier_release,
};
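
/*
 * Allocate and initialize a new kfd_process: queue array, PASID,
 * mmu_notifier registration and insertion into kfd_processes_table
 * (keyed by the mm_struct pointer).
 */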
static struct kfd_process *create_process(const struct task_struct *thread)
{
	struct kfd_process *process;
	int err = -ENOMEM;

	process = kzalloc(sizeof(*process), GFP_KERNEL);
	if (!process)
		goto err_alloc_process;

	process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
					sizeof(process->queues[0]), GFP_KERNEL);
	if (!process->queues)
		goto err_alloc_queues;

	process->pasid = kfd_pasid_alloc();
	if (process->pasid == 0)
		goto err_alloc_pasid;

	mutex_init(&process->mutex);

	process->mm = thread->mm;

	/* register notifier */
	process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
	err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
	if (err)
		goto err_mmu_notifier;

	hash_add_rcu(kfd_processes_table, &process->kfd_processes,
			(uintptr_t)process->mm);

	process->lead_thread = thread->group_leader;

	process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;

	INIT_LIST_HEAD(&process->per_device_data);

	return process;

err_mmu_notifier:
	kfd_pasid_free(process->pasid);
err_alloc_pasid:
	kfree(process->queues);
err_alloc_queues:
	kfree(process);
err_alloc_process:
	return ERR_PTR(err);
}
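
/*
 * Look up the per-device data of process @p for device @dev. When
 * create_pdd is non-zero and no entry exists, a new one is allocated
 * and added to the process's per_device_data list.
 */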
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
							struct kfd_process *p,
							int create_pdd)
{
	struct kfd_process_device *pdd = NULL;

	list_for_each_entry(pdd, &p->per_device_data, per_device_list)
		if (pdd->dev == dev)
			return pdd;

	if (create_pdd) {
		pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
		if (pdd != NULL) {
			pdd->dev = dev;
			list_add(&pdd->per_device_list, &p->per_device_data);
		}
	}

	return pdd;
}

/*
 * Direct the IOMMU to bind the process (specifically the pasid->mm)
 * to the device.
 * Unbinding occurs when the process dies or the device is removed.
 *
 * Assumes that the process lock is held.
 */
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
							struct kfd_process *p)
{
	struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);

	if (pdd == NULL)
		return ERR_PTR(-ENOMEM);

	if (pdd->bound)
		return pdd;

	pdd->bound = true;

	return pdd;
}
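
/*
 * Called when a pasid is being unbound from the device. Marks the
 * matching per-device data as unbound; the IOMMU side is not touched
 * here because the IOMMU is the caller.
 */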
void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
	struct kfd_process *p;
	struct kfd_process_device *pdd;
	int idx, i;

	idx = srcu_read_lock(&kfd_processes_srcu);

	hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
		if (p->pasid == pasid)
			break;

	srcu_read_unlock(&kfd_processes_srcu, idx);

	BUG_ON(p->pasid != pasid);

	mutex_lock(&p->mutex);

	pdd = kfd_get_process_device_data(dev, p, 0);

	/*
	 * Just mark pdd as unbound, because we still need it to call
	 * amd_iommu_unbind_pasid() when the process exits.
	 * We don't call amd_iommu_unbind_pasid() here
	 * because the IOMMU called us.
	 */
	if (pdd)
		pdd->bound = false;

	mutex_unlock(&p->mutex);
}
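
/* Iterators over a process's per_device_data list. */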
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
{
	return list_first_entry(&p->per_device_data,
				struct kfd_process_device,
				per_device_list);
}

struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
						struct kfd_process_device *pdd)
{
	if (list_is_last(&pdd->per_device_list, &p->per_device_data))
		return NULL;
	return list_next_entry(pdd, per_device_list);
}

bool kfd_has_process_device_data(struct kfd_process *p)
{
	return !(list_empty(&p->per_device_data));
}