/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"

static long kfd_ioctl(struct file *, unsigned int, unsigned long);
static int kfd_open(struct inode *, struct file *);
static int kfd_mmap(struct file *, struct vm_area_struct *);

static const char kfd_dev_name[] = "kfd";

static const struct file_operations kfd_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = kfd_ioctl,
	.compat_ioctl = kfd_ioctl,
	.open = kfd_open,
	.mmap = kfd_mmap,
};

static int kfd_char_dev_major = -1;
static struct class *kfd_class;
struct device *kfd_device;
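
/*
 * Register the /dev/kfd character device: a dynamic major via
 * register_chrdev(), a "kfd" class, and a single device node (minor 0).
 * Userspace opens this node and drives the driver exclusively through
 * the AMDKFD_IOC_* ioctls and the mmap() handler defined below.
 */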
int kfd_chardev_init(void)
{
	int err = 0;

	kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
	err = kfd_char_dev_major;
	if (err < 0)
		goto err_register_chrdev;

	kfd_class = class_create(THIS_MODULE, kfd_dev_name);
	err = PTR_ERR(kfd_class);
	if (IS_ERR(kfd_class))
		goto err_class_create;

	kfd_device = device_create(kfd_class, NULL,
					MKDEV(kfd_char_dev_major, 0),
					NULL, kfd_dev_name);
	err = PTR_ERR(kfd_device);
	if (IS_ERR(kfd_device))
		goto err_device_create;

	return 0;

err_device_create:
	class_destroy(kfd_class);
err_class_create:
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
err_register_chrdev:
	return err;
}

void kfd_chardev_exit(void)
{
	device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
	class_destroy(kfd_class);
	unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
}

struct device *kfd_chardev(void)
{
	return kfd_device;
}
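
/*
 * open() handler: only minor 0 is valid, 32-bit (compat) callers are
 * rejected, and a per-process kfd_process context is created for the
 * opening task.
 */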
static int kfd_open(struct inode *inode, struct file *filep)
{
	struct kfd_process *process;
	bool is_32bit_user_mode;

	if (iminor(inode) != 0)
		return -ENODEV;

	is_32bit_user_mode = is_compat_task();

	if (is_32bit_user_mode) {
		dev_warn(kfd_device,
			"Process %d (32-bit) failed to open /dev/kfd\n"
			"32-bit processes are not supported by amdkfd\n",
			current->pid);
		return -EPERM;
	}

	process = kfd_create_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
		process->pasid, process->is_32bit_user_mode);

	return 0;
}

static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_get_version_args *args = data;

	args->major_version = KFD_IOCTL_MAJOR_VERSION;
	args->minor_version = KFD_IOCTL_MINOR_VERSION;

	return 0;
}
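
/*
 * Translate and validate the user-supplied queue arguments: percentage and
 * priority must be within their KFD_MAX_* bounds, the ring size must be
 * zero or a power of two, and every user pointer (ring base, read/write
 * pointers, EOP buffer, context save/restore area) must pass access_ok()
 * before it is copied into the kernel-side queue_properties.
 */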
static int set_queue_properties_from_user(struct queue_properties *q_properties,
				struct kfd_ioctl_create_queue_args *args)
{
	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->read_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access read pointer\n");
		return -EFAULT;
	}

	if (!access_ok(VERIFY_WRITE,
			(const void __user *) args->write_pointer_address,
			sizeof(uint32_t))) {
		pr_err("kfd: can't access write pointer\n");
		return -EFAULT;
	}

	if (args->eop_buffer_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->eop_buffer_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access eop buffer\n");
		return -EFAULT;
	}

	if (args->ctx_save_restore_address &&
		!access_ok(VERIFY_WRITE,
			(const void __user *) args->ctx_save_restore_address,
			sizeof(uint32_t))) {
		pr_debug("kfd: can't access ctx save restore buffer\n");
		return -EFAULT;
	}

	q_properties->is_interop = false;
	q_properties->queue_percent = args->queue_percentage;
	q_properties->priority = args->queue_priority;
	q_properties->queue_address = args->ring_base_address;
	q_properties->queue_size = args->ring_size;
	q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
	q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
	q_properties->eop_ring_buffer_address = args->eop_buffer_address;
	q_properties->eop_ring_buffer_size = args->eop_buffer_size;
	q_properties->ctx_save_restore_area_address =
			args->ctx_save_restore_address;
	q_properties->ctx_save_restore_area_size = args->ctx_save_restore_size;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
		args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
	else if (args->queue_type == KFD_IOC_QUEUE_TYPE_SDMA)
		q_properties->type = KFD_QUEUE_TYPE_SDMA;
	else
		return -ENOTSUPP;

	if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
		q_properties->format = KFD_QUEUE_FORMAT_AQL;
	else
		q_properties->format = KFD_QUEUE_FORMAT_PM4;

	pr_debug("Queue Percentage (%d, %d)\n",
			q_properties->queue_percent, args->queue_percentage);

	pr_debug("Queue Priority (%d, %d)\n",
			q_properties->priority, args->queue_priority);

	pr_debug("Queue Address (0x%llX, 0x%llX)\n",
			q_properties->queue_address, args->ring_base_address);

	pr_debug("Queue Size (0x%llX, %u)\n",
			q_properties->queue_size, args->ring_size);

	pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
			(uint64_t) q_properties->read_ptr,
			(uint64_t) q_properties->write_ptr);

	pr_debug("Queue Format (%d)\n", q_properties->format);

	pr_debug("Queue EOP (0x%llX)\n", q_properties->eop_ring_buffer_address);

	pr_debug("Queue CTX save area (0x%llX)\n",
			q_properties->ctx_save_restore_area_address);

	return 0;
}
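
/*
 * AMDKFD_IOC_CREATE_QUEUE: validate the user arguments, look up the target
 * device by gpu_id, bind the process to that device, and hand the queue to
 * the process queue manager. On success the new queue_id is returned along
 * with a doorbell_offset that encodes the gpu_id for a later mmap() of the
 * doorbell page.
 */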
static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
					void *data)
{
	struct kfd_ioctl_create_queue_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	unsigned int queue_id;
	struct kfd_process_device *pdd;
	struct queue_properties q_properties;

	memset(&q_properties, 0, sizeof(struct queue_properties));

	pr_debug("kfd: creating queue ioctl\n");

	err = set_queue_properties_from_user(&q_properties, args);
	if (err)
		return err;

	pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id);
	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL) {
		pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id);
		return -EINVAL;
	}

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto err_bind_process;
	}

	pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
			p->pasid,
			dev->id);

	err = pqm_create_queue(&p->pqm, dev, filep, &q_properties,
				0, q_properties.type, &queue_id);
	if (err != 0)
		goto err_create_queue;

	args->queue_id = queue_id;

	/* Return gpu_id as doorbell offset for mmap usage */
	args->doorbell_offset = (KFD_MMAP_DOORBELL_MASK | args->gpu_id);
	args->doorbell_offset <<= PAGE_SHIFT;

	mutex_unlock(&p->mutex);

	pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);

	pr_debug("ring buffer address == 0x%016llX\n",
			args->ring_base_address);

	pr_debug("read ptr address == 0x%016llX\n",
			args->read_pointer_address);

	pr_debug("write ptr address == 0x%016llX\n",
			args->write_pointer_address);

	return 0;

err_create_queue:
err_bind_process:
	mutex_unlock(&p->mutex);
	return err;
}
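
/*
 * Illustrative userspace sketch (not part of the driver) of how this ioctl
 * is typically driven; the field names come from the args struct handled
 * above, while fd, ring_buf, rptr and wptr are hypothetical caller
 * variables:
 *
 *	struct kfd_ioctl_create_queue_args args = {0};
 *
 *	args.gpu_id = gpu_id;
 *	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE_AQL;
 *	args.queue_percentage = 100;
 *	args.queue_priority = 7;
 *	args.ring_base_address = (uint64_t)ring_buf;	// power-of-2 size
 *	args.ring_size = ring_size;
 *	args.read_pointer_address = (uint64_t)&rptr;
 *	args.write_pointer_address = (uint64_t)&wptr;
 *
 *	if (ioctl(fd, AMDKFD_IOC_CREATE_QUEUE, &args) == 0)
 *		doorbells = mmap(NULL, doorbell_size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, args.doorbell_offset);
 */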

static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_destroy_queue_args *args = data;

	pr_debug("kfd: destroying queue id %d for PASID %d\n",
				args->queue_id,
				p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_destroy_queue(&p->pqm, args->queue_id);

	mutex_unlock(&p->mutex);
	return retval;
}

static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
					void *data)
{
	int retval;
	struct kfd_ioctl_update_queue_args *args = data;
	struct queue_properties properties;

	if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
		pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
		return -EINVAL;
	}

	if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
		pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
		return -EINVAL;
	}

	if ((args->ring_base_address) &&
		(!access_ok(VERIFY_WRITE,
			(const void __user *) args->ring_base_address,
			sizeof(uint64_t)))) {
		pr_err("kfd: can't access ring base address\n");
		return -EFAULT;
	}

	if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
		pr_err("kfd: ring size must be a power of 2 or 0\n");
		return -EINVAL;
	}

	properties.queue_address = args->ring_base_address;
	properties.queue_size = args->ring_size;
	properties.queue_percent = args->queue_percentage;
	properties.priority = args->queue_priority;

	pr_debug("kfd: updating queue id %d for PASID %d\n",
			args->queue_id, p->pasid);

	mutex_lock(&p->mutex);

	retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);

	mutex_unlock(&p->mutex);

	return retval;
}
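
/*
 * AMDKFD_IOC_SET_MEMORY_POLICY: choose the default and alternate cache
 * policies (coherent vs. non-coherent) for the process on one device and
 * describe the alternate aperture; the request is forwarded to the device
 * queue manager.
 */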
static int kfd_ioctl_set_memory_policy(struct file *filep,
					struct kfd_process *p, void *data)
{
	struct kfd_ioctl_set_memory_policy_args *args = data;
	struct kfd_dev *dev;
	int err = 0;
	struct kfd_process_device *pdd;
	enum cache_policy default_policy, alternate_policy;

	if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
	    && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
		return -EINVAL;
	}

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	mutex_lock(&p->mutex);

	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		err = -ESRCH;
		goto out;
	}

	default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
			 ? cache_policy_coherent : cache_policy_noncoherent;

	alternate_policy =
		(args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
		   ? cache_policy_coherent : cache_policy_noncoherent;

	if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm,
				&pdd->qpd,
				default_policy,
				alternate_policy,
				(void __user *)args->alternate_aperture_base,
				args->alternate_aperture_size))
		err = -EINVAL;

out:
	mutex_unlock(&p->mutex);

	return err;
}
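
/*
 * AMDKFD_IOC_GET_CLOCK_COUNTERS: snapshot the GPU clock counter through the
 * kfd2kgd interface together with CPU raw-monotonic and boot-time clocks in
 * nanoseconds, so userspace can correlate GPU and CPU timestamps.
 */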
static int kfd_ioctl_get_clock_counters(struct file *filep,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_clock_counters_args *args = data;
	struct kfd_dev *dev;
	struct timespec64 time;

	dev = kfd_device_by_id(args->gpu_id);
	if (dev == NULL)
		return -EINVAL;

	/* Reading GPU clock counter from KGD */
	args->gpu_clock_counter =
		dev->kfd2kgd->get_gpu_clock_counter(dev->kgd);

	/* No access to rdtsc. Using raw monotonic time */
	getrawmonotonic64(&time);
	args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);

	get_monotonic_boottime64(&time);
	args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);

	/* Since the counter is in nano-seconds we use 1GHz frequency */
	args->system_clock_freq = 1000000000;

	return 0;
}
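
/*
 * AMDKFD_IOC_GET_PROCESS_APERTURES: report, for every device the process is
 * bound to (up to NUM_OF_SUPPORTED_GPUS), the LDS, GPUVM and scratch
 * aperture base/limit addresses recorded in its per-device data.
 */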
static int kfd_ioctl_get_process_apertures(struct file *filp,
				struct kfd_process *p, void *data)
{
	struct kfd_ioctl_get_process_apertures_args *args = data;
	struct kfd_process_device_apertures *pAperture;
	struct kfd_process_device *pdd;

	dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);

	args->num_of_nodes = 0;

	mutex_lock(&p->mutex);

	/* if the process-device list isn't empty */
	if (kfd_has_process_device_data(p)) {
		/* Run over all pdd of the process */
		pdd = kfd_get_first_process_device_data(p);
		do {
			pAperture =
				&args->process_apertures[args->num_of_nodes];
			pAperture->gpu_id = pdd->dev->id;
			pAperture->lds_base = pdd->lds_base;
			pAperture->lds_limit = pdd->lds_limit;
			pAperture->gpuvm_base = pdd->gpuvm_base;
			pAperture->gpuvm_limit = pdd->gpuvm_limit;
			pAperture->scratch_base = pdd->scratch_base;
			pAperture->scratch_limit = pdd->scratch_limit;

			dev_dbg(kfd_device,
				"node id %u\n", args->num_of_nodes);
			dev_dbg(kfd_device,
				"gpu id %u\n", pdd->dev->id);
			dev_dbg(kfd_device,
				"lds_base %llX\n", pdd->lds_base);
			dev_dbg(kfd_device,
				"lds_limit %llX\n", pdd->lds_limit);
			dev_dbg(kfd_device,
				"gpuvm_base %llX\n", pdd->gpuvm_base);
			dev_dbg(kfd_device,
				"gpuvm_limit %llX\n", pdd->gpuvm_limit);
			dev_dbg(kfd_device,
				"scratch_base %llX\n", pdd->scratch_base);
			dev_dbg(kfd_device,
				"scratch_limit %llX\n", pdd->scratch_limit);

			args->num_of_nodes++;
		} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
				(args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
	}

	mutex_unlock(&p->mutex);

	return 0;
}

static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	return -ENODEV;
}

static int kfd_ioctl_destroy_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	return -ENODEV;
}

static int kfd_ioctl_set_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	return -ENODEV;
}

static int kfd_ioctl_reset_event(struct file *filp, struct kfd_process *p,
					void *data)
{
	return -ENODEV;
}

static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
					void *data)
{
	return -ENODEV;
}
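
/*
 * Ioctl dispatch table: AMDKFD_IOCTL_DEF() places each handler at the slot
 * given by _IOC_NR() of its command code, so kfd_ioctl() below can index
 * amdkfd_ioctls[] directly instead of using a switch statement.
 */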
#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}

/** Ioctl table */
static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
			kfd_ioctl_get_version, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
			kfd_ioctl_create_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
			kfd_ioctl_destroy_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
			kfd_ioctl_set_memory_policy, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
			kfd_ioctl_get_clock_counters, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
			kfd_ioctl_get_process_apertures, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
			kfd_ioctl_update_queue, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_EVENT,
			kfd_ioctl_create_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_EVENT,
			kfd_ioctl_destroy_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_EVENT,
			kfd_ioctl_set_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_RESET_EVENT,
			kfd_ioctl_reset_event, 0),

	AMDKFD_IOCTL_DEF(AMDKFD_IOC_WAIT_EVENTS,
			kfd_ioctl_wait_events, 0),
};

#define AMDKFD_CORE_IOCTL_COUNT	ARRAY_SIZE(amdkfd_ioctls)
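
/*
 * Common ioctl entry point: look up the descriptor for the command number,
 * size the argument buffer from both the user-supplied command and the
 * table entry (stack buffer for small requests, kmalloc otherwise), copy
 * the payload in, call the handler, and copy the result back out.
 */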
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kfd_process *process;
	amdkfd_ioctl_t *func;
	const struct amdkfd_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128];
	char *kdata = NULL;
	unsigned int usize, asize;
	int retcode = -EINVAL;

	if (nr >= AMDKFD_CORE_IOCTL_COUNT)
		goto err_i1;

	if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
		u32 amdkfd_size;

		ioctl = &amdkfd_ioctls[nr];

		amdkfd_size = _IOC_SIZE(ioctl->cmd);
		usize = asize = _IOC_SIZE(cmd);
		if (amdkfd_size > asize)
			asize = amdkfd_size;

		cmd = ioctl->cmd;
	} else
		goto err_i1;

	dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);

	process = kfd_get_process(current);
	if (IS_ERR(process)) {
		dev_dbg(kfd_device, "no process\n");
		goto err_i1;
	}

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(kfd_device, "no function\n");
		retcode = -EINVAL;
		goto err_i1;
	}

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kmalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto err_i1;
			}
		}
		if (asize > usize)
			memset(kdata + usize, 0, asize - usize);
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
			retcode = -EFAULT;
			goto err_i1;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(filep, process, kdata);

	if (cmd & IOC_OUT)
		if (copy_to_user((void __user *)arg, kdata, usize) != 0)
			retcode = -EFAULT;

err_i1:
	if (!ioctl)
		dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	if (retcode)
		dev_dbg(kfd_device, "ret = %d\n", retcode);

	return retcode;
}
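
/*
 * mmap() handler: the high bits of the page offset select what is being
 * mapped. Offsets tagged with KFD_MMAP_DOORBELL_MASK (as returned by the
 * create-queue ioctl) map the doorbell page, offsets tagged with
 * KFD_MMAP_EVENTS_MASK map the events page; the tag is cleared from
 * vm_pgoff before the request is forwarded.
 */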
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct kfd_process *process;

	process = kfd_get_process(current);
	if (IS_ERR(process))
		return PTR_ERR(process);

	if ((vma->vm_pgoff & KFD_MMAP_DOORBELL_MASK) ==
			KFD_MMAP_DOORBELL_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_DOORBELL_MASK;
		return kfd_doorbell_mmap(process, vma);
	} else if ((vma->vm_pgoff & KFD_MMAP_EVENTS_MASK) ==
			KFD_MMAP_EVENTS_MASK) {
		vma->vm_pgoff = vma->vm_pgoff ^ KFD_MMAP_EVENTS_MASK;
		return kfd_event_mmap(process, vma);