/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2		11
#define CIK_HPD_EOP_BYTES		(1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
					unsigned int sdma_queue_id);

enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.first_compute_pipe;
}

unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}

static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

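/*
 * A VMID is allocated only when a process creates its first queue on a
 * device and is released when its last queue is destroyed. Allocation also
 * programs the HW PASID-to-VMID mapping and the process's SH_MEM settings.
 */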
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* On Kaveri, KFD VMIDs start from VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);
	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);

	if (retval != 0) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return 0;
}

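/*
 * Scan the pipes round-robin, starting at next_pipe_to_allocate, and claim
 * the first free HQD slot found, so queues spread horizontally across pipes.
 */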
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
			pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (set == false)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe, q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, (uint32_t __user *) q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (mqd == NULL) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type is invalid (%d)\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);

	if (retval != 0)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

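/*
 * Reprogram the queue's MQD and adjust dqm->queue_count when the queue's
 * active state changed; under HW scheduling the runlist is rebuilt as well.
 */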
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (mqd == NULL) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);

	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

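/*
 * Allocate and zero the hardware-owned per-pipe HPD/EOP buffers on GTT and
 * hand each pipe's GPU address and buffer size to the KGD.
 */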
int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */

	err = kfd_gtt_sa_allocate(dqm->dev, CIK_HPD_EOP_BYTES * pipes_num,
					&dqm->pipeline_mem);

	if (err) {
		pr_err("kfd: error allocating vidmem for %d pipes\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (mqd == NULL) {
		kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		/*
		 * HPD buffer on GTT is allocated by amdkfd, no need to waste
		 * space in GTT for pipelines we don't initialize
		 */
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		dqm->dev->kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	BUG_ON(dqm == NULL);

	for (i = 0 ; i < get_pipes_num(dqm) ; i++)
		dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd,
				i + get_first_pipe(dqm));
}

static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
	return retval;
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	init_scheduler(dqm);
	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

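/*
 * SDMA queues come from a per-device bitmap; the allocated id is split into
 * an engine id and a per-engine queue id before the MQD is initialized and
 * loaded.
 */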
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval != 0)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("kfd: sdma id is:    %d\n", q->sdma_id);
	pr_debug("     sdma queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("     sdma engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		return retval;
	}

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, NULL);
	if (retval != 0) {
		deallocate_sdma_queue(dqm, q->sdma_id);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

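/*
 * Report the VMID range and the HQD slots owned by amdkfd to the HW
 * scheduler via pm_send_set_resources().
 */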
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

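/* Alternate between the two SDMA engines on each allocation. */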
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

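/*
 * Poll (yielding the CPU via schedule()) until the scheduler writes
 * fence_value to *fence_addr, or fail with -ETIME once the timeout passes.
 */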
static int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false,
			sdma_engine);
}

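/*
 * Preempt all active queues: unmap the SDMA and compute queues, then use a
 * fence query to wait for the CP to finish processing the preemption before
 * releasing the runlist IB.
 */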
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (dqm->active_runlist == false)
		goto out;

	pr_debug("kfd: Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* wait for the fence, bounded by the preemption timeout */
	amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

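/*
 * (Re)build the runlist: preempt whatever is currently running, then submit
 * a new runlist if there are any active queues left to schedule.
 */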
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

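/*
 * Validate and program the default/alternate cache policies and the APE1
 * aperture for a process. Returns true on success, false if the aperture
 * parameters cannot be represented in the SH_MEM_APE1 format.
 */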
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return retval;

out:
	mutex_unlock(&dqm->lock);
	return false;
}

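/*
 * Pick the cpsch (HW scheduling) or nocpsch (no HW scheduling) ops table
 * based on the sched_policy module parameter, then hook up the
 * ASIC-specific ops for the device's family.
 */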
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	pr_debug("kfd: loading device queue manager\n");

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	}

	if (dqm->ops.initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}