drm/amdkfd: Allow user to limit only queues per device
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
1 /*
2  * Copyright 2014 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/slab.h>
25 #include <linux/list.h>
26 #include <linux/types.h>
27 #include <linux/printk.h>
28 #include <linux/bitops.h>
29 #include "kfd_priv.h"
30 #include "kfd_device_queue_manager.h"
31 #include "kfd_mqd_manager.h"
32 #include "cik_regs.h"
33 #include "kfd_kernel_queue.h"
34 #include "../../radeon/cik_reg.h"
35
36 /* Size of the per-pipe EOP queue */
37 #define CIK_HPD_EOP_BYTES_LOG2 11
38 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
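/*
 * Worked example (for illustration only): with CIK_HPD_EOP_BYTES_LOG2 = 11,
 * each pipe gets 1 << 11 = 2048 bytes of EOP space. init_pipelines() below
 * programs the EOP size as log2(bytes / 4) - 1, i.e.
 * log2(2048 / 4) - 1 = 9 - 1 = 8, which is exactly CIK_HPD_EOP_BYTES_LOG2 - 3.
 */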
39
40 static bool is_mem_initialized;
41
42 static int init_memory(struct device_queue_manager *dqm);
43 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
44                                         unsigned int pasid, unsigned int vmid);
45
46 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
47                                         struct queue *q,
48                                         struct qcm_process_device *qpd);
49 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
50 static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
51
52
53 static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
54 {
55         BUG_ON(!dqm || !dqm->dev);
56         return dqm->dev->shared_resources.compute_pipe_count;
57 }
58
59 static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
60 {
61         BUG_ON(!dqm);
62         return dqm->dev->shared_resources.first_compute_pipe;
63 }
64
65 static inline unsigned int get_pipes_num_cpsch(void)
66 {
67         return PIPE_PER_ME_CP_SCHEDULING;
68 }
69
70 static inline unsigned int
71 get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
72 {
73         uint32_t nybble;
74
75         nybble = (pdd->lds_base >> 60) & 0x0E;
76
77         return nybble;
78
79 }
80
81 static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
82 {
83         unsigned int shared_base;
84
85         shared_base = (pdd->lds_base >> 16) & 0xFF;
86
87         return shared_base;
88 }
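/*
 * Illustration with hypothetical aperture values: if pdd->lds_base were
 * 0x7000000000000000, get_sh_mem_bases_nybble_64() would return
 * (0x7000000000000000 >> 60) & 0x0E = 0x6 (the top nybble with its low bit
 * cleared); if pdd->lds_base were 0x00840000, get_sh_mem_bases_32() would
 * return (0x00840000 >> 16) & 0xFF = 0x84.
 */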
89
90 static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
91 static void init_process_memory(struct device_queue_manager *dqm,
92                                 struct qcm_process_device *qpd)
93 {
94         struct kfd_process_device *pdd;
95         unsigned int temp;
96
97         BUG_ON(!dqm || !qpd);
98
99         pdd = qpd_to_pdd(qpd);
100
101         /* check if sh_mem_config register already configured */
102         if (qpd->sh_mem_config == 0) {
103                 qpd->sh_mem_config =
104                         ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
105                         DEFAULT_MTYPE(MTYPE_NONCACHED) |
106                         APE1_MTYPE(MTYPE_NONCACHED);
107                 qpd->sh_mem_ape1_limit = 0;
108                 qpd->sh_mem_ape1_base = 0;
109         }
110
111         if (qpd->pqm->process->is_32bit_user_mode) {
112                 temp = get_sh_mem_bases_32(pdd);
113                 qpd->sh_mem_bases = SHARED_BASE(temp);
114                 qpd->sh_mem_config |= PTR32;
115         } else {
116                 temp = get_sh_mem_bases_nybble_64(pdd);
117                 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
118         }
119
120         pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
121                 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
122 }
123
124 static void program_sh_mem_settings(struct device_queue_manager *dqm,
125                                         struct qcm_process_device *qpd)
126 {
127         return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
128                                                 qpd->sh_mem_config,
129                                                 qpd->sh_mem_ape1_base,
130                                                 qpd->sh_mem_ape1_limit,
131                                                 qpd->sh_mem_bases);
132 }
133
134 static int allocate_vmid(struct device_queue_manager *dqm,
135                         struct qcm_process_device *qpd,
136                         struct queue *q)
137 {
138         int bit, allocated_vmid;
139
140         if (dqm->vmid_bitmap == 0)
141                 return -ENOMEM;
142
143         bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
144         clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
145
146         /* On Kaveri, KFD VMIDs start from VMID 8 */
147         allocated_vmid = bit + KFD_VMID_START_OFFSET;
148         pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
149         qpd->vmid = allocated_vmid;
150         q->properties.vmid = allocated_vmid;
151
152         set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
153         program_sh_mem_settings(dqm, qpd);
154
155         return 0;
156 }
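/*
 * Illustration: dqm->vmid_bitmap tracks the VMIDs reserved for KFD, one bit
 * per VMID. Bit 0 corresponds to VMID KFD_VMID_START_OFFSET (8 on Kaveri,
 * per the comment above), so the freshly initialized bitmap of
 * (1 << VMID_PER_DEVICE) - 1 hands out VMIDs 8, 9, ... as each process
 * creates its first queue; deallocate_vmid() below returns a VMID by
 * setting its bit again.
 */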
157
158 static void deallocate_vmid(struct device_queue_manager *dqm,
159                                 struct qcm_process_device *qpd,
160                                 struct queue *q)
161 {
162         int bit = qpd->vmid - KFD_VMID_START_OFFSET;
163
164         /* Release the vmid mapping */
165         set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
166
167         set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
168         qpd->vmid = 0;
169         q->properties.vmid = 0;
170 }
171
172 static int create_queue_nocpsch(struct device_queue_manager *dqm,
173                                 struct queue *q,
174                                 struct qcm_process_device *qpd,
175                                 int *allocated_vmid)
176 {
177         int retval;
178
179         BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
180
181         pr_debug("kfd: In func %s\n", __func__);
182         print_queue(q);
183
184         mutex_lock(&dqm->lock);
185
186         if (dqm->total_queue_count >= max_num_of_queues_per_device) {
187                 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
188                                 dqm->total_queue_count);
189                 mutex_unlock(&dqm->lock);
190                 return -EPERM;
191         }
192
193         if (list_empty(&qpd->queues_list)) {
194                 retval = allocate_vmid(dqm, qpd, q);
195                 if (retval != 0) {
196                         mutex_unlock(&dqm->lock);
197                         return retval;
198                 }
199         }
200         *allocated_vmid = qpd->vmid;
201         q->properties.vmid = qpd->vmid;
202
203         retval = create_compute_queue_nocpsch(dqm, q, qpd);
204
205         if (retval != 0) {
206                 if (list_empty(&qpd->queues_list)) {
207                         deallocate_vmid(dqm, qpd, q);
208                         *allocated_vmid = 0;
209                 }
210                 mutex_unlock(&dqm->lock);
211                 return retval;
212         }
213
214         list_add(&q->list, &qpd->queues_list);
215         dqm->queue_count++;
216
217         /*
218          * Unconditionally increment this counter, regardless of the queue's
219          * type or whether the queue is active.
220          */
221         dqm->total_queue_count++;
222         pr_debug("Total of %d queues are accountable so far\n",
223                         dqm->total_queue_count);
224
225         mutex_unlock(&dqm->lock);
226         return 0;
227 }
228
229 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
230 {
231         bool set;
232         int pipe, bit, i;
233
234         set = false;
235
236         for (pipe = dqm->next_pipe_to_allocate, i = 0; i < get_pipes_num(dqm);
237                         pipe = ((pipe + 1) % get_pipes_num(dqm)), ++i) {
238                 if (dqm->allocated_queues[pipe] != 0) {
239                         bit = find_first_bit(
240                                 (unsigned long *)&dqm->allocated_queues[pipe],
241                                 QUEUES_PER_PIPE);
242
243                         clear_bit(bit,
244                                 (unsigned long *)&dqm->allocated_queues[pipe]);
245                         q->pipe = pipe;
246                         q->queue = bit;
247                         set = true;
248                         break;
249                 }
250         }
251
252         if (set == false)
253                 return -EBUSY;
254
255         pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
256                                 __func__, q->pipe, q->queue);
257         /* horizontal hqd allocation */
258         dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
259
260         return 0;
261 }
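/*
 * Illustration of the round-robin ("horizontal") HQD allocation above, with
 * a hypothetical pipe count of 4: if next_pipe_to_allocate == 2, the pipes
 * are probed in the order 2, 3, 0, 1, and within the first pipe that still
 * has a free slot the lowest set bit of allocated_queues[pipe] selects the
 * HQD. The real pipe count comes from shared_resources.compute_pipe_count.
 */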
262
263 static inline void deallocate_hqd(struct device_queue_manager *dqm,
264                                 struct queue *q)
265 {
266         set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
267 }
268
269 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
270                                         struct queue *q,
271                                         struct qcm_process_device *qpd)
272 {
273         int retval;
274         struct mqd_manager *mqd;
275
276         BUG_ON(!dqm || !q || !qpd);
277
278         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
279         if (mqd == NULL)
280                 return -ENOMEM;
281
282         retval = allocate_hqd(dqm, q);
283         if (retval != 0)
284                 return retval;
285
286         retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
287                                 &q->gart_mqd_addr, &q->properties);
288         if (retval != 0) {
289                 deallocate_hqd(dqm, q);
290                 return retval;
291         }
292
293         pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
294                         q->pipe,
295                         q->queue);
296
297         retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
298                         q->queue, (uint32_t __user *) q->properties.write_ptr);
299         if (retval != 0) {
300                 deallocate_hqd(dqm, q);
301                 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
302                 return retval;
303         }
304
305         return 0;
306 }
307
308 static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
309                                 struct qcm_process_device *qpd,
310                                 struct queue *q)
311 {
312         int retval;
313         struct mqd_manager *mqd;
314
315         BUG_ON(!dqm || !q || !q->mqd || !qpd);
316
317         retval = 0;
318
319         pr_debug("kfd: In Func %s\n", __func__);
320
321         mutex_lock(&dqm->lock);
322         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
323         if (mqd == NULL) {
324                 retval = -ENOMEM;
325                 goto out;
326         }
327
328         retval = mqd->destroy_mqd(mqd, q->mqd,
329                                 KFD_PREEMPT_TYPE_WAVEFRONT,
330                                 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
331                                 q->pipe, q->queue);
332
333         if (retval != 0)
334                 goto out;
335
336         deallocate_hqd(dqm, q);
337
338         mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
339
340         list_del(&q->list);
341         if (list_empty(&qpd->queues_list))
342                 deallocate_vmid(dqm, qpd, q);
343         dqm->queue_count--;
344
345         /*
346          * Unconditionally decrement this counter, regardless of the queue's
347          * type
348          */
349         dqm->total_queue_count--;
350         pr_debug("Total of %d queues are accountable so far\n",
351                         dqm->total_queue_count);
352
353 out:
354         mutex_unlock(&dqm->lock);
355         return retval;
356 }
357
358 static int update_queue(struct device_queue_manager *dqm, struct queue *q)
359 {
360         int retval;
361         struct mqd_manager *mqd;
362         bool prev_active = false;
363
364         BUG_ON(!dqm || !q || !q->mqd);
365
366         mutex_lock(&dqm->lock);
367         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
368         if (mqd == NULL) {
369                 mutex_unlock(&dqm->lock);
370                 return -ENOMEM;
371         }
372
373         if (q->properties.is_active == true)
374                 prev_active = true;
375
376         /*
377          *
378          * check active state vs. the previous state
379          * and modify counter accordingly
380          */
381         retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
382         if ((q->properties.is_active == true) && (prev_active == false))
383                 dqm->queue_count++;
384         else if ((q->properties.is_active == false) && (prev_active == true))
385                 dqm->queue_count--;
386
387         if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
388                 retval = execute_queues_cpsch(dqm, false);
389
390         mutex_unlock(&dqm->lock);
391         return retval;
392 }
393
394 static struct mqd_manager *get_mqd_manager_nocpsch(
395                 struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
396 {
397         struct mqd_manager *mqd;
398
399         BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
400
401         pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
402
403         mqd = dqm->mqds[type];
404         if (!mqd) {
405                 mqd = mqd_manager_init(type, dqm->dev);
406                 if (mqd == NULL)
407                         pr_err("kfd: mqd manager is NULL\n");
408                 dqm->mqds[type] = mqd;
409         }
410
411         return mqd;
412 }
413
414 static int register_process_nocpsch(struct device_queue_manager *dqm,
415                                         struct qcm_process_device *qpd)
416 {
417         struct device_process_node *n;
418
419         BUG_ON(!dqm || !qpd);
420
421         pr_debug("kfd: In func %s\n", __func__);
422
423         n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
424         if (!n)
425                 return -ENOMEM;
426
427         n->qpd = qpd;
428
429         mutex_lock(&dqm->lock);
430         list_add(&n->list, &dqm->queues);
431
432         init_process_memory(dqm, qpd);
433         dqm->processes_count++;
434
435         mutex_unlock(&dqm->lock);
436
437         return 0;
438 }
439
440 static int unregister_process_nocpsch(struct device_queue_manager *dqm,
441                                         struct qcm_process_device *qpd)
442 {
443         int retval;
444         struct device_process_node *cur, *next;
445
446         BUG_ON(!dqm || !qpd);
447
448         BUG_ON(!list_empty(&qpd->queues_list));
449
450         pr_debug("kfd: In func %s\n", __func__);
451
452         retval = 0;
453         mutex_lock(&dqm->lock);
454
455         list_for_each_entry_safe(cur, next, &dqm->queues, list) {
456                 if (qpd == cur->qpd) {
457                         list_del(&cur->list);
458                         kfree(cur);
459                         dqm->processes_count--;
460                         goto out;
461                 }
462         }
463         /* qpd not found in dqm list */
464         retval = 1;
465 out:
466         mutex_unlock(&dqm->lock);
467         return retval;
468 }
469
470 static int
471 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
472                         unsigned int vmid)
473 {
474         uint32_t pasid_mapping;
475
476         pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
477                                                 ATC_VMID_PASID_MAPPING_VALID;
478         return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
479                                                 vmid);
480 }
481
482 static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
483 {
484         /* In 64-bit mode, we can only control the top 3 bits of the LDS,
485          * scratch and GPUVM apertures.
486          * The hardware fills in the remaining 59 bits according to the
487          * following pattern:
488          * LDS:         X0000000'00000000 - X0000001'00000000 (4GB)
489          * Scratch:     X0000001'00000000 - X0000002'00000000 (4GB)
490          * GPUVM:       Y0010000'00000000 - Y0020000'00000000 (1TB)
491          *
492          * (where X/Y is the configurable nybble with the low-bit 0)
493          *
494          * LDS and scratch will have the same top nybble programmed in the
495          * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
496          * GPUVM can have a different top nybble programmed in the
497          * top 3 bits of SH_MEM_BASES.SHARED_BASE.
498          * We don't bother to support different top nybbles
499          * for LDS/Scratch and GPUVM.
500          */
501
502         BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
503                 top_address_nybble == 0);
504
505         return PRIVATE_BASE(top_address_nybble << 12) |
506                         SHARED_BASE(top_address_nybble << 12);
507 }
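/*
 * Worked example (illustrative nybble value): for top_address_nybble == 0xE
 * this returns PRIVATE_BASE(0xE000) | SHARED_BASE(0xE000), so the top three
 * bits of SH_MEM_BASES.PRIVATE_BASE and SH_MEM_BASES.SHARED_BASE are both
 * 0b111. Per the pattern above that places LDS at 0xE0000000'00000000,
 * scratch 4 GB above it, and GPUVM at 0xE0010000'00000000.
 */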
508
509 static int init_memory(struct device_queue_manager *dqm)
510 {
511         int i, retval;
512
513         for (i = 8; i < 16; i++)
514                 set_pasid_vmid_mapping(dqm, 0, i);
515
516         retval = kfd2kgd->init_memory(dqm->dev->kgd);
517         if (retval == 0)
518                 is_mem_initialized = true;
519         return retval;
520 }
521
522
523 static int init_pipelines(struct device_queue_manager *dqm,
524                         unsigned int pipes_num, unsigned int first_pipe)
525 {
526         void *hpdptr;
527         struct mqd_manager *mqd;
528         unsigned int i, err, inx;
529         uint64_t pipe_hpd_addr;
530
531         BUG_ON(!dqm || !dqm->dev);
532
533         pr_debug("kfd: In func %s\n", __func__);
534
535         /*
536          * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
537          * The driver never accesses this memory after zeroing it.
538          * It doesn't even have to be saved/restored on suspend/resume
539          * because it contains no data when there are no active queues.
540          */
541
542         err = kfd2kgd->allocate_mem(dqm->dev->kgd,
543                                 CIK_HPD_EOP_BYTES * pipes_num,
544                                 PAGE_SIZE,
545                                 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
546                                 (struct kgd_mem **) &dqm->pipeline_mem);
547
548         if (err) {
549                 pr_err("kfd: failed to allocate vidmem for %d pipes\n",
550                         pipes_num);
551                 return -ENOMEM;
552         }
553
554         hpdptr = dqm->pipeline_mem->cpu_ptr;
555         dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
556
557         memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
558
559         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
560         if (mqd == NULL) {
561                 kfd2kgd->free_mem(dqm->dev->kgd,
562                                 (struct kgd_mem *) dqm->pipeline_mem);
563                 return -ENOMEM;
564         }
565
566         for (i = 0; i < pipes_num; i++) {
567                 inx = i + first_pipe;
568                 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
569                 pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
570                 /* = log2(bytes/4)-1 */
571                 kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
572                                 CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
573         }
574
575         return 0;
576 }
577
578
579 static int init_scheduler(struct device_queue_manager *dqm)
580 {
581         int retval;
582
583         BUG_ON(!dqm);
584
585         pr_debug("kfd: In %s\n", __func__);
586
587         retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
588         if (retval != 0)
589                 return retval;
590
591         retval = init_memory(dqm);
592
593         return retval;
594 }
595
596 static int initialize_nocpsch(struct device_queue_manager *dqm)
597 {
598         int i;
599
600         BUG_ON(!dqm);
601
602         pr_debug("kfd: In func %s num of pipes: %d\n",
603                         __func__, get_pipes_num(dqm));
604
605         mutex_init(&dqm->lock);
606         INIT_LIST_HEAD(&dqm->queues);
607         dqm->queue_count = dqm->next_pipe_to_allocate = 0;
608         dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
609                                         sizeof(unsigned int), GFP_KERNEL);
610         if (!dqm->allocated_queues) {
611                 mutex_destroy(&dqm->lock);
612                 return -ENOMEM;
613         }
614
615         for (i = 0; i < get_pipes_num(dqm); i++)
616                 dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
617
618         dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
619
620         init_scheduler(dqm);
621         return 0;
622 }
623
624 static void uninitialize_nocpsch(struct device_queue_manager *dqm)
625 {
626         int i;
627
628         BUG_ON(!dqm);
629
630         BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
631
632         kfree(dqm->allocated_queues);
633         for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
634                 kfree(dqm->mqds[i]);
635         mutex_destroy(&dqm->lock);
636         kfd2kgd->free_mem(dqm->dev->kgd,
637                         (struct kgd_mem *) dqm->pipeline_mem);
638 }
639
640 static int start_nocpsch(struct device_queue_manager *dqm)
641 {
642         return 0;
643 }
644
645 static int stop_nocpsch(struct device_queue_manager *dqm)
646 {
647         return 0;
648 }
649
650 /*
651  * Device Queue Manager implementation for cp scheduler
652  */
653
654 static int set_sched_resources(struct device_queue_manager *dqm)
655 {
656         struct scheduling_resources res;
657         unsigned int queue_num, queue_mask;
658
659         BUG_ON(!dqm);
660
661         pr_debug("kfd: In func %s\n", __func__);
662
663         queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
664         queue_mask = (1 << queue_num) - 1;
665         res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
666         res.vmid_mask <<= KFD_VMID_START_OFFSET;
667         res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
668         res.gws_mask = res.oac_mask = res.gds_heap_base =
669                                                 res.gds_heap_size = 0;
670
671         pr_debug("kfd: scheduling resources:\n"
672                         "      vmid mask: 0x%8X\n"
673                         "      queue mask: 0x%8llX\n",
674                         res.vmid_mask, res.queue_mask);
675
676         return pm_send_set_resources(&dqm->packets, &res);
677 }
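/*
 * Sketch of the resulting masks, using assumed values for illustration (the
 * real ones come from VMID_PER_DEVICE, QUEUES_PER_PIPE, get_pipes_num_cpsch()
 * and get_first_pipe()): with 8 KFD VMIDs starting at VMID 8,
 * vmid_mask = 0xFF << 8 = 0xFF00; with 3 scheduling pipes of 8 queues each
 * and first_compute_pipe == 1, queue_mask = ((1 << 24) - 1) << 8 =
 * 0xFFFFFF00, i.e. HW queues 8..31 are handed over to the HW scheduler.
 */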
678
679 static int initialize_cpsch(struct device_queue_manager *dqm)
680 {
681         int retval;
682
683         BUG_ON(!dqm);
684
685         pr_debug("kfd: In func %s num of pipes: %d\n",
686                         __func__, get_pipes_num_cpsch());
687
688         mutex_init(&dqm->lock);
689         INIT_LIST_HEAD(&dqm->queues);
690         dqm->queue_count = dqm->processes_count = 0;
691         dqm->active_runlist = false;
692         retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
693         if (retval != 0)
694                 goto fail_init_pipelines;
695
696         return 0;
697
698 fail_init_pipelines:
699         mutex_destroy(&dqm->lock);
700         return retval;
701 }
702
703 static int start_cpsch(struct device_queue_manager *dqm)
704 {
705         struct device_process_node *node;
706         int retval;
707
708         BUG_ON(!dqm);
709
710         retval = 0;
711
712         retval = pm_init(&dqm->packets, dqm);
713         if (retval != 0)
714                 goto fail_packet_manager_init;
715
716         retval = set_sched_resources(dqm);
717         if (retval != 0)
718                 goto fail_set_sched_resources;
719
720         pr_debug("kfd: allocating fence memory\n");
721
722         /* allocate fence memory on the gart */
723         retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
724                                         sizeof(*dqm->fence_addr),
725                                         32,
726                                         KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
727                                         (struct kgd_mem **) &dqm->fence_mem);
728
729         if (retval != 0)
730                 goto fail_allocate_vidmem;
731
732         dqm->fence_addr = dqm->fence_mem->cpu_ptr;
733         dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
734
735         list_for_each_entry(node, &dqm->queues, list)
736                 if (node->qpd->pqm->process && dqm->dev)
737                         kfd_bind_process_to_device(dqm->dev,
738                                                 node->qpd->pqm->process);
739
740         execute_queues_cpsch(dqm, true);
741
742         return 0;
743 fail_allocate_vidmem:
744 fail_set_sched_resources:
745         pm_uninit(&dqm->packets);
746 fail_packet_manager_init:
747         return retval;
748 }
749
750 static int stop_cpsch(struct device_queue_manager *dqm)
751 {
752         struct device_process_node *node;
753         struct kfd_process_device *pdd;
754
755         BUG_ON(!dqm);
756
757         destroy_queues_cpsch(dqm, true);
758
759         list_for_each_entry(node, &dqm->queues, list) {
760                 pdd = qpd_to_pdd(node->qpd);
761                 pdd->bound = false;
762         }
763         kfd2kgd->free_mem(dqm->dev->kgd,
764                         (struct kgd_mem *) dqm->fence_mem);
765         pm_uninit(&dqm->packets);
766
767         return 0;
768 }
769
770 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
771                                         struct kernel_queue *kq,
772                                         struct qcm_process_device *qpd)
773 {
774         BUG_ON(!dqm || !kq || !qpd);
775
776         pr_debug("kfd: In func %s\n", __func__);
777
778         mutex_lock(&dqm->lock);
779         if (dqm->total_queue_count >= max_num_of_queues_per_device) {
780                 pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
781                                 dqm->total_queue_count);
782                 mutex_unlock(&dqm->lock);
783                 return -EPERM;
784         }
785
786         /*
787          * Unconditionally increment this counter, regardless of the queue's
788          * type or whether the queue is active.
789          */
790         dqm->total_queue_count++;
791         pr_debug("Total of %d queues are accountable so far\n",
792                         dqm->total_queue_count);
793
794         list_add(&kq->list, &qpd->priv_queue_list);
795         dqm->queue_count++;
796         qpd->is_debug = true;
797         execute_queues_cpsch(dqm, false);
798         mutex_unlock(&dqm->lock);
799
800         return 0;
801 }
802
803 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
804                                         struct kernel_queue *kq,
805                                         struct qcm_process_device *qpd)
806 {
807         BUG_ON(!dqm || !kq);
808
809         pr_debug("kfd: In %s\n", __func__);
810
811         mutex_lock(&dqm->lock);
812         destroy_queues_cpsch(dqm, false);
813         list_del(&kq->list);
814         dqm->queue_count--;
815         qpd->is_debug = false;
816         execute_queues_cpsch(dqm, false);
817         /*
818          * Unconditionally decrement this counter, regardless of the queue's
819          * type.
820          */
821         dqm->total_queue_count--;
822         pr_debug("Total of %d queues are accountable so far\n",
823                         dqm->total_queue_count);
824         mutex_unlock(&dqm->lock);
825 }
826
827 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
828                         struct qcm_process_device *qpd, int *allocate_vmid)
829 {
830         int retval;
831         struct mqd_manager *mqd;
832
833         BUG_ON(!dqm || !q || !qpd);
834
835         retval = 0;
836
837         if (allocate_vmid)
838                 *allocate_vmid = 0;
839
840         mutex_lock(&dqm->lock);
841
842         if (dqm->total_queue_count >= max_num_of_queues_per_device) {
843                 pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
844                                 dqm->total_queue_count);
845                 retval = -EPERM;
846                 goto out;
847         }
848
849         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
850         if (mqd == NULL) {
851                 mutex_unlock(&dqm->lock);
852                 return -ENOMEM;
853         }
854
855         retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
856                                 &q->gart_mqd_addr, &q->properties);
857         if (retval != 0)
858                 goto out;
859
860         list_add(&q->list, &qpd->queues_list);
861         if (q->properties.is_active) {
862                 dqm->queue_count++;
863                 retval = execute_queues_cpsch(dqm, false);
864         }
865
866         /*
867          * Unconditionally increment this counter, regardless of the queue's
868          * type or whether the queue is active.
869          */
870         dqm->total_queue_count++;
871
872         pr_debug("Total of %d queues are accountable so far\n",
873                         dqm->total_queue_count);
874
875 out:
876         mutex_unlock(&dqm->lock);
877         return retval;
878 }
879
880 static int fence_wait_timeout(unsigned int *fence_addr,
881                                 unsigned int fence_value,
882                                 unsigned long timeout)
883 {
884         BUG_ON(!fence_addr);
885         timeout += jiffies;
886
887         while (*fence_addr != fence_value) {
888                 if (time_after(jiffies, timeout)) {
889                         pr_err("kfd: qcm fence wait loop timeout expired\n");
890                         return -ETIME;
891                 }
892                 cpu_relax();
893         }
894
895         return 0;
896 }
897
898 static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
899 {
900         int retval;
901
902         BUG_ON(!dqm);
903
904         retval = 0;
905
906         if (lock)
907                 mutex_lock(&dqm->lock);
908         if (dqm->active_runlist == false)
909                 goto out;
910         retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
911                         KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
912         if (retval != 0)
913                 goto out;
914
915         *dqm->fence_addr = KFD_FENCE_INIT;
916         pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
917                                 KFD_FENCE_COMPLETED);
918         /* bounded wait: time out in case the CP never writes the fence */
919         fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
920                                 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
921         pm_release_ib(&dqm->packets);
922         dqm->active_runlist = false;
923
924 out:
925         if (lock)
926                 mutex_unlock(&dqm->lock);
927         return retval;
928 }
929
930 static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
931 {
932         int retval;
933
934         BUG_ON(!dqm);
935
936         if (lock)
937                 mutex_lock(&dqm->lock);
938
939         retval = destroy_queues_cpsch(dqm, false);
940         if (retval != 0) {
941                 pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
942                 goto out;
943         }
944
945         if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
946                 retval = 0;
947                 goto out;
948         }
949
950         if (dqm->active_runlist) {
951                 retval = 0;
952                 goto out;
953         }
954
955         retval = pm_send_runlist(&dqm->packets, &dqm->queues);
956         if (retval != 0) {
957                 pr_err("kfd: failed to execute runlist");
958                 goto out;
959         }
960         dqm->active_runlist = true;
961
962 out:
963         if (lock)
964                 mutex_unlock(&dqm->lock);
965         return retval;
966 }
967
968 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
969                                 struct qcm_process_device *qpd,
970                                 struct queue *q)
971 {
972         int retval;
973         struct mqd_manager *mqd;
974
975         BUG_ON(!dqm || !qpd || !q);
976
977         retval = 0;
978
979         /* remove queue from list to prevent rescheduling after preemption */
980         mutex_lock(&dqm->lock);
981
982         mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
983         if (!mqd) {
984                 retval = -ENOMEM;
985                 goto failed;
986         }
987
988         list_del(&q->list);
989         dqm->queue_count--;
990
991         execute_queues_cpsch(dqm, false);
992
993         mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
994
995         /*
996          * Unconditionally decrement this counter, regardless of the queue's
997          * type
998          */
999         dqm->total_queue_count--;
1000         pr_debug("Total of %d queues are accountable so far\n",
1001                         dqm->total_queue_count);
1002
1003         mutex_unlock(&dqm->lock);
1004
1005         return 0;
1006
1007 failed:
1008         mutex_unlock(&dqm->lock);
1009         return retval;
1010 }
1011
1012 /*
1013  * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1014  * stay in user mode.
1015  */
1016 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1017 /* APE1 limit is inclusive and 64K aligned. */
1018 #define APE1_LIMIT_ALIGNMENT 0xFFFF
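/*
 * Worked example of the APE1 encoding below, with hypothetical addresses:
 * base = 0x100000000 and size = 0x10000 give limit = 0x10000FFFF. Both pass
 * the APE1_FIXED_BITS_MASK checks (low 16 bits are 0x0000 resp. 0xFFFF,
 * bits 47..63 are clear), and the registers are programmed as
 * sh_mem_ape1_base = 0x100000000 >> 16 = 0x10000 and
 * sh_mem_ape1_limit = 0x10000FFFF >> 16 = 0x10000.
 */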
1019
1020 static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1021                                    struct qcm_process_device *qpd,
1022                                    enum cache_policy default_policy,
1023                                    enum cache_policy alternate_policy,
1024                                    void __user *alternate_aperture_base,
1025                                    uint64_t alternate_aperture_size)
1026 {
1027         uint32_t default_mtype;
1028         uint32_t ape1_mtype;
1029
1030         pr_debug("kfd: In func %s\n", __func__);
1031
1032         mutex_lock(&dqm->lock);
1033
1034         if (alternate_aperture_size == 0) {
1035                 /* base > limit disables APE1 */
1036                 qpd->sh_mem_ape1_base = 1;
1037                 qpd->sh_mem_ape1_limit = 0;
1038         } else {
1039                 /*
1040                  * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1041                  *                      SH_MEM_APE1_BASE[31:0], 0x0000 }
1042                  * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1043                  *                      SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1044                  * Verify that the base and size parameters can be
1045                  * represented in this format and convert them.
1046                  * Additionally restrict APE1 to user-mode addresses.
1047                  */
1048
1049                 uint64_t base = (uintptr_t)alternate_aperture_base;
1050                 uint64_t limit = base + alternate_aperture_size - 1;
1051
1052                 if (limit <= base)
1053                         goto out;
1054
1055                 if ((base & APE1_FIXED_BITS_MASK) != 0)
1056                         goto out;
1057
1058                 if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
1059                         goto out;
1060
1061                 qpd->sh_mem_ape1_base = base >> 16;
1062                 qpd->sh_mem_ape1_limit = limit >> 16;
1063         }
1064
1065         default_mtype = (default_policy == cache_policy_coherent) ?
1066                         MTYPE_NONCACHED :
1067                         MTYPE_CACHED;
1068
1069         ape1_mtype = (alternate_policy == cache_policy_coherent) ?
1070                         MTYPE_NONCACHED :
1071                         MTYPE_CACHED;
1072
1073         qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
1074                         | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
1075                         | DEFAULT_MTYPE(default_mtype)
1076                         | APE1_MTYPE(ape1_mtype);
1077
1078         if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1079                 program_sh_mem_settings(dqm, qpd);
1080
1081         pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1082                 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1083                 qpd->sh_mem_ape1_limit);
1084
1085         mutex_unlock(&dqm->lock);
1086         return true;
1087
1088 out:
1089         mutex_unlock(&dqm->lock);
1090         return false;
1091 }
1092
1093 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1094 {
1095         struct device_queue_manager *dqm;
1096
1097         BUG_ON(!dev);
1098
1099         dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
1100         if (!dqm)
1101                 return NULL;
1102
1103         dqm->dev = dev;
1104         switch (sched_policy) {
1105         case KFD_SCHED_POLICY_HWS:
1106         case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1107                 /* initialize dqm for cp scheduling */
1108                 dqm->create_queue = create_queue_cpsch;
1109                 dqm->initialize = initialize_cpsch;
1110                 dqm->start = start_cpsch;
1111                 dqm->stop = stop_cpsch;
1112                 dqm->destroy_queue = destroy_queue_cpsch;
1113                 dqm->update_queue = update_queue;
1114                 dqm->get_mqd_manager = get_mqd_manager_nocpsch;
1115                 dqm->register_process = register_process_nocpsch;
1116                 dqm->unregister_process = unregister_process_nocpsch;
1117                 dqm->uninitialize = uninitialize_nocpsch;
1118                 dqm->create_kernel_queue = create_kernel_queue_cpsch;
1119                 dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
1120                 dqm->set_cache_memory_policy = set_cache_memory_policy;
1121                 break;
1122         case KFD_SCHED_POLICY_NO_HWS:
1123                 /* initialize dqm for no cp scheduling */
1124                 dqm->start = start_nocpsch;
1125                 dqm->stop = stop_nocpsch;
1126                 dqm->create_queue = create_queue_nocpsch;
1127                 dqm->destroy_queue = destroy_queue_nocpsch;
1128                 dqm->update_queue = update_queue;
1129                 dqm->get_mqd_manager = get_mqd_manager_nocpsch;
1130                 dqm->register_process = register_process_nocpsch;
1131                 dqm->unregister_process = unregister_process_nocpsch;
1132                 dqm->initialize = initialize_nocpsch;
1133                 dqm->uninitialize = uninitialize_nocpsch;
1134                 dqm->set_cache_memory_policy = set_cache_memory_policy;
1135                 break;
1136         default:
1137                 BUG();
1138                 break;
1139         }
1140
1141         if (dqm->initialize(dqm) != 0) {
1142                 kfree(dqm);
1143                 return NULL;
1144         }
1145
1146         return dqm;
1147 }
1148
1149 void device_queue_manager_uninit(struct device_queue_manager *dqm)
1150 {
1151         BUG_ON(!dqm);
1152
1153         dqm->uninitialize(dqm);
1154         kfree(dqm);
1155 }
1156