/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
					unsigned int sdma_queue_id);

enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

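/* A pipe is usable by KFD if at least one of its queues is marked in the
 * queue_bitmap that amdgpu shares with KFD.
 */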
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
				dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

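/* KFD owns a contiguous range of VMIDs starting at first_vmid_kfd; a set
 * bit in vmid_bitmap means the corresponding VMID is free. Binding a VMID
 * to a process means programming its PASID-VMID mapping and its SH_MEM
 * aperture registers.
 */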
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
				dqm->dev->vm_info.vmid_num_kfd);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

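/* Find a free HQD slot for a new queue. The search starts at
 * next_pipe_to_allocate and walks the pipes round-robin, so successive
 * allocations spread across pipes ("horizontal" allocation) instead of
 * filling one pipe before moving to the next.
 */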
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
				q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}

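/* Updating an MQD while its queue is mapped to a HQD would race with the
 * hardware. Under HWS the dynamic queues are unmapped first and remapped
 * afterwards; without HWS an active queue is destroyed on its HQD and
 * reloaded once the MQD has been updated.
 */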
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

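/* Tell the HWS firmware which VMIDs and HQDs it may use. Queues are taken
 * from the shared queue_bitmap, restricted to the first MEC, and reported
 * as a 64-bit mask indexed by global queue number.
 */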
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
		q->properties.sdma_queue_id =
			q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
		q->properties.sdma_engine_id =
			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

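/* Poll *fence_addr, yielding the CPU between reads, until it reaches
 * fence_value or until timeout_ms expires.
 */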
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

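/* Unmapping is a handshake with the scheduler firmware: after the unmap
 * packets are submitted, a query-status packet asks the CP to write
 * KFD_FENCE_COMPLETED to the fence location, and the driver waits on that
 * fence to confirm the preemption finished before releasing the runlist IB.
 */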
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is: %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* bounded wait for the preemption to complete */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

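/* Example of a configuration that passes the checks below: base =
 * 0x0000100000000000 (low 16 bits and high fixed bits clear) with size =
 * 64K gives limit = 0x000010000000FFFF, whose fixed bits equal the
 * required 0xFFFF alignment pattern.
 */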
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

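/* When CWSR is enabled the queues run with the kernel CWSR trap handler
 * installed, so the user handler address cannot be programmed directly;
 * instead the (tba_addr, tma_addr) pair is stored in the TMA page, where
 * the CWSR trap handler picks it up to jump to the user trap.
 */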
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}

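/* Process teardown under HWS: kernel (debug) queues are removed first,
 * then all user queues are unmapped in one filtered preemption, the
 * process is unregistered, and only afterwards are the MQDs freed, since
 * an MQD must stay valid until its queue has been unmapped.
 */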
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			dqm->sdma_queue_count--;

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}

#if defined(CONFIG_DEBUG_FS)

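/* Pretty-print the (address, value) pairs returned by the hqd_dump hooks:
 * runs of consecutive register addresses are folded into one line of up to
 * eight values, prefixed by the starting address.
 */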
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
		for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

#endif