/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
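/* Take a free bit from dqm->vmid_bitmap, convert it to a HW VMID by
 * adding first_vmid_kfd, then program the PASID->VMID mapping and the
 * SH_MEM registers for the newly owned VMID.
 */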
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap,
				dqm->dev->vm_info.vmid_num_kfd);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list)) {
			deallocate_vmid(dqm, qpd, q);
			*allocated_vmid = 0;
		}
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
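/* Find a free HQD slot for the queue. Pipes are scanned starting at
 * next_pipe_to_allocate, and the cursor advances on success, so queues
 * are spread horizontally across pipes rather than packed into one.
 */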
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				get_queues_per_pipe(dqm));
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	mutex_lock(&dqm->lock);

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		retval = -EINVAL;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval)
		goto out;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	if (q->properties.is_active)
		dqm->queue_count--;

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
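/* Write the queue's (possibly changed) properties back to its MQD and
 * adjust dqm->queue_count if the active state changed; under HW
 * scheduling the runlist is then re-submitted so the change takes
 * effect.
 */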
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if ((q->properties.is_active) && (!prev_active))
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	retval = dqm->ops_asic_specific.register_process(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}
static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
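/* SDMA queue ids are tracked in dqm->sdma_bitmap, one bit per queue
 * across both SDMA engines.
 */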
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->sdma_bitmap,
				CIK_SDMA_QUEUES);

	clear_bit(bit, (unsigned long *)&dqm->sdma_bitmap);
	*sdma_queue_id = bit;

	return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	set_bit(sdma_queue_id, (unsigned long *)&dqm->sdma_bitmap);
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id / CIK_SDMA_ENGINE_NUM;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
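/* Report to the HW scheduler which VMIDs and HQDs it may use. Only
 * queues on the first MEC are offered, and res.queue_mask is a 64-bit
 * mask, which bounds the queue ids that can be handed over.
 */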
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	retval = dqm->ops_asic_specific.initialize(dqm);
	if (retval)
		mutex_destroy(&dqm->lock);

	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	destroy_queues_cpsch(dqm, true, true);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	/* here we actually preempt the DIQ */
	destroy_queues_cpsch(dqm, true, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
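/* Alternate new user SDMA queues between the two SDMA engines using a
 * function-local static counter.
 */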
static void select_sdma_engine_id(struct queue *q)
{
	static int sdma_id;

	q->sdma_id = sdma_id;
	sdma_id = (sdma_id + 1) % 2;
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		select_sdma_engine_id(q);

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->ops_asic_specific.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
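/* Busy-wait (yielding via schedule()) until the fence location in GART
 * memory reaches fence_value, or fail with -ETIME after timeout_ms.
 */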
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}
static int destroy_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}
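/* Preempt queues off the hardware: unmap SDMA and compute queues (all
 * queues or only dynamic ones, per preempt_static_queues), then wait on
 * a fence the scheduler writes once preemption completes. If the fence
 * wait times out, the current process is flagged for wavefront reset.
 */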
static int destroy_queues_cpsch(struct device_queue_manager *dqm,
				bool preempt_static_queues, bool lock)
{
	int retval;
	enum kfd_preempt_type_filter preempt_type;
	struct kfd_process_device *pdd;

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		destroy_sdma_queues(dqm, 0);
		destroy_sdma_queues(dqm, 1);
	}

	preempt_type = preempt_static_queues ?
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES :
			KFD_PREEMPT_TYPE_FILTER_DYNAMIC_QUEUES;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			preempt_type, 0, false, 0);
	if (retval)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval) {
		pdd = kfd_get_process_device_data(dqm->dev,
				kfd_get_process(current));
		pdd->reset_wavefronts = true;
		goto out;
	}
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
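/* Rebuild the runlist: preempt whatever is currently mapped, then
 * submit a fresh runlist unless there are no active queues or
 * processes.
 */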
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false, false);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count--;

	list_del(&q->list);
	if (q->properties.is_active)
		dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return retval;

failed:
failed_try_destroy_debugged_queue:
	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
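/* Configure the APE1 alternate aperture and cache policy for a process.
 * A zero-sized aperture is encoded as base > limit, which the HW treats
 * as APE1 disabled.
 */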
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->ops_asic_specific.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
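/* Build a DQM for the device: the ops table is chosen by sched_policy
 * (cpsch entry points when the CP HW scheduler is used, nocpsch when
 * the driver programs HQDs directly), then ASIC-specific ops are filled
 * in per family.
 */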
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->ops.register_process = register_process_nocpsch;
		dqm->ops.unregister_process = unregister_process_nocpsch;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize_nocpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->ops_asic_specific);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->ops_asic_specific);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}