2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/ratelimit.h>
25 #include <linux/printk.h>
26 #include <linux/slab.h>
27 #include <linux/list.h>
28 #include <linux/types.h>
29 #include <linux/bitops.h>
30 #include <linux/sched.h>
32 #include "kfd_device_queue_manager.h"
33 #include "kfd_mqd_manager.h"
35 #include "kfd_kernel_queue.h"
37 /* Size of the per-pipe EOP queue */
38 #define CIK_HPD_EOP_BYTES_LOG2 11
39 #define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
41 static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
42 unsigned int pasid, unsigned int vmid);
44 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
46 struct qcm_process_device *qpd);
48 static int execute_queues_cpsch(struct device_queue_manager *dqm,
49 enum kfd_unmap_queues_filter filter,
50 uint32_t filter_param);
51 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
52 enum kfd_unmap_queues_filter filter,
53 uint32_t filter_param);
55 static int map_queues_cpsch(struct device_queue_manager *dqm);
57 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
59 struct qcm_process_device *qpd);
61 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
62 unsigned int sdma_queue_id);
65 enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
67 if (type == KFD_QUEUE_TYPE_SDMA)
68 return KFD_MQD_TYPE_SDMA;
69 return KFD_MQD_TYPE_CP;
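/* A pipe is usable by KFD if at least one of its queues is marked as
 * available in the shared queue_bitmap.
 */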
72 static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
75 int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
76 + pipe * dqm->dev->shared_resources.num_queue_per_pipe;
78 /* queue is available for KFD usage if bit is 1 */
79 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
80 if (test_bit(pipe_offset + i,
81 dqm->dev->shared_resources.queue_bitmap))
86 unsigned int get_queues_num(struct device_queue_manager *dqm)
88 return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
92 unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
94 return dqm->dev->shared_resources.num_queue_per_pipe;
97 unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
99 return dqm->dev->shared_resources.num_pipe_per_mec;
102 void program_sh_mem_settings(struct device_queue_manager *dqm,
103 struct qcm_process_device *qpd)
105 return dqm->dev->kfd2kgd->program_sh_mem_settings(
106 dqm->dev->kgd, qpd->vmid,
108 qpd->sh_mem_ape1_base,
109 qpd->sh_mem_ape1_limit,
113 static int allocate_vmid(struct device_queue_manager *dqm,
114 struct qcm_process_device *qpd,
117 int bit, allocated_vmid;
119 if (dqm->vmid_bitmap == 0)
122 bit = ffs(dqm->vmid_bitmap) - 1;
123 dqm->vmid_bitmap &= ~(1 << bit);
125 allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
126 pr_debug("vmid allocation %d\n", allocated_vmid);
127 qpd->vmid = allocated_vmid;
128 q->properties.vmid = allocated_vmid;
130 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
131 program_sh_mem_settings(dqm, qpd);
133 /* qpd->page_table_base is set earlier when register_process()
134 * is called, i.e. when the first queue is created.
136 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
138 qpd->page_table_base);
139 /* invalidate the VM context after pasid and vmid mapping is set up */
140 kfd_flush_tlb(qpd_to_pdd(qpd));
145 static void deallocate_vmid(struct device_queue_manager *dqm,
146 struct qcm_process_device *qpd,
149 int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;
151 kfd_flush_tlb(qpd_to_pdd(qpd));
153 /* Release the vmid mapping */
154 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
156 dqm->vmid_bitmap |= (1 << bit);
158 q->properties.vmid = 0;
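/* Create a user-mode queue without the HW scheduler (no-HWS): a VMID is
 * allocated for the process on its first queue, then the queue is backed
 * directly by an HQD (compute) or an SDMA engine queue.
 */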
161 static int create_queue_nocpsch(struct device_queue_manager *dqm,
163 struct qcm_process_device *qpd)
169 mutex_lock(&dqm->lock);
171 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
172 pr_warn("Can't create new usermode queue because %d queues were already created\n",
173 dqm->total_queue_count);
178 if (list_empty(&qpd->queues_list)) {
179 retval = allocate_vmid(dqm, qpd, q);
183 q->properties.vmid = qpd->vmid;
185 * Eviction state logic: we only mark active queues as evicted
186 * to avoid the overhead of restoring inactive queues later
189 q->properties.is_evicted = (q->properties.queue_size > 0 &&
190 q->properties.queue_percent > 0 &&
191 q->properties.queue_address != 0);
193 q->properties.tba_addr = qpd->tba_addr;
194 q->properties.tma_addr = qpd->tma_addr;
196 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
197 retval = create_compute_queue_nocpsch(dqm, q, qpd);
198 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
199 retval = create_sdma_queue_nocpsch(dqm, q, qpd);
204 if (list_empty(&qpd->queues_list))
205 deallocate_vmid(dqm, qpd, q);
209 list_add(&q->list, &qpd->queues_list);
211 if (q->properties.is_active)
214 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
215 dqm->sdma_queue_count++;
218 * Unconditionally increment this counter, regardless of the queue's
219 * type or whether the queue is active.
221 dqm->total_queue_count++;
222 pr_debug("Total of %d queues are accountable so far\n",
223 dqm->total_queue_count);
226 mutex_unlock(&dqm->lock);
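/* Round-robin over the enabled pipes, starting at next_pipe_to_allocate,
 * and claim the first free HQD slot found.
 */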
230 static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
237 for (pipe = dqm->next_pipe_to_allocate, i = 0;
238 i < get_pipes_per_mec(dqm);
239 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
241 if (!is_pipe_enabled(dqm, 0, pipe))
244 if (dqm->allocated_queues[pipe] != 0) {
245 bit = ffs(dqm->allocated_queues[pipe]) - 1;
246 dqm->allocated_queues[pipe] &= ~(1 << bit);
257 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
258 /* horizontal hqd allocation */
259 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
264 static inline void deallocate_hqd(struct device_queue_manager *dqm,
267 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
270 static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
272 struct qcm_process_device *qpd)
275 struct mqd_manager *mqd;
277 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
281 retval = allocate_hqd(dqm, q);
285 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
286 &q->gart_mqd_addr, &q->properties);
288 goto out_deallocate_hqd;
290 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
293 dqm->dev->kfd2kgd->set_scratch_backing_va(
294 dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);
296 if (!q->properties.is_active)
299 retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
307 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
309 deallocate_hqd(dqm, q);
314 /* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
315 * to avoid unsynchronized access
317 static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
318 struct qcm_process_device *qpd,
322 struct mqd_manager *mqd;
324 mqd = dqm->ops.get_mqd_manager(dqm,
325 get_mqd_type_from_queue_type(q->properties.type));
329 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
330 deallocate_hqd(dqm, q);
331 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
332 dqm->sdma_queue_count--;
333 deallocate_sdma_queue(dqm, q->sdma_id);
335 pr_debug("q->properties.type %d is invalid\n",
339 dqm->total_queue_count--;
341 retval = mqd->destroy_mqd(mqd, q->mqd,
342 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
343 KFD_UNMAP_LATENCY_MS,
345 if (retval == -ETIME)
346 qpd->reset_wavefronts = true;
348 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
351 if (list_empty(&qpd->queues_list)) {
352 if (qpd->reset_wavefronts) {
353 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
355 /* dbgdev_wave_reset_wavefronts has to be called before
356 * deallocate_vmid(), i.e. when vmid is still in use.
358 dbgdev_wave_reset_wavefronts(dqm->dev,
360 qpd->reset_wavefronts = false;
363 deallocate_vmid(dqm, qpd, q);
366 if (q->properties.is_active)
372 static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
373 struct qcm_process_device *qpd,
378 mutex_lock(&dqm->lock);
379 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
380 mutex_unlock(&dqm->lock);
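/* Update a queue's MQD. The queue is unmapped (HWS) or its HQD destroyed
 * (no-HWS) before the MQD is modified, and mapped/loaded again afterwards
 * if it is still active.
 */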
385 static int update_queue(struct device_queue_manager *dqm, struct queue *q)
388 struct mqd_manager *mqd;
389 struct kfd_process_device *pdd;
390 bool prev_active = false;
392 mutex_lock(&dqm->lock);
393 pdd = kfd_get_process_device_data(q->device, q->process);
398 mqd = dqm->ops.get_mqd_manager(dqm,
399 get_mqd_type_from_queue_type(q->properties.type));
405 * Eviction state logic: we only mark active queues as evicted
406 * to avoid the overhead of restoring inactive queues later
408 if (pdd->qpd.evicted)
409 q->properties.is_evicted = (q->properties.queue_size > 0 &&
410 q->properties.queue_percent > 0 &&
411 q->properties.queue_address != 0);
413 /* Save previous activity state for counters */
414 prev_active = q->properties.is_active;
416 /* Make sure the queue is unmapped before updating the MQD */
417 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
418 retval = unmap_queues_cpsch(dqm,
419 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
421 pr_err("unmap queue failed\n");
424 } else if (prev_active &&
425 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
426 q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
427 retval = mqd->destroy_mqd(mqd, q->mqd,
428 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
429 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
431 pr_err("destroy mqd failed\n");
436 retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
439 * check active state vs. the previous state and modify
440 * counter accordingly. map_queues_cpsch uses the
441 * dqm->queue_count to determine whether a new runlist must be uploaded.
444 if (q->properties.is_active && !prev_active)
446 else if (!q->properties.is_active && prev_active)
449 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
450 retval = map_queues_cpsch(dqm);
451 else if (q->properties.is_active &&
452 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
453 q->properties.type == KFD_QUEUE_TYPE_SDMA))
454 retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
455 &q->properties, q->process->mm);
458 mutex_unlock(&dqm->lock);
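/* Return the MQD manager for the given type, creating it on first use. */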
462 static struct mqd_manager *get_mqd_manager(
463 struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
465 struct mqd_manager *mqd;
467 if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
470 pr_debug("mqd type %d\n", type);
472 mqd = dqm->mqds[type];
474 mqd = mqd_manager_init(type, dqm->dev);
476 pr_err("mqd manager is NULL");
477 dqm->mqds[type] = mqd;
483 static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
484 struct qcm_process_device *qpd)
487 struct mqd_manager *mqd;
488 struct kfd_process_device *pdd;
491 mutex_lock(&dqm->lock);
492 if (qpd->evicted++ > 0) /* already evicted, do nothing */
495 pdd = qpd_to_pdd(qpd);
496 pr_info_ratelimited("Evicting PASID %u queues\n",
497 pdd->process->pasid);
499 /* Deactivate all active queues on the qpd */
500 list_for_each_entry(q, &qpd->queues_list, list) {
501 if (!q->properties.is_active)
503 mqd = dqm->ops.get_mqd_manager(dqm,
504 get_mqd_type_from_queue_type(q->properties.type));
505 if (!mqd) { /* should not be here */
506 pr_err("Cannot evict queue, mqd mgr is NULL\n");
510 q->properties.is_evicted = true;
511 q->properties.is_active = false;
512 retval = mqd->destroy_mqd(mqd, q->mqd,
513 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
514 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
521 mutex_unlock(&dqm->lock);
525 static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
526 struct qcm_process_device *qpd)
529 struct kfd_process_device *pdd;
532 mutex_lock(&dqm->lock);
533 if (qpd->evicted++ > 0) /* already evicted, do nothing */
536 pdd = qpd_to_pdd(qpd);
537 pr_info_ratelimited("Evicting PASID %u queues\n",
538 pdd->process->pasid);
540 /* Deactivate all active queues on the qpd */
541 list_for_each_entry(q, &qpd->queues_list, list) {
542 if (!q->properties.is_active)
544 q->properties.is_evicted = true;
545 q->properties.is_active = false;
548 retval = execute_queues_cpsch(dqm,
550 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
551 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
554 mutex_unlock(&dqm->lock);
558 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
559 struct qcm_process_device *qpd)
562 struct mqd_manager *mqd;
563 struct kfd_process_device *pdd;
567 pdd = qpd_to_pdd(qpd);
568 /* Retrieve PD base */
569 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
571 mutex_lock(&dqm->lock);
572 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
574 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
579 pr_info_ratelimited("Restoring PASID %u queues\n",
580 pdd->process->pasid);
582 /* Update PD Base in QPD */
583 qpd->page_table_base = pd_base;
584 pr_debug("Updated PD address to 0x%08x\n", pd_base);
586 if (!list_empty(&qpd->queues_list)) {
587 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
590 qpd->page_table_base);
594 /* Re-activate all evicted queues on the qpd */
595 list_for_each_entry(q, &qpd->queues_list, list) {
596 if (!q->properties.is_evicted)
598 mqd = dqm->ops.get_mqd_manager(dqm,
599 get_mqd_type_from_queue_type(q->properties.type));
600 if (!mqd) { /* should not be here */
601 pr_err("Cannot restore queue, mqd mgr is NULL\n");
605 q->properties.is_evicted = false;
606 q->properties.is_active = true;
607 retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
608 q->queue, &q->properties,
616 mutex_unlock(&dqm->lock);
620 static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
621 struct qcm_process_device *qpd)
624 struct kfd_process_device *pdd;
628 pdd = qpd_to_pdd(qpd);
629 /* Retrieve PD base */
630 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
632 mutex_lock(&dqm->lock);
633 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
635 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
640 pr_info_ratelimited("Restoring PASID %u queues\n",
641 pdd->process->pasid);
643 /* Update PD Base in QPD */
644 qpd->page_table_base = pd_base;
645 pr_debug("Updated PD address to 0x%08x\n", pd_base);
647 /* Re-activate all evicted queues on the qpd */
648 list_for_each_entry(q, &qpd->queues_list, list) {
649 if (!q->properties.is_evicted)
651 q->properties.is_evicted = false;
652 q->properties.is_active = true;
655 retval = execute_queues_cpsch(dqm,
656 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
660 mutex_unlock(&dqm->lock);
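/* Register a process with the DQM: track its qpd, cache the process page
 * directory base and apply the ASIC-specific qpd setup.
 */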
664 static int register_process(struct device_queue_manager *dqm,
665 struct qcm_process_device *qpd)
667 struct device_process_node *n;
668 struct kfd_process_device *pdd;
672 n = kzalloc(sizeof(*n), GFP_KERNEL);
678 pdd = qpd_to_pdd(qpd);
679 /* Retrieve PD base */
680 pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
682 mutex_lock(&dqm->lock);
683 list_add(&n->list, &dqm->queues);
685 /* Update PD Base in QPD */
686 qpd->page_table_base = pd_base;
688 retval = dqm->asic_ops.update_qpd(dqm, qpd);
690 dqm->processes_count++;
692 mutex_unlock(&dqm->lock);
697 static int unregister_process(struct device_queue_manager *dqm,
698 struct qcm_process_device *qpd)
701 struct device_process_node *cur, *next;
703 pr_debug("qpd->queues_list is %s\n",
704 list_empty(&qpd->queues_list) ? "empty" : "not empty");
707 mutex_lock(&dqm->lock);
709 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
710 if (qpd == cur->qpd) {
711 list_del(&cur->list);
713 dqm->processes_count--;
717 /* qpd not found in dqm list */
720 mutex_unlock(&dqm->lock);
725 set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
728 uint32_t pasid_mapping;
730 pasid_mapping = (pasid == 0) ? 0 :
732 ATC_VMID_PASID_MAPPING_VALID;
734 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
735 dqm->dev->kgd, pasid_mapping,
739 static void init_interrupts(struct device_queue_manager *dqm)
743 for (i = 0; i < get_pipes_per_mec(dqm); i++)
744 if (is_pipe_enabled(dqm, 0, i))
745 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
748 static int initialize_nocpsch(struct device_queue_manager *dqm)
752 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
754 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
755 sizeof(unsigned int), GFP_KERNEL);
756 if (!dqm->allocated_queues)
759 mutex_init(&dqm->lock);
760 INIT_LIST_HEAD(&dqm->queues);
761 dqm->queue_count = dqm->next_pipe_to_allocate = 0;
762 dqm->sdma_queue_count = 0;
764 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
765 int pipe_offset = pipe * get_queues_per_pipe(dqm);
767 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
768 if (test_bit(pipe_offset + queue,
769 dqm->dev->shared_resources.queue_bitmap))
770 dqm->allocated_queues[pipe] |= 1 << queue;
773 dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
774 dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
779 static void uninitialize(struct device_queue_manager *dqm)
783 WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
785 kfree(dqm->allocated_queues);
786 for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
788 mutex_destroy(&dqm->lock);
789 kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
792 static int start_nocpsch(struct device_queue_manager *dqm)
794 init_interrupts(dqm);
798 static int stop_nocpsch(struct device_queue_manager *dqm)
803 static int allocate_sdma_queue(struct device_queue_manager *dqm,
804 unsigned int *sdma_queue_id)
808 if (dqm->sdma_bitmap == 0)
811 bit = ffs(dqm->sdma_bitmap) - 1;
812 dqm->sdma_bitmap &= ~(1 << bit);
813 *sdma_queue_id = bit;
818 static void deallocate_sdma_queue(struct device_queue_manager *dqm,
819 unsigned int sdma_queue_id)
821 if (sdma_queue_id >= CIK_SDMA_QUEUES)
823 dqm->sdma_bitmap |= (1 << sdma_queue_id);
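/* Create an SDMA queue without the HW scheduler: allocate an SDMA queue id,
 * derive the engine/queue pair from it, then initialize and load the MQD
 * directly.
 */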
826 static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
828 struct qcm_process_device *qpd)
830 struct mqd_manager *mqd;
833 mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
837 retval = allocate_sdma_queue(dqm, &q->sdma_id);
841 q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
842 q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
844 pr_debug("SDMA id is: %d\n", q->sdma_id);
845 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
846 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
848 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
849 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
850 &q->gart_mqd_addr, &q->properties);
852 goto out_deallocate_sdma_queue;
854 retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
861 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
862 out_deallocate_sdma_queue:
863 deallocate_sdma_queue(dqm, q->sdma_id);
869 * Device Queue Manager implementation for cp scheduler
872 static int set_sched_resources(struct device_queue_manager *dqm)
875 struct scheduling_resources res;
877 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
880 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
881 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
882 / dqm->dev->shared_resources.num_pipe_per_mec;
884 if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
887 /* only acquire queues from the first MEC */
891 /* This situation may be hit in the future if a new HW
892 * generation exposes more than 64 queues. If so, the
893 * definition of res.queue_mask needs updating
895 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
896 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
900 res.queue_mask |= (1ull << i);
902 res.gws_mask = res.oac_mask = res.gds_heap_base =
903 res.gds_heap_size = 0;
905 pr_debug("Scheduling resources:\n"
907 "queue mask: 0x%8llX\n",
908 res.vmid_mask, res.queue_mask);
910 return pm_send_set_resources(&dqm->packets, &res);
913 static int initialize_cpsch(struct device_queue_manager *dqm)
915 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
917 mutex_init(&dqm->lock);
918 INIT_LIST_HEAD(&dqm->queues);
919 dqm->queue_count = dqm->processes_count = 0;
920 dqm->sdma_queue_count = 0;
921 dqm->active_runlist = false;
922 dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
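/* Start the HW scheduler: initialize the packet manager, hand the scheduling
 * resources to the HWS and allocate the preemption fence in GART memory.
 */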
927 static int start_cpsch(struct device_queue_manager *dqm)
933 retval = pm_init(&dqm->packets, dqm);
935 goto fail_packet_manager_init;
937 retval = set_sched_resources(dqm);
939 goto fail_set_sched_resources;
941 pr_debug("Allocating fence memory\n");
943 /* allocate fence memory on the gart */
944 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
948 goto fail_allocate_vidmem;
950 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
951 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
953 init_interrupts(dqm);
955 mutex_lock(&dqm->lock);
956 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
957 mutex_unlock(&dqm->lock);
960 fail_allocate_vidmem:
961 fail_set_sched_resources:
962 pm_uninit(&dqm->packets);
963 fail_packet_manager_init:
967 static int stop_cpsch(struct device_queue_manager *dqm)
969 mutex_lock(&dqm->lock);
970 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
971 mutex_unlock(&dqm->lock);
973 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
974 pm_uninit(&dqm->packets);
979 static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
980 struct kernel_queue *kq,
981 struct qcm_process_device *qpd)
983 mutex_lock(&dqm->lock);
984 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
985 pr_warn("Can't create new kernel queue because %d queues were already created\n",
986 dqm->total_queue_count);
987 mutex_unlock(&dqm->lock);
992 * Unconditionally increment this counter, regardless of the queue's
993 * type or whether the queue is active.
995 dqm->total_queue_count++;
996 pr_debug("Total of %d queues are accountable so far\n",
997 dqm->total_queue_count);
999 list_add(&kq->list, &qpd->priv_queue_list);
1001 qpd->is_debug = true;
1002 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1003 mutex_unlock(&dqm->lock);
1008 static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1009 struct kernel_queue *kq,
1010 struct qcm_process_device *qpd)
1012 mutex_lock(&dqm->lock);
1013 list_del(&kq->list);
1015 qpd->is_debug = false;
1016 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
1018 * Unconditionally decrement this counter, regardless of the queue's type
 * or whether the queue is active.
1021 dqm->total_queue_count--;
1022 pr_debug("Total of %d queues are accountable so far\n",
1023 dqm->total_queue_count);
1024 mutex_unlock(&dqm->lock);
1027 static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
1028 struct qcm_process_device *qpd)
1031 struct mqd_manager *mqd;
1035 mutex_lock(&dqm->lock);
1037 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
1038 pr_warn("Can't create new usermode queue because %d queues were already created\n",
1039 dqm->total_queue_count);
1044 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1045 retval = allocate_sdma_queue(dqm, &q->sdma_id);
1048 q->properties.sdma_queue_id =
1049 q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
1050 q->properties.sdma_engine_id =
1051 q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
1053 mqd = dqm->ops.get_mqd_manager(dqm,
1054 get_mqd_type_from_queue_type(q->properties.type));
1061 * Eviction state logic: we only mark active queues as evicted
1062 * to avoid the overhead of restoring inactive queues later
1065 q->properties.is_evicted = (q->properties.queue_size > 0 &&
1066 q->properties.queue_percent > 0 &&
1067 q->properties.queue_address != 0);
1069 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
1071 q->properties.tba_addr = qpd->tba_addr;
1072 q->properties.tma_addr = qpd->tma_addr;
1073 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
1074 &q->gart_mqd_addr, &q->properties);
1078 list_add(&q->list, &qpd->queues_list);
1080 if (q->properties.is_active) {
1082 retval = execute_queues_cpsch(dqm,
1083 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1086 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1087 dqm->sdma_queue_count++;
1089 * Unconditionally increment this counter, regardless of the queue's
1090 * type or whether the queue is active.
1092 dqm->total_queue_count++;
1094 pr_debug("Total of %d queues are accountable so far\n",
1095 dqm->total_queue_count);
1098 mutex_unlock(&dqm->lock);
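/* Busy-wait until the scheduler writes fence_value to *fence_addr, or until
 * timeout_ms expires.
 */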
1102 int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
1103 unsigned int fence_value,
1104 unsigned int timeout_ms)
1106 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
1108 while (*fence_addr != fence_value) {
1109 if (time_after(jiffies, end_jiffies)) {
1110 pr_err("qcm fence wait loop timeout expired\n");
1119 static int unmap_sdma_queues(struct device_queue_manager *dqm,
1120 unsigned int sdma_engine)
1122 return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
1123 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
1127 /* dqm->lock mutex has to be locked before calling this function */
1128 static int map_queues_cpsch(struct device_queue_manager *dqm)
1132 if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
1135 if (dqm->active_runlist)
1138 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
1140 pr_err("failed to execute runlist\n");
1143 dqm->active_runlist = true;
1148 /* dqm->lock mutex has to be locked before calling this function */
1149 static int unmap_queues_cpsch(struct device_queue_manager *dqm,
1150 enum kfd_unmap_queues_filter filter,
1151 uint32_t filter_param)
1155 if (!dqm->active_runlist)
1158 pr_debug("Before destroying queues, sdma queue count is : %u\n",
1159 dqm->sdma_queue_count);
1161 if (dqm->sdma_queue_count > 0) {
1162 unmap_sdma_queues(dqm, 0);
1163 unmap_sdma_queues(dqm, 1);
1166 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
1167 filter, filter_param, false, 0);
1171 *dqm->fence_addr = KFD_FENCE_INIT;
1172 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1173 KFD_FENCE_COMPLETED);
1174 /* Wait for the unmap to complete; give up after the preemption timeout */
1175 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
1176 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
1180 pm_release_ib(&dqm->packets);
1181 dqm->active_runlist = false;
1186 /* dqm->lock mutex has to be locked before calling this function */
1187 static int execute_queues_cpsch(struct device_queue_manager *dqm,
1188 enum kfd_unmap_queues_filter filter,
1189 uint32_t filter_param)
1193 retval = unmap_queues_cpsch(dqm, filter, filter_param);
1195 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1199 return map_queues_cpsch(dqm);
1202 static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1203 struct qcm_process_device *qpd,
1207 struct mqd_manager *mqd;
1208 bool preempt_all_queues;
1210 preempt_all_queues = false;
1214 /* remove queue from list to prevent rescheduling after preemption */
1215 mutex_lock(&dqm->lock);
1217 if (qpd->is_debug) {
1219 * error: we currently do not allow destroying a queue
1220 * of a process that is being debugged
1223 goto failed_try_destroy_debugged_queue;
1227 mqd = dqm->ops.get_mqd_manager(dqm,
1228 get_mqd_type_from_queue_type(q->properties.type));
1234 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1235 dqm->sdma_queue_count--;
1236 deallocate_sdma_queue(dqm, q->sdma_id);
1241 if (q->properties.is_active) {
1243 retval = execute_queues_cpsch(dqm,
1244 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
1245 if (retval == -ETIME)
1246 qpd->reset_wavefronts = true;
1249 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
1252 * Unconditionally decrement this counter, regardless of the queue's type
 * or whether the queue is active.
1255 dqm->total_queue_count--;
1256 pr_debug("Total of %d queues are accountable so far\n",
1257 dqm->total_queue_count);
1259 mutex_unlock(&dqm->lock);
1264 failed_try_destroy_debugged_queue:
1266 mutex_unlock(&dqm->lock);
1271 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1272 * stay in user mode.
1274 #define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1275 /* APE1 limit is inclusive and 64K aligned. */
1276 #define APE1_LIMIT_ALIGNMENT 0xFFFF
1278 static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1279 struct qcm_process_device *qpd,
1280 enum cache_policy default_policy,
1281 enum cache_policy alternate_policy,
1282 void __user *alternate_aperture_base,
1283 uint64_t alternate_aperture_size)
1287 mutex_lock(&dqm->lock);
1289 if (alternate_aperture_size == 0) {
1290 /* base > limit disables APE1 */
1291 qpd->sh_mem_ape1_base = 1;
1292 qpd->sh_mem_ape1_limit = 0;
1295 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1296 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1297 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1298 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1299 * Verify that the base and size parameters can be
1300 * represented in this format and convert them.
1301 * Additionally restrict APE1 to user-mode addresses.
1304 uint64_t base = (uintptr_t)alternate_aperture_base;
1305 uint64_t limit = base + alternate_aperture_size - 1;
1307 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1308 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1313 qpd->sh_mem_ape1_base = base >> 16;
1314 qpd->sh_mem_ape1_limit = limit >> 16;
1317 retval = dqm->asic_ops.set_cache_memory_policy(
1322 alternate_aperture_base,
1323 alternate_aperture_size);
1325 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
1326 program_sh_mem_settings(dqm, qpd);
1328 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
1329 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1330 qpd->sh_mem_ape1_limit);
1333 mutex_unlock(&dqm->lock);
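/* Record the user trap handler addresses (TBA/TMA) for the process. With
 * CWSR enabled, they are written into the CWSR trap handler's TMA page so
 * the CWSR handler can chain to the user handler.
 */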
1337 static int set_trap_handler(struct device_queue_manager *dqm,
1338 struct qcm_process_device *qpd,
1344 if (dqm->dev->cwsr_enabled) {
1345 /* Jump from CWSR trap handler to user trap */
1346 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1350 qpd->tba_addr = tba_addr;
1351 qpd->tma_addr = tma_addr;
1357 static int process_termination_nocpsch(struct device_queue_manager *dqm,
1358 struct qcm_process_device *qpd)
1360 struct queue *q, *next;
1361 struct device_process_node *cur, *next_dpn;
1364 mutex_lock(&dqm->lock);
1366 /* Clear all user mode queues */
1367 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1370 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1375 /* Unregister process */
1376 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1377 if (qpd == cur->qpd) {
1378 list_del(&cur->list);
1380 dqm->processes_count--;
1385 mutex_unlock(&dqm->lock);
1390 static int process_termination_cpsch(struct device_queue_manager *dqm,
1391 struct qcm_process_device *qpd)
1394 struct queue *q, *next;
1395 struct kernel_queue *kq, *kq_next;
1396 struct mqd_manager *mqd;
1397 struct device_process_node *cur, *next_dpn;
1398 enum kfd_unmap_queues_filter filter =
1399 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
1403 mutex_lock(&dqm->lock);
1405 /* Clean all kernel queues */
1406 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1407 list_del(&kq->list);
1409 qpd->is_debug = false;
1410 dqm->total_queue_count--;
1411 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1414 /* Clear all user mode queues */
1415 list_for_each_entry(q, &qpd->queues_list, list) {
1416 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1417 dqm->sdma_queue_count--;
1419 if (q->properties.is_active)
1422 dqm->total_queue_count--;
1425 /* Unregister process */
1426 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1427 if (qpd == cur->qpd) {
1428 list_del(&cur->list);
1430 dqm->processes_count--;
1435 retval = execute_queues_cpsch(dqm, filter, 0);
1436 if (retval || qpd->reset_wavefronts) {
1437 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1438 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1439 qpd->reset_wavefronts = false;
1442 /* lastly, free mqd resources */
1443 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1444 mqd = dqm->ops.get_mqd_manager(dqm,
1445 get_mqd_type_from_queue_type(q->properties.type));
1452 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
1456 mutex_unlock(&dqm->lock);
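/* Create and initialize a device queue manager for the device, selecting the
 * HWS (cpsch) or no-HWS (nocpsch) ops table based on the scheduling policy
 * and ASIC family.
 */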
1460 struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1462 struct device_queue_manager *dqm;
1464 pr_debug("Loading device queue manager\n");
1466 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
1470 switch (dev->device_info->asic_family) {
1471 /* HWS is not available on Hawaii. */
1473 /* HWS depends on CWSR for timely dequeue. CWSR is not
1474 * available on Tonga.
1476 * FIXME: This argument also applies to Kaveri.
1479 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1482 dqm->sched_policy = sched_policy;
1487 switch (dqm->sched_policy) {
1488 case KFD_SCHED_POLICY_HWS:
1489 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1490 /* initialize dqm for cp scheduling */
1491 dqm->ops.create_queue = create_queue_cpsch;
1492 dqm->ops.initialize = initialize_cpsch;
1493 dqm->ops.start = start_cpsch;
1494 dqm->ops.stop = stop_cpsch;
1495 dqm->ops.destroy_queue = destroy_queue_cpsch;
1496 dqm->ops.update_queue = update_queue;
1497 dqm->ops.get_mqd_manager = get_mqd_manager;
1498 dqm->ops.register_process = register_process;
1499 dqm->ops.unregister_process = unregister_process;
1500 dqm->ops.uninitialize = uninitialize;
1501 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1502 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1503 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1504 dqm->ops.set_trap_handler = set_trap_handler;
1505 dqm->ops.process_termination = process_termination_cpsch;
1506 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1507 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
1509 case KFD_SCHED_POLICY_NO_HWS:
1510 /* initialize dqm for no cp scheduling */
1511 dqm->ops.start = start_nocpsch;
1512 dqm->ops.stop = stop_nocpsch;
1513 dqm->ops.create_queue = create_queue_nocpsch;
1514 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1515 dqm->ops.update_queue = update_queue;
1516 dqm->ops.get_mqd_manager = get_mqd_manager;
1517 dqm->ops.register_process = register_process;
1518 dqm->ops.unregister_process = unregister_process;
1519 dqm->ops.initialize = initialize_nocpsch;
1520 dqm->ops.uninitialize = uninitialize;
1521 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
1522 dqm->ops.set_trap_handler = set_trap_handler;
1523 dqm->ops.process_termination = process_termination_nocpsch;
1524 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1525 dqm->ops.restore_process_queues =
1526 restore_process_queues_nocpsch;
1529 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
1533 switch (dev->device_info->asic_family) {
1535 device_queue_manager_init_vi(&dqm->asic_ops);
1539 device_queue_manager_init_cik(&dqm->asic_ops);
1543 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1548 case CHIP_POLARIS10:
1549 case CHIP_POLARIS11:
1550 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1553 WARN(1, "Unexpected ASIC family %u",
1554 dev->device_info->asic_family);
1558 if (!dqm->ops.initialize(dqm))
1566 void device_queue_manager_uninit(struct device_queue_manager *dqm)
1568 dqm->ops.uninitialize(dqm);
1572 #if defined(CONFIG_DEBUG_FS)
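/* Pretty-print an HQD register dump: start a new output line whenever the
 * register offsets stop being contiguous.
 */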
1574 static void seq_reg_dump(struct seq_file *m,
1575 uint32_t (*dump)[2], uint32_t n_regs)
1579 for (i = 0, count = 0; i < n_regs; i++) {
1581 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1582 seq_printf(m, "%s %08x: %08x",
1584 dump[i][0], dump[i][1]);
1587 seq_printf(m, " %08x", dump[i][1]);
1595 int dqm_debugfs_hqds(struct seq_file *m, void *data)
1597 struct device_queue_manager *dqm = data;
1598 uint32_t (*dump)[2], n_regs;
1602 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1603 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1605 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
1606 if (!test_bit(pipe_offset + queue,
1607 dqm->dev->shared_resources.queue_bitmap))
1610 r = dqm->dev->kfd2kgd->hqd_dump(
1611 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1615 seq_printf(m, " CP Pipe %d, Queue %d\n",
1617 seq_reg_dump(m, dump, n_regs);
1623 for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
1624 for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
1625 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
1626 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1630 seq_printf(m, " SDMA Engine %d, RLC %d\n",
1632 seq_reg_dump(m, dump, n_regs);