/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
32 #include "kfd_device_queue_manager.h"
33 #include "kfd_mqd_manager.h"
35 #include "kfd_kernel_queue.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
			      enum kfd_unmap_queues_filter filter,
			      uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
				     struct queue *q,
				     struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				  unsigned int sdma_queue_id);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
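
/*
 * Doorbell assignment: pre-SOC15 ASICs reuse the queue ID so the user mode
 * ABI stays stable; SOC15 SDMA queues get static per-engine assignments;
 * SOC15 CP queues take the first free bit in the per-process doorbell bitmap.
 */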
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		/* For SDMA queues on SOC15, use static doorbell
		 * assignments based on the engine and queue.
		 */
		q->doorbell_id = dev->shared_resources.sdma_doorbell
			[q->properties.sdma_engine_id]
			[q->properties.sdma_queue_id];
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available\n");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_doorbell_id_to_offset(dev, q->process,
					  q->doorbell_id);

	return 0;
}
static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}
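
/*
 * VMID handling for the no-HWS (nocpsch) path: a VMID is allocated when a
 * process creates its first queue and released again when its last queue is
 * destroyed.
 */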
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
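
/*
 * Pick a hardware queue descriptor (HQD) slot for a no-HWS queue: pipes are
 * scanned round-robin starting at next_pipe_to_allocate so queues spread
 * horizontally across the enabled pipes of MEC 0.
 */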
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			       q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}
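
/*
 * The MQD of a mapped queue must not be modified while the HW may still read
 * it, so update_queue() first unmaps the queue (through the HWS runlist in
 * HWS mode, or by destroying the HQD directly in no-HWS mode), applies the
 * update, and then remaps/reloads the queue if it is still active.
 */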
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (pdd->qpd.evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}
static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
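
/*
 * Process queue eviction/restore. qpd->evicted acts as a reference count, so
 * nested evict/restore calls only deactivate the queues on the first evict
 * and reactivate them on the last restore.
 */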
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	int retval = 0;

	mutex_lock(&dqm->lock);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) { /* should not be here */
			pr_err("Cannot evict queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval)
			goto out;
		dqm->queue_count--;
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	mutex_lock(&dqm->lock);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) { /* should not be here */
			pr_err("Cannot restore queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
				       q->queue, &q->properties,
				       q->process->mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%08x\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (!retval)
		qpd->evicted = 0;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
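
/*
 * register_process() snapshots the process page directory base into the qpd
 * and applies the ASIC-specific QPD setup; unregister_process() drops the
 * process node again and returns non-zero if it was not found.
 */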
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}
static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
			(uint32_t)pasid |
			ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}
static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}
static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	pm_uninit(&dqm->packets);
	return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->sdma_bitmap) - 1;
	dqm->sdma_bitmap &= ~(1 << bit);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	pr_debug("SDMA id is:     %d\n", q->sdma_id);
	pr_debug("SDMA queue id:  %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
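
/*
 * Tell the HW scheduler (HWS) which VMIDs and MEC queues it may use. Only
 * queues on the first MEC are handed to the HWS, and the queue mask is
 * limited to 64 bits, hence the WARN if amdgpu ever enables more.
 */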
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);
	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out_unlock;
		q->properties.sdma_queue_id =
			q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
		q->properties.sdma_engine_id =
			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	list_add(&q->list, &qpd->queues_list);

	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
	mutex_unlock(&dqm->lock);

	return retval;
}
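
/*
 * Poll *fence_addr until it reaches fence_value or timeout_ms elapses;
 * returns -ETIME on timeout. The fence lives in GART memory and is written
 * by the CP in response to a query-status packet.
 */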
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}
static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* wait for the fence to signal; time out if preemption hangs */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error: we currently do not allow destroying a queue
		 * of a process that is being debugged
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
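
/*
 * Configure the default/alternate cache policy and the optional APE1
 * alternate aperture for a process. A zero aperture size disables APE1 by
 * programming base > limit.
 */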
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
			dqm->sdma_queue_count--;
			deallocate_sdma_queue(dqm, q->sdma_id);
		}

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	case CHIP_VEGA10:
	case CHIP_RAVEN:
		device_queue_manager_init_v9(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}
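
/* debugfs support: dump HIQ, CP and SDMA HQD registers through kfd2kgd */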
#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
		KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs);
	if (!r) {
		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
				KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
				KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
				KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
		for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

#endif