drm/amdgpu: add configurable grace period for unmap queues
[linux-block.git] / drivers / gpu / drm / amd / amdkfd / kfd_device_queue_manager.c
CommitLineData
d87f36a0 1// SPDX-License-Identifier: GPL-2.0 OR MIT
64c7f8cf 2/*
d87f36a0 3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
64c7f8cf
BG
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
26103436
FK
25#include <linux/ratelimit.h>
26#include <linux/printk.h>
64c7f8cf
BG
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/types.h>
64c7f8cf 30#include <linux/bitops.h>
99331a51 31#include <linux/sched.h>
64c7f8cf
BG
32#include "kfd_priv.h"
33#include "kfd_device_queue_manager.h"
34#include "kfd_mqd_manager.h"
35#include "cik_regs.h"
36#include "kfd_kernel_queue.h"
5b87245f 37#include "amdgpu_amdkfd.h"
cc009e61 38#include "mes_api_def.h"
64c7f8cf
BG
39
40/* Size of the per-pipe EOP queue */
41#define CIK_HPD_EOP_BYTES_LOG2 11
42#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
43
64c7f8cf 44static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
c7b6bac9 45 u32 pasid, unsigned int vmid);
64c7f8cf 46
c4744e24
YZ
47static int execute_queues_cpsch(struct device_queue_manager *dqm,
48 enum kfd_unmap_queues_filter filter,
7cee6a68
JK
49 uint32_t filter_param,
50 uint32_t grace_period);
7da2bcf8 51static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466 52 enum kfd_unmap_queues_filter filter,
7cee6a68
JK
53 uint32_t filter_param,
54 uint32_t grace_period,
55 bool reset);
64c7f8cf 56
60a00956
FK
57static int map_queues_cpsch(struct device_queue_manager *dqm);
58
bcea3081 59static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 60 struct queue *q);
64c7f8cf 61
d39b7737
OZ
62static inline void deallocate_hqd(struct device_queue_manager *dqm,
63 struct queue *q);
64static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
65static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 66 struct queue *q, const uint32_t *restore_sdma_id);
73ea648d
SL
67static void kfd_process_hw_exception(struct work_struct *work);
68
bcea3081
BG
69static inline
70enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
64c7f8cf 71{
1b4670f6 72 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
85d258f9
BG
73 return KFD_MQD_TYPE_SDMA;
74 return KFD_MQD_TYPE_CP;
64c7f8cf
BG
75}
76
d0b63bb3
AR
77static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
78{
79 int i;
8dc1db31
MJ
80 int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec
81 + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;
d0b63bb3
AR
82
83 /* queue is available for KFD usage if bit is 1 */
8dc1db31 84 for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)
d0b63bb3 85 if (test_bit(pipe_offset + i,
8dc1db31 86 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
87 return true;
88 return false;
89}
90
e6945304 91unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
64ea8f4a 92{
8dc1db31 93 return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,
d0b63bb3 94 KGD_MAX_QUEUES);
64ea8f4a
OG
95}
96
d0b63bb3 97unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
64c7f8cf 98{
8dc1db31 99 return dqm->dev->kfd->shared_resources.num_queue_per_pipe;
d0b63bb3
AR
100}
101
102unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
103{
8dc1db31 104 return dqm->dev->kfd->shared_resources.num_pipe_per_mec;
64c7f8cf
BG
105}
106
c7637c95
YZ
107static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
108{
ee2f17f4
AL
109 return kfd_get_num_sdma_engines(dqm->dev) +
110 kfd_get_num_xgmi_sdma_engines(dqm->dev);
c7637c95
YZ
111}
112
98bb9222
YZ
113unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
114{
ee2f17f4 115 return kfd_get_num_sdma_engines(dqm->dev) *
8dc1db31 116 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
98bb9222
YZ
117}
118
1b4670f6
OZ
119unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
120{
ee2f17f4 121 return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
8dc1db31 122 dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
1b4670f6
OZ
123}
124
cc009e61
MJ
125static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm)
126{
8dc1db31 127 return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap;
cc009e61
MJ
128}
129
a805889a
MJ
130static void init_sdma_bitmaps(struct device_queue_manager *dqm)
131{
132 bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES);
133 bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm));
134
135 bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES);
136 bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm));
137}
138
a22fc854 139void program_sh_mem_settings(struct device_queue_manager *dqm,
64c7f8cf
BG
140 struct qcm_process_device *qpd)
141{
c4050ff1
LL
142 uint32_t xcc_mask = dqm->dev->xcc_mask;
143 int xcc_id;
e2069a7b 144
c4050ff1 145 for_each_inst(xcc_id, xcc_mask)
e2069a7b 146 dqm->dev->kfd2kgd->program_sh_mem_settings(
c4050ff1
LL
147 dqm->dev->adev, qpd->vmid, qpd->sh_mem_config,
148 qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit,
149 qpd->sh_mem_bases, xcc_id);
64c7f8cf
BG
150}
151
cc009e61
MJ
152static void kfd_hws_hang(struct device_queue_manager *dqm)
153{
154 /*
155 * Issue a GPU reset if HWS is unresponsive
156 */
157 dqm->is_hws_hang = true;
158
159 /* It's possible we're detecting a HWS hang in the
160 * middle of a GPU reset. No need to schedule another
161 * reset in this case.
162 */
163 if (!dqm->is_resetting)
164 schedule_work(&dqm->hw_exception_work);
165}
166
167static int convert_to_mes_queue_type(int queue_type)
168{
169 int mes_queue_type;
170
171 switch (queue_type) {
172 case KFD_QUEUE_TYPE_COMPUTE:
173 mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
174 break;
175 case KFD_QUEUE_TYPE_SDMA:
176 mes_queue_type = MES_QUEUE_TYPE_SDMA;
177 break;
178 default:
179 WARN(1, "Invalid queue type %d", queue_type);
180 mes_queue_type = -EINVAL;
181 break;
182 }
183
184 return mes_queue_type;
185}
186
187static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
188 struct qcm_process_device *qpd)
189{
190 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
191 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
192 struct mes_add_queue_input queue_input;
04fd0739 193 int r, queue_type;
e77a541f 194 uint64_t wptr_addr_off;
cc009e61
MJ
195
196 if (dqm->is_hws_hang)
197 return -EIO;
198
199 memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
200 queue_input.process_id = qpd->pqm->process->pasid;
201 queue_input.page_table_base_addr = qpd->page_table_base;
202 queue_input.process_va_start = 0;
203 queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
204 /* MES unit for quantum is 100ns */
205 queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
206 queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
207 queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
208 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
209 queue_input.inprocess_gang_priority = q->properties.priority;
210 queue_input.gang_global_priority_level =
211 AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
212 queue_input.doorbell_offset = q->properties.doorbell_off;
213 queue_input.mqd_addr = q->gart_mqd_addr;
fe4e9ff9 214 queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
e77a541f
GS
215
216 if (q->wptr_bo) {
7f347e3f 217 wptr_addr_off = (uint64_t)q->properties.write_ptr & (PAGE_SIZE - 1);
fe4e9ff9
JX
218 queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
219 }
e77a541f 220
a9579956 221 queue_input.is_kfd_process = 1;
3e9cf234
GS
222 queue_input.is_aql_queue = (q->properties.format == KFD_QUEUE_FORMAT_AQL);
223 queue_input.queue_size = q->properties.queue_size >> 2;
a9579956 224
cc009e61
MJ
225 queue_input.paging = false;
226 queue_input.tba_addr = qpd->tba_addr;
227 queue_input.tma_addr = qpd->tma_addr;
228
04fd0739
GS
229 queue_type = convert_to_mes_queue_type(q->properties.type);
230 if (queue_type < 0) {
cc009e61
MJ
231 pr_err("Queue type not supported with MES, queue:%d\n",
232 q->properties.type);
233 return -EINVAL;
234 }
04fd0739 235 queue_input.queue_type = (uint32_t)queue_type;
cc009e61
MJ
236
237 if (q->gws) {
238 queue_input.gws_base = 0;
239 queue_input.gws_size = qpd->num_gws;
240 }
241
242 amdgpu_mes_lock(&adev->mes);
243 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
244 amdgpu_mes_unlock(&adev->mes);
245 if (r) {
246 pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
247 q->properties.doorbell_off);
248 pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
249 kfd_hws_hang(dqm);
250}
251
252 return r;
253}
254
255static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
256 struct qcm_process_device *qpd)
257{
258 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
259 int r;
260 struct mes_remove_queue_input queue_input;
261
262 if (dqm->is_hws_hang)
263 return -EIO;
264
265 memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
266 queue_input.doorbell_offset = q->properties.doorbell_off;
267 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
268
269 amdgpu_mes_lock(&adev->mes);
270 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
271 amdgpu_mes_unlock(&adev->mes);
272
273 if (r) {
274 pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
275 q->properties.doorbell_off);
276 pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
277 kfd_hws_hang(dqm);
278 }
279
280 return r;
281}
282
283static int remove_all_queues_mes(struct device_queue_manager *dqm)
284{
285 struct device_process_node *cur;
286 struct qcm_process_device *qpd;
287 struct queue *q;
288 int retval = 0;
289
290 list_for_each_entry(cur, &dqm->queues, list) {
291 qpd = cur->qpd;
292 list_for_each_entry(q, &qpd->queues_list, list) {
293 if (q->properties.is_active) {
294 retval = remove_queue_mes(dqm, q, qpd);
295 if (retval) {
296 pr_err("%s: Failed to remove queue %d for dev %d",
297 __func__,
298 q->properties.queue_id,
299 dqm->dev->id);
300 return retval;
301 }
302 }
303 }
304 }
305
306 return retval;
307}
308
204d8998 309static void increment_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
310 struct qcm_process_device *qpd,
311 struct queue *q)
b42902f4
YZ
312{
313 dqm->active_queue_count++;
ab4d51d4
DYS
314 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
315 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 316 dqm->active_cp_queue_count++;
ab4d51d4
DYS
317
318 if (q->properties.is_gws) {
319 dqm->gws_queue_count++;
320 qpd->mapped_gws_queue = true;
321 }
b42902f4
YZ
322}
323
204d8998 324static void decrement_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
325 struct qcm_process_device *qpd,
326 struct queue *q)
b42902f4
YZ
327{
328 dqm->active_queue_count--;
ab4d51d4
DYS
329 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
330 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 331 dqm->active_cp_queue_count--;
ab4d51d4
DYS
332
333 if (q->properties.is_gws) {
334 dqm->gws_queue_count--;
335 qpd->mapped_gws_queue = false;
336 }
b42902f4
YZ
337}
338
5bb6a8fa
DYS
339/*
340 * Allocate a doorbell ID to this queue.
341 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
342 */
343static int allocate_doorbell(struct qcm_process_device *qpd,
344 struct queue *q,
345 uint32_t const *restore_id)
ef568db7 346{
8dc1db31 347 struct kfd_node *dev = qpd->dqm->dev;
ef568db7 348
dd0ae064 349 if (!KFD_IS_SOC15(dev)) {
ef568db7
FK
350 /* On pre-SOC15 chips we need to use the queue ID to
351 * preserve the user mode ABI.
352 */
5bb6a8fa
DYS
353
354 if (restore_id && *restore_id != q->properties.queue_id)
355 return -EINVAL;
356
ef568db7 357 q->doorbell_id = q->properties.queue_id;
1b4670f6
OZ
358 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
359 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
234441dd
YZ
360 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
361 * doorbell assignments based on the engine and queue id.
362 * The doobell index distance between RLC (2*i) and (2*i+1)
363 * for a SDMA engine is 512.
ef568db7 364 */
234441dd 365
8dc1db31 366 uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx;
643e40d4
MJ
367
368 /*
369 * q->properties.sdma_engine_id corresponds to the virtual
370 * sdma engine number. However, for doorbell allocation,
371 * we need the physical sdma engine id in order to get the
372 * correct doorbell offset.
373 */
374 uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id *
375 get_num_all_sdma_engines(qpd->dqm) +
376 q->properties.sdma_engine_id]
5bb6a8fa
DYS
377 + (q->properties.sdma_queue_id & 1)
378 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
379 + (q->properties.sdma_queue_id >> 1);
380
381 if (restore_id && *restore_id != valid_id)
382 return -EINVAL;
383 q->doorbell_id = valid_id;
ef568db7 384 } else {
5bb6a8fa
DYS
385 /* For CP queues on SOC15 */
386 if (restore_id) {
387 /* make sure that ID is free */
388 if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
389 return -EINVAL;
390
391 q->doorbell_id = *restore_id;
392 } else {
393 /* or reserve a free doorbell ID */
394 unsigned int found;
395
396 found = find_first_zero_bit(qpd->doorbell_bitmap,
397 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
398 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
399 pr_debug("No doorbells available");
400 return -EBUSY;
401 }
402 set_bit(found, qpd->doorbell_bitmap);
403 q->doorbell_id = found;
ef568db7 404 }
ef568db7
FK
405 }
406
407 q->properties.doorbell_off =
8dc1db31 408 kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd),
ef568db7 409 q->doorbell_id);
ef568db7
FK
410 return 0;
411}
412
413static void deallocate_doorbell(struct qcm_process_device *qpd,
414 struct queue *q)
415{
416 unsigned int old;
8dc1db31 417 struct kfd_node *dev = qpd->dqm->dev;
ef568db7 418
dd0ae064 419 if (!KFD_IS_SOC15(dev) ||
1b4670f6
OZ
420 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
421 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
ef568db7
FK
422 return;
423
424 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
425 WARN_ON(!old);
426}
427
b53ef0df
MJ
428static void program_trap_handler_settings(struct device_queue_manager *dqm,
429 struct qcm_process_device *qpd)
430{
c4050ff1
LL
431 uint32_t xcc_mask = dqm->dev->xcc_mask;
432 int xcc_id;
e2069a7b 433
b53ef0df 434 if (dqm->dev->kfd2kgd->program_trap_handler_settings)
c4050ff1 435 for_each_inst(xcc_id, xcc_mask)
e2069a7b 436 dqm->dev->kfd2kgd->program_trap_handler_settings(
c4050ff1
LL
437 dqm->dev->adev, qpd->vmid, qpd->tba_addr,
438 qpd->tma_addr, xcc_id);
b53ef0df
MJ
439}
440
64c7f8cf
BG
441static int allocate_vmid(struct device_queue_manager *dqm,
442 struct qcm_process_device *qpd,
443 struct queue *q)
444{
d9d4623c 445 int allocated_vmid = -1, i;
64c7f8cf 446
d9d4623c
YZ
447 for (i = dqm->dev->vm_info.first_vmid_kfd;
448 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
449 if (!dqm->vmid_pasid[i]) {
450 allocated_vmid = i;
451 break;
452 }
453 }
454
455 if (allocated_vmid < 0) {
456 pr_err("no more vmid to allocate\n");
457 return -ENOSPC;
458 }
459
460 pr_debug("vmid allocated: %d\n", allocated_vmid);
461
462 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
64c7f8cf 463
d9d4623c 464 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
64c7f8cf 465
64c7f8cf
BG
466 qpd->vmid = allocated_vmid;
467 q->properties.vmid = allocated_vmid;
468
64c7f8cf
BG
469 program_sh_mem_settings(dqm, qpd);
470
8dc1db31 471 if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled)
b53ef0df
MJ
472 program_trap_handler_settings(dqm, qpd);
473
403575c4
FK
474 /* qpd->page_table_base is set earlier when register_process()
475 * is called, i.e. when the first queue is created.
476 */
3356c38d 477 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
403575c4
FK
478 qpd->vmid,
479 qpd->page_table_base);
480 /* invalidate the VM context after pasid and vmid mapping is set up */
3543b055 481 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 482
c637b36a 483 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
3356c38d 484 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
c637b36a 485 qpd->sh_hidden_private_base, qpd->vmid);
d39b7737 486
64c7f8cf
BG
487 return 0;
488}
489
8dc1db31 490static int flush_texture_cache_nocpsch(struct kfd_node *kdev,
552764b6
FK
491 struct qcm_process_device *qpd)
492{
9af5379c 493 const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
f6e27ff1 494 int ret;
552764b6
FK
495
496 if (!qpd->ib_kaddr)
497 return -ENOMEM;
498
f6e27ff1
FK
499 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
500 if (ret)
501 return ret;
552764b6 502
6bfc7c7e 503 return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
f6e27ff1
FK
504 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
505 pmf->release_mem_size / sizeof(uint32_t));
552764b6
FK
506}
507
64c7f8cf
BG
508static void deallocate_vmid(struct device_queue_manager *dqm,
509 struct qcm_process_device *qpd,
510 struct queue *q)
511{
552764b6 512 /* On GFX v7, CP doesn't flush TC at dequeue */
7eb0502a 513 if (q->device->adev->asic_type == CHIP_HAWAII)
552764b6
FK
514 if (flush_texture_cache_nocpsch(q->device, qpd))
515 pr_err("Failed to flush TC\n");
516
3543b055 517 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 518
2030664b
BG
519 /* Release the vmid mapping */
520 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
d9d4623c 521 dqm->vmid_pasid[qpd->vmid] = 0;
2030664b 522
64c7f8cf
BG
523 qpd->vmid = 0;
524 q->properties.vmid = 0;
525}
526
527static int create_queue_nocpsch(struct device_queue_manager *dqm,
528 struct queue *q,
2485c12c 529 struct qcm_process_device *qpd,
42c6c482 530 const struct kfd_criu_queue_priv_data *qd,
3a9822d7 531 const void *restore_mqd, const void *restore_ctl_stack)
64c7f8cf 532{
d39b7737 533 struct mqd_manager *mqd_mgr;
64c7f8cf
BG
534 int retval;
535
efeaed4d 536 dqm_lock(dqm);
64c7f8cf 537
b8cbab04 538 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 539 pr_warn("Can't create new usermode queue because %d queues were already created\n",
b8cbab04 540 dqm->total_queue_count);
ab7c1648
KR
541 retval = -EPERM;
542 goto out_unlock;
b8cbab04
OG
543 }
544
64c7f8cf
BG
545 if (list_empty(&qpd->queues_list)) {
546 retval = allocate_vmid(dqm, qpd, q);
ab7c1648
KR
547 if (retval)
548 goto out_unlock;
64c7f8cf 549 }
64c7f8cf 550 q->properties.vmid = qpd->vmid;
26103436 551 /*
bb2d2128
FK
552 * Eviction state logic: mark all queues as evicted, even ones
553 * not currently active. Restoring inactive queues later only
554 * updates the is_evicted flag but is a no-op otherwise.
26103436 555 */
bb2d2128 556 q->properties.is_evicted = !!qpd->evicted;
64c7f8cf 557
373d7080
FK
558 q->properties.tba_addr = qpd->tba_addr;
559 q->properties.tma_addr = qpd->tma_addr;
560
d091bc0a
OZ
561 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
562 q->properties.type)];
d39b7737
OZ
563 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
564 retval = allocate_hqd(dqm, q);
565 if (retval)
566 goto deallocate_vmid;
567 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
568 q->pipe, q->queue);
569 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
570 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
2485c12c 571 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
d39b7737
OZ
572 if (retval)
573 goto deallocate_vmid;
574 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
575 }
576
5bb6a8fa 577 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
d39b7737
OZ
578 if (retval)
579 goto out_deallocate_hqd;
580
6a6ef5ee
OZ
581 /* Temporarily release dqm lock to avoid a circular lock dependency */
582 dqm_unlock(dqm);
d091bc0a 583 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
6a6ef5ee
OZ
584 dqm_lock(dqm);
585
d091bc0a
OZ
586 if (!q->mqd_mem_obj) {
587 retval = -ENOMEM;
588 goto out_deallocate_doorbell;
589 }
42c6c482
DYS
590
591 if (qd)
592 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
3a9822d7
DYS
593 &q->properties, restore_mqd, restore_ctl_stack,
594 qd->ctl_stack_size);
42c6c482
DYS
595 else
596 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
597 &q->gart_mqd_addr, &q->properties);
598
d39b7737 599 if (q->properties.is_active) {
2c99a547
PY
600 if (!dqm->sched_running) {
601 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
602 goto add_queue_to_list;
603 }
d39b7737
OZ
604
605 if (WARN(q->process->mm != current->mm,
606 "should only run in user thread"))
607 retval = -EFAULT;
608 else
609 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
610 q->queue, &q->properties, current->mm);
611 if (retval)
d091bc0a 612 goto out_free_mqd;
64c7f8cf
BG
613 }
614
2c99a547 615add_queue_to_list:
64c7f8cf 616 list_add(&q->list, &qpd->queues_list);
bc920fd4 617 qpd->queue_count++;
b6819cec 618 if (q->properties.is_active)
ab4d51d4 619 increment_queue_count(dqm, qpd, q);
64c7f8cf 620
b8cbab04
OG
621 /*
622 * Unconditionally increment this counter, regardless of the queue's
623 * type or whether the queue is active.
624 */
625 dqm->total_queue_count++;
626 pr_debug("Total of %d queues are accountable so far\n",
627 dqm->total_queue_count);
d091bc0a 628 goto out_unlock;
b8cbab04 629
d091bc0a
OZ
630out_free_mqd:
631 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
d39b7737
OZ
632out_deallocate_doorbell:
633 deallocate_doorbell(qpd, q);
634out_deallocate_hqd:
635 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
636 deallocate_hqd(dqm, q);
637 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
638 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
639 deallocate_sdma_queue(dqm, q);
640deallocate_vmid:
641 if (list_empty(&qpd->queues_list))
642 deallocate_vmid(dqm, qpd, q);
ab7c1648 643out_unlock:
efeaed4d 644 dqm_unlock(dqm);
ab7c1648 645 return retval;
64c7f8cf
BG
646}
647
648static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
649{
650 bool set;
f0ec5b99 651 int pipe, bit, i;
64c7f8cf
BG
652
653 set = false;
654
8eabaf54
KR
655 for (pipe = dqm->next_pipe_to_allocate, i = 0;
656 i < get_pipes_per_mec(dqm);
d0b63bb3
AR
657 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
658
659 if (!is_pipe_enabled(dqm, 0, pipe))
660 continue;
661
64c7f8cf 662 if (dqm->allocated_queues[pipe] != 0) {
4252bf68
HK
663 bit = ffs(dqm->allocated_queues[pipe]) - 1;
664 dqm->allocated_queues[pipe] &= ~(1 << bit);
64c7f8cf
BG
665 q->pipe = pipe;
666 q->queue = bit;
667 set = true;
668 break;
669 }
670 }
671
991ca8ee 672 if (!set)
64c7f8cf
BG
673 return -EBUSY;
674
79775b62 675 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
64c7f8cf 676 /* horizontal hqd allocation */
d0b63bb3 677 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
64c7f8cf
BG
678
679 return 0;
680}
681
682static inline void deallocate_hqd(struct device_queue_manager *dqm,
683 struct queue *q)
684{
4252bf68 685 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
64c7f8cf
BG
686}
687
5bdd3eb2
MJ
688#define SQ_IND_CMD_CMD_KILL 0x00000003
689#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
690
8dc1db31 691static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p)
5bdd3eb2
MJ
692{
693 int status = 0;
694 unsigned int vmid;
695 uint16_t queried_pasid;
696 union SQ_CMD_BITS reg_sq_cmd;
697 union GRBM_GFX_INDEX_BITS reg_gfx_index;
698 struct kfd_process_device *pdd;
699 int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
700 int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
c4050ff1
LL
701 uint32_t xcc_mask = dev->xcc_mask;
702 int xcc_id;
5bdd3eb2
MJ
703
704 reg_sq_cmd.u32All = 0;
705 reg_gfx_index.u32All = 0;
706
707 pr_debug("Killing all process wavefronts\n");
708
d55957fb
YZ
709 if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
710 pr_err("no vmid pasid mapping supported \n");
711 return -EOPNOTSUPP;
712 }
713
5bdd3eb2
MJ
714 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
715 * ATC_VMID15_PASID_MAPPING
716 * to check which VMID the current process is mapped to.
717 */
718
d55957fb
YZ
719 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
720 status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
721 (dev->adev, vmid, &queried_pasid);
c8b0507f 722
d55957fb
YZ
723 if (status && queried_pasid == p->pasid) {
724 pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
725 vmid, p->pasid);
726 break;
5bdd3eb2
MJ
727 }
728 }
729
730 if (vmid > last_vmid_to_scan) {
731 pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
732 return -EFAULT;
733 }
734
735 /* taking the VMID for that process on the safe way using PDD */
736 pdd = kfd_get_process_device_data(dev, p);
737 if (!pdd)
738 return -EFAULT;
739
740 reg_gfx_index.bits.sh_broadcast_writes = 1;
741 reg_gfx_index.bits.se_broadcast_writes = 1;
742 reg_gfx_index.bits.instance_broadcast_writes = 1;
743 reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
744 reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
745 reg_sq_cmd.bits.vm_id = vmid;
746
c4050ff1
LL
747 for_each_inst(xcc_id, xcc_mask)
748 dev->kfd2kgd->wave_control_execute(
749 dev->adev, reg_gfx_index.u32All,
750 reg_sq_cmd.u32All, xcc_id);
5bdd3eb2
MJ
751
752 return 0;
753}
754
9fd3f1bf
FK
755/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
756 * to avoid asynchronized access
757 */
758static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
64c7f8cf
BG
759 struct qcm_process_device *qpd,
760 struct queue *q)
761{
762 int retval;
8d5f3552 763 struct mqd_manager *mqd_mgr;
64c7f8cf 764
fdfa090b
OZ
765 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
766 q->properties.type)];
64c7f8cf 767
c7637c95 768 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
c2e1b3a4 769 deallocate_hqd(dqm, q);
c7637c95 770 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 771 deallocate_sdma_queue(dqm, q);
c7637c95 772 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 773 deallocate_sdma_queue(dqm, q);
c7637c95 774 else {
79775b62 775 pr_debug("q->properties.type %d is invalid\n",
7113cd65 776 q->properties.type);
9fd3f1bf 777 return -EINVAL;
64c7f8cf 778 }
9fd3f1bf 779 dqm->total_queue_count--;
64c7f8cf 780
ef568db7
FK
781 deallocate_doorbell(qpd, q);
782
2c99a547
PY
783 if (!dqm->sched_running) {
784 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
785 return 0;
786 }
787
8d5f3552 788 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
c2e1b3a4 789 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
b90e3fbe 790 KFD_UNMAP_LATENCY_MS,
64c7f8cf 791 q->pipe, q->queue);
9fd3f1bf
FK
792 if (retval == -ETIME)
793 qpd->reset_wavefronts = true;
64c7f8cf 794
64c7f8cf 795 list_del(&q->list);
9fd3f1bf
FK
796 if (list_empty(&qpd->queues_list)) {
797 if (qpd->reset_wavefronts) {
798 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
799 dqm->dev);
800 /* dbgdev_wave_reset_wavefronts has to be called before
801 * deallocate_vmid(), i.e. when vmid is still in use.
802 */
803 dbgdev_wave_reset_wavefronts(dqm->dev,
804 qpd->pqm->process);
805 qpd->reset_wavefronts = false;
806 }
807
64c7f8cf 808 deallocate_vmid(dqm, qpd, q);
9fd3f1bf 809 }
bc920fd4 810 qpd->queue_count--;
ab4d51d4
DYS
811 if (q->properties.is_active)
812 decrement_queue_count(dqm, qpd, q);
b8cbab04 813
9fd3f1bf
FK
814 return retval;
815}
b8cbab04 816
9fd3f1bf
FK
817static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
818 struct qcm_process_device *qpd,
819 struct queue *q)
820{
821 int retval;
d69fd951
MJ
822 uint64_t sdma_val = 0;
823 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
a7b2451d
AL
824 struct mqd_manager *mqd_mgr =
825 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
d69fd951
MJ
826
827 /* Get the SDMA queue stats */
828 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
829 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
818b0324 830 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
d69fd951
MJ
831 &sdma_val);
832 if (retval)
833 pr_err("Failed to read SDMA queue counter for queue: %d\n",
834 q->properties.queue_id);
835 }
9fd3f1bf 836
efeaed4d 837 dqm_lock(dqm);
9fd3f1bf 838 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
d69fd951
MJ
839 if (!retval)
840 pdd->sdma_past_activity_counter += sdma_val;
efeaed4d 841 dqm_unlock(dqm);
9fd3f1bf 842
a7b2451d
AL
843 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
844
64c7f8cf
BG
845 return retval;
846}
847
c6e559eb
LY
848static int update_queue(struct device_queue_manager *dqm, struct queue *q,
849 struct mqd_update_info *minfo)
64c7f8cf 850{
8636e53c 851 int retval = 0;
8d5f3552 852 struct mqd_manager *mqd_mgr;
26103436 853 struct kfd_process_device *pdd;
b6ffbab8 854 bool prev_active = false;
64c7f8cf 855
efeaed4d 856 dqm_lock(dqm);
26103436
FK
857 pdd = kfd_get_process_device_data(q->device, q->process);
858 if (!pdd) {
859 retval = -ENODEV;
860 goto out_unlock;
861 }
fdfa090b
OZ
862 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
863 q->properties.type)];
64c7f8cf 864
60a00956
FK
865 /* Save previous activity state for counters */
866 prev_active = q->properties.is_active;
867
868 /* Make sure the queue is unmapped before updating the MQD */
d146c5a7 869 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
8dc1db31 870 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 871 retval = unmap_queues_cpsch(dqm,
7cee6a68 872 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
cc009e61
MJ
873 else if (prev_active)
874 retval = remove_queue_mes(dqm, q, &pdd->qpd);
875
894a8293 876 if (retval) {
60a00956
FK
877 pr_err("unmap queue failed\n");
878 goto out_unlock;
879 }
894a8293 880 } else if (prev_active &&
60a00956 881 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
882 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
883 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2c99a547
PY
884
885 if (!dqm->sched_running) {
886 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
887 goto out_unlock;
888 }
889
8d5f3552 890 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
8dc1db31 891 (dqm->dev->kfd->cwsr_enabled ?
2243f493
RB
892 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
893 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
60a00956
FK
894 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
895 if (retval) {
896 pr_err("destroy mqd failed\n");
897 goto out_unlock;
898 }
899 }
900
c6e559eb 901 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
60a00956 902
096d1a3e
FK
903 /*
904 * check active state vs. the previous state and modify
905 * counter accordingly. map_queues_cpsch uses the
81b820b3 906 * dqm->active_queue_count to determine whether a new runlist must be
096d1a3e
FK
907 * uploaded.
908 */
ab4d51d4
DYS
909 if (q->properties.is_active && !prev_active) {
910 increment_queue_count(dqm, &pdd->qpd, q);
911 } else if (!q->properties.is_active && prev_active) {
912 decrement_queue_count(dqm, &pdd->qpd, q);
913 } else if (q->gws && !q->properties.is_gws) {
b8020b03
JG
914 if (q->properties.is_active) {
915 dqm->gws_queue_count++;
916 pdd->qpd.mapped_gws_queue = true;
917 }
918 q->properties.is_gws = true;
919 } else if (!q->gws && q->properties.is_gws) {
920 if (q->properties.is_active) {
921 dqm->gws_queue_count--;
922 pdd->qpd.mapped_gws_queue = false;
923 }
924 q->properties.is_gws = false;
925 }
926
cc009e61 927 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
8dc1db31 928 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 929 retval = map_queues_cpsch(dqm);
f4f9b827 930 else if (q->properties.is_active)
cc009e61
MJ
931 retval = add_queue_mes(dqm, q, &pdd->qpd);
932 } else if (q->properties.is_active &&
60a00956 933 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
934 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
935 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1b19aa5a
FK
936 if (WARN(q->process->mm != current->mm,
937 "should only run in user thread"))
938 retval = -EFAULT;
939 else
940 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
941 q->pipe, q->queue,
942 &q->properties, current->mm);
943 }
b6ffbab8 944
ab7c1648 945out_unlock:
efeaed4d 946 dqm_unlock(dqm);
64c7f8cf
BG
947 return retval;
948}
949
26103436
FK
950static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
951 struct qcm_process_device *qpd)
952{
953 struct queue *q;
8d5f3552 954 struct mqd_manager *mqd_mgr;
26103436 955 struct kfd_process_device *pdd;
bb2d2128 956 int retval, ret = 0;
26103436 957
efeaed4d 958 dqm_lock(dqm);
26103436
FK
959 if (qpd->evicted++ > 0) /* already evicted, do nothing */
960 goto out;
961
962 pdd = qpd_to_pdd(qpd);
783a25f4 963 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
964 pdd->process->pasid);
965
4327bed2 966 pdd->last_evict_timestamp = get_jiffies_64();
bb2d2128
FK
967 /* Mark all queues as evicted. Deactivate all active queues on
968 * the qpd.
969 */
26103436 970 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 971 q->properties.is_evicted = true;
26103436
FK
972 if (!q->properties.is_active)
973 continue;
bb2d2128 974
fdfa090b
OZ
975 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
976 q->properties.type)];
26103436 977 q->properties.is_active = false;
ab4d51d4 978 decrement_queue_count(dqm, qpd, q);
2c99a547
PY
979
980 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
981 continue;
982
8d5f3552 983 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
8dc1db31 984 (dqm->dev->kfd->cwsr_enabled ?
2243f493
RB
985 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
986 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
26103436 987 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
bb2d2128
FK
988 if (retval && !ret)
989 /* Return the first error, but keep going to
990 * maintain a consistent eviction state
991 */
992 ret = retval;
26103436
FK
993 }
994
995out:
efeaed4d 996 dqm_unlock(dqm);
bb2d2128 997 return ret;
26103436
FK
998}
999
1000static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
1001 struct qcm_process_device *qpd)
1002{
1003 struct queue *q;
1004 struct kfd_process_device *pdd;
1005 int retval = 0;
1006
efeaed4d 1007 dqm_lock(dqm);
26103436
FK
1008 if (qpd->evicted++ > 0) /* already evicted, do nothing */
1009 goto out;
1010
1011 pdd = qpd_to_pdd(qpd);
0ab2d753
JK
1012
1013 /* The debugger creates processes that temporarily have not acquired
1014 * all VMs for all devices and has no VMs itself.
1015 * Skip queue eviction on process eviction.
1016 */
1017 if (!pdd->drm_priv)
1018 goto out;
1019
783a25f4 1020 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
1021 pdd->process->pasid);
1022
bb2d2128
FK
1023 /* Mark all queues as evicted. Deactivate all active queues on
1024 * the qpd.
1025 */
26103436 1026 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 1027 q->properties.is_evicted = true;
26103436
FK
1028 if (!q->properties.is_active)
1029 continue;
bb2d2128 1030
26103436 1031 q->properties.is_active = false;
ab4d51d4 1032 decrement_queue_count(dqm, qpd, q);
cc009e61 1033
8dc1db31 1034 if (dqm->dev->kfd->shared_resources.enable_mes) {
cc009e61
MJ
1035 retval = remove_queue_mes(dqm, q, qpd);
1036 if (retval) {
1037 pr_err("Failed to evict queue %d\n",
1038 q->properties.queue_id);
1039 goto out;
1040 }
1041 }
26103436 1042 }
4327bed2 1043 pdd->last_evict_timestamp = get_jiffies_64();
8dc1db31 1044 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61
MJ
1045 retval = execute_queues_cpsch(dqm,
1046 qpd->is_debug ?
1047 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
7cee6a68
JK
1048 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1049 USE_DEFAULT_GRACE_PERIOD);
26103436
FK
1050
1051out:
efeaed4d 1052 dqm_unlock(dqm);
26103436
FK
1053 return retval;
1054}
1055
1056static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1057 struct qcm_process_device *qpd)
1058{
1b19aa5a 1059 struct mm_struct *mm = NULL;
26103436 1060 struct queue *q;
8d5f3552 1061 struct mqd_manager *mqd_mgr;
26103436 1062 struct kfd_process_device *pdd;
e715c6d0 1063 uint64_t pd_base;
4327bed2 1064 uint64_t eviction_duration;
bb2d2128 1065 int retval, ret = 0;
26103436
FK
1066
1067 pdd = qpd_to_pdd(qpd);
1068 /* Retrieve PD base */
b40a6ab2 1069 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
26103436 1070
efeaed4d 1071 dqm_lock(dqm);
26103436
FK
1072 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1073 goto out;
1074 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1075 qpd->evicted--;
1076 goto out;
1077 }
1078
783a25f4 1079 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
1080 pdd->process->pasid);
1081
1082 /* Update PD Base in QPD */
1083 qpd->page_table_base = pd_base;
e715c6d0 1084 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
1085
1086 if (!list_empty(&qpd->queues_list)) {
1087 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
3356c38d 1088 dqm->dev->adev,
26103436
FK
1089 qpd->vmid,
1090 qpd->page_table_base);
3543b055 1091 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
26103436
FK
1092 }
1093
1b19aa5a
FK
1094 /* Take a safe reference to the mm_struct, which may otherwise
1095 * disappear even while the kfd_process is still referenced.
1096 */
1097 mm = get_task_mm(pdd->process->lead_thread);
1098 if (!mm) {
bb2d2128 1099 ret = -EFAULT;
1b19aa5a
FK
1100 goto out;
1101 }
1102
bb2d2128
FK
1103 /* Remove the eviction flags. Activate queues that are not
1104 * inactive for other reasons.
1105 */
26103436 1106 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128
FK
1107 q->properties.is_evicted = false;
1108 if (!QUEUE_IS_ACTIVE(q->properties))
26103436 1109 continue;
bb2d2128 1110
fdfa090b
OZ
1111 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1112 q->properties.type)];
26103436 1113 q->properties.is_active = true;
ab4d51d4 1114 increment_queue_count(dqm, qpd, q);
2c99a547
PY
1115
1116 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1117 continue;
1118
8d5f3552 1119 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1b19aa5a 1120 q->queue, &q->properties, mm);
bb2d2128
FK
1121 if (retval && !ret)
1122 /* Return the first error, but keep going to
1123 * maintain a consistent eviction state
1124 */
1125 ret = retval;
26103436
FK
1126 }
1127 qpd->evicted = 0;
4327bed2
PC
1128 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1129 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
26103436 1130out:
1b19aa5a
FK
1131 if (mm)
1132 mmput(mm);
efeaed4d 1133 dqm_unlock(dqm);
bb2d2128 1134 return ret;
26103436
FK
1135}
1136
1137static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1138 struct qcm_process_device *qpd)
1139{
1140 struct queue *q;
1141 struct kfd_process_device *pdd;
4327bed2 1142 uint64_t eviction_duration;
26103436
FK
1143 int retval = 0;
1144
1145 pdd = qpd_to_pdd(qpd);
26103436 1146
efeaed4d 1147 dqm_lock(dqm);
26103436
FK
1148 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1149 goto out;
1150 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1151 qpd->evicted--;
1152 goto out;
1153 }
1154
0ab2d753
JK
1155 /* The debugger creates processes that temporarily have not acquired
1156 * all VMs for all devices and has no VMs itself.
1157 * Skip queue restore on process restore.
1158 */
1159 if (!pdd->drm_priv)
1160 goto vm_not_acquired;
1161
783a25f4 1162 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
1163 pdd->process->pasid);
1164
1165 /* Update PD Base in QPD */
0ab2d753
JK
1166 qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
1167 pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base);
26103436
FK
1168
1169 /* activate all active queues on the qpd */
1170 list_for_each_entry(q, &qpd->queues_list, list) {
26103436 1171 q->properties.is_evicted = false;
bb2d2128
FK
1172 if (!QUEUE_IS_ACTIVE(q->properties))
1173 continue;
1174
26103436 1175 q->properties.is_active = true;
ab4d51d4 1176 increment_queue_count(dqm, &pdd->qpd, q);
cc009e61 1177
8dc1db31 1178 if (dqm->dev->kfd->shared_resources.enable_mes) {
cc009e61
MJ
1179 retval = add_queue_mes(dqm, q, qpd);
1180 if (retval) {
1181 pr_err("Failed to restore queue %d\n",
1182 q->properties.queue_id);
1183 goto out;
1184 }
1185 }
26103436 1186 }
8dc1db31 1187 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 1188 retval = execute_queues_cpsch(dqm,
7cee6a68 1189 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
4327bed2
PC
1190 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1191 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
0ab2d753
JK
1192vm_not_acquired:
1193 qpd->evicted = 0;
26103436 1194out:
efeaed4d 1195 dqm_unlock(dqm);
26103436
FK
1196 return retval;
1197}
1198
58dcd5bf 1199static int register_process(struct device_queue_manager *dqm,
64c7f8cf
BG
1200 struct qcm_process_device *qpd)
1201{
1202 struct device_process_node *n;
403575c4 1203 struct kfd_process_device *pdd;
e715c6d0 1204 uint64_t pd_base;
a22fc854 1205 int retval;
64c7f8cf 1206
dbf56ab1 1207 n = kzalloc(sizeof(*n), GFP_KERNEL);
64c7f8cf
BG
1208 if (!n)
1209 return -ENOMEM;
1210
1211 n->qpd = qpd;
1212
403575c4
FK
1213 pdd = qpd_to_pdd(qpd);
1214 /* Retrieve PD base */
b40a6ab2 1215 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
403575c4 1216
efeaed4d 1217 dqm_lock(dqm);
64c7f8cf
BG
1218 list_add(&n->list, &dqm->queues);
1219
403575c4
FK
1220 /* Update PD Base in QPD */
1221 qpd->page_table_base = pd_base;
e715c6d0 1222 pr_debug("Updated PD address to 0x%llx\n", pd_base);
403575c4 1223
bfd5e378 1224 retval = dqm->asic_ops.update_qpd(dqm, qpd);
a22fc854 1225
f756e631 1226 dqm->processes_count++;
64c7f8cf 1227
efeaed4d 1228 dqm_unlock(dqm);
64c7f8cf 1229
32cce8bc
FK
1230 /* Outside the DQM lock because under the DQM lock we can't do
1231 * reclaim or take other locks that others hold while reclaiming.
1232 */
1233 kfd_inc_compute_active(dqm->dev);
1234
a22fc854 1235 return retval;
64c7f8cf
BG
1236}
1237
58dcd5bf 1238static int unregister_process(struct device_queue_manager *dqm,
64c7f8cf
BG
1239 struct qcm_process_device *qpd)
1240{
1241 int retval;
1242 struct device_process_node *cur, *next;
1243
1e5ec956
OG
1244 pr_debug("qpd->queues_list is %s\n",
1245 list_empty(&qpd->queues_list) ? "empty" : "not empty");
64c7f8cf
BG
1246
1247 retval = 0;
efeaed4d 1248 dqm_lock(dqm);
64c7f8cf
BG
1249
1250 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1251 if (qpd == cur->qpd) {
1252 list_del(&cur->list);
f5d896bb 1253 kfree(cur);
f756e631 1254 dqm->processes_count--;
64c7f8cf
BG
1255 goto out;
1256 }
1257 }
1258 /* qpd not found in dqm list */
1259 retval = 1;
1260out:
efeaed4d 1261 dqm_unlock(dqm);
32cce8bc
FK
1262
1263 /* Outside the DQM lock because under the DQM lock we can't do
1264 * reclaim or take other locks that others hold while reclaiming.
1265 */
1266 if (!retval)
1267 kfd_dec_compute_active(dqm->dev);
1268
64c7f8cf
BG
1269 return retval;
1270}
1271
1272static int
c7b6bac9 1273set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
64c7f8cf
BG
1274 unsigned int vmid)
1275{
c4050ff1
LL
1276 uint32_t xcc_mask = dqm->dev->xcc_mask;
1277 int xcc_id, ret;
e2069a7b 1278
c4050ff1 1279 for_each_inst(xcc_id, xcc_mask) {
e2069a7b 1280 ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
c4050ff1 1281 dqm->dev->adev, pasid, vmid, xcc_id);
e2069a7b
MJ
1282 if (ret)
1283 break;
1284 }
1285
1286 return ret;
64c7f8cf
BG
1287}
1288
2249d558
AL
1289static void init_interrupts(struct device_queue_manager *dqm)
1290{
c4050ff1
LL
1291 uint32_t xcc_mask = dqm->dev->xcc_mask;
1292 unsigned int i, xcc_id;
2249d558 1293
b695c97b
LL
1294 for_each_inst(xcc_id, xcc_mask) {
1295 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) {
1296 if (is_pipe_enabled(dqm, 0, i)) {
e2069a7b 1297 dqm->dev->kfd2kgd->init_interrupts(
c4050ff1 1298 dqm->dev->adev, i, xcc_id);
b695c97b 1299 }
e2069a7b
MJ
1300 }
1301 }
2249d558
AL
1302}
1303
64c7f8cf
BG
1304static int initialize_nocpsch(struct device_queue_manager *dqm)
1305{
86194cf8 1306 int pipe, queue;
64c7f8cf 1307
79775b62 1308 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1309
ab7c1648
KR
1310 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1311 sizeof(unsigned int), GFP_KERNEL);
1312 if (!dqm->allocated_queues)
1313 return -ENOMEM;
1314
efeaed4d 1315 mutex_init(&dqm->lock_hidden);
64c7f8cf 1316 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1317 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
b42902f4 1318 dqm->active_cp_queue_count = 0;
b8020b03 1319 dqm->gws_queue_count = 0;
64c7f8cf 1320
86194cf8
FK
1321 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1322 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1323
1324 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1325 if (test_bit(pipe_offset + queue,
8dc1db31 1326 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
86194cf8
FK
1327 dqm->allocated_queues[pipe] |= 1 << queue;
1328 }
64c7f8cf 1329
d9d4623c
YZ
1330 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1331
b292cafe 1332 init_sdma_bitmaps(dqm);
64c7f8cf 1333
64c7f8cf
BG
1334 return 0;
1335}
1336
58dcd5bf 1337static void uninitialize(struct device_queue_manager *dqm)
64c7f8cf 1338{
6f9d54fd
OG
1339 int i;
1340
81b820b3 1341 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
64c7f8cf
BG
1342
1343 kfree(dqm->allocated_queues);
6f9d54fd 1344 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
8d5f3552 1345 kfree(dqm->mqd_mgrs[i]);
efeaed4d 1346 mutex_destroy(&dqm->lock_hidden);
64c7f8cf
BG
1347}
1348
1349static int start_nocpsch(struct device_queue_manager *dqm)
1350{
6f4cb84a
FK
1351 int r = 0;
1352
52055039 1353 pr_info("SW scheduler is used");
2249d558 1354 init_interrupts(dqm);
2243f493 1355
7eb0502a 1356 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
6f4cb84a
FK
1357 r = pm_init(&dqm->packet_mgr, dqm);
1358 if (!r)
1359 dqm->sched_running = true;
2c99a547 1360
6f4cb84a 1361 return r;
64c7f8cf
BG
1362}
1363
1364static int stop_nocpsch(struct device_queue_manager *dqm)
1365{
5e406012
MJ
1366 dqm_lock(dqm);
1367 if (!dqm->sched_running) {
1368 dqm_unlock(dqm);
1369 return 0;
1370 }
1371
7eb0502a 1372 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
9af5379c 1373 pm_uninit(&dqm->packet_mgr, false);
2c99a547 1374 dqm->sched_running = false;
5e406012 1375 dqm_unlock(dqm);
2c99a547 1376
64c7f8cf
BG
1377 return 0;
1378}
1379
09c34e8d
FK
1380static void pre_reset(struct device_queue_manager *dqm)
1381{
1382 dqm_lock(dqm);
1383 dqm->is_resetting = true;
1384 dqm_unlock(dqm);
1385}
1386
bcea3081 1387static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 1388 struct queue *q, const uint32_t *restore_sdma_id)
bcea3081
BG
1389{
1390 int bit;
1391
1b4670f6 1392 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
a805889a 1393 if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
c7637c95 1394 pr_err("No more SDMA queue to allocate\n");
1b4670f6 1395 return -ENOMEM;
c7637c95
YZ
1396 }
1397
2485c12c
DYS
1398 if (restore_sdma_id) {
1399 /* Re-use existing sdma_id */
a805889a 1400 if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) {
2485c12c
DYS
1401 pr_err("SDMA queue already in use\n");
1402 return -EBUSY;
1403 }
a805889a 1404 clear_bit(*restore_sdma_id, dqm->sdma_bitmap);
2485c12c
DYS
1405 q->sdma_id = *restore_sdma_id;
1406 } else {
1407 /* Find first available sdma_id */
a805889a
MJ
1408 bit = find_first_bit(dqm->sdma_bitmap,
1409 get_num_sdma_queues(dqm));
1410 clear_bit(bit, dqm->sdma_bitmap);
2485c12c
DYS
1411 q->sdma_id = bit;
1412 }
1413
a805889a 1414 q->properties.sdma_engine_id =
a805889a 1415 q->sdma_id % kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1416 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1417 kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1418 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
a805889a 1419 if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) {
c7637c95 1420 pr_err("No more XGMI SDMA queue to allocate\n");
1b4670f6 1421 return -ENOMEM;
c7637c95 1422 }
2485c12c
DYS
1423 if (restore_sdma_id) {
1424 /* Re-use existing sdma_id */
a805889a 1425 if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) {
2485c12c
DYS
1426 pr_err("SDMA queue already in use\n");
1427 return -EBUSY;
1428 }
a805889a 1429 clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap);
2485c12c
DYS
1430 q->sdma_id = *restore_sdma_id;
1431 } else {
a805889a
MJ
1432 bit = find_first_bit(dqm->xgmi_sdma_bitmap,
1433 get_num_xgmi_sdma_queues(dqm));
1434 clear_bit(bit, dqm->xgmi_sdma_bitmap);
2485c12c
DYS
1435 q->sdma_id = bit;
1436 }
1b4670f6
OZ
1437 /* sdma_engine_id is sdma id including
1438 * both PCIe-optimized SDMAs and XGMI-
1439 * optimized SDMAs. The calculation below
1440 * assumes the first N engines are always
1441 * PCIe-optimized ones
1442 */
ee2f17f4
AL
1443 q->properties.sdma_engine_id =
1444 kfd_get_num_sdma_engines(dqm->dev) +
1445 q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1446 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1447 kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1448 }
e78579aa 1449
e78579aa
YZ
1450 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1451 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
bcea3081
BG
1452
1453 return 0;
1454}
1455
1456static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 1457 struct queue *q)
bcea3081 1458{
1b4670f6
OZ
1459 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1460 if (q->sdma_id >= get_num_sdma_queues(dqm))
1461 return;
a805889a 1462 set_bit(q->sdma_id, dqm->sdma_bitmap);
1b4670f6
OZ
1463 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1464 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1465 return;
a805889a 1466 set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap);
1b4670f6 1467 }
bcea3081
BG
1468}
1469
64c7f8cf
BG
1470/*
1471 * Device Queue Manager implementation for cp scheduler
1472 */
1473
1474static int set_sched_resources(struct device_queue_manager *dqm)
1475{
d0b63bb3 1476 int i, mec;
64c7f8cf 1477 struct scheduling_resources res;
64c7f8cf 1478
74c5b85d 1479 res.vmid_mask = dqm->dev->compute_vmid_bitmap;
d0b63bb3
AR
1480
1481 res.queue_mask = 0;
1482 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
8dc1db31
MJ
1483 mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe)
1484 / dqm->dev->kfd->shared_resources.num_pipe_per_mec;
d0b63bb3 1485
8dc1db31 1486 if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
1487 continue;
1488
1489 /* only acquire queues from the first MEC */
1490 if (mec > 0)
1491 continue;
1492
1493 /* This situation may be hit in the future if a new HW
1494 * generation exposes more than 64 queues. If so, the
8eabaf54
KR
1495 * definition of res.queue_mask needs updating
1496 */
1d11ee89 1497 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
d0b63bb3
AR
1498 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1499 break;
1500 }
1501
d09f85d5
YZ
1502 res.queue_mask |= 1ull
1503 << amdgpu_queue_mask_bit_to_set_resource_bit(
56c5977e 1504 dqm->dev->adev, i);
d0b63bb3 1505 }
d9848e14
OZ
1506 res.gws_mask = ~0ull;
1507 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
64c7f8cf 1508
79775b62
KR
1509 pr_debug("Scheduling resources:\n"
1510 "vmid mask: 0x%8X\n"
1511 "queue mask: 0x%8llX\n",
64c7f8cf
BG
1512 res.vmid_mask, res.queue_mask);
1513
9af5379c 1514 return pm_send_set_resources(&dqm->packet_mgr, &res);
64c7f8cf
BG
1515}
1516
1517static int initialize_cpsch(struct device_queue_manager *dqm)
1518{
79775b62 1519 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1520
efeaed4d 1521 mutex_init(&dqm->lock_hidden);
64c7f8cf 1522 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1523 dqm->active_queue_count = dqm->processes_count = 0;
b42902f4 1524 dqm->active_cp_queue_count = 0;
b8020b03 1525 dqm->gws_queue_count = 0;
64c7f8cf 1526 dqm->active_runlist = false;
73ea648d
SL
1527 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1528
b292cafe
FK
1529 init_sdma_bitmaps(dqm);
1530
7cee6a68
JK
1531 if (dqm->dev->kfd2kgd->get_iq_wait_times)
1532 dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev,
1533 &dqm->wait_times);
bfd5e378 1534 return 0;
64c7f8cf
BG
1535}
1536
1537static int start_cpsch(struct device_queue_manager *dqm)
1538{
64c7f8cf
BG
1539 int retval;
1540
64c7f8cf
BG
1541 retval = 0;
1542
4f942aae 1543 dqm_lock(dqm);
64c7f8cf 1544
8dc1db31 1545 if (!dqm->dev->kfd->shared_resources.enable_mes) {
cc009e61
MJ
1546 retval = pm_init(&dqm->packet_mgr, dqm);
1547 if (retval)
1548 goto fail_packet_manager_init;
64c7f8cf 1549
cc009e61
MJ
1550 retval = set_sched_resources(dqm);
1551 if (retval)
1552 goto fail_set_sched_resources;
1553 }
79775b62 1554 pr_debug("Allocating fence memory\n");
64c7f8cf
BG
1555
1556 /* allocate fence memory on the gart */
a86aa3ca
OG
1557 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1558 &dqm->fence_mem);
64c7f8cf 1559
4eacc26b 1560 if (retval)
64c7f8cf
BG
1561 goto fail_allocate_vidmem;
1562
b010affe 1563 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
64c7f8cf 1564 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
2249d558
AL
1565
1566 init_interrupts(dqm);
1567
73ea648d
SL
1568 /* clear hang status when driver try to start the hw scheduler */
1569 dqm->is_hws_hang = false;
09c34e8d 1570 dqm->is_resetting = false;
2c99a547 1571 dqm->sched_running = true;
7cee6a68 1572
8dc1db31 1573 if (!dqm->dev->kfd->shared_resources.enable_mes)
7cee6a68 1574 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
efeaed4d 1575 dqm_unlock(dqm);
64c7f8cf
BG
1576
1577 return 0;
1578fail_allocate_vidmem:
1579fail_set_sched_resources:
8dc1db31 1580 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 1581 pm_uninit(&dqm->packet_mgr, false);
64c7f8cf 1582fail_packet_manager_init:
4f942aae 1583 dqm_unlock(dqm);
64c7f8cf
BG
1584 return retval;
1585}
1586
1587static int stop_cpsch(struct device_queue_manager *dqm)
1588{
c2a77fde
FK
1589 bool hanging;
1590
efeaed4d 1591 dqm_lock(dqm);
c96cb659 1592 if (!dqm->sched_running) {
1593 dqm_unlock(dqm);
1594 return 0;
1595 }
1596
cc009e61 1597 if (!dqm->is_hws_hang) {
8dc1db31 1598 if (!dqm->dev->kfd->shared_resources.enable_mes)
7cee6a68 1599 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false);
cc009e61
MJ
1600 else
1601 remove_all_queues_mes(dqm);
1602 }
1603
c2a77fde 1604 hanging = dqm->is_hws_hang || dqm->is_resetting;
2c99a547 1605 dqm->sched_running = false;
64c7f8cf 1606
8dc1db31 1607 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 1608 pm_release_ib(&dqm->packet_mgr);
087d7641 1609
a86aa3ca 1610 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
8dc1db31 1611 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 1612 pm_uninit(&dqm->packet_mgr, hanging);
4f942aae 1613 dqm_unlock(dqm);
64c7f8cf
BG
1614
1615 return 0;
1616}
1617
1618static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1619 struct kernel_queue *kq,
1620 struct qcm_process_device *qpd)
1621{
efeaed4d 1622 dqm_lock(dqm);
b8cbab04 1623 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 1624 pr_warn("Can't create new kernel queue because %d queues were already created\n",
b8cbab04 1625 dqm->total_queue_count);
efeaed4d 1626 dqm_unlock(dqm);
b8cbab04
OG
1627 return -EPERM;
1628 }
1629
1630 /*
1631 * Unconditionally increment this counter, regardless of the queue's
1632 * type or whether the queue is active.
1633 */
1634 dqm->total_queue_count++;
1635 pr_debug("Total of %d queues are accountable so far\n",
1636 dqm->total_queue_count);
1637
64c7f8cf 1638 list_add(&kq->list, &qpd->priv_queue_list);
ab4d51d4 1639 increment_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1640 qpd->is_debug = true;
7cee6a68
JK
1641 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1642 USE_DEFAULT_GRACE_PERIOD);
efeaed4d 1643 dqm_unlock(dqm);
64c7f8cf
BG
1644
1645 return 0;
1646}
1647
1648static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1649 struct kernel_queue *kq,
1650 struct qcm_process_device *qpd)
1651{
efeaed4d 1652 dqm_lock(dqm);
64c7f8cf 1653 list_del(&kq->list);
ab4d51d4 1654 decrement_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1655 qpd->is_debug = false;
7cee6a68
JK
1656 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
1657 USE_DEFAULT_GRACE_PERIOD);
b8cbab04
OG
1658 /*
1659 * Unconditionally decrement this counter, regardless of the queue's
1660 * type.
1661 */
8b58f261 1662 dqm->total_queue_count--;
b8cbab04
OG
1663 pr_debug("Total of %d queues are accountable so far\n",
1664 dqm->total_queue_count);
efeaed4d 1665 dqm_unlock(dqm);
64c7f8cf
BG
1666}
1667
1668static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
2485c12c 1669 struct qcm_process_device *qpd,
42c6c482 1670 const struct kfd_criu_queue_priv_data *qd,
3a9822d7 1671 const void *restore_mqd, const void *restore_ctl_stack)
64c7f8cf
BG
1672{
1673 int retval;
8d5f3552 1674 struct mqd_manager *mqd_mgr;
64c7f8cf 1675
b8cbab04 1676 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 1677 pr_warn("Can't create new usermode queue because %d queues were already created\n",
b8cbab04 1678 dqm->total_queue_count);
70d488fb
OZ
1679 retval = -EPERM;
1680 goto out;
b8cbab04
OG
1681 }
1682
1b4670f6
OZ
1683 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1684 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
38bb4226 1685 dqm_lock(dqm);
2485c12c 1686 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
38bb4226 1687 dqm_unlock(dqm);
894a8293 1688 if (retval)
70d488fb 1689 goto out;
e139cd2a 1690 }
ef568db7 1691
5bb6a8fa 1692 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
ef568db7
FK
1693 if (retval)
1694 goto out_deallocate_sdma_queue;
1695
70d488fb
OZ
1696 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1697 q->properties.type)];
70df8273 1698
eec0b4cf
OZ
1699 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1700 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1701 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
373d7080
FK
1702 q->properties.tba_addr = qpd->tba_addr;
1703 q->properties.tma_addr = qpd->tma_addr;
70d488fb
OZ
1704 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1705 if (!q->mqd_mem_obj) {
1706 retval = -ENOMEM;
1707 goto out_deallocate_doorbell;
1708 }
70df8273
EH
1709
1710 dqm_lock(dqm);
1711 /*
1712 * Eviction state logic: mark all queues as evicted, even ones
1713 * not currently active. Restoring inactive queues later only
1714 * updates the is_evicted flag but is a no-op otherwise.
1715 */
1716 q->properties.is_evicted = !!qpd->evicted;
42c6c482
DYS
1717
1718 if (qd)
1719 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
3a9822d7
DYS
1720 &q->properties, restore_mqd, restore_ctl_stack,
1721 qd->ctl_stack_size);
42c6c482
DYS
1722 else
1723 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1724 &q->gart_mqd_addr, &q->properties);
89cd9d23 1725
64c7f8cf 1726 list_add(&q->list, &qpd->queues_list);
bc920fd4 1727 qpd->queue_count++;
f38abc15 1728
64c7f8cf 1729 if (q->properties.is_active) {
ab4d51d4 1730 increment_queue_count(dqm, qpd, q);
b42902f4 1731
8dc1db31 1732 if (!dqm->dev->kfd->shared_resources.enable_mes)
cc009e61 1733 retval = execute_queues_cpsch(dqm,
7cee6a68 1734 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD);
cc3cb791 1735 else
cc009e61 1736 retval = add_queue_mes(dqm, q, qpd);
cc3cb791 1737 if (retval)
1738 goto cleanup_queue;
64c7f8cf
BG
1739 }
1740
b8cbab04
OG
1741 /*
1742 * Unconditionally increment this counter, regardless of the queue's
1743 * type or whether the queue is active.
1744 */
1745 dqm->total_queue_count++;
1746
1747 pr_debug("Total of %d queues are accountable so far\n",
1748 dqm->total_queue_count);
1749
efeaed4d 1750 dqm_unlock(dqm);
72a01d23
FK
1751 return retval;
1752
cc009e61
MJ
1753cleanup_queue:
1754 qpd->queue_count--;
1755 list_del(&q->list);
1756 if (q->properties.is_active)
1757 decrement_queue_count(dqm, qpd, q);
1758 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1759 dqm_unlock(dqm);
70d488fb
OZ
1760out_deallocate_doorbell:
1761 deallocate_doorbell(qpd, q);
72a01d23 1762out_deallocate_sdma_queue:
1b4670f6 1763 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
38bb4226
OZ
1764 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1765 dqm_lock(dqm);
1b4670f6 1766 deallocate_sdma_queue(dqm, q);
38bb4226
OZ
1767 dqm_unlock(dqm);
1768 }
70d488fb 1769out:
64c7f8cf
BG
1770 return retval;
1771}
1772
b010affe
QH
1773int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
1774 uint64_t fence_value,
8c72c3d7 1775 unsigned int timeout_ms)
64c7f8cf 1776{
8c72c3d7 1777 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
64c7f8cf
BG
1778
1779 while (*fence_addr != fence_value) {
8c72c3d7 1780 if (time_after(jiffies, end_jiffies)) {
79775b62 1781 pr_err("qcm fence wait loop timeout expired\n");
0e9a860c
YZ
1782 /* In HWS case, this is used to halt the driver thread
1783 * in order not to mess up CP states before doing
1784 * scandumps for FW debugging.
1785 */
1786 while (halt_if_hws_hang)
1787 schedule();
1788
64c7f8cf
BG
1789 return -ETIME;
1790 }
99331a51 1791 schedule();
64c7f8cf
BG
1792 }
1793
1794 return 0;
1795}
1796
60a00956
FK
1797/* dqm->lock mutex has to be locked before calling this function */
1798static int map_queues_cpsch(struct device_queue_manager *dqm)
1799{
1800 int retval;
1801
2c99a547
PY
1802 if (!dqm->sched_running)
1803 return 0;
81b820b3 1804 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
60a00956 1805 return 0;
60a00956
FK
1806 if (dqm->active_runlist)
1807 return 0;
1808
9af5379c 1809 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
14328aa5 1810 pr_debug("%s sent runlist\n", __func__);
60a00956
FK
1811 if (retval) {
1812 pr_err("failed to execute runlist\n");
1813 return retval;
1814 }
1815 dqm->active_runlist = true;
1816
1817 return retval;
1818}
1819
ac30c783 1820/* dqm->lock mutex has to be locked before calling this function */
7da2bcf8 1821static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466 1822 enum kfd_unmap_queues_filter filter,
7cee6a68
JK
1823 uint32_t filter_param,
1824 uint32_t grace_period,
1825 bool reset)
64c7f8cf 1826{
9fd3f1bf 1827 int retval = 0;
51a0f459 1828 struct mqd_manager *mqd_mgr;
64c7f8cf 1829
2c99a547
PY
1830 if (!dqm->sched_running)
1831 return 0;
b8c20c74 1832 if (dqm->is_hws_hang || dqm->is_resetting)
73ea648d 1833 return -EIO;
991ca8ee 1834 if (!dqm->active_runlist)
ac30c783 1835 return retval;
bcea3081 1836
7cee6a68
JK
1837 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1838 retval = pm_update_grace_period(&dqm->packet_mgr, grace_period);
1839 if (retval)
1840 return retval;
1841 }
1842
d2cb0b21 1843 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
4eacc26b 1844 if (retval)
ac30c783 1845 return retval;
64c7f8cf
BG
1846
1847 *dqm->fence_addr = KFD_FENCE_INIT;
9af5379c 1848 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
64c7f8cf
BG
1849 KFD_FENCE_COMPLETED);
1850 /* should be timed out */
c3447e81 1851 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
14328aa5 1852 queue_preemption_timeout_ms);
09c34e8d
FK
1853 if (retval) {
1854 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
cc009e61 1855 kfd_hws_hang(dqm);
ac30c783 1856 return retval;
09c34e8d 1857 }
9fd3f1bf 1858
51a0f459
OZ
1859 /* In the current MEC firmware implementation, if compute queue
1860 * doesn't response to the preemption request in time, HIQ will
1861 * abandon the unmap request without returning any timeout error
1862 * to driver. Instead, MEC firmware will log the doorbell of the
1863 * unresponding compute queue to HIQ.MQD.queue_doorbell_id fields.
1864 * To make sure the queue unmap was successful, driver need to
1865 * check those fields
1866 */
1867 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
9af5379c 1868 if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
51a0f459
OZ
1869 pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1870 while (halt_if_hws_hang)
1871 schedule();
1872 return -ETIME;
1873 }
1874
7cee6a68
JK
1875 /* We need to reset the grace period value for this device */
1876 if (grace_period != USE_DEFAULT_GRACE_PERIOD) {
1877 if (pm_update_grace_period(&dqm->packet_mgr,
1878 USE_DEFAULT_GRACE_PERIOD))
1879 pr_err("Failed to reset grace period\n");
1880 }
1881
9af5379c 1882 pm_release_ib(&dqm->packet_mgr);
64c7f8cf
BG
1883 dqm->active_runlist = false;
1884
64c7f8cf
BG
1885 return retval;
1886}
1887
dec63443
TZ
1888/* only for compute queue */
1889static int reset_queues_cpsch(struct device_queue_manager *dqm,
1890 uint16_t pasid)
1891{
1892 int retval;
1893
1894 dqm_lock(dqm);
1895
1896 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
7cee6a68 1897 pasid, USE_DEFAULT_GRACE_PERIOD, true);
dec63443
TZ
1898
1899 dqm_unlock(dqm);
1900 return retval;
1901}
1902
ac30c783 1903/* dqm->lock mutex has to be locked before calling this function */
c4744e24
YZ
1904static int execute_queues_cpsch(struct device_queue_manager *dqm,
1905 enum kfd_unmap_queues_filter filter,
7cee6a68
JK
1906 uint32_t filter_param,
1907 uint32_t grace_period)
64c7f8cf
BG
1908{
1909 int retval;
1910
73ea648d
SL
1911 if (dqm->is_hws_hang)
1912 return -EIO;
7cee6a68 1913 retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false);
09c34e8d 1914 if (retval)
ac30c783 1915 return retval;
64c7f8cf 1916
60a00956 1917 return map_queues_cpsch(dqm);
64c7f8cf
BG
1918}
1919
1920static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1921 struct qcm_process_device *qpd,
1922 struct queue *q)
1923{
1924 int retval;
8d5f3552 1925 struct mqd_manager *mqd_mgr;
d69fd951
MJ
1926 uint64_t sdma_val = 0;
1927 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
1928
1929 /* Get the SDMA queue stats */
1930 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1931 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
818b0324 1932 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
d69fd951
MJ
1933 &sdma_val);
1934 if (retval)
1935 pr_err("Failed to read SDMA queue counter for queue: %d\n",
1936 q->properties.queue_id);
1937 }
992839ad 1938
64c7f8cf
BG
1939 retval = 0;
1940
1941 /* remove queue from list to prevent rescheduling after preemption */
efeaed4d 1942 dqm_lock(dqm);
992839ad
YS
1943
1944 if (qpd->is_debug) {
1945 /*
1946 * error, currently we do not allow to destroy a queue
1947 * of a currently debugged process
1948 */
1949 retval = -EBUSY;
1950 goto failed_try_destroy_debugged_queue;
1951
1952 }
1953
fdfa090b
OZ
1954 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1955 q->properties.type)];
64c7f8cf 1956
ef568db7
FK
1957 deallocate_doorbell(qpd, q);
1958
d69fd951
MJ
1959 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1960 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1b4670f6 1961 deallocate_sdma_queue(dqm, q);
d69fd951
MJ
1962 pdd->sdma_past_activity_counter += sdma_val;
1963 }
bcea3081 1964
64c7f8cf 1965 list_del(&q->list);
bc920fd4 1966 qpd->queue_count--;
8c07f33e 1967 if (q->properties.is_active) {
8dc1db31 1968 if (!dqm->dev->kfd->shared_resources.enable_mes) {
8c07f33e
PY
1969 decrement_queue_count(dqm, qpd, q);
1970 retval = execute_queues_cpsch(dqm,
7cee6a68
JK
1971 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
1972 USE_DEFAULT_GRACE_PERIOD);
8c07f33e
PY
1973 if (retval == -ETIME)
1974 qpd->reset_wavefronts = true;
1975 } else {
1976 retval = remove_queue_mes(dqm, q, qpd);
1977 }
1978 }
64c7f8cf 1979
b8cbab04
OG
1980 /*
1981 * Unconditionally decrement this counter, regardless of the queue's
1982 * type
1983 */
1984 dqm->total_queue_count--;
1985 pr_debug("Total of %d queues are accountable so far\n",
1986 dqm->total_queue_count);
64c7f8cf 1987
efeaed4d 1988 dqm_unlock(dqm);
64c7f8cf 1989
8636e53c
OZ
1990 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1991 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
89cd9d23 1992
9e827224 1993 return retval;
64c7f8cf 1994
992839ad
YS
1995failed_try_destroy_debugged_queue:
1996
efeaed4d 1997 dqm_unlock(dqm);
64c7f8cf
BG
1998 return retval;
1999}
2000
2001/*
2002 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
2003 * stay in user mode.
2004 */
2005#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
2006/* APE1 limit is inclusive and 64K aligned. */
2007#define APE1_LIMIT_ALIGNMENT 0xFFFF
2008
2009static bool set_cache_memory_policy(struct device_queue_manager *dqm,
2010 struct qcm_process_device *qpd,
2011 enum cache_policy default_policy,
2012 enum cache_policy alternate_policy,
2013 void __user *alternate_aperture_base,
2014 uint64_t alternate_aperture_size)
2015{
bed4f110
FK
2016 bool retval = true;
2017
2018 if (!dqm->asic_ops.set_cache_memory_policy)
2019 return retval;
64c7f8cf 2020
efeaed4d 2021 dqm_lock(dqm);
64c7f8cf
BG
2022
2023 if (alternate_aperture_size == 0) {
2024 /* base > limit disables APE1 */
2025 qpd->sh_mem_ape1_base = 1;
2026 qpd->sh_mem_ape1_limit = 0;
2027 } else {
2028 /*
2029 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
2030 * SH_MEM_APE1_BASE[31:0], 0x0000 }
2031 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
2032 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
2033 * Verify that the base and size parameters can be
2034 * represented in this format and convert them.
2035 * Additionally restrict APE1 to user-mode addresses.
2036 */
2037
2038 uint64_t base = (uintptr_t)alternate_aperture_base;
2039 uint64_t limit = base + alternate_aperture_size - 1;
2040
ab7c1648
KR
2041 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
2042 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
2043 retval = false;
64c7f8cf 2044 goto out;
ab7c1648 2045 }
64c7f8cf
BG
2046
2047 qpd->sh_mem_ape1_base = base >> 16;
2048 qpd->sh_mem_ape1_limit = limit >> 16;
2049 }
2050
bfd5e378 2051 retval = dqm->asic_ops.set_cache_memory_policy(
a22fc854
BG
2052 dqm,
2053 qpd,
2054 default_policy,
2055 alternate_policy,
2056 alternate_aperture_base,
2057 alternate_aperture_size);
64c7f8cf 2058
d146c5a7 2059 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
64c7f8cf
BG
2060 program_sh_mem_settings(dqm, qpd);
2061
79775b62 2062 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
64c7f8cf
BG
2063 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
2064 qpd->sh_mem_ape1_limit);
2065
64c7f8cf 2066out:
efeaed4d 2067 dqm_unlock(dqm);
ab7c1648 2068 return retval;
64c7f8cf
BG
2069}
2070
9fd3f1bf
FK
2071static int process_termination_nocpsch(struct device_queue_manager *dqm,
2072 struct qcm_process_device *qpd)
2073{
a7b2451d 2074 struct queue *q;
9fd3f1bf
FK
2075 struct device_process_node *cur, *next_dpn;
2076 int retval = 0;
32cce8bc 2077 bool found = false;
9fd3f1bf 2078
efeaed4d 2079 dqm_lock(dqm);
9fd3f1bf
FK
2080
2081 /* Clear all user mode queues */
a7b2451d
AL
2082 while (!list_empty(&qpd->queues_list)) {
2083 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
2084 int ret;
2085
a7b2451d
AL
2086 q = list_first_entry(&qpd->queues_list, struct queue, list);
2087 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2088 q->properties.type)];
9fd3f1bf
FK
2089 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
2090 if (ret)
2091 retval = ret;
a7b2451d
AL
2092 dqm_unlock(dqm);
2093 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
2094 dqm_lock(dqm);
9fd3f1bf
FK
2095 }
2096
2097 /* Unregister process */
2098 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2099 if (qpd == cur->qpd) {
2100 list_del(&cur->list);
2101 kfree(cur);
2102 dqm->processes_count--;
32cce8bc 2103 found = true;
9fd3f1bf
FK
2104 break;
2105 }
2106 }
2107
efeaed4d 2108 dqm_unlock(dqm);
32cce8bc
FK
2109
2110 /* Outside the DQM lock because under the DQM lock we can't do
2111 * reclaim or take other locks that others hold while reclaiming.
2112 */
2113 if (found)
2114 kfd_dec_compute_active(dqm->dev);
2115
9fd3f1bf
FK
2116 return retval;
2117}
2118
5df099e8
JC
2119static int get_wave_state(struct device_queue_manager *dqm,
2120 struct queue *q,
2121 void __user *ctl_stack,
2122 u32 *ctl_stack_used_size,
2123 u32 *save_area_used_size)
2124{
4e6c6fc1 2125 struct mqd_manager *mqd_mgr;
5df099e8
JC
2126
2127 dqm_lock(dqm);
2128
d7c0b047 2129 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
5df099e8 2130
63f6e012 2131 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
8dc1db31 2132 q->properties.is_active || !q->device->kfd->cwsr_enabled ||
63f6e012
JK
2133 !mqd_mgr->get_wave_state) {
2134 dqm_unlock(dqm);
2135 return -EINVAL;
5df099e8
JC
2136 }
2137
5df099e8 2138 dqm_unlock(dqm);
63f6e012
JK
2139
2140 /*
2141 * get_wave_state is outside the dqm lock to prevent circular locking
2142 * and the queue should be protected against destruction by the process
2143 * lock.
2144 */
7fe51e6f
MJ
2145 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties,
2146 ctl_stack, ctl_stack_used_size, save_area_used_size);
5df099e8 2147}
9fd3f1bf 2148
42c6c482
DYS
2149static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
2150 const struct queue *q,
3a9822d7
DYS
2151 u32 *mqd_size,
2152 u32 *ctl_stack_size)
42c6c482
DYS
2153{
2154 struct mqd_manager *mqd_mgr;
2155 enum KFD_MQD_TYPE mqd_type =
2156 get_mqd_type_from_queue_type(q->properties.type);
2157
2158 dqm_lock(dqm);
2159 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2160 *mqd_size = mqd_mgr->mqd_size;
3a9822d7
DYS
2161 *ctl_stack_size = 0;
2162
2163 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
2164 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
42c6c482
DYS
2165
2166 dqm_unlock(dqm);
2167}
2168
2169static int checkpoint_mqd(struct device_queue_manager *dqm,
2170 const struct queue *q,
3a9822d7
DYS
2171 void *mqd,
2172 void *ctl_stack)
42c6c482
DYS
2173{
2174 struct mqd_manager *mqd_mgr;
2175 int r = 0;
2176 enum KFD_MQD_TYPE mqd_type =
2177 get_mqd_type_from_queue_type(q->properties.type);
2178
2179 dqm_lock(dqm);
2180
8dc1db31 2181 if (q->properties.is_active || !q->device->kfd->cwsr_enabled) {
42c6c482
DYS
2182 r = -EINVAL;
2183 goto dqm_unlock;
2184 }
2185
2186 mqd_mgr = dqm->mqd_mgrs[mqd_type];
2187 if (!mqd_mgr->checkpoint_mqd) {
2188 r = -EOPNOTSUPP;
2189 goto dqm_unlock;
2190 }
2191
3a9822d7 2192 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
42c6c482
DYS
2193
2194dqm_unlock:
2195 dqm_unlock(dqm);
2196 return r;
2197}
2198
9fd3f1bf
FK
2199static int process_termination_cpsch(struct device_queue_manager *dqm,
2200 struct qcm_process_device *qpd)
2201{
2202 int retval;
56f221b6 2203 struct queue *q;
9fd3f1bf 2204 struct kernel_queue *kq, *kq_next;
8d5f3552 2205 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
2206 struct device_process_node *cur, *next_dpn;
2207 enum kfd_unmap_queues_filter filter =
2208 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
32cce8bc 2209 bool found = false;
9fd3f1bf
FK
2210
2211 retval = 0;
2212
efeaed4d 2213 dqm_lock(dqm);
9fd3f1bf
FK
2214
2215 /* Clean all kernel queues */
2216 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
2217 list_del(&kq->list);
ab4d51d4 2218 decrement_queue_count(dqm, qpd, kq->queue);
9fd3f1bf
FK
2219 qpd->is_debug = false;
2220 dqm->total_queue_count--;
2221 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
2222 }
2223
2224 /* Clear all user mode queues */
2225 list_for_each_entry(q, &qpd->queues_list, list) {
c7637c95 2226 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 2227 deallocate_sdma_queue(dqm, q);
c7637c95 2228 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 2229 deallocate_sdma_queue(dqm, q);
9fd3f1bf 2230
cc009e61 2231 if (q->properties.is_active) {
ab4d51d4 2232 decrement_queue_count(dqm, qpd, q);
9fd3f1bf 2233
8dc1db31 2234 if (dqm->dev->kfd->shared_resources.enable_mes) {
cc009e61
MJ
2235 retval = remove_queue_mes(dqm, q, qpd);
2236 if (retval)
2237 pr_err("Failed to remove queue %d\n",
2238 q->properties.queue_id);
2239 }
2240 }
2241
9fd3f1bf
FK
2242 dqm->total_queue_count--;
2243 }
2244
2245 /* Unregister process */
2246 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
2247 if (qpd == cur->qpd) {
2248 list_del(&cur->list);
2249 kfree(cur);
2250 dqm->processes_count--;
32cce8bc 2251 found = true;
9fd3f1bf
FK
2252 break;
2253 }
2254 }
2255
8dc1db31 2256 if (!dqm->dev->kfd->shared_resources.enable_mes)
7cee6a68 2257 retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD);
cc009e61 2258
73ea648d 2259 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
9fd3f1bf
FK
2260 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
2261 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
2262 qpd->reset_wavefronts = false;
2263 }
2264
89cd9d23 2265 /* Lastly, free mqd resources.
8636e53c 2266 * Do free_mqd() after dqm_unlock to avoid circular locking.
89cd9d23 2267 */
56f221b6 2268 while (!list_empty(&qpd->queues_list)) {
2269 q = list_first_entry(&qpd->queues_list, struct queue, list);
fdfa090b
OZ
2270 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
2271 q->properties.type)];
9fd3f1bf 2272 list_del(&q->list);
bc920fd4 2273 qpd->queue_count--;
56f221b6 2274 dqm_unlock(dqm);
8636e53c 2275 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
56f221b6 2276 dqm_lock(dqm);
9fd3f1bf 2277 }
56f221b6 2278 dqm_unlock(dqm);
2279
2280 /* Outside the DQM lock because under the DQM lock we can't do
2281 * reclaim or take other locks that others hold while reclaiming.
2282 */
2283 if (found)
2284 kfd_dec_compute_active(dqm->dev);
9fd3f1bf 2285
9fd3f1bf
FK
2286 return retval;
2287}
2288
fdfa090b
OZ
2289static int init_mqd_managers(struct device_queue_manager *dqm)
2290{
2291 int i, j;
2292 struct mqd_manager *mqd_mgr;
2293
2294 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
2295 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
2296 if (!mqd_mgr) {
2297 pr_err("mqd manager [%d] initialization failed\n", i);
2298 goto out_free;
2299 }
2300 dqm->mqd_mgrs[i] = mqd_mgr;
2301 }
2302
2303 return 0;
2304
2305out_free:
2306 for (j = 0; j < i; j++) {
2307 kfree(dqm->mqd_mgrs[j]);
2308 dqm->mqd_mgrs[j] = NULL;
2309 }
2310
2311 return -ENOMEM;
2312}
11614c36
OZ
2313
2314/* Allocate one hiq mqd (HWS) and all SDMA mqd in a continuous trunk*/
2315static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
2316{
2317 int retval;
8dc1db31 2318 struct kfd_node *dev = dqm->dev;
11614c36
OZ
2319 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
2320 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
c7637c95 2321 get_num_all_sdma_engines(dqm) *
8dc1db31 2322 dev->kfd->device_info.num_sdma_queues_per_engine +
2f77b9a2 2323 (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size *
c4050ff1 2324 NUM_XCC(dqm->dev->xcc_mask));
11614c36 2325
6bfc7c7e 2326 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
11614c36 2327 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
f2cc50ce 2328 (void *)&(mem_obj->cpu_ptr), false);
11614c36
OZ
2329
2330 return retval;
2331}
2332
8dc1db31 2333struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
64c7f8cf
BG
2334{
2335 struct device_queue_manager *dqm;
2336
79775b62 2337 pr_debug("Loading device queue manager\n");
a22fc854 2338
dbf56ab1 2339 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
64c7f8cf
BG
2340 if (!dqm)
2341 return NULL;
2342
7eb0502a 2343 switch (dev->adev->asic_type) {
d146c5a7
FK
2344 /* HWS is not available on Hawaii. */
2345 case CHIP_HAWAII:
2346 /* HWS depends on CWSR for timely dequeue. CWSR is not
2347 * available on Tonga.
2348 *
2349 * FIXME: This argument also applies to Kaveri.
2350 */
2351 case CHIP_TONGA:
2352 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
2353 break;
2354 default:
2355 dqm->sched_policy = sched_policy;
2356 break;
2357 }
2358
64c7f8cf 2359 dqm->dev = dev;
d146c5a7 2360 switch (dqm->sched_policy) {
64c7f8cf
BG
2361 case KFD_SCHED_POLICY_HWS:
2362 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
2363 /* initialize dqm for cp scheduling */
45c9a5e4
OG
2364 dqm->ops.create_queue = create_queue_cpsch;
2365 dqm->ops.initialize = initialize_cpsch;
2366 dqm->ops.start = start_cpsch;
2367 dqm->ops.stop = stop_cpsch;
09c34e8d 2368 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
2369 dqm->ops.destroy_queue = destroy_queue_cpsch;
2370 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
2371 dqm->ops.register_process = register_process;
2372 dqm->ops.unregister_process = unregister_process;
2373 dqm->ops.uninitialize = uninitialize;
45c9a5e4
OG
2374 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
2375 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
2376 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
9fd3f1bf 2377 dqm->ops.process_termination = process_termination_cpsch;
26103436
FK
2378 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
2379 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
5df099e8 2380 dqm->ops.get_wave_state = get_wave_state;
dec63443 2381 dqm->ops.reset_queues = reset_queues_cpsch;
42c6c482
DYS
2382 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2383 dqm->ops.checkpoint_mqd = checkpoint_mqd;
64c7f8cf
BG
2384 break;
2385 case KFD_SCHED_POLICY_NO_HWS:
2386 /* initialize dqm for no cp scheduling */
45c9a5e4
OG
2387 dqm->ops.start = start_nocpsch;
2388 dqm->ops.stop = stop_nocpsch;
09c34e8d 2389 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
2390 dqm->ops.create_queue = create_queue_nocpsch;
2391 dqm->ops.destroy_queue = destroy_queue_nocpsch;
2392 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
2393 dqm->ops.register_process = register_process;
2394 dqm->ops.unregister_process = unregister_process;
45c9a5e4 2395 dqm->ops.initialize = initialize_nocpsch;
58dcd5bf 2396 dqm->ops.uninitialize = uninitialize;
45c9a5e4 2397 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
9fd3f1bf 2398 dqm->ops.process_termination = process_termination_nocpsch;
26103436
FK
2399 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
2400 dqm->ops.restore_process_queues =
2401 restore_process_queues_nocpsch;
5df099e8 2402 dqm->ops.get_wave_state = get_wave_state;
42c6c482
DYS
2403 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2404 dqm->ops.checkpoint_mqd = checkpoint_mqd;
64c7f8cf
BG
2405 break;
2406 default:
d146c5a7 2407 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
32fa8219 2408 goto out_free;
64c7f8cf
BG
2409 }
2410
7eb0502a 2411 switch (dev->adev->asic_type) {
a22fc854 2412 case CHIP_CARRIZO:
bfd5e378 2413 device_queue_manager_init_vi(&dqm->asic_ops);
300dec95
OG
2414 break;
2415
a22fc854 2416 case CHIP_KAVERI:
bfd5e378 2417 device_queue_manager_init_cik(&dqm->asic_ops);
300dec95 2418 break;
97672cbe
FK
2419
2420 case CHIP_HAWAII:
2421 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
2422 break;
2423
2424 case CHIP_TONGA:
2425 case CHIP_FIJI:
2426 case CHIP_POLARIS10:
2427 case CHIP_POLARIS11:
846a44d7 2428 case CHIP_POLARIS12:
ed81cd6e 2429 case CHIP_VEGAM:
97672cbe
FK
2430 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
2431 break;
bed4f110 2432
e596b903 2433 default:
cc009e61
MJ
2434 if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
2435 device_queue_manager_init_v11(&dqm->asic_ops);
2436 else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
e4804a39
GS
2437 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
2438 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
2439 device_queue_manager_init_v9(&dqm->asic_ops);
2440 else {
2441 WARN(1, "Unexpected ASIC family %u",
7eb0502a 2442 dev->adev->asic_type);
e4804a39
GS
2443 goto out_free;
2444 }
a22fc854
BG
2445 }
2446
fdfa090b
OZ
2447 if (init_mqd_managers(dqm))
2448 goto out_free;
2449
8dc1db31 2450 if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) {
11614c36
OZ
2451 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
2452 goto out_free;
2453 }
2454
32fa8219
FK
2455 if (!dqm->ops.initialize(dqm))
2456 return dqm;
64c7f8cf 2457
32fa8219
FK
2458out_free:
2459 kfree(dqm);
2460 return NULL;
64c7f8cf
BG
2461}
2462
8dc1db31 2463static void deallocate_hiq_sdma_mqd(struct kfd_node *dev,
7fd5a6fb 2464 struct kfd_mem_obj *mqd)
11614c36
OZ
2465{
2466 WARN(!mqd, "No hiq sdma mqd trunk to free");
2467
6bfc7c7e 2468 amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
11614c36
OZ
2469}
2470
64c7f8cf
BG
2471void device_queue_manager_uninit(struct device_queue_manager *dqm)
2472{
5e406012 2473 dqm->ops.stop(dqm);
45c9a5e4 2474 dqm->ops.uninitialize(dqm);
8dc1db31 2475 if (!dqm->dev->kfd->shared_resources.enable_mes)
2e2b9baf 2476 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
64c7f8cf
BG
2477 kfree(dqm);
2478}
851a645e 2479
03e5b167 2480int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
2640c3fa 2481{
2482 struct kfd_process_device *pdd;
2483 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
2484 int ret = 0;
2485
2486 if (!p)
2487 return -EINVAL;
8a491bb3 2488 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2640c3fa 2489 pdd = kfd_get_process_device_data(dqm->dev, p);
2490 if (pdd)
2491 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2492 kfd_unref_process(p);
2493
2494 return ret;
2495}
2496
73ea648d
SL
2497static void kfd_process_hw_exception(struct work_struct *work)
2498{
2499 struct device_queue_manager *dqm = container_of(work,
2500 struct device_queue_manager, hw_exception_work);
6bfc7c7e 2501 amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
73ea648d
SL
2502}
2503
851a645e
FK
2504#if defined(CONFIG_DEBUG_FS)
2505
2506static void seq_reg_dump(struct seq_file *m,
2507 uint32_t (*dump)[2], uint32_t n_regs)
2508{
2509 uint32_t i, count;
2510
2511 for (i = 0, count = 0; i < n_regs; i++) {
2512 if (count == 0 ||
2513 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
2514 seq_printf(m, "%s %08x: %08x",
2515 i ? "\n" : "",
2516 dump[i][0], dump[i][1]);
2517 count = 7;
2518 } else {
2519 seq_printf(m, " %08x", dump[i][1]);
2520 count--;
2521 }
2522 }
2523
2524 seq_puts(m, "\n");
2525}
2526
2527int dqm_debugfs_hqds(struct seq_file *m, void *data)
2528{
2529 struct device_queue_manager *dqm = data;
c4050ff1 2530 uint32_t xcc_mask = dqm->dev->xcc_mask;
851a645e
FK
2531 uint32_t (*dump)[2], n_regs;
2532 int pipe, queue;
c4050ff1 2533 int r = 0, xcc_id;
643e40d4 2534 uint32_t sdma_engine_start;
851a645e 2535
2c99a547 2536 if (!dqm->sched_running) {
2243f493 2537 seq_puts(m, " Device is stopped\n");
2c99a547
PY
2538 return 0;
2539 }
2540
c4050ff1 2541 for_each_inst(xcc_id, xcc_mask) {
e2069a7b 2542 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
c4050ff1
LL
2543 KFD_CIK_HIQ_PIPE,
2544 KFD_CIK_HIQ_QUEUE, &dump,
2545 &n_regs, xcc_id);
e2069a7b 2546 if (!r) {
c4050ff1
LL
2547 seq_printf(
2548 m,
e2069a7b 2549 " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n",
c4050ff1
LL
2550 xcc_id,
2551 KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1,
2552 KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm),
e2069a7b
MJ
2553 KFD_CIK_HIQ_QUEUE);
2554 seq_reg_dump(m, dump, n_regs);
24f48a42 2555
e2069a7b
MJ
2556 kfree(dump);
2557 }
24f48a42 2558
e2069a7b
MJ
2559 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2560 int pipe_offset = pipe * get_queues_per_pipe(dqm);
851a645e 2561
e2069a7b
MJ
2562 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2563 if (!test_bit(pipe_offset + queue,
8dc1db31 2564 dqm->dev->kfd->shared_resources.cp_queue_bitmap))
e2069a7b 2565 continue;
851a645e 2566
c4050ff1
LL
2567 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
2568 pipe, queue,
2569 &dump, &n_regs,
2570 xcc_id);
e2069a7b
MJ
2571 if (r)
2572 break;
851a645e 2573
c4050ff1
LL
2574 seq_printf(m,
2575 " Inst %d, CP Pipe %d, Queue %d\n",
2576 xcc_id, pipe, queue);
e2069a7b 2577 seq_reg_dump(m, dump, n_regs);
851a645e 2578
e2069a7b
MJ
2579 kfree(dump);
2580 }
851a645e
FK
2581 }
2582 }
2583
643e40d4
MJ
2584 sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm);
2585 for (pipe = sdma_engine_start;
2586 pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm));
2587 pipe++) {
d5094189 2588 for (queue = 0;
8dc1db31 2589 queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine;
d5094189 2590 queue++) {
851a645e 2591 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
420185fd 2592 dqm->dev->adev, pipe, queue, &dump, &n_regs);
851a645e
FK
2593 if (r)
2594 break;
2595
2596 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2597 pipe, queue);
2598 seq_reg_dump(m, dump, n_regs);
2599
2600 kfree(dump);
2601 }
2602 }
2603
2604 return r;
2605}
2606
4f942aae 2607int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
a29ec470
SL
2608{
2609 int r = 0;
2610
2611 dqm_lock(dqm);
4f942aae
OZ
2612 r = pm_debugfs_hang_hws(&dqm->packet_mgr);
2613 if (r) {
2614 dqm_unlock(dqm);
2615 return r;
2616 }
a29ec470 2617 dqm->active_runlist = true;
7cee6a68
JK
2618 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
2619 0, USE_DEFAULT_GRACE_PERIOD);
a29ec470
SL
2620 dqm_unlock(dqm);
2621
2622 return r;
2623}
2624
851a645e 2625#endif