drm/amdkfd: add helper to generate cache info from gfx config
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
d87f36a0 1// SPDX-License-Identifier: GPL-2.0 OR MIT
64c7f8cf 2/*
d87f36a0 3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
64c7f8cf
BG
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
26103436
FK
25#include <linux/ratelimit.h>
26#include <linux/printk.h>
64c7f8cf
BG
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/types.h>
64c7f8cf 30#include <linux/bitops.h>
99331a51 31#include <linux/sched.h>
64c7f8cf
BG
32#include "kfd_priv.h"
33#include "kfd_device_queue_manager.h"
34#include "kfd_mqd_manager.h"
35#include "cik_regs.h"
36#include "kfd_kernel_queue.h"
5b87245f 37#include "amdgpu_amdkfd.h"
64c7f8cf
BG
38
39/* Size of the per-pipe EOP queue */
40#define CIK_HPD_EOP_BYTES_LOG2 11
41#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
42
64c7f8cf 43static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
c7b6bac9 44 u32 pasid, unsigned int vmid);
64c7f8cf 45
c4744e24
YZ
46static int execute_queues_cpsch(struct device_queue_manager *dqm,
47 enum kfd_unmap_queues_filter filter,
48 uint32_t filter_param);
7da2bcf8 49static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466 50 enum kfd_unmap_queues_filter filter,
f6b80c04 51 uint32_t filter_param, bool reset);
64c7f8cf 52
60a00956
FK
53static int map_queues_cpsch(struct device_queue_manager *dqm);
54
bcea3081 55static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 56 struct queue *q);
64c7f8cf 57
d39b7737
OZ
58static inline void deallocate_hqd(struct device_queue_manager *dqm,
59 struct queue *q);
60static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
61static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 62 struct queue *q, const uint32_t *restore_sdma_id);
73ea648d
SL
63static void kfd_process_hw_exception(struct work_struct *work);
64
bcea3081
BG
65static inline
66enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
64c7f8cf 67{
1b4670f6 68 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
85d258f9
BG
69 return KFD_MQD_TYPE_SDMA;
70 return KFD_MQD_TYPE_CP;
64c7f8cf
BG
71}
72
d0b63bb3
AR
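/* A pipe is usable by KFD if at least one of its queues is marked in the
 * cp_queue_bitmap shared with amdgpu.
 */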
73static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
74{
75 int i;
0d801007
JC
76 int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
77 + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
d0b63bb3
AR
78
79 /* queue is available for KFD usage if bit is 1 */
80 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
81 if (test_bit(pipe_offset + i,
e6945304 82 dqm->dev->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
83 return true;
84 return false;
85}
86
e6945304 87unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
64ea8f4a 88{
e6945304 89 return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
d0b63bb3 90 KGD_MAX_QUEUES);
64ea8f4a
OG
91}
92
d0b63bb3 93unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
64c7f8cf 94{
d0b63bb3
AR
95 return dqm->dev->shared_resources.num_queue_per_pipe;
96}
97
98unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
99{
d0b63bb3 100 return dqm->dev->shared_resources.num_pipe_per_mec;
64c7f8cf
BG
101}
102
c7637c95
YZ
103static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
104{
ee2f17f4
AL
105 return kfd_get_num_sdma_engines(dqm->dev) +
106 kfd_get_num_xgmi_sdma_engines(dqm->dev);
c7637c95
YZ
107}
108
98bb9222
YZ
109unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
110{
ee2f17f4 111 return kfd_get_num_sdma_engines(dqm->dev) *
f0dc99a6 112 dqm->dev->device_info.num_sdma_queues_per_engine;
98bb9222
YZ
113}
114
1b4670f6
OZ
115unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
116{
ee2f17f4 117 return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
f0dc99a6 118 dqm->dev->device_info.num_sdma_queues_per_engine;
1b4670f6
OZ
119}
120
a22fc854 121void program_sh_mem_settings(struct device_queue_manager *dqm,
64c7f8cf
BG
122 struct qcm_process_device *qpd)
123{
cea405b1 124 return dqm->dev->kfd2kgd->program_sh_mem_settings(
3356c38d 125 dqm->dev->adev, qpd->vmid,
64c7f8cf
BG
126 qpd->sh_mem_config,
127 qpd->sh_mem_ape1_base,
128 qpd->sh_mem_ape1_limit,
129 qpd->sh_mem_bases);
130}
131
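/* Bookkeeping helpers for the queue counters: the total active count, the
 * active CP (compute/DIQ) count and the GWS queue count, which also tracks
 * whether the process currently has a mapped GWS queue.
 */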
204d8998 132static void increment_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
133 struct qcm_process_device *qpd,
134 struct queue *q)
b42902f4
YZ
135{
136 dqm->active_queue_count++;
ab4d51d4
DYS
137 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
138 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 139 dqm->active_cp_queue_count++;
ab4d51d4
DYS
140
141 if (q->properties.is_gws) {
142 dqm->gws_queue_count++;
143 qpd->mapped_gws_queue = true;
144 }
b42902f4
YZ
145}
146
204d8998 147static void decrement_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
148 struct qcm_process_device *qpd,
149 struct queue *q)
b42902f4
YZ
150{
151 dqm->active_queue_count--;
ab4d51d4
DYS
152 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
153 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 154 dqm->active_cp_queue_count--;
ab4d51d4
DYS
155
156 if (q->properties.is_gws) {
157 dqm->gws_queue_count--;
158 qpd->mapped_gws_queue = false;
159 }
b42902f4
YZ
160}
161
5bb6a8fa
DYS
162/*
163 * Allocate a doorbell ID to this queue.
 164 * If restore_id is passed in, make sure the requested ID is valid, then allocate it.
165 */
166static int allocate_doorbell(struct qcm_process_device *qpd,
167 struct queue *q,
168 uint32_t const *restore_id)
ef568db7
FK
169{
170 struct kfd_dev *dev = qpd->dqm->dev;
171
dd0ae064 172 if (!KFD_IS_SOC15(dev)) {
ef568db7
FK
173 /* On pre-SOC15 chips we need to use the queue ID to
174 * preserve the user mode ABI.
175 */
5bb6a8fa
DYS
176
177 if (restore_id && *restore_id != q->properties.queue_id)
178 return -EINVAL;
179
ef568db7 180 q->doorbell_id = q->properties.queue_id;
1b4670f6
OZ
181 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
182 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
234441dd
YZ
183 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
184 * doorbell assignments based on the engine and queue id.
 185 * The doorbell index distance between RLC (2*i) and (2*i+1)
186 * for a SDMA engine is 512.
ef568db7 187 */
234441dd 188
5bb6a8fa
DYS
189 uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
190 uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
191 + (q->properties.sdma_queue_id & 1)
192 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
193 + (q->properties.sdma_queue_id >> 1);
194
195 if (restore_id && *restore_id != valid_id)
196 return -EINVAL;
197 q->doorbell_id = valid_id;
ef568db7 198 } else {
5bb6a8fa
DYS
199 /* For CP queues on SOC15 */
200 if (restore_id) {
201 /* make sure that ID is free */
202 if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
203 return -EINVAL;
204
205 q->doorbell_id = *restore_id;
206 } else {
207 /* or reserve a free doorbell ID */
208 unsigned int found;
209
210 found = find_first_zero_bit(qpd->doorbell_bitmap,
211 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
212 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
213 pr_debug("No doorbells available");
214 return -EBUSY;
215 }
216 set_bit(found, qpd->doorbell_bitmap);
217 q->doorbell_id = found;
ef568db7 218 }
ef568db7
FK
219 }
220
221 q->properties.doorbell_off =
59d7115d 222 kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
ef568db7 223 q->doorbell_id);
ef568db7
FK
224 return 0;
225}
226
227static void deallocate_doorbell(struct qcm_process_device *qpd,
228 struct queue *q)
229{
230 unsigned int old;
231 struct kfd_dev *dev = qpd->dqm->dev;
232
dd0ae064 233 if (!KFD_IS_SOC15(dev) ||
1b4670f6
OZ
234 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
235 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
ef568db7
FK
236 return;
237
238 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
239 WARN_ON(!old);
240}
241
b53ef0df
MJ
242static void program_trap_handler_settings(struct device_queue_manager *dqm,
243 struct qcm_process_device *qpd)
244{
245 if (dqm->dev->kfd2kgd->program_trap_handler_settings)
246 dqm->dev->kfd2kgd->program_trap_handler_settings(
3356c38d 247 dqm->dev->adev, qpd->vmid,
b53ef0df
MJ
248 qpd->tba_addr, qpd->tma_addr);
249}
250
64c7f8cf
BG
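/* Grab a free VMID from the KFD range, map it to the process PASID, program
 * the SH_MEM and trap handler settings, set the page table base and flush the
 * TLB before the first queue of the process runs.
 */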
251static int allocate_vmid(struct device_queue_manager *dqm,
252 struct qcm_process_device *qpd,
253 struct queue *q)
254{
d9d4623c 255 int allocated_vmid = -1, i;
64c7f8cf 256
d9d4623c
YZ
257 for (i = dqm->dev->vm_info.first_vmid_kfd;
258 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
259 if (!dqm->vmid_pasid[i]) {
260 allocated_vmid = i;
261 break;
262 }
263 }
264
265 if (allocated_vmid < 0) {
266 pr_err("no more vmid to allocate\n");
267 return -ENOSPC;
268 }
269
270 pr_debug("vmid allocated: %d\n", allocated_vmid);
271
272 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
64c7f8cf 273
d9d4623c 274 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
64c7f8cf 275
64c7f8cf
BG
276 qpd->vmid = allocated_vmid;
277 q->properties.vmid = allocated_vmid;
278
64c7f8cf
BG
279 program_sh_mem_settings(dqm, qpd);
280
046e674b 281 if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
b53ef0df
MJ
282 program_trap_handler_settings(dqm, qpd);
283
403575c4
FK
284 /* qpd->page_table_base is set earlier when register_process()
285 * is called, i.e. when the first queue is created.
286 */
3356c38d 287 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
403575c4
FK
288 qpd->vmid,
289 qpd->page_table_base);
290 /* invalidate the VM context after pasid and vmid mapping is set up */
3543b055 291 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 292
c637b36a 293 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
3356c38d 294 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
c637b36a 295 qpd->sh_hidden_private_base, qpd->vmid);
d39b7737 296
64c7f8cf
BG
297 return 0;
298}
299
552764b6
FK
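/* Flush the texture cache by submitting the packet manager's release_mem
 * packet through an MEC1 IB; used on ASICs where CP does not flush TC at
 * dequeue.
 */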
300static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
301 struct qcm_process_device *qpd)
302{
9af5379c 303 const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
f6e27ff1 304 int ret;
552764b6
FK
305
306 if (!qpd->ib_kaddr)
307 return -ENOMEM;
308
f6e27ff1
FK
309 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
310 if (ret)
311 return ret;
552764b6 312
6bfc7c7e 313 return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
f6e27ff1
FK
314 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
315 pmf->release_mem_size / sizeof(uint32_t));
552764b6
FK
316}
317
64c7f8cf
BG
318static void deallocate_vmid(struct device_queue_manager *dqm,
319 struct qcm_process_device *qpd,
320 struct queue *q)
321{
552764b6 322 /* On GFX v7, CP doesn't flush TC at dequeue */
7eb0502a 323 if (q->device->adev->asic_type == CHIP_HAWAII)
552764b6
FK
324 if (flush_texture_cache_nocpsch(q->device, qpd))
325 pr_err("Failed to flush TC\n");
326
3543b055 327 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 328
2030664b
BG
329 /* Release the vmid mapping */
330 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
d9d4623c 331 dqm->vmid_pasid[qpd->vmid] = 0;
2030664b 332
64c7f8cf
BG
333 qpd->vmid = 0;
334 q->properties.vmid = 0;
335}
336
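/* Create a user queue in no-HWS mode: allocate a VMID for the first queue of
 * a process, then an HQD or SDMA slot and a doorbell, initialize (or restore)
 * the MQD and load it onto the hardware if the queue is active.
 */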
337static int create_queue_nocpsch(struct device_queue_manager *dqm,
338 struct queue *q,
2485c12c 339 struct qcm_process_device *qpd,
42c6c482 340 const struct kfd_criu_queue_priv_data *qd,
3a9822d7 341 const void *restore_mqd, const void *restore_ctl_stack)
64c7f8cf 342{
d39b7737 343 struct mqd_manager *mqd_mgr;
64c7f8cf
BG
344 int retval;
345
efeaed4d 346 dqm_lock(dqm);
64c7f8cf 347
b8cbab04 348 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 349 pr_warn("Can't create new usermode queue because %d queues were already created\n",
b8cbab04 350 dqm->total_queue_count);
ab7c1648
KR
351 retval = -EPERM;
352 goto out_unlock;
b8cbab04
OG
353 }
354
64c7f8cf
BG
355 if (list_empty(&qpd->queues_list)) {
356 retval = allocate_vmid(dqm, qpd, q);
ab7c1648
KR
357 if (retval)
358 goto out_unlock;
64c7f8cf 359 }
64c7f8cf 360 q->properties.vmid = qpd->vmid;
26103436 361 /*
bb2d2128
FK
362 * Eviction state logic: mark all queues as evicted, even ones
363 * not currently active. Restoring inactive queues later only
364 * updates the is_evicted flag but is a no-op otherwise.
26103436 365 */
bb2d2128 366 q->properties.is_evicted = !!qpd->evicted;
64c7f8cf 367
373d7080
FK
368 q->properties.tba_addr = qpd->tba_addr;
369 q->properties.tma_addr = qpd->tma_addr;
370
d091bc0a
OZ
371 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
372 q->properties.type)];
d39b7737
OZ
373 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
374 retval = allocate_hqd(dqm, q);
375 if (retval)
376 goto deallocate_vmid;
377 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
378 q->pipe, q->queue);
379 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
380 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
2485c12c 381 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
d39b7737
OZ
382 if (retval)
383 goto deallocate_vmid;
384 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
385 }
386
5bb6a8fa 387 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
d39b7737
OZ
388 if (retval)
389 goto out_deallocate_hqd;
390
6a6ef5ee
OZ
391 /* Temporarily release dqm lock to avoid a circular lock dependency */
392 dqm_unlock(dqm);
d091bc0a 393 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
6a6ef5ee
OZ
394 dqm_lock(dqm);
395
d091bc0a
OZ
396 if (!q->mqd_mem_obj) {
397 retval = -ENOMEM;
398 goto out_deallocate_doorbell;
399 }
42c6c482
DYS
400
401 if (qd)
402 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
3a9822d7
DYS
403 &q->properties, restore_mqd, restore_ctl_stack,
404 qd->ctl_stack_size);
42c6c482
DYS
405 else
406 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
407 &q->gart_mqd_addr, &q->properties);
408
d39b7737 409 if (q->properties.is_active) {
2c99a547
PY
410 if (!dqm->sched_running) {
411 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
412 goto add_queue_to_list;
413 }
d39b7737
OZ
414
415 if (WARN(q->process->mm != current->mm,
416 "should only run in user thread"))
417 retval = -EFAULT;
418 else
419 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
420 q->queue, &q->properties, current->mm);
421 if (retval)
d091bc0a 422 goto out_free_mqd;
64c7f8cf
BG
423 }
424
2c99a547 425add_queue_to_list:
64c7f8cf 426 list_add(&q->list, &qpd->queues_list);
bc920fd4 427 qpd->queue_count++;
b6819cec 428 if (q->properties.is_active)
ab4d51d4 429 increment_queue_count(dqm, qpd, q);
64c7f8cf 430
b8cbab04
OG
431 /*
432 * Unconditionally increment this counter, regardless of the queue's
433 * type or whether the queue is active.
434 */
435 dqm->total_queue_count++;
436 pr_debug("Total of %d queues are accountable so far\n",
437 dqm->total_queue_count);
d091bc0a 438 goto out_unlock;
b8cbab04 439
d091bc0a
OZ
440out_free_mqd:
441 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
d39b7737
OZ
442out_deallocate_doorbell:
443 deallocate_doorbell(qpd, q);
444out_deallocate_hqd:
445 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
446 deallocate_hqd(dqm, q);
447 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
448 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
449 deallocate_sdma_queue(dqm, q);
450deallocate_vmid:
451 if (list_empty(&qpd->queues_list))
452 deallocate_vmid(dqm, qpd, q);
ab7c1648 453out_unlock:
efeaed4d 454 dqm_unlock(dqm);
ab7c1648 455 return retval;
64c7f8cf
BG
456}
457
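/* Pick an HQD slot round-robin across enabled pipes, starting at
 * next_pipe_to_allocate, taking the lowest free queue bit on the first pipe
 * that has one.
 */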
458static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
459{
460 bool set;
f0ec5b99 461 int pipe, bit, i;
64c7f8cf
BG
462
463 set = false;
464
8eabaf54
KR
465 for (pipe = dqm->next_pipe_to_allocate, i = 0;
466 i < get_pipes_per_mec(dqm);
d0b63bb3
AR
467 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
468
469 if (!is_pipe_enabled(dqm, 0, pipe))
470 continue;
471
64c7f8cf 472 if (dqm->allocated_queues[pipe] != 0) {
4252bf68
HK
473 bit = ffs(dqm->allocated_queues[pipe]) - 1;
474 dqm->allocated_queues[pipe] &= ~(1 << bit);
64c7f8cf
BG
475 q->pipe = pipe;
476 q->queue = bit;
477 set = true;
478 break;
479 }
480 }
481
991ca8ee 482 if (!set)
64c7f8cf
BG
483 return -EBUSY;
484
79775b62 485 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
64c7f8cf 486 /* horizontal hqd allocation */
d0b63bb3 487 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
64c7f8cf
BG
488
489 return 0;
490}
491
492static inline void deallocate_hqd(struct device_queue_manager *dqm,
493 struct queue *q)
494{
4252bf68 495 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
64c7f8cf
BG
496}
497
5bdd3eb2
MJ
498#define SQ_IND_CMD_CMD_KILL 0x00000003
499#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
500
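/* Look up the VMID currently mapped to the process PASID and broadcast a
 * SQ KILL command to terminate all of its wavefronts.
 */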
501static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
502{
503 int status = 0;
504 unsigned int vmid;
505 uint16_t queried_pasid;
506 union SQ_CMD_BITS reg_sq_cmd;
507 union GRBM_GFX_INDEX_BITS reg_gfx_index;
508 struct kfd_process_device *pdd;
509 int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
510 int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
511
512 reg_sq_cmd.u32All = 0;
513 reg_gfx_index.u32All = 0;
514
515 pr_debug("Killing all process wavefronts\n");
516
d55957fb
YZ
517 if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
518 pr_err("no vmid pasid mapping supported \n");
519 return -EOPNOTSUPP;
520 }
521
5bdd3eb2
MJ
522 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
523 * ATC_VMID15_PASID_MAPPING
524 * to check which VMID the current process is mapped to.
525 */
526
d55957fb
YZ
527 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
528 status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
529 (dev->adev, vmid, &queried_pasid);
c8b0507f 530
d55957fb
YZ
531 if (status && queried_pasid == p->pasid) {
532 pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
533 vmid, p->pasid);
534 break;
5bdd3eb2
MJ
535 }
536 }
537
538 if (vmid > last_vmid_to_scan) {
539 pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
540 return -EFAULT;
541 }
542
543 /* taking the VMID for that process on the safe way using PDD */
544 pdd = kfd_get_process_device_data(dev, p);
545 if (!pdd)
546 return -EFAULT;
547
548 reg_gfx_index.bits.sh_broadcast_writes = 1;
549 reg_gfx_index.bits.se_broadcast_writes = 1;
550 reg_gfx_index.bits.instance_broadcast_writes = 1;
551 reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
552 reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
553 reg_sq_cmd.bits.vm_id = vmid;
554
555 dev->kfd2kgd->wave_control_execute(dev->adev,
556 reg_gfx_index.u32All,
557 reg_sq_cmd.u32All);
558
559 return 0;
560}
561
9fd3f1bf
FK
562/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 563 * to avoid unsynchronized access
564 */
565static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
64c7f8cf
BG
566 struct qcm_process_device *qpd,
567 struct queue *q)
568{
569 int retval;
8d5f3552 570 struct mqd_manager *mqd_mgr;
64c7f8cf 571
fdfa090b
OZ
572 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
573 q->properties.type)];
64c7f8cf 574
c7637c95 575 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
c2e1b3a4 576 deallocate_hqd(dqm, q);
c7637c95 577 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 578 deallocate_sdma_queue(dqm, q);
c7637c95 579 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 580 deallocate_sdma_queue(dqm, q);
c7637c95 581 else {
79775b62 582 pr_debug("q->properties.type %d is invalid\n",
7113cd65 583 q->properties.type);
9fd3f1bf 584 return -EINVAL;
64c7f8cf 585 }
9fd3f1bf 586 dqm->total_queue_count--;
64c7f8cf 587
ef568db7
FK
588 deallocate_doorbell(qpd, q);
589
2c99a547
PY
590 if (!dqm->sched_running) {
591 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
592 return 0;
593 }
594
8d5f3552 595 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
c2e1b3a4 596 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
b90e3fbe 597 KFD_UNMAP_LATENCY_MS,
64c7f8cf 598 q->pipe, q->queue);
9fd3f1bf
FK
599 if (retval == -ETIME)
600 qpd->reset_wavefronts = true;
64c7f8cf 601
64c7f8cf 602 list_del(&q->list);
9fd3f1bf
FK
603 if (list_empty(&qpd->queues_list)) {
604 if (qpd->reset_wavefronts) {
605 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
606 dqm->dev);
607 /* dbgdev_wave_reset_wavefronts has to be called before
608 * deallocate_vmid(), i.e. when vmid is still in use.
609 */
610 dbgdev_wave_reset_wavefronts(dqm->dev,
611 qpd->pqm->process);
612 qpd->reset_wavefronts = false;
613 }
614
64c7f8cf 615 deallocate_vmid(dqm, qpd, q);
9fd3f1bf 616 }
bc920fd4 617 qpd->queue_count--;
ab4d51d4
DYS
618 if (q->properties.is_active)
619 decrement_queue_count(dqm, qpd, q);
b8cbab04 620
9fd3f1bf
FK
621 return retval;
622}
b8cbab04 623
9fd3f1bf
FK
624static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
625 struct qcm_process_device *qpd,
626 struct queue *q)
627{
628 int retval;
d69fd951
MJ
629 uint64_t sdma_val = 0;
630 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
a7b2451d
AL
631 struct mqd_manager *mqd_mgr =
632 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
d69fd951
MJ
633
634 /* Get the SDMA queue stats */
635 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
636 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
818b0324 637 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
d69fd951
MJ
638 &sdma_val);
639 if (retval)
640 pr_err("Failed to read SDMA queue counter for queue: %d\n",
641 q->properties.queue_id);
642 }
9fd3f1bf 643
efeaed4d 644 dqm_lock(dqm);
9fd3f1bf 645 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
d69fd951
MJ
646 if (!retval)
647 pdd->sdma_past_activity_counter += sdma_val;
efeaed4d 648 dqm_unlock(dqm);
9fd3f1bf 649
a7b2451d
AL
650 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
651
64c7f8cf
BG
652 return retval;
653}
654
c6e559eb
LY
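/* Update a queue's properties/MQD. With HWS the queues are unmapped first and
 * a new runlist is mapped afterwards; without HWS the MQD is destroyed and
 * reloaded directly on its HQD. The queue counters are adjusted if the active
 * or GWS state changed.
 */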
655static int update_queue(struct device_queue_manager *dqm, struct queue *q,
656 struct mqd_update_info *minfo)
64c7f8cf 657{
8636e53c 658 int retval = 0;
8d5f3552 659 struct mqd_manager *mqd_mgr;
26103436 660 struct kfd_process_device *pdd;
b6ffbab8 661 bool prev_active = false;
64c7f8cf 662
efeaed4d 663 dqm_lock(dqm);
26103436
FK
664 pdd = kfd_get_process_device_data(q->device, q->process);
665 if (!pdd) {
666 retval = -ENODEV;
667 goto out_unlock;
668 }
fdfa090b
OZ
669 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
670 q->properties.type)];
64c7f8cf 671
60a00956
FK
672 /* Save previous activity state for counters */
673 prev_active = q->properties.is_active;
674
675 /* Make sure the queue is unmapped before updating the MQD */
d146c5a7 676 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
60a00956 677 retval = unmap_queues_cpsch(dqm,
f6b80c04 678 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
894a8293 679 if (retval) {
60a00956
FK
680 pr_err("unmap queue failed\n");
681 goto out_unlock;
682 }
894a8293 683 } else if (prev_active &&
60a00956 684 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
685 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
686 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2c99a547
PY
687
688 if (!dqm->sched_running) {
689 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
690 goto out_unlock;
691 }
692
8d5f3552 693 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
2243f493
RB
694 (dqm->dev->cwsr_enabled ?
695 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
696 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
60a00956
FK
697 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
698 if (retval) {
699 pr_err("destroy mqd failed\n");
700 goto out_unlock;
701 }
702 }
703
c6e559eb 704 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
60a00956 705
096d1a3e
FK
706 /*
707 * check active state vs. the previous state and modify
708 * counter accordingly. map_queues_cpsch uses the
81b820b3 709 * dqm->active_queue_count to determine whether a new runlist must be
096d1a3e
FK
710 * uploaded.
711 */
ab4d51d4
DYS
712 if (q->properties.is_active && !prev_active) {
713 increment_queue_count(dqm, &pdd->qpd, q);
714 } else if (!q->properties.is_active && prev_active) {
715 decrement_queue_count(dqm, &pdd->qpd, q);
716 } else if (q->gws && !q->properties.is_gws) {
b8020b03
JG
717 if (q->properties.is_active) {
718 dqm->gws_queue_count++;
719 pdd->qpd.mapped_gws_queue = true;
720 }
721 q->properties.is_gws = true;
722 } else if (!q->gws && q->properties.is_gws) {
723 if (q->properties.is_active) {
724 dqm->gws_queue_count--;
725 pdd->qpd.mapped_gws_queue = false;
726 }
727 q->properties.is_gws = false;
728 }
729
d146c5a7 730 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
60a00956 731 retval = map_queues_cpsch(dqm);
894a8293 732 else if (q->properties.is_active &&
60a00956 733 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
734 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
735 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1b19aa5a
FK
736 if (WARN(q->process->mm != current->mm,
737 "should only run in user thread"))
738 retval = -EFAULT;
739 else
740 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
741 q->pipe, q->queue,
742 &q->properties, current->mm);
743 }
b6ffbab8 744
ab7c1648 745out_unlock:
efeaed4d 746 dqm_unlock(dqm);
64c7f8cf
BG
747 return retval;
748}
749
26103436
FK
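/* Evict all queues of a process (no-HWS): qpd->evicted is a refcount, so only
 * the first call deactivates queues by destroying their MQDs on the hardware.
 * All queues are marked evicted regardless of their active state.
 */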
750static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
751 struct qcm_process_device *qpd)
752{
753 struct queue *q;
8d5f3552 754 struct mqd_manager *mqd_mgr;
26103436 755 struct kfd_process_device *pdd;
bb2d2128 756 int retval, ret = 0;
26103436 757
efeaed4d 758 dqm_lock(dqm);
26103436
FK
759 if (qpd->evicted++ > 0) /* already evicted, do nothing */
760 goto out;
761
762 pdd = qpd_to_pdd(qpd);
783a25f4 763 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
764 pdd->process->pasid);
765
4327bed2 766 pdd->last_evict_timestamp = get_jiffies_64();
bb2d2128
FK
767 /* Mark all queues as evicted. Deactivate all active queues on
768 * the qpd.
769 */
26103436 770 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 771 q->properties.is_evicted = true;
26103436
FK
772 if (!q->properties.is_active)
773 continue;
bb2d2128 774
fdfa090b
OZ
775 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
776 q->properties.type)];
26103436 777 q->properties.is_active = false;
ab4d51d4 778 decrement_queue_count(dqm, qpd, q);
2c99a547
PY
779
780 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
781 continue;
782
8d5f3552 783 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
2243f493
RB
784 (dqm->dev->cwsr_enabled ?
785 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
786 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
26103436 787 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
bb2d2128
FK
788 if (retval && !ret)
789 /* Return the first error, but keep going to
790 * maintain a consistent eviction state
791 */
792 ret = retval;
26103436
FK
793 }
794
795out:
efeaed4d 796 dqm_unlock(dqm);
bb2d2128 797 return ret;
26103436
FK
798}
799
800static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
801 struct qcm_process_device *qpd)
802{
803 struct queue *q;
804 struct kfd_process_device *pdd;
805 int retval = 0;
806
efeaed4d 807 dqm_lock(dqm);
26103436
FK
808 if (qpd->evicted++ > 0) /* already evicted, do nothing */
809 goto out;
810
811 pdd = qpd_to_pdd(qpd);
783a25f4 812 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
813 pdd->process->pasid);
814
bb2d2128
FK
815 /* Mark all queues as evicted. Deactivate all active queues on
816 * the qpd.
817 */
26103436 818 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 819 q->properties.is_evicted = true;
26103436
FK
820 if (!q->properties.is_active)
821 continue;
bb2d2128 822
26103436 823 q->properties.is_active = false;
ab4d51d4 824 decrement_queue_count(dqm, qpd, q);
26103436 825 }
4327bed2 826 pdd->last_evict_timestamp = get_jiffies_64();
26103436
FK
827 retval = execute_queues_cpsch(dqm,
828 qpd->is_debug ?
829 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
830 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
831
832out:
efeaed4d 833 dqm_unlock(dqm);
26103436
FK
834 return retval;
835}
836
837static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
838 struct qcm_process_device *qpd)
839{
1b19aa5a 840 struct mm_struct *mm = NULL;
26103436 841 struct queue *q;
8d5f3552 842 struct mqd_manager *mqd_mgr;
26103436 843 struct kfd_process_device *pdd;
e715c6d0 844 uint64_t pd_base;
4327bed2 845 uint64_t eviction_duration;
bb2d2128 846 int retval, ret = 0;
26103436
FK
847
848 pdd = qpd_to_pdd(qpd);
849 /* Retrieve PD base */
b40a6ab2 850 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
26103436 851
efeaed4d 852 dqm_lock(dqm);
26103436
FK
853 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
854 goto out;
855 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
856 qpd->evicted--;
857 goto out;
858 }
859
783a25f4 860 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
861 pdd->process->pasid);
862
863 /* Update PD Base in QPD */
864 qpd->page_table_base = pd_base;
e715c6d0 865 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
866
867 if (!list_empty(&qpd->queues_list)) {
868 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
3356c38d 869 dqm->dev->adev,
26103436
FK
870 qpd->vmid,
871 qpd->page_table_base);
3543b055 872 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
26103436
FK
873 }
874
1b19aa5a
FK
875 /* Take a safe reference to the mm_struct, which may otherwise
876 * disappear even while the kfd_process is still referenced.
877 */
878 mm = get_task_mm(pdd->process->lead_thread);
879 if (!mm) {
bb2d2128 880 ret = -EFAULT;
1b19aa5a
FK
881 goto out;
882 }
883
bb2d2128
FK
884 /* Remove the eviction flags. Activate queues that are not
885 * inactive for other reasons.
886 */
26103436 887 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128
FK
888 q->properties.is_evicted = false;
889 if (!QUEUE_IS_ACTIVE(q->properties))
26103436 890 continue;
bb2d2128 891
fdfa090b
OZ
892 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
893 q->properties.type)];
26103436 894 q->properties.is_active = true;
ab4d51d4 895 increment_queue_count(dqm, qpd, q);
2c99a547
PY
896
897 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
898 continue;
899
8d5f3552 900 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1b19aa5a 901 q->queue, &q->properties, mm);
bb2d2128
FK
902 if (retval && !ret)
903 /* Return the first error, but keep going to
904 * maintain a consistent eviction state
905 */
906 ret = retval;
26103436
FK
907 }
908 qpd->evicted = 0;
4327bed2
PC
909 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
910 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
26103436 911out:
1b19aa5a
FK
912 if (mm)
913 mmput(mm);
efeaed4d 914 dqm_unlock(dqm);
bb2d2128 915 return ret;
26103436
FK
916}
917
918static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
919 struct qcm_process_device *qpd)
920{
921 struct queue *q;
922 struct kfd_process_device *pdd;
e715c6d0 923 uint64_t pd_base;
4327bed2 924 uint64_t eviction_duration;
26103436
FK
925 int retval = 0;
926
927 pdd = qpd_to_pdd(qpd);
928 /* Retrieve PD base */
b40a6ab2 929 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
26103436 930
efeaed4d 931 dqm_lock(dqm);
26103436
FK
932 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
933 goto out;
934 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
935 qpd->evicted--;
936 goto out;
937 }
938
783a25f4 939 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
940 pdd->process->pasid);
941
942 /* Update PD Base in QPD */
943 qpd->page_table_base = pd_base;
e715c6d0 944 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
945
946 /* activate all active queues on the qpd */
947 list_for_each_entry(q, &qpd->queues_list, list) {
26103436 948 q->properties.is_evicted = false;
bb2d2128
FK
949 if (!QUEUE_IS_ACTIVE(q->properties))
950 continue;
951
26103436 952 q->properties.is_active = true;
ab4d51d4 953 increment_queue_count(dqm, &pdd->qpd, q);
26103436
FK
954 }
955 retval = execute_queues_cpsch(dqm,
956 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
bb2d2128 957 qpd->evicted = 0;
4327bed2
PC
958 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
959 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
26103436 960out:
efeaed4d 961 dqm_unlock(dqm);
26103436
FK
962 return retval;
963}
964
58dcd5bf 965static int register_process(struct device_queue_manager *dqm,
64c7f8cf
BG
966 struct qcm_process_device *qpd)
967{
968 struct device_process_node *n;
403575c4 969 struct kfd_process_device *pdd;
e715c6d0 970 uint64_t pd_base;
a22fc854 971 int retval;
64c7f8cf 972
dbf56ab1 973 n = kzalloc(sizeof(*n), GFP_KERNEL);
64c7f8cf
BG
974 if (!n)
975 return -ENOMEM;
976
977 n->qpd = qpd;
978
403575c4
FK
979 pdd = qpd_to_pdd(qpd);
980 /* Retrieve PD base */
b40a6ab2 981 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
403575c4 982
efeaed4d 983 dqm_lock(dqm);
64c7f8cf
BG
984 list_add(&n->list, &dqm->queues);
985
403575c4
FK
986 /* Update PD Base in QPD */
987 qpd->page_table_base = pd_base;
e715c6d0 988 pr_debug("Updated PD address to 0x%llx\n", pd_base);
403575c4 989
bfd5e378 990 retval = dqm->asic_ops.update_qpd(dqm, qpd);
a22fc854 991
f756e631 992 dqm->processes_count++;
64c7f8cf 993
efeaed4d 994 dqm_unlock(dqm);
64c7f8cf 995
32cce8bc
FK
996 /* Outside the DQM lock because under the DQM lock we can't do
997 * reclaim or take other locks that others hold while reclaiming.
998 */
999 kfd_inc_compute_active(dqm->dev);
1000
a22fc854 1001 return retval;
64c7f8cf
BG
1002}
1003
58dcd5bf 1004static int unregister_process(struct device_queue_manager *dqm,
64c7f8cf
BG
1005 struct qcm_process_device *qpd)
1006{
1007 int retval;
1008 struct device_process_node *cur, *next;
1009
1e5ec956
OG
1010 pr_debug("qpd->queues_list is %s\n",
1011 list_empty(&qpd->queues_list) ? "empty" : "not empty");
64c7f8cf
BG
1012
1013 retval = 0;
efeaed4d 1014 dqm_lock(dqm);
64c7f8cf
BG
1015
1016 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1017 if (qpd == cur->qpd) {
1018 list_del(&cur->list);
f5d896bb 1019 kfree(cur);
f756e631 1020 dqm->processes_count--;
64c7f8cf
BG
1021 goto out;
1022 }
1023 }
1024 /* qpd not found in dqm list */
1025 retval = 1;
1026out:
efeaed4d 1027 dqm_unlock(dqm);
32cce8bc
FK
1028
1029 /* Outside the DQM lock because under the DQM lock we can't do
1030 * reclaim or take other locks that others hold while reclaiming.
1031 */
1032 if (!retval)
1033 kfd_dec_compute_active(dqm->dev);
1034
64c7f8cf
BG
1035 return retval;
1036}
1037
1038static int
c7b6bac9 1039set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
64c7f8cf
BG
1040 unsigned int vmid)
1041{
cea405b1 1042 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
3356c38d 1043 dqm->dev->adev, pasid, vmid);
64c7f8cf
BG
1044}
1045
2249d558
AL
1046static void init_interrupts(struct device_queue_manager *dqm)
1047{
1048 unsigned int i;
1049
d0b63bb3
AR
1050 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
1051 if (is_pipe_enabled(dqm, 0, i))
3356c38d 1052 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
2249d558
AL
1053}
1054
64c7f8cf
BG
1055static int initialize_nocpsch(struct device_queue_manager *dqm)
1056{
86194cf8 1057 int pipe, queue;
64c7f8cf 1058
79775b62 1059 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1060
ab7c1648
KR
1061 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1062 sizeof(unsigned int), GFP_KERNEL);
1063 if (!dqm->allocated_queues)
1064 return -ENOMEM;
1065
efeaed4d 1066 mutex_init(&dqm->lock_hidden);
64c7f8cf 1067 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1068 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
b42902f4 1069 dqm->active_cp_queue_count = 0;
b8020b03 1070 dqm->gws_queue_count = 0;
64c7f8cf 1071
86194cf8
FK
1072 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1073 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1074
1075 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1076 if (test_bit(pipe_offset + queue,
e6945304 1077 dqm->dev->shared_resources.cp_queue_bitmap))
86194cf8
FK
1078 dqm->allocated_queues[pipe] |= 1 << queue;
1079 }
64c7f8cf 1080
d9d4623c
YZ
1081 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1082
35cdc81b
OZ
1083 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
1084 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
64c7f8cf 1085
64c7f8cf
BG
1086 return 0;
1087}
1088
58dcd5bf 1089static void uninitialize(struct device_queue_manager *dqm)
64c7f8cf 1090{
6f9d54fd
OG
1091 int i;
1092
81b820b3 1093 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
64c7f8cf
BG
1094
1095 kfree(dqm->allocated_queues);
6f9d54fd 1096 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
8d5f3552 1097 kfree(dqm->mqd_mgrs[i]);
efeaed4d 1098 mutex_destroy(&dqm->lock_hidden);
64c7f8cf
BG
1099}
1100
1101static int start_nocpsch(struct device_queue_manager *dqm)
1102{
6f4cb84a
FK
1103 int r = 0;
1104
52055039 1105 pr_info("SW scheduler is used");
2249d558 1106 init_interrupts(dqm);
2243f493 1107
7eb0502a 1108 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
6f4cb84a
FK
1109 r = pm_init(&dqm->packet_mgr, dqm);
1110 if (!r)
1111 dqm->sched_running = true;
2c99a547 1112
6f4cb84a 1113 return r;
64c7f8cf
BG
1114}
1115
1116static int stop_nocpsch(struct device_queue_manager *dqm)
1117{
7eb0502a 1118 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
9af5379c 1119 pm_uninit(&dqm->packet_mgr, false);
2c99a547
PY
1120 dqm->sched_running = false;
1121
64c7f8cf
BG
1122 return 0;
1123}
1124
09c34e8d
FK
1125static void pre_reset(struct device_queue_manager *dqm)
1126{
1127 dqm_lock(dqm);
1128 dqm->is_resetting = true;
1129 dqm_unlock(dqm);
1130}
1131
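/* Reserve an SDMA slot from the PCIe or XGMI bitmap (reusing a saved sdma_id
 * on restore) and derive the engine and per-engine queue id from it.
 */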
bcea3081 1132static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 1133 struct queue *q, const uint32_t *restore_sdma_id)
bcea3081
BG
1134{
1135 int bit;
1136
1b4670f6 1137 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
c7637c95
YZ
1138 if (dqm->sdma_bitmap == 0) {
1139 pr_err("No more SDMA queue to allocate\n");
1b4670f6 1140 return -ENOMEM;
c7637c95
YZ
1141 }
1142
2485c12c
DYS
1143 if (restore_sdma_id) {
1144 /* Re-use existing sdma_id */
1145 if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
1146 pr_err("SDMA queue already in use\n");
1147 return -EBUSY;
1148 }
1149 dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1150 q->sdma_id = *restore_sdma_id;
1151 } else {
1152 /* Find first available sdma_id */
1153 bit = __ffs64(dqm->sdma_bitmap);
1154 dqm->sdma_bitmap &= ~(1ULL << bit);
1155 q->sdma_id = bit;
1156 }
1157
1b4670f6 1158 q->properties.sdma_engine_id = q->sdma_id %
ee2f17f4 1159 kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1160 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1161 kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1162 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
c7637c95
YZ
1163 if (dqm->xgmi_sdma_bitmap == 0) {
1164 pr_err("No more XGMI SDMA queue to allocate\n");
1b4670f6 1165 return -ENOMEM;
c7637c95 1166 }
2485c12c
DYS
1167 if (restore_sdma_id) {
1168 /* Re-use existing sdma_id */
1169 if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
1170 pr_err("SDMA queue already in use\n");
1171 return -EBUSY;
1172 }
1173 dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1174 q->sdma_id = *restore_sdma_id;
1175 } else {
1176 bit = __ffs64(dqm->xgmi_sdma_bitmap);
1177 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1178 q->sdma_id = bit;
1179 }
1b4670f6
OZ
 1180 /* sdma_engine_id is the SDMA id covering
 1181 * both PCIe-optimized SDMAs and XGMI-
 1182 * optimized SDMAs. The calculation below
 1183 * assumes the first N engines are always
 1184 * the PCIe-optimized ones
1185 */
ee2f17f4
AL
1186 q->properties.sdma_engine_id =
1187 kfd_get_num_sdma_engines(dqm->dev) +
1188 q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1189 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1190 kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1191 }
e78579aa 1192
e78579aa
YZ
1193 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1194 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
bcea3081
BG
1195
1196 return 0;
1197}
1198
1199static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 1200 struct queue *q)
bcea3081 1201{
1b4670f6
OZ
1202 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1203 if (q->sdma_id >= get_num_sdma_queues(dqm))
1204 return;
1205 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1206 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1207 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1208 return;
1209 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1210 }
bcea3081
BG
1211}
1212
64c7f8cf
BG
1213/*
1214 * Device Queue Manager implementation for cp scheduler
1215 */
1216
1217static int set_sched_resources(struct device_queue_manager *dqm)
1218{
d0b63bb3 1219 int i, mec;
64c7f8cf 1220 struct scheduling_resources res;
64c7f8cf 1221
44008d7a 1222 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
d0b63bb3
AR
1223
1224 res.queue_mask = 0;
1225 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1226 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1227 / dqm->dev->shared_resources.num_pipe_per_mec;
1228
e6945304 1229 if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
1230 continue;
1231
1232 /* only acquire queues from the first MEC */
1233 if (mec > 0)
1234 continue;
1235
1236 /* This situation may be hit in the future if a new HW
1237 * generation exposes more than 64 queues. If so, the
8eabaf54
KR
1238 * definition of res.queue_mask needs updating
1239 */
1d11ee89 1240 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
d0b63bb3
AR
1241 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1242 break;
1243 }
1244
d09f85d5
YZ
1245 res.queue_mask |= 1ull
1246 << amdgpu_queue_mask_bit_to_set_resource_bit(
56c5977e 1247 dqm->dev->adev, i);
d0b63bb3 1248 }
d9848e14
OZ
1249 res.gws_mask = ~0ull;
1250 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
64c7f8cf 1251
79775b62
KR
1252 pr_debug("Scheduling resources:\n"
1253 "vmid mask: 0x%8X\n"
1254 "queue mask: 0x%8llX\n",
64c7f8cf
BG
1255 res.vmid_mask, res.queue_mask);
1256
9af5379c 1257 return pm_send_set_resources(&dqm->packet_mgr, &res);
64c7f8cf
BG
1258}
1259
1260static int initialize_cpsch(struct device_queue_manager *dqm)
1261{
50e2fc36
AJ
1262 uint64_t num_sdma_queues;
1263 uint64_t num_xgmi_sdma_queues;
1264
79775b62 1265 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1266
efeaed4d 1267 mutex_init(&dqm->lock_hidden);
64c7f8cf 1268 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1269 dqm->active_queue_count = dqm->processes_count = 0;
b42902f4 1270 dqm->active_cp_queue_count = 0;
b8020b03 1271 dqm->gws_queue_count = 0;
64c7f8cf 1272 dqm->active_runlist = false;
50e2fc36
AJ
1273
1274 num_sdma_queues = get_num_sdma_queues(dqm);
1275 if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
1276 dqm->sdma_bitmap = ULLONG_MAX;
1277 else
1278 dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
1279
1280 num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
1281 if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
1282 dqm->xgmi_sdma_bitmap = ULLONG_MAX;
1283 else
1284 dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
64c7f8cf 1285
73ea648d
SL
1286 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1287
bfd5e378 1288 return 0;
64c7f8cf
BG
1289}
1290
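/* Start the HWS path: initialize the packet manager, hand the scheduling
 * resources to the firmware, allocate the preemption fence in GTT and submit
 * the initial runlist.
 */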
1291static int start_cpsch(struct device_queue_manager *dqm)
1292{
64c7f8cf
BG
1293 int retval;
1294
64c7f8cf
BG
1295 retval = 0;
1296
4f942aae 1297 dqm_lock(dqm);
9af5379c 1298 retval = pm_init(&dqm->packet_mgr, dqm);
4eacc26b 1299 if (retval)
64c7f8cf
BG
1300 goto fail_packet_manager_init;
1301
1302 retval = set_sched_resources(dqm);
4eacc26b 1303 if (retval)
64c7f8cf
BG
1304 goto fail_set_sched_resources;
1305
79775b62 1306 pr_debug("Allocating fence memory\n");
64c7f8cf
BG
1307
1308 /* allocate fence memory on the gart */
a86aa3ca
OG
1309 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1310 &dqm->fence_mem);
64c7f8cf 1311
4eacc26b 1312 if (retval)
64c7f8cf
BG
1313 goto fail_allocate_vidmem;
1314
b010affe 1315 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
64c7f8cf 1316 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
2249d558
AL
1317
1318 init_interrupts(dqm);
1319
73ea648d
SL
 1320 /* clear hang status when the driver tries to start the hw scheduler */
1321 dqm->is_hws_hang = false;
09c34e8d 1322 dqm->is_resetting = false;
2c99a547 1323 dqm->sched_running = true;
c4744e24 1324 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
efeaed4d 1325 dqm_unlock(dqm);
64c7f8cf
BG
1326
1327 return 0;
1328fail_allocate_vidmem:
1329fail_set_sched_resources:
9af5379c 1330 pm_uninit(&dqm->packet_mgr, false);
64c7f8cf 1331fail_packet_manager_init:
4f942aae 1332 dqm_unlock(dqm);
64c7f8cf
BG
1333 return retval;
1334}
1335
1336static int stop_cpsch(struct device_queue_manager *dqm)
1337{
c2a77fde
FK
1338 bool hanging;
1339
efeaed4d 1340 dqm_lock(dqm);
c96cb659 1341 if (!dqm->sched_running) {
1342 dqm_unlock(dqm);
1343 return 0;
1344 }
1345
c2a77fde 1346 if (!dqm->is_hws_hang)
f6b80c04 1347 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
c2a77fde 1348 hanging = dqm->is_hws_hang || dqm->is_resetting;
2c99a547 1349 dqm->sched_running = false;
64c7f8cf 1350
9af5379c 1351 pm_release_ib(&dqm->packet_mgr);
087d7641 1352
a86aa3ca 1353 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
9af5379c 1354 pm_uninit(&dqm->packet_mgr, hanging);
4f942aae 1355 dqm_unlock(dqm);
64c7f8cf
BG
1356
1357 return 0;
1358}
1359
1360static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1361 struct kernel_queue *kq,
1362 struct qcm_process_device *qpd)
1363{
efeaed4d 1364 dqm_lock(dqm);
b8cbab04 1365 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 1366 pr_warn("Can't create new kernel queue because %d queues were already created\n",
b8cbab04 1367 dqm->total_queue_count);
efeaed4d 1368 dqm_unlock(dqm);
b8cbab04
OG
1369 return -EPERM;
1370 }
1371
1372 /*
1373 * Unconditionally increment this counter, regardless of the queue's
1374 * type or whether the queue is active.
1375 */
1376 dqm->total_queue_count++;
1377 pr_debug("Total of %d queues are accountable so far\n",
1378 dqm->total_queue_count);
1379
64c7f8cf 1380 list_add(&kq->list, &qpd->priv_queue_list);
ab4d51d4 1381 increment_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1382 qpd->is_debug = true;
c4744e24 1383 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
efeaed4d 1384 dqm_unlock(dqm);
64c7f8cf
BG
1385
1386 return 0;
1387}
1388
1389static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1390 struct kernel_queue *kq,
1391 struct qcm_process_device *qpd)
1392{
efeaed4d 1393 dqm_lock(dqm);
64c7f8cf 1394 list_del(&kq->list);
ab4d51d4 1395 decrement_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1396 qpd->is_debug = false;
c4744e24 1397 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
b8cbab04
OG
1398 /*
1399 * Unconditionally decrement this counter, regardless of the queue's
1400 * type.
1401 */
8b58f261 1402 dqm->total_queue_count--;
b8cbab04
OG
1403 pr_debug("Total of %d queues are accountable so far\n",
1404 dqm->total_queue_count);
efeaed4d 1405 dqm_unlock(dqm);
64c7f8cf
BG
1406}
1407
1408static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
2485c12c 1409 struct qcm_process_device *qpd,
42c6c482 1410 const struct kfd_criu_queue_priv_data *qd,
3a9822d7 1411 const void *restore_mqd, const void *restore_ctl_stack)
64c7f8cf
BG
1412{
1413 int retval;
8d5f3552 1414 struct mqd_manager *mqd_mgr;
64c7f8cf 1415
b8cbab04 1416 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 1417 pr_warn("Can't create new usermode queue because %d queues were already created\n",
b8cbab04 1418 dqm->total_queue_count);
70d488fb
OZ
1419 retval = -EPERM;
1420 goto out;
b8cbab04
OG
1421 }
1422
1b4670f6
OZ
1423 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1424 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
38bb4226 1425 dqm_lock(dqm);
2485c12c 1426 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
38bb4226 1427 dqm_unlock(dqm);
894a8293 1428 if (retval)
70d488fb 1429 goto out;
e139cd2a 1430 }
ef568db7 1431
5bb6a8fa 1432 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
ef568db7
FK
1433 if (retval)
1434 goto out_deallocate_sdma_queue;
1435
70d488fb
OZ
1436 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1437 q->properties.type)];
70df8273 1438
eec0b4cf
OZ
1439 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
1440 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1441 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
373d7080
FK
1442 q->properties.tba_addr = qpd->tba_addr;
1443 q->properties.tma_addr = qpd->tma_addr;
70d488fb
OZ
1444 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
1445 if (!q->mqd_mem_obj) {
1446 retval = -ENOMEM;
1447 goto out_deallocate_doorbell;
1448 }
70df8273
EH
1449
1450 dqm_lock(dqm);
1451 /*
1452 * Eviction state logic: mark all queues as evicted, even ones
1453 * not currently active. Restoring inactive queues later only
1454 * updates the is_evicted flag but is a no-op otherwise.
1455 */
1456 q->properties.is_evicted = !!qpd->evicted;
42c6c482
DYS
1457
1458 if (qd)
1459 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
3a9822d7
DYS
1460 &q->properties, restore_mqd, restore_ctl_stack,
1461 qd->ctl_stack_size);
42c6c482
DYS
1462 else
1463 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
1464 &q->gart_mqd_addr, &q->properties);
89cd9d23 1465
64c7f8cf 1466 list_add(&q->list, &qpd->queues_list);
bc920fd4 1467 qpd->queue_count++;
f38abc15 1468
64c7f8cf 1469 if (q->properties.is_active) {
ab4d51d4 1470 increment_queue_count(dqm, qpd, q);
b42902f4 1471
66a5710b 1472 execute_queues_cpsch(dqm,
c4744e24 1473 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
64c7f8cf
BG
1474 }
1475
b8cbab04
OG
1476 /*
1477 * Unconditionally increment this counter, regardless of the queue's
1478 * type or whether the queue is active.
1479 */
1480 dqm->total_queue_count++;
1481
1482 pr_debug("Total of %d queues are accountable so far\n",
1483 dqm->total_queue_count);
1484
efeaed4d 1485 dqm_unlock(dqm);
72a01d23
FK
1486 return retval;
1487
70d488fb
OZ
1488out_deallocate_doorbell:
1489 deallocate_doorbell(qpd, q);
72a01d23 1490out_deallocate_sdma_queue:
1b4670f6 1491 if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
38bb4226
OZ
1492 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1493 dqm_lock(dqm);
1b4670f6 1494 deallocate_sdma_queue(dqm, q);
38bb4226
OZ
1495 dqm_unlock(dqm);
1496 }
70d488fb 1497out:
64c7f8cf
BG
1498 return retval;
1499}
1500
b010affe
QH
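/* Poll (yielding the CPU) until the CP writes fence_value to fence_addr or
 * the timeout expires; used to confirm that queue unmapping completed.
 */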
1501int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
1502 uint64_t fence_value,
8c72c3d7 1503 unsigned int timeout_ms)
64c7f8cf 1504{
8c72c3d7 1505 unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
64c7f8cf
BG
1506
1507 while (*fence_addr != fence_value) {
8c72c3d7 1508 if (time_after(jiffies, end_jiffies)) {
79775b62 1509 pr_err("qcm fence wait loop timeout expired\n");
0e9a860c
YZ
1510 /* In HWS case, this is used to halt the driver thread
1511 * in order not to mess up CP states before doing
1512 * scandumps for FW debugging.
1513 */
1514 while (halt_if_hws_hang)
1515 schedule();
1516
64c7f8cf
BG
1517 return -ETIME;
1518 }
99331a51 1519 schedule();
64c7f8cf
BG
1520 }
1521
1522 return 0;
1523}
1524
60a00956
FK
1525/* dqm->lock mutex has to be locked before calling this function */
1526static int map_queues_cpsch(struct device_queue_manager *dqm)
1527{
1528 int retval;
1529
2c99a547
PY
1530 if (!dqm->sched_running)
1531 return 0;
81b820b3 1532 if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
60a00956 1533 return 0;
60a00956
FK
1534 if (dqm->active_runlist)
1535 return 0;
1536
9af5379c 1537 retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
14328aa5 1538 pr_debug("%s sent runlist\n", __func__);
60a00956
FK
1539 if (retval) {
1540 pr_err("failed to execute runlist\n");
1541 return retval;
1542 }
1543 dqm->active_runlist = true;
1544
1545 return retval;
1546}
1547
ac30c783 1548/* dqm->lock mutex has to be locked before calling this function */
7da2bcf8 1549static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466 1550 enum kfd_unmap_queues_filter filter,
f6b80c04 1551 uint32_t filter_param, bool reset)
64c7f8cf 1552{
9fd3f1bf 1553 int retval = 0;
51a0f459 1554 struct mqd_manager *mqd_mgr;
64c7f8cf 1555
2c99a547
PY
1556 if (!dqm->sched_running)
1557 return 0;
b8c20c74 1558 if (dqm->is_hws_hang || dqm->is_resetting)
73ea648d 1559 return -EIO;
991ca8ee 1560 if (!dqm->active_runlist)
ac30c783 1561 return retval;
bcea3081 1562
d2cb0b21 1563 retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
4eacc26b 1564 if (retval)
ac30c783 1565 return retval;
64c7f8cf
BG
1566
1567 *dqm->fence_addr = KFD_FENCE_INIT;
9af5379c 1568 pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
64c7f8cf
BG
1569 KFD_FENCE_COMPLETED);
1570 /* should be timed out */
c3447e81 1571 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
14328aa5 1572 queue_preemption_timeout_ms);
09c34e8d
FK
1573 if (retval) {
1574 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1575 dqm->is_hws_hang = true;
1576 /* It's possible we're detecting a HWS hang in the
1577 * middle of a GPU reset. No need to schedule another
1578 * reset in this case.
1579 */
1580 if (!dqm->is_resetting)
1581 schedule_work(&dqm->hw_exception_work);
ac30c783 1582 return retval;
09c34e8d 1583 }
9fd3f1bf 1584
51a0f459
OZ
1585 /* In the current MEC firmware implementation, if compute queue
1586 * doesn't response to the preemption request in time, HIQ will
1587 * abandon the unmap request without returning any timeout error
1588 * to driver. Instead, MEC firmware will log the doorbell of the
1589 * unresponding compute queue to HIQ.MQD.queue_doorbell_id fields.
1590 * To make sure the queue unmap was successful, driver need to
1591 * check those fields
1592 */
1593 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
9af5379c 1594 if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
51a0f459
OZ
1595 pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
1596 while (halt_if_hws_hang)
1597 schedule();
1598 return -ETIME;
1599 }
1600
9af5379c 1601 pm_release_ib(&dqm->packet_mgr);
64c7f8cf
BG
1602 dqm->active_runlist = false;
1603
64c7f8cf
BG
1604 return retval;
1605}
1606
dec63443
TZ
1607/* only for compute queue */
1608static int reset_queues_cpsch(struct device_queue_manager *dqm,
1609 uint16_t pasid)
1610{
1611 int retval;
1612
1613 dqm_lock(dqm);
1614
1615 retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
1616 pasid, true);
1617
1618 dqm_unlock(dqm);
1619 return retval;
1620}
1621
ac30c783 1622/* dqm->lock mutex has to be locked before calling this function */
c4744e24
YZ
1623static int execute_queues_cpsch(struct device_queue_manager *dqm,
1624 enum kfd_unmap_queues_filter filter,
1625 uint32_t filter_param)
64c7f8cf
BG
1626{
1627 int retval;
1628
73ea648d
SL
1629 if (dqm->is_hws_hang)
1630 return -EIO;
f6b80c04 1631 retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
09c34e8d 1632 if (retval)
ac30c783 1633 return retval;
64c7f8cf 1634
60a00956 1635 return map_queues_cpsch(dqm);
64c7f8cf
BG
1636}
1637
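/* Destroy a user queue under HWS: save SDMA activity counters if needed,
 * unlink the queue, release its doorbell and SDMA slot, and resubmit the
 * runlist without it. The MQD is freed after dropping the DQM lock to avoid
 * circular locking.
 */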
1638static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1639 struct qcm_process_device *qpd,
1640 struct queue *q)
1641{
1642 int retval;
8d5f3552 1643 struct mqd_manager *mqd_mgr;
d69fd951
MJ
1644 uint64_t sdma_val = 0;
1645 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
1646
1647 /* Get the SDMA queue stats */
1648 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1649 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
818b0324 1650 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
d69fd951
MJ
1651 &sdma_val);
1652 if (retval)
1653 pr_err("Failed to read SDMA queue counter for queue: %d\n",
1654 q->properties.queue_id);
1655 }
992839ad 1656
64c7f8cf
BG
1657 retval = 0;
1658
1659 /* remove queue from list to prevent rescheduling after preemption */
efeaed4d 1660 dqm_lock(dqm);
992839ad
YS
1661
1662 if (qpd->is_debug) {
1663 /*
 1664		 * error: we currently do not allow destroying a queue
 1665		 * of a process that is being debugged
1666 */
1667 retval = -EBUSY;
1668 goto failed_try_destroy_debugged_queue;
1669
1670 }
1671
fdfa090b
OZ
1672 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1673 q->properties.type)];
64c7f8cf 1674
ef568db7
FK
1675 deallocate_doorbell(qpd, q);
1676
d69fd951
MJ
1677 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
1678 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1b4670f6 1679 deallocate_sdma_queue(dqm, q);
d69fd951
MJ
1680 pdd->sdma_past_activity_counter += sdma_val;
1681 }
bcea3081 1682
64c7f8cf 1683 list_del(&q->list);
bc920fd4 1684 qpd->queue_count--;
40a526dc 1685 if (q->properties.is_active) {
ab4d51d4 1686 decrement_queue_count(dqm, qpd, q);
40a526dc 1687 retval = execute_queues_cpsch(dqm,
9fd3f1bf 1688 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
40a526dc
YZ
1689 if (retval == -ETIME)
1690 qpd->reset_wavefronts = true;
1691 }
64c7f8cf 1692
b8cbab04
OG
1693 /*
1694 * Unconditionally decrement this counter, regardless of the queue's
1695 * type
1696 */
1697 dqm->total_queue_count--;
1698 pr_debug("Total of %d queues are accountable so far\n",
1699 dqm->total_queue_count);
64c7f8cf 1700
efeaed4d 1701 dqm_unlock(dqm);
64c7f8cf 1702
8636e53c
OZ
1703 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1704 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
89cd9d23 1705
9e827224 1706 return retval;
64c7f8cf 1707
992839ad
YS
1708failed_try_destroy_debugged_queue:
1709
efeaed4d 1710 dqm_unlock(dqm);
64c7f8cf
BG
1711 return retval;
1712}
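
/*
 * Editorial sketch (not the driver code): why destroy_queue_cpsch() and the
 * process-termination paths call free_mqd() only after dqm_unlock().  As the
 * comments above note, freeing an MQD can trigger memory reclaim, and reclaim
 * may take locks that other tasks hold while they in turn wait for the DQM
 * lock, so freeing under the DQM lock risks circular locking.  The pattern is
 * "unlink under the lock, drop the lock, then free".  All types and names
 * below (toy_dqm, toy_queue, ...) are invented for illustration.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_queue {
	struct toy_queue *next;
	void *mqd;
};

struct toy_dqm {
	pthread_mutex_t lock;		/* plays the role of dqm->lock */
	struct toy_queue *queues;
};

static void toy_destroy_queue(struct toy_dqm *dqm, struct toy_queue *q)
{
	pthread_mutex_lock(&dqm->lock);

	/* Unlink while the list is protected, like list_del(&q->list). */
	struct toy_queue **pp = &dqm->queues;
	while (*pp && *pp != q)
		pp = &(*pp)->next;
	if (*pp)
		*pp = q->next;

	pthread_mutex_unlock(&dqm->lock);

	/* Free only after the lock is dropped ("free_mqd after dqm_unlock"). */
	free(q->mqd);
	free(q);
}

int main(void)
{
	struct toy_dqm dqm = { .lock = PTHREAD_MUTEX_INITIALIZER, .queues = NULL };
	struct toy_queue *q = calloc(1, sizeof(*q));

	q->mqd = malloc(64);
	q->next = dqm.queues;
	dqm.queues = q;

	toy_destroy_queue(&dqm, q);
	printf("queue destroyed without holding the lock across free()\n");
	return 0;
}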
1713
1714/*
1715 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1716 * stay in user mode.
1717 */
1718#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1719/* APE1 limit is inclusive and 64K aligned. */
1720#define APE1_LIMIT_ALIGNMENT 0xFFFF
1721
1722static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1723 struct qcm_process_device *qpd,
1724 enum cache_policy default_policy,
1725 enum cache_policy alternate_policy,
1726 void __user *alternate_aperture_base,
1727 uint64_t alternate_aperture_size)
1728{
bed4f110
FK
1729 bool retval = true;
1730
1731 if (!dqm->asic_ops.set_cache_memory_policy)
1732 return retval;
64c7f8cf 1733
efeaed4d 1734 dqm_lock(dqm);
64c7f8cf
BG
1735
1736 if (alternate_aperture_size == 0) {
1737 /* base > limit disables APE1 */
1738 qpd->sh_mem_ape1_base = 1;
1739 qpd->sh_mem_ape1_limit = 0;
1740 } else {
1741 /*
1742 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1743 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1744 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1745 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1746 * Verify that the base and size parameters can be
1747 * represented in this format and convert them.
1748 * Additionally restrict APE1 to user-mode addresses.
1749 */
1750
1751 uint64_t base = (uintptr_t)alternate_aperture_base;
1752 uint64_t limit = base + alternate_aperture_size - 1;
1753
ab7c1648
KR
1754 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1755 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1756 retval = false;
64c7f8cf 1757 goto out;
ab7c1648 1758 }
64c7f8cf
BG
1759
1760 qpd->sh_mem_ape1_base = base >> 16;
1761 qpd->sh_mem_ape1_limit = limit >> 16;
1762 }
1763
bfd5e378 1764 retval = dqm->asic_ops.set_cache_memory_policy(
a22fc854
BG
1765 dqm,
1766 qpd,
1767 default_policy,
1768 alternate_policy,
1769 alternate_aperture_base,
1770 alternate_aperture_size);
64c7f8cf 1771
d146c5a7 1772 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
64c7f8cf
BG
1773 program_sh_mem_settings(dqm, qpd);
1774
79775b62 1775 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
64c7f8cf
BG
1776 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1777 qpd->sh_mem_ape1_limit);
1778
64c7f8cf 1779out:
efeaed4d 1780 dqm_unlock(dqm);
ab7c1648 1781 return retval;
64c7f8cf
BG
1782}
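
/*
 * Editorial sketch (stand-alone, not driver code): the APE1 base/limit
 * arithmetic used by set_cache_memory_policy() above.  Per the comments in
 * that function, the hardware extends SH_MEM_APE1_BASE/LIMIT to 64 bits by
 * appending 0x0000/0xFFFF, so a valid aperture must start on a 64K boundary,
 * end on a 64K-1 boundary, and keep the high bits zero to stay in user mode.
 * The helper name ape1_encode() is invented for illustration.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APE1_FIXED_BITS_MASK	0xFFFF80000000FFFFULL
#define APE1_LIMIT_ALIGNMENT	0xFFFFULL

static bool ape1_encode(uint64_t base, uint64_t size,
			uint32_t *reg_base, uint32_t *reg_limit)
{
	if (size == 0) {
		*reg_base = 1;	/* base > limit disables APE1 */
		*reg_limit = 0;
		return true;
	}

	uint64_t limit = base + size - 1;

	if (limit <= base ||
	    (base & APE1_FIXED_BITS_MASK) != 0 ||
	    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
		return false;

	*reg_base = (uint32_t)(base >> 16);	/* SH_MEM_APE1_BASE  */
	*reg_limit = (uint32_t)(limit >> 16);	/* SH_MEM_APE1_LIMIT */
	return true;
}

int main(void)
{
	uint32_t b, l;

	/* 64K-aligned base, 1 MiB aperture: valid. */
	if (ape1_encode(0x200000ULL, 0x100000ULL, &b, &l))
		printf("APE1_BASE=0x%" PRIx32 " APE1_LIMIT=0x%" PRIx32 "\n", b, l);

	/* Misaligned base: rejected, mirroring the failure path above. */
	if (!ape1_encode(0x200010ULL, 0x100000ULL, &b, &l))
		printf("misaligned aperture rejected\n");

	return 0;
}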
1783
9fd3f1bf
FK
1784static int process_termination_nocpsch(struct device_queue_manager *dqm,
1785 struct qcm_process_device *qpd)
1786{
a7b2451d 1787 struct queue *q;
9fd3f1bf
FK
1788 struct device_process_node *cur, *next_dpn;
1789 int retval = 0;
32cce8bc 1790 bool found = false;
9fd3f1bf 1791
efeaed4d 1792 dqm_lock(dqm);
9fd3f1bf
FK
1793
1794 /* Clear all user mode queues */
a7b2451d
AL
1795 while (!list_empty(&qpd->queues_list)) {
1796 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
1797 int ret;
1798
a7b2451d
AL
1799 q = list_first_entry(&qpd->queues_list, struct queue, list);
1800 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1801 q->properties.type)];
9fd3f1bf
FK
1802 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1803 if (ret)
1804 retval = ret;
a7b2451d
AL
1805 dqm_unlock(dqm);
1806 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
1807 dqm_lock(dqm);
9fd3f1bf
FK
1808 }
1809
1810 /* Unregister process */
1811 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1812 if (qpd == cur->qpd) {
1813 list_del(&cur->list);
1814 kfree(cur);
1815 dqm->processes_count--;
32cce8bc 1816 found = true;
9fd3f1bf
FK
1817 break;
1818 }
1819 }
1820
efeaed4d 1821 dqm_unlock(dqm);
32cce8bc
FK
1822
1823 /* Outside the DQM lock because under the DQM lock we can't do
1824 * reclaim or take other locks that others hold while reclaiming.
1825 */
1826 if (found)
1827 kfd_dec_compute_active(dqm->dev);
1828
9fd3f1bf
FK
1829 return retval;
1830}
1831
5df099e8
JC
1832static int get_wave_state(struct device_queue_manager *dqm,
1833 struct queue *q,
1834 void __user *ctl_stack,
1835 u32 *ctl_stack_used_size,
1836 u32 *save_area_used_size)
1837{
4e6c6fc1 1838 struct mqd_manager *mqd_mgr;
5df099e8
JC
1839
1840 dqm_lock(dqm);
1841
d7c0b047 1842 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
5df099e8 1843
63f6e012
JK
1844 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1845 q->properties.is_active || !q->device->cwsr_enabled ||
1846 !mqd_mgr->get_wave_state) {
1847 dqm_unlock(dqm);
1848 return -EINVAL;
5df099e8
JC
1849 }
1850
5df099e8 1851 dqm_unlock(dqm);
63f6e012
JK
1852
1853 /*
1854 * get_wave_state is outside the dqm lock to prevent circular locking
1855 * and the queue should be protected against destruction by the process
1856 * lock.
1857 */
1858 return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1859 ctl_stack_used_size, save_area_used_size);
5df099e8 1860}
9fd3f1bf 1861
42c6c482
DYS
1862static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
1863 const struct queue *q,
3a9822d7
DYS
1864 u32 *mqd_size,
1865 u32 *ctl_stack_size)
42c6c482
DYS
1866{
1867 struct mqd_manager *mqd_mgr;
1868 enum KFD_MQD_TYPE mqd_type =
1869 get_mqd_type_from_queue_type(q->properties.type);
1870
1871 dqm_lock(dqm);
1872 mqd_mgr = dqm->mqd_mgrs[mqd_type];
1873 *mqd_size = mqd_mgr->mqd_size;
3a9822d7
DYS
1874 *ctl_stack_size = 0;
1875
1876 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
1877 mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
42c6c482
DYS
1878
1879 dqm_unlock(dqm);
1880}
1881
1882static int checkpoint_mqd(struct device_queue_manager *dqm,
1883 const struct queue *q,
3a9822d7
DYS
1884 void *mqd,
1885 void *ctl_stack)
42c6c482
DYS
1886{
1887 struct mqd_manager *mqd_mgr;
1888 int r = 0;
1889 enum KFD_MQD_TYPE mqd_type =
1890 get_mqd_type_from_queue_type(q->properties.type);
1891
1892 dqm_lock(dqm);
1893
1894 if (q->properties.is_active || !q->device->cwsr_enabled) {
1895 r = -EINVAL;
1896 goto dqm_unlock;
1897 }
1898
1899 mqd_mgr = dqm->mqd_mgrs[mqd_type];
1900 if (!mqd_mgr->checkpoint_mqd) {
1901 r = -EOPNOTSUPP;
1902 goto dqm_unlock;
1903 }
1904
3a9822d7 1905 mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);
42c6c482
DYS
1906
1907dqm_unlock:
1908 dqm_unlock(dqm);
1909 return r;
1910}
1911
9fd3f1bf
FK
1912static int process_termination_cpsch(struct device_queue_manager *dqm,
1913 struct qcm_process_device *qpd)
1914{
1915 int retval;
56f221b6 1916 struct queue *q;
9fd3f1bf 1917 struct kernel_queue *kq, *kq_next;
8d5f3552 1918 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
1919 struct device_process_node *cur, *next_dpn;
1920 enum kfd_unmap_queues_filter filter =
1921 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
32cce8bc 1922 bool found = false;
9fd3f1bf
FK
1923
1924 retval = 0;
1925
efeaed4d 1926 dqm_lock(dqm);
9fd3f1bf
FK
1927
1928 /* Clean all kernel queues */
1929 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1930 list_del(&kq->list);
ab4d51d4 1931 decrement_queue_count(dqm, qpd, kq->queue);
9fd3f1bf
FK
1932 qpd->is_debug = false;
1933 dqm->total_queue_count--;
1934 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1935 }
1936
1937 /* Clear all user mode queues */
1938 list_for_each_entry(q, &qpd->queues_list, list) {
c7637c95 1939 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 1940 deallocate_sdma_queue(dqm, q);
c7637c95 1941 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 1942 deallocate_sdma_queue(dqm, q);
9fd3f1bf 1943
ab4d51d4
DYS
1944 if (q->properties.is_active)
1945 decrement_queue_count(dqm, qpd, q);
9fd3f1bf
FK
1946
1947 dqm->total_queue_count--;
1948 }
1949
1950 /* Unregister process */
1951 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1952 if (qpd == cur->qpd) {
1953 list_del(&cur->list);
1954 kfree(cur);
1955 dqm->processes_count--;
32cce8bc 1956 found = true;
9fd3f1bf
FK
1957 break;
1958 }
1959 }
1960
1961 retval = execute_queues_cpsch(dqm, filter, 0);
73ea648d 1962 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
9fd3f1bf
FK
1963 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1964 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1965 qpd->reset_wavefronts = false;
1966 }
1967
89cd9d23 1968 /* Lastly, free mqd resources.
8636e53c 1969 * Do free_mqd() after dqm_unlock to avoid circular locking.
89cd9d23 1970 */
56f221b6 1971 while (!list_empty(&qpd->queues_list)) {
1972 q = list_first_entry(&qpd->queues_list, struct queue, list);
fdfa090b
OZ
1973 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1974 q->properties.type)];
9fd3f1bf 1975 list_del(&q->list);
bc920fd4 1976 qpd->queue_count--;
56f221b6 1977 dqm_unlock(dqm);
8636e53c 1978 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
56f221b6 1979 dqm_lock(dqm);
9fd3f1bf 1980 }
56f221b6 1981 dqm_unlock(dqm);
1982
1983 /* Outside the DQM lock because under the DQM lock we can't do
1984 * reclaim or take other locks that others hold while reclaiming.
1985 */
1986 if (found)
1987 kfd_dec_compute_active(dqm->dev);
9fd3f1bf 1988
9fd3f1bf
FK
1989 return retval;
1990}
1991
fdfa090b
OZ
1992static int init_mqd_managers(struct device_queue_manager *dqm)
1993{
1994 int i, j;
1995 struct mqd_manager *mqd_mgr;
1996
1997 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1998 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1999 if (!mqd_mgr) {
2000 pr_err("mqd manager [%d] initialization failed\n", i);
2001 goto out_free;
2002 }
2003 dqm->mqd_mgrs[i] = mqd_mgr;
2004 }
2005
2006 return 0;
2007
2008out_free:
2009 for (j = 0; j < i; j++) {
2010 kfree(dqm->mqd_mgrs[j]);
2011 dqm->mqd_mgrs[j] = NULL;
2012 }
2013
2014 return -ENOMEM;
2015}
11614c36
OZ
2016
 2017/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in a single contiguous trunk */
2018static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
2019{
2020 int retval;
2021 struct kfd_dev *dev = dqm->dev;
2022 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
2023 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
c7637c95 2024 get_num_all_sdma_engines(dqm) *
f0dc99a6 2025 dev->device_info.num_sdma_queues_per_engine +
11614c36
OZ
2026 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
2027
6bfc7c7e 2028 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
11614c36 2029 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
f2cc50ce 2030 (void *)&(mem_obj->cpu_ptr), false);
11614c36
OZ
2031
2032 return retval;
2033}
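
/*
 * Editorial sketch: the size computed by allocate_hiq_sdma_mqd() above is
 * simply "one SDMA MQD slot per possible SDMA queue, plus one HIQ MQD",
 * carved out of a single GTT allocation.  The numbers used in main() are
 * made-up example values, not the MQD sizes or engine counts of any real
 * ASIC; only the arithmetic mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hiq_sdma_trunk_size(uint32_t sdma_mqd_size,
				    uint32_t num_sdma_engines,
				    uint32_t queues_per_engine,
				    uint32_t hiq_mqd_size)
{
	return sdma_mqd_size * num_sdma_engines * queues_per_engine +
	       hiq_mqd_size;
}

int main(void)
{
	/* Hypothetical example: 2K SDMA MQDs, 4 engines x 8 queues, 2K HIQ MQD. */
	uint32_t size = hiq_sdma_trunk_size(2048, 4, 8, 2048);

	printf("GTT trunk size: %u bytes (%u KiB)\n", size, size / 1024);
	return 0;
}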
2034
64c7f8cf
BG
2035struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
2036{
2037 struct device_queue_manager *dqm;
2038
79775b62 2039 pr_debug("Loading device queue manager\n");
a22fc854 2040
dbf56ab1 2041 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
64c7f8cf
BG
2042 if (!dqm)
2043 return NULL;
2044
7eb0502a 2045 switch (dev->adev->asic_type) {
d146c5a7
FK
2046 /* HWS is not available on Hawaii. */
2047 case CHIP_HAWAII:
2048 /* HWS depends on CWSR for timely dequeue. CWSR is not
2049 * available on Tonga.
2050 *
2051 * FIXME: This argument also applies to Kaveri.
2052 */
2053 case CHIP_TONGA:
2054 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
2055 break;
2056 default:
2057 dqm->sched_policy = sched_policy;
2058 break;
2059 }
2060
64c7f8cf 2061 dqm->dev = dev;
d146c5a7 2062 switch (dqm->sched_policy) {
64c7f8cf
BG
2063 case KFD_SCHED_POLICY_HWS:
2064 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
2065 /* initialize dqm for cp scheduling */
45c9a5e4
OG
2066 dqm->ops.create_queue = create_queue_cpsch;
2067 dqm->ops.initialize = initialize_cpsch;
2068 dqm->ops.start = start_cpsch;
2069 dqm->ops.stop = stop_cpsch;
09c34e8d 2070 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
2071 dqm->ops.destroy_queue = destroy_queue_cpsch;
2072 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
2073 dqm->ops.register_process = register_process;
2074 dqm->ops.unregister_process = unregister_process;
2075 dqm->ops.uninitialize = uninitialize;
45c9a5e4
OG
2076 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
2077 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
2078 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
9fd3f1bf 2079 dqm->ops.process_termination = process_termination_cpsch;
26103436
FK
2080 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
2081 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
5df099e8 2082 dqm->ops.get_wave_state = get_wave_state;
dec63443 2083 dqm->ops.reset_queues = reset_queues_cpsch;
42c6c482
DYS
2084 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2085 dqm->ops.checkpoint_mqd = checkpoint_mqd;
64c7f8cf
BG
2086 break;
2087 case KFD_SCHED_POLICY_NO_HWS:
2088 /* initialize dqm for no cp scheduling */
45c9a5e4
OG
2089 dqm->ops.start = start_nocpsch;
2090 dqm->ops.stop = stop_nocpsch;
09c34e8d 2091 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
2092 dqm->ops.create_queue = create_queue_nocpsch;
2093 dqm->ops.destroy_queue = destroy_queue_nocpsch;
2094 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
2095 dqm->ops.register_process = register_process;
2096 dqm->ops.unregister_process = unregister_process;
45c9a5e4 2097 dqm->ops.initialize = initialize_nocpsch;
58dcd5bf 2098 dqm->ops.uninitialize = uninitialize;
45c9a5e4 2099 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
9fd3f1bf 2100 dqm->ops.process_termination = process_termination_nocpsch;
26103436
FK
2101 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
2102 dqm->ops.restore_process_queues =
2103 restore_process_queues_nocpsch;
5df099e8 2104 dqm->ops.get_wave_state = get_wave_state;
42c6c482
DYS
2105 dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
2106 dqm->ops.checkpoint_mqd = checkpoint_mqd;
64c7f8cf
BG
2107 break;
2108 default:
d146c5a7 2109 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
32fa8219 2110 goto out_free;
64c7f8cf
BG
2111 }
2112
7eb0502a 2113 switch (dev->adev->asic_type) {
a22fc854 2114 case CHIP_CARRIZO:
bfd5e378 2115 device_queue_manager_init_vi(&dqm->asic_ops);
300dec95
OG
2116 break;
2117
a22fc854 2118 case CHIP_KAVERI:
bfd5e378 2119 device_queue_manager_init_cik(&dqm->asic_ops);
300dec95 2120 break;
97672cbe
FK
2121
2122 case CHIP_HAWAII:
2123 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
2124 break;
2125
2126 case CHIP_TONGA:
2127 case CHIP_FIJI:
2128 case CHIP_POLARIS10:
2129 case CHIP_POLARIS11:
846a44d7 2130 case CHIP_POLARIS12:
ed81cd6e 2131 case CHIP_VEGAM:
97672cbe
FK
2132 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
2133 break;
bed4f110 2134
e596b903 2135 default:
e4804a39
GS
2136 if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
2137 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
2138 else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
2139 device_queue_manager_init_v9(&dqm->asic_ops);
2140 else {
2141 WARN(1, "Unexpected ASIC family %u",
7eb0502a 2142 dev->adev->asic_type);
e4804a39
GS
2143 goto out_free;
2144 }
a22fc854
BG
2145 }
2146
fdfa090b
OZ
2147 if (init_mqd_managers(dqm))
2148 goto out_free;
2149
11614c36
OZ
2150 if (allocate_hiq_sdma_mqd(dqm)) {
2151 pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
2152 goto out_free;
2153 }
2154
32fa8219
FK
2155 if (!dqm->ops.initialize(dqm))
2156 return dqm;
64c7f8cf 2157
32fa8219
FK
2158out_free:
2159 kfree(dqm);
2160 return NULL;
64c7f8cf
BG
2161}
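
/*
 * Editorial sketch: device_queue_manager_init() above is essentially an
 * ops-table selector -- the same dqm->ops structure is filled with either the
 * HWS (cpsch) or the no-HWS (nocpsch) callbacks depending on the scheduling
 * policy.  The toy version below shows the same pattern with invented
 * callbacks; it is not the driver's real vtable.
 */
#include <stdio.h>

enum toy_sched_policy { TOY_POLICY_HWS, TOY_POLICY_NO_HWS };

struct toy_dqm_ops {
	int (*start)(void);
	int (*create_queue)(int qid);
};

static int start_cpsch_toy(void)             { puts("start: HWS runlist");             return 0; }
static int create_queue_cpsch_toy(int qid)   { printf("queue %d -> runlist\n", qid);   return 0; }
static int start_nocpsch_toy(void)           { puts("start: direct HQD programming");  return 0; }
static int create_queue_nocpsch_toy(int qid) { printf("queue %d -> HQD slot\n", qid);  return 0; }

static void toy_dqm_init(struct toy_dqm_ops *ops, enum toy_sched_policy policy)
{
	if (policy == TOY_POLICY_HWS) {
		ops->start = start_cpsch_toy;
		ops->create_queue = create_queue_cpsch_toy;
	} else {
		ops->start = start_nocpsch_toy;
		ops->create_queue = create_queue_nocpsch_toy;
	}
}

int main(void)
{
	struct toy_dqm_ops ops;

	toy_dqm_init(&ops, TOY_POLICY_HWS);
	ops.start();
	ops.create_queue(0);
	return 0;
}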
2162
7fd5a6fb
Y
2163static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
2164 struct kfd_mem_obj *mqd)
11614c36
OZ
2165{
2166 WARN(!mqd, "No hiq sdma mqd trunk to free");
2167
6bfc7c7e 2168 amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
11614c36
OZ
2169}
2170
64c7f8cf
BG
2171void device_queue_manager_uninit(struct device_queue_manager *dqm)
2172{
45c9a5e4 2173 dqm->ops.uninitialize(dqm);
11614c36 2174 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
64c7f8cf
BG
2175 kfree(dqm);
2176}
851a645e 2177
03e5b167 2178int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
2640c3fa 2179{
2180 struct kfd_process_device *pdd;
2181 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
2182 int ret = 0;
2183
2184 if (!p)
2185 return -EINVAL;
8a491bb3 2186 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2640c3fa 2187 pdd = kfd_get_process_device_data(dqm->dev, p);
2188 if (pdd)
2189 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
2190 kfd_unref_process(p);
2191
2192 return ret;
2193}
2194
73ea648d
SL
2195static void kfd_process_hw_exception(struct work_struct *work)
2196{
2197 struct device_queue_manager *dqm = container_of(work,
2198 struct device_queue_manager, hw_exception_work);
6bfc7c7e 2199 amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
73ea648d
SL
2200}
2201
851a645e
FK
2202#if defined(CONFIG_DEBUG_FS)
2203
2204static void seq_reg_dump(struct seq_file *m,
2205 uint32_t (*dump)[2], uint32_t n_regs)
2206{
2207 uint32_t i, count;
2208
2209 for (i = 0, count = 0; i < n_regs; i++) {
2210 if (count == 0 ||
2211 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
2212 seq_printf(m, "%s %08x: %08x",
2213 i ? "\n" : "",
2214 dump[i][0], dump[i][1]);
2215 count = 7;
2216 } else {
2217 seq_printf(m, " %08x", dump[i][1]);
2218 count--;
2219 }
2220 }
2221
2222 seq_puts(m, "\n");
2223}
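
/*
 * Editorial sketch: the grouping rule used by seq_reg_dump() above -- start a
 * new "address: value ..." line whenever the register address is not
 * contiguous (+4 bytes) with the previous one, or when eight values have
 * already been printed on the current line.  This is a stand-alone printf
 * version for clarity, not the debugfs code itself.
 */
#include <stdint.h>
#include <stdio.h>

static void reg_dump(uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i - 1][0] + sizeof(uint32_t) != dump[i][0]) {
			/* New line: non-contiguous address or line is full. */
			printf("%s %08x: %08x", i ? "\n" : "", dump[i][0], dump[i][1]);
			count = 7;	/* up to 7 more values on this line */
		} else {
			printf(" %08x", dump[i][1]);
			count--;
		}
	}
	printf("\n");
}

int main(void)
{
	/* Ten contiguous registers plus one with a gap in the address. */
	uint32_t regs[11][2];

	for (uint32_t i = 0; i < 10; i++) {
		regs[i][0] = 0x1000 + 4 * i;
		regs[i][1] = 0xdead0000 + i;
	}
	regs[10][0] = 0x2000;		/* address gap forces a new line */
	regs[10][1] = 0xbeef;

	reg_dump(regs, 11);
	return 0;
}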
2224
2225int dqm_debugfs_hqds(struct seq_file *m, void *data)
2226{
2227 struct device_queue_manager *dqm = data;
2228 uint32_t (*dump)[2], n_regs;
2229 int pipe, queue;
2230 int r = 0;
2231
2c99a547 2232 if (!dqm->sched_running) {
2243f493 2233 seq_puts(m, " Device is stopped\n");
2c99a547
PY
2234 return 0;
2235 }
2236
420185fd 2237 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
14328aa5
PC
2238 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
2239 &dump, &n_regs);
24f48a42
OZ
2240 if (!r) {
2241 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
14328aa5
PC
2242 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
2243 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
2244 KFD_CIK_HIQ_QUEUE);
24f48a42
OZ
2245 seq_reg_dump(m, dump, n_regs);
2246
2247 kfree(dump);
2248 }
2249
851a645e
FK
2250 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2251 int pipe_offset = pipe * get_queues_per_pipe(dqm);
2252
2253 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2254 if (!test_bit(pipe_offset + queue,
e6945304 2255 dqm->dev->shared_resources.cp_queue_bitmap))
851a645e
FK
2256 continue;
2257
2258 r = dqm->dev->kfd2kgd->hqd_dump(
420185fd 2259 dqm->dev->adev, pipe, queue, &dump, &n_regs);
851a645e
FK
2260 if (r)
2261 break;
2262
2263 seq_printf(m, " CP Pipe %d, Queue %d\n",
2264 pipe, queue);
2265 seq_reg_dump(m, dump, n_regs);
2266
2267 kfree(dump);
2268 }
2269 }
2270
c7637c95 2271 for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
d5094189 2272 for (queue = 0;
f0dc99a6 2273 queue < dqm->dev->device_info.num_sdma_queues_per_engine;
d5094189 2274 queue++) {
851a645e 2275 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
420185fd 2276 dqm->dev->adev, pipe, queue, &dump, &n_regs);
851a645e
FK
2277 if (r)
2278 break;
2279
2280 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2281 pipe, queue);
2282 seq_reg_dump(m, dump, n_regs);
2283
2284 kfree(dump);
2285 }
2286 }
2287
2288 return r;
2289}
2290
4f942aae 2291int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
a29ec470
SL
2292{
2293 int r = 0;
2294
2295 dqm_lock(dqm);
4f942aae
OZ
2296 r = pm_debugfs_hang_hws(&dqm->packet_mgr);
2297 if (r) {
2298 dqm_unlock(dqm);
2299 return r;
2300 }
a29ec470
SL
2301 dqm->active_runlist = true;
2302 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2303 dqm_unlock(dqm);
2304
2305 return r;
2306}
2307
851a645e 2308#endif