drm/radeon: avoid bogus "vram limit (0) must be a power of 2" warning
[linux-block.git] / drivers / gpu / drm / amd / amdkfd / kfd_device_queue_manager.c
CommitLineData
d87f36a0 1// SPDX-License-Identifier: GPL-2.0 OR MIT
64c7f8cf 2/*
d87f36a0 3 * Copyright 2014-2022 Advanced Micro Devices, Inc.
64c7f8cf
BG
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 */
24
26103436
FK
25#include <linux/ratelimit.h>
26#include <linux/printk.h>
64c7f8cf
BG
27#include <linux/slab.h>
28#include <linux/list.h>
29#include <linux/types.h>
64c7f8cf 30#include <linux/bitops.h>
99331a51 31#include <linux/sched.h>
64c7f8cf
BG
32#include "kfd_priv.h"
33#include "kfd_device_queue_manager.h"
34#include "kfd_mqd_manager.h"
35#include "cik_regs.h"
36#include "kfd_kernel_queue.h"
5b87245f 37#include "amdgpu_amdkfd.h"
cc009e61 38#include "mes_api_def.h"
64c7f8cf
BG
39
40/* Size of the per-pipe EOP queue */
41#define CIK_HPD_EOP_BYTES_LOG2 11
42#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
43
64c7f8cf 44static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
c7b6bac9 45 u32 pasid, unsigned int vmid);
64c7f8cf 46
c4744e24
YZ
47static int execute_queues_cpsch(struct device_queue_manager *dqm,
48 enum kfd_unmap_queues_filter filter,
49 uint32_t filter_param);
7da2bcf8 50static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466 51 enum kfd_unmap_queues_filter filter,
f6b80c04 52 uint32_t filter_param, bool reset);
64c7f8cf 53
60a00956
FK
54static int map_queues_cpsch(struct device_queue_manager *dqm);
55
bcea3081 56static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 57 struct queue *q);
64c7f8cf 58
d39b7737
OZ
59static inline void deallocate_hqd(struct device_queue_manager *dqm,
60 struct queue *q);
61static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
62static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 63 struct queue *q, const uint32_t *restore_sdma_id);
73ea648d
SL
64static void kfd_process_hw_exception(struct work_struct *work);
65
bcea3081
BG
66static inline
67enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
64c7f8cf 68{
1b4670f6 69 if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
85d258f9
BG
70 return KFD_MQD_TYPE_SDMA;
71 return KFD_MQD_TYPE_CP;
64c7f8cf
BG
72}
73
d0b63bb3
AR
74static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
75{
76 int i;
0d801007
JC
77 int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec
78 + pipe) * dqm->dev->shared_resources.num_queue_per_pipe;
d0b63bb3
AR
79
80 /* queue is available for KFD usage if bit is 1 */
81 for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
82 if (test_bit(pipe_offset + i,
e6945304 83 dqm->dev->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
84 return true;
85 return false;
86}
87
e6945304 88unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
64ea8f4a 89{
e6945304 90 return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
d0b63bb3 91 KGD_MAX_QUEUES);
64ea8f4a
OG
92}
93
d0b63bb3 94unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
64c7f8cf 95{
d0b63bb3
AR
96 return dqm->dev->shared_resources.num_queue_per_pipe;
97}
98
99unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
100{
d0b63bb3 101 return dqm->dev->shared_resources.num_pipe_per_mec;
64c7f8cf
BG
102}
103
c7637c95
YZ
104static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
105{
ee2f17f4
AL
106 return kfd_get_num_sdma_engines(dqm->dev) +
107 kfd_get_num_xgmi_sdma_engines(dqm->dev);
c7637c95
YZ
108}
109
98bb9222
YZ
110unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
111{
ee2f17f4 112 return kfd_get_num_sdma_engines(dqm->dev) *
f0dc99a6 113 dqm->dev->device_info.num_sdma_queues_per_engine;
98bb9222
YZ
114}
115
1b4670f6
OZ
116unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
117{
ee2f17f4 118 return kfd_get_num_xgmi_sdma_engines(dqm->dev) *
f0dc99a6 119 dqm->dev->device_info.num_sdma_queues_per_engine;
1b4670f6
OZ
120}
121
cc009e61
MJ
122static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm)
123{
124 return dqm->dev->device_info.reserved_sdma_queues_bitmap;
125}
126
a22fc854 127void program_sh_mem_settings(struct device_queue_manager *dqm,
64c7f8cf
BG
128 struct qcm_process_device *qpd)
129{
cea405b1 130 return dqm->dev->kfd2kgd->program_sh_mem_settings(
3356c38d 131 dqm->dev->adev, qpd->vmid,
64c7f8cf
BG
132 qpd->sh_mem_config,
133 qpd->sh_mem_ape1_base,
134 qpd->sh_mem_ape1_limit,
135 qpd->sh_mem_bases);
136}
137
cc009e61
MJ
138static void kfd_hws_hang(struct device_queue_manager *dqm)
139{
140 /*
141 * Issue a GPU reset if HWS is unresponsive
142 */
143 dqm->is_hws_hang = true;
144
145 /* It's possible we're detecting a HWS hang in the
146 * middle of a GPU reset. No need to schedule another
147 * reset in this case.
148 */
149 if (!dqm->is_resetting)
150 schedule_work(&dqm->hw_exception_work);
151}
152
153static int convert_to_mes_queue_type(int queue_type)
154{
155 int mes_queue_type;
156
157 switch (queue_type) {
158 case KFD_QUEUE_TYPE_COMPUTE:
159 mes_queue_type = MES_QUEUE_TYPE_COMPUTE;
160 break;
161 case KFD_QUEUE_TYPE_SDMA:
162 mes_queue_type = MES_QUEUE_TYPE_SDMA;
163 break;
164 default:
165 WARN(1, "Invalid queue type %d", queue_type);
166 mes_queue_type = -EINVAL;
167 break;
168 }
169
170 return mes_queue_type;
171}
172
173static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q,
174 struct qcm_process_device *qpd)
175{
176 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
177 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
178 struct mes_add_queue_input queue_input;
04fd0739 179 int r, queue_type;
e77a541f 180 uint64_t wptr_addr_off;
cc009e61
MJ
181
182 if (dqm->is_hws_hang)
183 return -EIO;
184
185 memset(&queue_input, 0x0, sizeof(struct mes_add_queue_input));
186 queue_input.process_id = qpd->pqm->process->pasid;
187 queue_input.page_table_base_addr = qpd->page_table_base;
188 queue_input.process_va_start = 0;
189 queue_input.process_va_end = adev->vm_manager.max_pfn - 1;
190 /* MES unit for quantum is 100ns */
191 queue_input.process_quantum = KFD_MES_PROCESS_QUANTUM; /* Equivalent to 10ms. */
192 queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;
193 queue_input.gang_quantum = KFD_MES_GANG_QUANTUM; /* Equivalent to 1ms */
194 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
195 queue_input.inprocess_gang_priority = q->properties.priority;
196 queue_input.gang_global_priority_level =
197 AMDGPU_MES_PRIORITY_LEVEL_NORMAL;
198 queue_input.doorbell_offset = q->properties.doorbell_off;
199 queue_input.mqd_addr = q->gart_mqd_addr;
fe4e9ff9 200 queue_input.wptr_addr = (uint64_t)q->properties.write_ptr;
e77a541f
GS
201
202 if (q->wptr_bo) {
203 wptr_addr_off = (uint64_t)q->properties.write_ptr - (uint64_t)q->wptr_bo->kfd_bo->va;
fe4e9ff9
JX
204 queue_input.wptr_mc_addr = ((uint64_t)q->wptr_bo->tbo.resource->start << PAGE_SHIFT) + wptr_addr_off;
205 }
e77a541f 206
a9579956
GS
207 queue_input.is_kfd_process = 1;
208
cc009e61
MJ
209 queue_input.paging = false;
210 queue_input.tba_addr = qpd->tba_addr;
211 queue_input.tma_addr = qpd->tma_addr;
212
04fd0739
GS
213 queue_type = convert_to_mes_queue_type(q->properties.type);
214 if (queue_type < 0) {
cc009e61
MJ
215 pr_err("Queue type not supported with MES, queue:%d\n",
216 q->properties.type);
217 return -EINVAL;
218 }
04fd0739 219 queue_input.queue_type = (uint32_t)queue_type;
cc009e61
MJ
220
221 if (q->gws) {
222 queue_input.gws_base = 0;
223 queue_input.gws_size = qpd->num_gws;
224 }
225
226 amdgpu_mes_lock(&adev->mes);
227 r = adev->mes.funcs->add_hw_queue(&adev->mes, &queue_input);
228 amdgpu_mes_unlock(&adev->mes);
229 if (r) {
230 pr_err("failed to add hardware queue to MES, doorbell=0x%x\n",
231 q->properties.doorbell_off);
232 pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
233 kfd_hws_hang(dqm);
234}
235
236 return r;
237}
238
239static int remove_queue_mes(struct device_queue_manager *dqm, struct queue *q,
240 struct qcm_process_device *qpd)
241{
242 struct amdgpu_device *adev = (struct amdgpu_device *)dqm->dev->adev;
243 int r;
244 struct mes_remove_queue_input queue_input;
245
246 if (dqm->is_hws_hang)
247 return -EIO;
248
249 memset(&queue_input, 0x0, sizeof(struct mes_remove_queue_input));
250 queue_input.doorbell_offset = q->properties.doorbell_off;
251 queue_input.gang_context_addr = q->gang_ctx_gpu_addr;
252
253 amdgpu_mes_lock(&adev->mes);
254 r = adev->mes.funcs->remove_hw_queue(&adev->mes, &queue_input);
255 amdgpu_mes_unlock(&adev->mes);
256
257 if (r) {
258 pr_err("failed to remove hardware queue from MES, doorbell=0x%x\n",
259 q->properties.doorbell_off);
260 pr_err("MES might be in unrecoverable state, issue a GPU reset\n");
261 kfd_hws_hang(dqm);
262 }
263
264 return r;
265}
266
267static int remove_all_queues_mes(struct device_queue_manager *dqm)
268{
269 struct device_process_node *cur;
270 struct qcm_process_device *qpd;
271 struct queue *q;
272 int retval = 0;
273
274 list_for_each_entry(cur, &dqm->queues, list) {
275 qpd = cur->qpd;
276 list_for_each_entry(q, &qpd->queues_list, list) {
277 if (q->properties.is_active) {
278 retval = remove_queue_mes(dqm, q, qpd);
279 if (retval) {
280 pr_err("%s: Failed to remove queue %d for dev %d",
281 __func__,
282 q->properties.queue_id,
283 dqm->dev->id);
284 return retval;
285 }
286 }
287 }
288 }
289
290 return retval;
291}
292
204d8998 293static void increment_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
294 struct qcm_process_device *qpd,
295 struct queue *q)
b42902f4
YZ
296{
297 dqm->active_queue_count++;
ab4d51d4
DYS
298 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
299 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 300 dqm->active_cp_queue_count++;
ab4d51d4
DYS
301
302 if (q->properties.is_gws) {
303 dqm->gws_queue_count++;
304 qpd->mapped_gws_queue = true;
305 }
b42902f4
YZ
306}
307
204d8998 308static void decrement_queue_count(struct device_queue_manager *dqm,
ab4d51d4
DYS
309 struct qcm_process_device *qpd,
310 struct queue *q)
b42902f4
YZ
311{
312 dqm->active_queue_count--;
ab4d51d4
DYS
313 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
314 q->properties.type == KFD_QUEUE_TYPE_DIQ)
b42902f4 315 dqm->active_cp_queue_count--;
ab4d51d4
DYS
316
317 if (q->properties.is_gws) {
318 dqm->gws_queue_count--;
319 qpd->mapped_gws_queue = false;
320 }
b42902f4
YZ
321}
322
5bb6a8fa
DYS
323/*
324 * Allocate a doorbell ID to this queue.
325 * If doorbell_id is passed in, make sure requested ID is valid then allocate it.
326 */
327static int allocate_doorbell(struct qcm_process_device *qpd,
328 struct queue *q,
329 uint32_t const *restore_id)
ef568db7
FK
330{
331 struct kfd_dev *dev = qpd->dqm->dev;
332
dd0ae064 333 if (!KFD_IS_SOC15(dev)) {
ef568db7
FK
334 /* On pre-SOC15 chips we need to use the queue ID to
335 * preserve the user mode ABI.
336 */
5bb6a8fa
DYS
337
338 if (restore_id && *restore_id != q->properties.queue_id)
339 return -EINVAL;
340
ef568db7 341 q->doorbell_id = q->properties.queue_id;
1b4670f6
OZ
342 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
343 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
234441dd
YZ
344 /* For SDMA queues on SOC15 with 8-byte doorbell, use static
345 * doorbell assignments based on the engine and queue id.
346 * The doobell index distance between RLC (2*i) and (2*i+1)
347 * for a SDMA engine is 512.
ef568db7 348 */
234441dd 349
5bb6a8fa
DYS
350 uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx;
351 uint32_t valid_id = idx_offset[q->properties.sdma_engine_id]
352 + (q->properties.sdma_queue_id & 1)
353 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET
354 + (q->properties.sdma_queue_id >> 1);
355
356 if (restore_id && *restore_id != valid_id)
357 return -EINVAL;
358 q->doorbell_id = valid_id;
ef568db7 359 } else {
5bb6a8fa
DYS
360 /* For CP queues on SOC15 */
361 if (restore_id) {
362 /* make sure that ID is free */
363 if (__test_and_set_bit(*restore_id, qpd->doorbell_bitmap))
364 return -EINVAL;
365
366 q->doorbell_id = *restore_id;
367 } else {
368 /* or reserve a free doorbell ID */
369 unsigned int found;
370
371 found = find_first_zero_bit(qpd->doorbell_bitmap,
372 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
373 if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
374 pr_debug("No doorbells available");
375 return -EBUSY;
376 }
377 set_bit(found, qpd->doorbell_bitmap);
378 q->doorbell_id = found;
ef568db7 379 }
ef568db7
FK
380 }
381
382 q->properties.doorbell_off =
59d7115d 383 kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd),
ef568db7 384 q->doorbell_id);
ef568db7
FK
385 return 0;
386}
387
388static void deallocate_doorbell(struct qcm_process_device *qpd,
389 struct queue *q)
390{
391 unsigned int old;
392 struct kfd_dev *dev = qpd->dqm->dev;
393
dd0ae064 394 if (!KFD_IS_SOC15(dev) ||
1b4670f6
OZ
395 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
396 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
ef568db7
FK
397 return;
398
399 old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
400 WARN_ON(!old);
401}
402
b53ef0df
MJ
403static void program_trap_handler_settings(struct device_queue_manager *dqm,
404 struct qcm_process_device *qpd)
405{
406 if (dqm->dev->kfd2kgd->program_trap_handler_settings)
407 dqm->dev->kfd2kgd->program_trap_handler_settings(
3356c38d 408 dqm->dev->adev, qpd->vmid,
b53ef0df
MJ
409 qpd->tba_addr, qpd->tma_addr);
410}
411
64c7f8cf
BG
412static int allocate_vmid(struct device_queue_manager *dqm,
413 struct qcm_process_device *qpd,
414 struct queue *q)
415{
d9d4623c 416 int allocated_vmid = -1, i;
64c7f8cf 417
d9d4623c
YZ
418 for (i = dqm->dev->vm_info.first_vmid_kfd;
419 i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
420 if (!dqm->vmid_pasid[i]) {
421 allocated_vmid = i;
422 break;
423 }
424 }
425
426 if (allocated_vmid < 0) {
427 pr_err("no more vmid to allocate\n");
428 return -ENOSPC;
429 }
430
431 pr_debug("vmid allocated: %d\n", allocated_vmid);
432
433 dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
64c7f8cf 434
d9d4623c 435 set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
64c7f8cf 436
64c7f8cf
BG
437 qpd->vmid = allocated_vmid;
438 q->properties.vmid = allocated_vmid;
439
64c7f8cf
BG
440 program_sh_mem_settings(dqm, qpd);
441
046e674b 442 if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled)
b53ef0df
MJ
443 program_trap_handler_settings(dqm, qpd);
444
403575c4
FK
445 /* qpd->page_table_base is set earlier when register_process()
446 * is called, i.e. when the first queue is created.
447 */
3356c38d 448 dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->adev,
403575c4
FK
449 qpd->vmid,
450 qpd->page_table_base);
451 /* invalidate the VM context after pasid and vmid mapping is set up */
3543b055 452 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 453
c637b36a 454 if (dqm->dev->kfd2kgd->set_scratch_backing_va)
3356c38d 455 dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->adev,
c637b36a 456 qpd->sh_hidden_private_base, qpd->vmid);
d39b7737 457
64c7f8cf
BG
458 return 0;
459}
460
552764b6
FK
461static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
462 struct qcm_process_device *qpd)
463{
9af5379c 464 const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf;
f6e27ff1 465 int ret;
552764b6
FK
466
467 if (!qpd->ib_kaddr)
468 return -ENOMEM;
469
f6e27ff1
FK
470 ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
471 if (ret)
472 return ret;
552764b6 473
6bfc7c7e 474 return amdgpu_amdkfd_submit_ib(kdev->adev, KGD_ENGINE_MEC1, qpd->vmid,
f6e27ff1
FK
475 qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
476 pmf->release_mem_size / sizeof(uint32_t));
552764b6
FK
477}
478
64c7f8cf
BG
479static void deallocate_vmid(struct device_queue_manager *dqm,
480 struct qcm_process_device *qpd,
481 struct queue *q)
482{
552764b6 483 /* On GFX v7, CP doesn't flush TC at dequeue */
7eb0502a 484 if (q->device->adev->asic_type == CHIP_HAWAII)
552764b6
FK
485 if (flush_texture_cache_nocpsch(q->device, qpd))
486 pr_err("Failed to flush TC\n");
487
3543b055 488 kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
403575c4 489
2030664b
BG
490 /* Release the vmid mapping */
491 set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
d9d4623c 492 dqm->vmid_pasid[qpd->vmid] = 0;
2030664b 493
64c7f8cf
BG
494 qpd->vmid = 0;
495 q->properties.vmid = 0;
496}
497
498static int create_queue_nocpsch(struct device_queue_manager *dqm,
499 struct queue *q,
2485c12c 500 struct qcm_process_device *qpd,
42c6c482 501 const struct kfd_criu_queue_priv_data *qd,
3a9822d7 502 const void *restore_mqd, const void *restore_ctl_stack)
64c7f8cf 503{
d39b7737 504 struct mqd_manager *mqd_mgr;
64c7f8cf
BG
505 int retval;
506
efeaed4d 507 dqm_lock(dqm);
64c7f8cf 508
b8cbab04 509 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 510 pr_warn("Can't create new usermode queue because %d queues were already created\n",
b8cbab04 511 dqm->total_queue_count);
ab7c1648
KR
512 retval = -EPERM;
513 goto out_unlock;
b8cbab04
OG
514 }
515
64c7f8cf
BG
516 if (list_empty(&qpd->queues_list)) {
517 retval = allocate_vmid(dqm, qpd, q);
ab7c1648
KR
518 if (retval)
519 goto out_unlock;
64c7f8cf 520 }
64c7f8cf 521 q->properties.vmid = qpd->vmid;
26103436 522 /*
bb2d2128
FK
523 * Eviction state logic: mark all queues as evicted, even ones
524 * not currently active. Restoring inactive queues later only
525 * updates the is_evicted flag but is a no-op otherwise.
26103436 526 */
bb2d2128 527 q->properties.is_evicted = !!qpd->evicted;
64c7f8cf 528
373d7080
FK
529 q->properties.tba_addr = qpd->tba_addr;
530 q->properties.tma_addr = qpd->tma_addr;
531
d091bc0a
OZ
532 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
533 q->properties.type)];
d39b7737
OZ
534 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
535 retval = allocate_hqd(dqm, q);
536 if (retval)
537 goto deallocate_vmid;
538 pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
539 q->pipe, q->queue);
540 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
541 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
2485c12c 542 retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
d39b7737
OZ
543 if (retval)
544 goto deallocate_vmid;
545 dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
546 }
547
5bb6a8fa 548 retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
d39b7737
OZ
549 if (retval)
550 goto out_deallocate_hqd;
551
6a6ef5ee
OZ
552 /* Temporarily release dqm lock to avoid a circular lock dependency */
553 dqm_unlock(dqm);
d091bc0a 554 q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
6a6ef5ee
OZ
555 dqm_lock(dqm);
556
d091bc0a
OZ
557 if (!q->mqd_mem_obj) {
558 retval = -ENOMEM;
559 goto out_deallocate_doorbell;
560 }
42c6c482
DYS
561
562 if (qd)
563 mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
3a9822d7
DYS
564 &q->properties, restore_mqd, restore_ctl_stack,
565 qd->ctl_stack_size);
42c6c482
DYS
566 else
567 mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
568 &q->gart_mqd_addr, &q->properties);
569
d39b7737 570 if (q->properties.is_active) {
2c99a547
PY
571 if (!dqm->sched_running) {
572 WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
573 goto add_queue_to_list;
574 }
d39b7737
OZ
575
576 if (WARN(q->process->mm != current->mm,
577 "should only run in user thread"))
578 retval = -EFAULT;
579 else
580 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
581 q->queue, &q->properties, current->mm);
582 if (retval)
d091bc0a 583 goto out_free_mqd;
64c7f8cf
BG
584 }
585
2c99a547 586add_queue_to_list:
64c7f8cf 587 list_add(&q->list, &qpd->queues_list);
bc920fd4 588 qpd->queue_count++;
b6819cec 589 if (q->properties.is_active)
ab4d51d4 590 increment_queue_count(dqm, qpd, q);
64c7f8cf 591
b8cbab04
OG
592 /*
593 * Unconditionally increment this counter, regardless of the queue's
594 * type or whether the queue is active.
595 */
596 dqm->total_queue_count++;
597 pr_debug("Total of %d queues are accountable so far\n",
598 dqm->total_queue_count);
d091bc0a 599 goto out_unlock;
b8cbab04 600
d091bc0a
OZ
601out_free_mqd:
602 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
d39b7737
OZ
603out_deallocate_doorbell:
604 deallocate_doorbell(qpd, q);
605out_deallocate_hqd:
606 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
607 deallocate_hqd(dqm, q);
608 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
609 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
610 deallocate_sdma_queue(dqm, q);
611deallocate_vmid:
612 if (list_empty(&qpd->queues_list))
613 deallocate_vmid(dqm, qpd, q);
ab7c1648 614out_unlock:
efeaed4d 615 dqm_unlock(dqm);
ab7c1648 616 return retval;
64c7f8cf
BG
617}
618
619static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
620{
621 bool set;
f0ec5b99 622 int pipe, bit, i;
64c7f8cf
BG
623
624 set = false;
625
8eabaf54
KR
626 for (pipe = dqm->next_pipe_to_allocate, i = 0;
627 i < get_pipes_per_mec(dqm);
d0b63bb3
AR
628 pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
629
630 if (!is_pipe_enabled(dqm, 0, pipe))
631 continue;
632
64c7f8cf 633 if (dqm->allocated_queues[pipe] != 0) {
4252bf68
HK
634 bit = ffs(dqm->allocated_queues[pipe]) - 1;
635 dqm->allocated_queues[pipe] &= ~(1 << bit);
64c7f8cf
BG
636 q->pipe = pipe;
637 q->queue = bit;
638 set = true;
639 break;
640 }
641 }
642
991ca8ee 643 if (!set)
64c7f8cf
BG
644 return -EBUSY;
645
79775b62 646 pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
64c7f8cf 647 /* horizontal hqd allocation */
d0b63bb3 648 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
64c7f8cf
BG
649
650 return 0;
651}
652
653static inline void deallocate_hqd(struct device_queue_manager *dqm,
654 struct queue *q)
655{
4252bf68 656 dqm->allocated_queues[q->pipe] |= (1 << q->queue);
64c7f8cf
BG
657}
658
5bdd3eb2
MJ
659#define SQ_IND_CMD_CMD_KILL 0x00000003
660#define SQ_IND_CMD_MODE_BROADCAST 0x00000001
661
662static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p)
663{
664 int status = 0;
665 unsigned int vmid;
666 uint16_t queried_pasid;
667 union SQ_CMD_BITS reg_sq_cmd;
668 union GRBM_GFX_INDEX_BITS reg_gfx_index;
669 struct kfd_process_device *pdd;
670 int first_vmid_to_scan = dev->vm_info.first_vmid_kfd;
671 int last_vmid_to_scan = dev->vm_info.last_vmid_kfd;
672
673 reg_sq_cmd.u32All = 0;
674 reg_gfx_index.u32All = 0;
675
676 pr_debug("Killing all process wavefronts\n");
677
d55957fb
YZ
678 if (!dev->kfd2kgd->get_atc_vmid_pasid_mapping_info) {
679 pr_err("no vmid pasid mapping supported \n");
680 return -EOPNOTSUPP;
681 }
682
5bdd3eb2
MJ
683 /* Scan all registers in the range ATC_VMID8_PASID_MAPPING ..
684 * ATC_VMID15_PASID_MAPPING
685 * to check which VMID the current process is mapped to.
686 */
687
d55957fb
YZ
688 for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) {
689 status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info
690 (dev->adev, vmid, &queried_pasid);
c8b0507f 691
d55957fb
YZ
692 if (status && queried_pasid == p->pasid) {
693 pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n",
694 vmid, p->pasid);
695 break;
5bdd3eb2
MJ
696 }
697 }
698
699 if (vmid > last_vmid_to_scan) {
700 pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid);
701 return -EFAULT;
702 }
703
704 /* taking the VMID for that process on the safe way using PDD */
705 pdd = kfd_get_process_device_data(dev, p);
706 if (!pdd)
707 return -EFAULT;
708
709 reg_gfx_index.bits.sh_broadcast_writes = 1;
710 reg_gfx_index.bits.se_broadcast_writes = 1;
711 reg_gfx_index.bits.instance_broadcast_writes = 1;
712 reg_sq_cmd.bits.mode = SQ_IND_CMD_MODE_BROADCAST;
713 reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL;
714 reg_sq_cmd.bits.vm_id = vmid;
715
716 dev->kfd2kgd->wave_control_execute(dev->adev,
717 reg_gfx_index.u32All,
718 reg_sq_cmd.u32All);
719
720 return 0;
721}
722
9fd3f1bf
FK
723/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
724 * to avoid asynchronized access
725 */
726static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
64c7f8cf
BG
727 struct qcm_process_device *qpd,
728 struct queue *q)
729{
730 int retval;
8d5f3552 731 struct mqd_manager *mqd_mgr;
64c7f8cf 732
fdfa090b
OZ
733 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
734 q->properties.type)];
64c7f8cf 735
c7637c95 736 if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
c2e1b3a4 737 deallocate_hqd(dqm, q);
c7637c95 738 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 739 deallocate_sdma_queue(dqm, q);
c7637c95 740 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 741 deallocate_sdma_queue(dqm, q);
c7637c95 742 else {
79775b62 743 pr_debug("q->properties.type %d is invalid\n",
7113cd65 744 q->properties.type);
9fd3f1bf 745 return -EINVAL;
64c7f8cf 746 }
9fd3f1bf 747 dqm->total_queue_count--;
64c7f8cf 748
ef568db7
FK
749 deallocate_doorbell(qpd, q);
750
2c99a547
PY
751 if (!dqm->sched_running) {
752 WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
753 return 0;
754 }
755
8d5f3552 756 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
c2e1b3a4 757 KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
b90e3fbe 758 KFD_UNMAP_LATENCY_MS,
64c7f8cf 759 q->pipe, q->queue);
9fd3f1bf
FK
760 if (retval == -ETIME)
761 qpd->reset_wavefronts = true;
64c7f8cf 762
64c7f8cf 763 list_del(&q->list);
9fd3f1bf
FK
764 if (list_empty(&qpd->queues_list)) {
765 if (qpd->reset_wavefronts) {
766 pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
767 dqm->dev);
768 /* dbgdev_wave_reset_wavefronts has to be called before
769 * deallocate_vmid(), i.e. when vmid is still in use.
770 */
771 dbgdev_wave_reset_wavefronts(dqm->dev,
772 qpd->pqm->process);
773 qpd->reset_wavefronts = false;
774 }
775
64c7f8cf 776 deallocate_vmid(dqm, qpd, q);
9fd3f1bf 777 }
bc920fd4 778 qpd->queue_count--;
ab4d51d4
DYS
779 if (q->properties.is_active)
780 decrement_queue_count(dqm, qpd, q);
b8cbab04 781
9fd3f1bf
FK
782 return retval;
783}
b8cbab04 784
9fd3f1bf
FK
785static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
786 struct qcm_process_device *qpd,
787 struct queue *q)
788{
789 int retval;
d69fd951
MJ
790 uint64_t sdma_val = 0;
791 struct kfd_process_device *pdd = qpd_to_pdd(qpd);
a7b2451d
AL
792 struct mqd_manager *mqd_mgr =
793 dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
d69fd951
MJ
794
795 /* Get the SDMA queue stats */
796 if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
797 (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
818b0324 798 retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
d69fd951
MJ
799 &sdma_val);
800 if (retval)
801 pr_err("Failed to read SDMA queue counter for queue: %d\n",
802 q->properties.queue_id);
803 }
9fd3f1bf 804
efeaed4d 805 dqm_lock(dqm);
9fd3f1bf 806 retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
d69fd951
MJ
807 if (!retval)
808 pdd->sdma_past_activity_counter += sdma_val;
efeaed4d 809 dqm_unlock(dqm);
9fd3f1bf 810
a7b2451d
AL
811 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
812
64c7f8cf
BG
813 return retval;
814}
815
c6e559eb
LY
816static int update_queue(struct device_queue_manager *dqm, struct queue *q,
817 struct mqd_update_info *minfo)
64c7f8cf 818{
8636e53c 819 int retval = 0;
8d5f3552 820 struct mqd_manager *mqd_mgr;
26103436 821 struct kfd_process_device *pdd;
b6ffbab8 822 bool prev_active = false;
64c7f8cf 823
efeaed4d 824 dqm_lock(dqm);
26103436
FK
825 pdd = kfd_get_process_device_data(q->device, q->process);
826 if (!pdd) {
827 retval = -ENODEV;
828 goto out_unlock;
829 }
fdfa090b
OZ
830 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
831 q->properties.type)];
64c7f8cf 832
60a00956
FK
833 /* Save previous activity state for counters */
834 prev_active = q->properties.is_active;
835
836 /* Make sure the queue is unmapped before updating the MQD */
d146c5a7 837 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
cc009e61
MJ
838 if (!dqm->dev->shared_resources.enable_mes)
839 retval = unmap_queues_cpsch(dqm,
840 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false);
841 else if (prev_active)
842 retval = remove_queue_mes(dqm, q, &pdd->qpd);
843
894a8293 844 if (retval) {
60a00956
FK
845 pr_err("unmap queue failed\n");
846 goto out_unlock;
847 }
894a8293 848 } else if (prev_active &&
60a00956 849 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
850 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
851 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
2c99a547
PY
852
853 if (!dqm->sched_running) {
854 WARN_ONCE(1, "Update non-HWS queue while stopped\n");
855 goto out_unlock;
856 }
857
8d5f3552 858 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
2243f493
RB
859 (dqm->dev->cwsr_enabled ?
860 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
861 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
60a00956
FK
862 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
863 if (retval) {
864 pr_err("destroy mqd failed\n");
865 goto out_unlock;
866 }
867 }
868
c6e559eb 869 mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
60a00956 870
096d1a3e
FK
871 /*
872 * check active state vs. the previous state and modify
873 * counter accordingly. map_queues_cpsch uses the
81b820b3 874 * dqm->active_queue_count to determine whether a new runlist must be
096d1a3e
FK
875 * uploaded.
876 */
ab4d51d4
DYS
877 if (q->properties.is_active && !prev_active) {
878 increment_queue_count(dqm, &pdd->qpd, q);
879 } else if (!q->properties.is_active && prev_active) {
880 decrement_queue_count(dqm, &pdd->qpd, q);
881 } else if (q->gws && !q->properties.is_gws) {
b8020b03
JG
882 if (q->properties.is_active) {
883 dqm->gws_queue_count++;
884 pdd->qpd.mapped_gws_queue = true;
885 }
886 q->properties.is_gws = true;
887 } else if (!q->gws && q->properties.is_gws) {
888 if (q->properties.is_active) {
889 dqm->gws_queue_count--;
890 pdd->qpd.mapped_gws_queue = false;
891 }
892 q->properties.is_gws = false;
893 }
894
cc009e61
MJ
895 if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
896 if (!dqm->dev->shared_resources.enable_mes)
897 retval = map_queues_cpsch(dqm);
f4f9b827 898 else if (q->properties.is_active)
cc009e61
MJ
899 retval = add_queue_mes(dqm, q, &pdd->qpd);
900 } else if (q->properties.is_active &&
60a00956 901 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
1b4670f6
OZ
902 q->properties.type == KFD_QUEUE_TYPE_SDMA ||
903 q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
1b19aa5a
FK
904 if (WARN(q->process->mm != current->mm,
905 "should only run in user thread"))
906 retval = -EFAULT;
907 else
908 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
909 q->pipe, q->queue,
910 &q->properties, current->mm);
911 }
b6ffbab8 912
ab7c1648 913out_unlock:
efeaed4d 914 dqm_unlock(dqm);
64c7f8cf
BG
915 return retval;
916}
917
26103436
FK
918static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
919 struct qcm_process_device *qpd)
920{
921 struct queue *q;
8d5f3552 922 struct mqd_manager *mqd_mgr;
26103436 923 struct kfd_process_device *pdd;
bb2d2128 924 int retval, ret = 0;
26103436 925
efeaed4d 926 dqm_lock(dqm);
26103436
FK
927 if (qpd->evicted++ > 0) /* already evicted, do nothing */
928 goto out;
929
930 pdd = qpd_to_pdd(qpd);
783a25f4 931 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
932 pdd->process->pasid);
933
4327bed2 934 pdd->last_evict_timestamp = get_jiffies_64();
bb2d2128
FK
935 /* Mark all queues as evicted. Deactivate all active queues on
936 * the qpd.
937 */
26103436 938 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 939 q->properties.is_evicted = true;
26103436
FK
940 if (!q->properties.is_active)
941 continue;
bb2d2128 942
fdfa090b
OZ
943 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
944 q->properties.type)];
26103436 945 q->properties.is_active = false;
ab4d51d4 946 decrement_queue_count(dqm, qpd, q);
2c99a547
PY
947
948 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
949 continue;
950
8d5f3552 951 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
2243f493
RB
952 (dqm->dev->cwsr_enabled ?
953 KFD_PREEMPT_TYPE_WAVEFRONT_SAVE :
954 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN),
26103436 955 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
bb2d2128
FK
956 if (retval && !ret)
957 /* Return the first error, but keep going to
958 * maintain a consistent eviction state
959 */
960 ret = retval;
26103436
FK
961 }
962
963out:
efeaed4d 964 dqm_unlock(dqm);
bb2d2128 965 return ret;
26103436
FK
966}
967
968static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
969 struct qcm_process_device *qpd)
970{
971 struct queue *q;
972 struct kfd_process_device *pdd;
973 int retval = 0;
974
efeaed4d 975 dqm_lock(dqm);
26103436
FK
976 if (qpd->evicted++ > 0) /* already evicted, do nothing */
977 goto out;
978
979 pdd = qpd_to_pdd(qpd);
783a25f4 980 pr_debug_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
981 pdd->process->pasid);
982
bb2d2128
FK
983 /* Mark all queues as evicted. Deactivate all active queues on
984 * the qpd.
985 */
26103436 986 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 987 q->properties.is_evicted = true;
26103436
FK
988 if (!q->properties.is_active)
989 continue;
bb2d2128 990
26103436 991 q->properties.is_active = false;
ab4d51d4 992 decrement_queue_count(dqm, qpd, q);
cc009e61
MJ
993
994 if (dqm->dev->shared_resources.enable_mes) {
995 retval = remove_queue_mes(dqm, q, qpd);
996 if (retval) {
997 pr_err("Failed to evict queue %d\n",
998 q->properties.queue_id);
999 goto out;
1000 }
1001 }
26103436 1002 }
4327bed2 1003 pdd->last_evict_timestamp = get_jiffies_64();
cc009e61
MJ
1004 if (!dqm->dev->shared_resources.enable_mes)
1005 retval = execute_queues_cpsch(dqm,
1006 qpd->is_debug ?
1007 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
1008 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
26103436
FK
1009
1010out:
efeaed4d 1011 dqm_unlock(dqm);
26103436
FK
1012 return retval;
1013}
1014
1015static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
1016 struct qcm_process_device *qpd)
1017{
1b19aa5a 1018 struct mm_struct *mm = NULL;
26103436 1019 struct queue *q;
8d5f3552 1020 struct mqd_manager *mqd_mgr;
26103436 1021 struct kfd_process_device *pdd;
e715c6d0 1022 uint64_t pd_base;
4327bed2 1023 uint64_t eviction_duration;
bb2d2128 1024 int retval, ret = 0;
26103436
FK
1025
1026 pdd = qpd_to_pdd(qpd);
1027 /* Retrieve PD base */
b40a6ab2 1028 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
26103436 1029
efeaed4d 1030 dqm_lock(dqm);
26103436
FK
1031 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1032 goto out;
1033 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1034 qpd->evicted--;
1035 goto out;
1036 }
1037
783a25f4 1038 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
1039 pdd->process->pasid);
1040
1041 /* Update PD Base in QPD */
1042 qpd->page_table_base = pd_base;
e715c6d0 1043 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
1044
1045 if (!list_empty(&qpd->queues_list)) {
1046 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
3356c38d 1047 dqm->dev->adev,
26103436
FK
1048 qpd->vmid,
1049 qpd->page_table_base);
3543b055 1050 kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
26103436
FK
1051 }
1052
1b19aa5a
FK
1053 /* Take a safe reference to the mm_struct, which may otherwise
1054 * disappear even while the kfd_process is still referenced.
1055 */
1056 mm = get_task_mm(pdd->process->lead_thread);
1057 if (!mm) {
bb2d2128 1058 ret = -EFAULT;
1b19aa5a
FK
1059 goto out;
1060 }
1061
bb2d2128
FK
1062 /* Remove the eviction flags. Activate queues that are not
1063 * inactive for other reasons.
1064 */
26103436 1065 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128
FK
1066 q->properties.is_evicted = false;
1067 if (!QUEUE_IS_ACTIVE(q->properties))
26103436 1068 continue;
bb2d2128 1069
fdfa090b
OZ
1070 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1071 q->properties.type)];
26103436 1072 q->properties.is_active = true;
ab4d51d4 1073 increment_queue_count(dqm, qpd, q);
2c99a547
PY
1074
1075 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
1076 continue;
1077
8d5f3552 1078 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1b19aa5a 1079 q->queue, &q->properties, mm);
bb2d2128
FK
1080 if (retval && !ret)
1081 /* Return the first error, but keep going to
1082 * maintain a consistent eviction state
1083 */
1084 ret = retval;
26103436
FK
1085 }
1086 qpd->evicted = 0;
4327bed2
PC
1087 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1088 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
26103436 1089out:
1b19aa5a
FK
1090 if (mm)
1091 mmput(mm);
efeaed4d 1092 dqm_unlock(dqm);
bb2d2128 1093 return ret;
26103436
FK
1094}
1095
1096static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
1097 struct qcm_process_device *qpd)
1098{
1099 struct queue *q;
1100 struct kfd_process_device *pdd;
e715c6d0 1101 uint64_t pd_base;
4327bed2 1102 uint64_t eviction_duration;
26103436
FK
1103 int retval = 0;
1104
1105 pdd = qpd_to_pdd(qpd);
1106 /* Retrieve PD base */
b40a6ab2 1107 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
26103436 1108
efeaed4d 1109 dqm_lock(dqm);
26103436
FK
1110 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
1111 goto out;
1112 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
1113 qpd->evicted--;
1114 goto out;
1115 }
1116
783a25f4 1117 pr_debug_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
1118 pdd->process->pasid);
1119
1120 /* Update PD Base in QPD */
1121 qpd->page_table_base = pd_base;
e715c6d0 1122 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
1123
1124 /* activate all active queues on the qpd */
1125 list_for_each_entry(q, &qpd->queues_list, list) {
26103436 1126 q->properties.is_evicted = false;
bb2d2128
FK
1127 if (!QUEUE_IS_ACTIVE(q->properties))
1128 continue;
1129
26103436 1130 q->properties.is_active = true;
ab4d51d4 1131 increment_queue_count(dqm, &pdd->qpd, q);
cc009e61
MJ
1132
1133 if (dqm->dev->shared_resources.enable_mes) {
1134 retval = add_queue_mes(dqm, q, qpd);
1135 if (retval) {
1136 pr_err("Failed to restore queue %d\n",
1137 q->properties.queue_id);
1138 goto out;
1139 }
1140 }
26103436 1141 }
cc009e61
MJ
1142 if (!dqm->dev->shared_resources.enable_mes)
1143 retval = execute_queues_cpsch(dqm,
1144 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
bb2d2128 1145 qpd->evicted = 0;
4327bed2
PC
1146 eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp;
1147 atomic64_add(eviction_duration, &pdd->evict_duration_counter);
26103436 1148out:
efeaed4d 1149 dqm_unlock(dqm);
26103436
FK
1150 return retval;
1151}
1152
58dcd5bf 1153static int register_process(struct device_queue_manager *dqm,
64c7f8cf
BG
1154 struct qcm_process_device *qpd)
1155{
1156 struct device_process_node *n;
403575c4 1157 struct kfd_process_device *pdd;
e715c6d0 1158 uint64_t pd_base;
a22fc854 1159 int retval;
64c7f8cf 1160
dbf56ab1 1161 n = kzalloc(sizeof(*n), GFP_KERNEL);
64c7f8cf
BG
1162 if (!n)
1163 return -ENOMEM;
1164
1165 n->qpd = qpd;
1166
403575c4
FK
1167 pdd = qpd_to_pdd(qpd);
1168 /* Retrieve PD base */
b40a6ab2 1169 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv);
403575c4 1170
efeaed4d 1171 dqm_lock(dqm);
64c7f8cf
BG
1172 list_add(&n->list, &dqm->queues);
1173
403575c4
FK
1174 /* Update PD Base in QPD */
1175 qpd->page_table_base = pd_base;
e715c6d0 1176 pr_debug("Updated PD address to 0x%llx\n", pd_base);
403575c4 1177
bfd5e378 1178 retval = dqm->asic_ops.update_qpd(dqm, qpd);
a22fc854 1179
f756e631 1180 dqm->processes_count++;
64c7f8cf 1181
efeaed4d 1182 dqm_unlock(dqm);
64c7f8cf 1183
32cce8bc
FK
1184 /* Outside the DQM lock because under the DQM lock we can't do
1185 * reclaim or take other locks that others hold while reclaiming.
1186 */
1187 kfd_inc_compute_active(dqm->dev);
1188
a22fc854 1189 return retval;
64c7f8cf
BG
1190}
1191
58dcd5bf 1192static int unregister_process(struct device_queue_manager *dqm,
64c7f8cf
BG
1193 struct qcm_process_device *qpd)
1194{
1195 int retval;
1196 struct device_process_node *cur, *next;
1197
1e5ec956
OG
1198 pr_debug("qpd->queues_list is %s\n",
1199 list_empty(&qpd->queues_list) ? "empty" : "not empty");
64c7f8cf
BG
1200
1201 retval = 0;
efeaed4d 1202 dqm_lock(dqm);
64c7f8cf
BG
1203
1204 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
1205 if (qpd == cur->qpd) {
1206 list_del(&cur->list);
f5d896bb 1207 kfree(cur);
f756e631 1208 dqm->processes_count--;
64c7f8cf
BG
1209 goto out;
1210 }
1211 }
1212 /* qpd not found in dqm list */
1213 retval = 1;
1214out:
efeaed4d 1215 dqm_unlock(dqm);
32cce8bc
FK
1216
1217 /* Outside the DQM lock because under the DQM lock we can't do
1218 * reclaim or take other locks that others hold while reclaiming.
1219 */
1220 if (!retval)
1221 kfd_dec_compute_active(dqm->dev);
1222
64c7f8cf
BG
1223 return retval;
1224}
1225
1226static int
c7b6bac9 1227set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid,
64c7f8cf
BG
1228 unsigned int vmid)
1229{
cea405b1 1230 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
3356c38d 1231 dqm->dev->adev, pasid, vmid);
64c7f8cf
BG
1232}
1233
2249d558
AL
1234static void init_interrupts(struct device_queue_manager *dqm)
1235{
1236 unsigned int i;
1237
d0b63bb3
AR
1238 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
1239 if (is_pipe_enabled(dqm, 0, i))
3356c38d 1240 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i);
2249d558
AL
1241}
1242
64c7f8cf
BG
1243static int initialize_nocpsch(struct device_queue_manager *dqm)
1244{
86194cf8 1245 int pipe, queue;
64c7f8cf 1246
79775b62 1247 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1248
ab7c1648
KR
1249 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
1250 sizeof(unsigned int), GFP_KERNEL);
1251 if (!dqm->allocated_queues)
1252 return -ENOMEM;
1253
efeaed4d 1254 mutex_init(&dqm->lock_hidden);
64c7f8cf 1255 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1256 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
b42902f4 1257 dqm->active_cp_queue_count = 0;
b8020b03 1258 dqm->gws_queue_count = 0;
64c7f8cf 1259
86194cf8
FK
1260 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1261 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1262
1263 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
1264 if (test_bit(pipe_offset + queue,
e6945304 1265 dqm->dev->shared_resources.cp_queue_bitmap))
86194cf8
FK
1266 dqm->allocated_queues[pipe] |= 1 << queue;
1267 }
64c7f8cf 1268
d9d4623c
YZ
1269 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
1270
35cdc81b 1271 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
cc009e61
MJ
1272 dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
1273 pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
1274
35cdc81b 1275 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
64c7f8cf 1276
64c7f8cf
BG
1277 return 0;
1278}
1279
58dcd5bf 1280static void uninitialize(struct device_queue_manager *dqm)
64c7f8cf 1281{
6f9d54fd
OG
1282 int i;
1283
81b820b3 1284 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
64c7f8cf
BG
1285
1286 kfree(dqm->allocated_queues);
6f9d54fd 1287 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
8d5f3552 1288 kfree(dqm->mqd_mgrs[i]);
efeaed4d 1289 mutex_destroy(&dqm->lock_hidden);
64c7f8cf
BG
1290}
1291
1292static int start_nocpsch(struct device_queue_manager *dqm)
1293{
6f4cb84a
FK
1294 int r = 0;
1295
52055039 1296 pr_info("SW scheduler is used");
2249d558 1297 init_interrupts(dqm);
2243f493 1298
7eb0502a 1299 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
6f4cb84a
FK
1300 r = pm_init(&dqm->packet_mgr, dqm);
1301 if (!r)
1302 dqm->sched_running = true;
2c99a547 1303
6f4cb84a 1304 return r;
64c7f8cf
BG
1305}
1306
1307static int stop_nocpsch(struct device_queue_manager *dqm)
1308{
7eb0502a 1309 if (dqm->dev->adev->asic_type == CHIP_HAWAII)
9af5379c 1310 pm_uninit(&dqm->packet_mgr, false);
2c99a547
PY
1311 dqm->sched_running = false;
1312
64c7f8cf
BG
1313 return 0;
1314}
1315
09c34e8d
FK
1316static void pre_reset(struct device_queue_manager *dqm)
1317{
1318 dqm_lock(dqm);
1319 dqm->is_resetting = true;
1320 dqm_unlock(dqm);
1321}
1322
bcea3081 1323static int allocate_sdma_queue(struct device_queue_manager *dqm,
2485c12c 1324 struct queue *q, const uint32_t *restore_sdma_id)
bcea3081
BG
1325{
1326 int bit;
1327
1b4670f6 1328 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
c7637c95
YZ
1329 if (dqm->sdma_bitmap == 0) {
1330 pr_err("No more SDMA queue to allocate\n");
1b4670f6 1331 return -ENOMEM;
c7637c95
YZ
1332 }
1333
2485c12c
DYS
1334 if (restore_sdma_id) {
1335 /* Re-use existing sdma_id */
1336 if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) {
1337 pr_err("SDMA queue already in use\n");
1338 return -EBUSY;
1339 }
1340 dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1341 q->sdma_id = *restore_sdma_id;
1342 } else {
1343 /* Find first available sdma_id */
1344 bit = __ffs64(dqm->sdma_bitmap);
1345 dqm->sdma_bitmap &= ~(1ULL << bit);
1346 q->sdma_id = bit;
1347 }
1348
1b4670f6 1349 q->properties.sdma_engine_id = q->sdma_id %
ee2f17f4 1350 kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1351 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1352 kfd_get_num_sdma_engines(dqm->dev);
1b4670f6 1353 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
c7637c95
YZ
1354 if (dqm->xgmi_sdma_bitmap == 0) {
1355 pr_err("No more XGMI SDMA queue to allocate\n");
1b4670f6 1356 return -ENOMEM;
c7637c95 1357 }
2485c12c
DYS
1358 if (restore_sdma_id) {
1359 /* Re-use existing sdma_id */
1360 if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) {
1361 pr_err("SDMA queue already in use\n");
1362 return -EBUSY;
1363 }
1364 dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id);
1365 q->sdma_id = *restore_sdma_id;
1366 } else {
1367 bit = __ffs64(dqm->xgmi_sdma_bitmap);
1368 dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
1369 q->sdma_id = bit;
1370 }
1b4670f6
OZ
1371 /* sdma_engine_id is sdma id including
1372 * both PCIe-optimized SDMAs and XGMI-
1373 * optimized SDMAs. The calculation below
1374 * assumes the first N engines are always
1375 * PCIe-optimized ones
1376 */
ee2f17f4
AL
1377 q->properties.sdma_engine_id =
1378 kfd_get_num_sdma_engines(dqm->dev) +
1379 q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1380 q->properties.sdma_queue_id = q->sdma_id /
ee2f17f4 1381 kfd_get_num_xgmi_sdma_engines(dqm->dev);
1b4670f6 1382 }
e78579aa 1383
e78579aa
YZ
1384 pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
1385 pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
bcea3081
BG
1386
1387 return 0;
1388}
1389
1390static void deallocate_sdma_queue(struct device_queue_manager *dqm,
1b4670f6 1391 struct queue *q)
bcea3081 1392{
1b4670f6
OZ
1393 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
1394 if (q->sdma_id >= get_num_sdma_queues(dqm))
1395 return;
1396 dqm->sdma_bitmap |= (1ULL << q->sdma_id);
1397 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1398 if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
1399 return;
1400 dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
1401 }
bcea3081
BG
1402}
1403
64c7f8cf
BG
1404/*
1405 * Device Queue Manager implementation for cp scheduler
1406 */
1407
1408static int set_sched_resources(struct device_queue_manager *dqm)
1409{
d0b63bb3 1410 int i, mec;
64c7f8cf 1411 struct scheduling_resources res;
64c7f8cf 1412
44008d7a 1413 res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
d0b63bb3
AR
1414
1415 res.queue_mask = 0;
1416 for (i = 0; i < KGD_MAX_QUEUES; ++i) {
1417 mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
1418 / dqm->dev->shared_resources.num_pipe_per_mec;
1419
e6945304 1420 if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
d0b63bb3
AR
1421 continue;
1422
1423 /* only acquire queues from the first MEC */
1424 if (mec > 0)
1425 continue;
1426
1427 /* This situation may be hit in the future if a new HW
1428 * generation exposes more than 64 queues. If so, the
8eabaf54
KR
1429 * definition of res.queue_mask needs updating
1430 */
1d11ee89 1431 if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
d0b63bb3
AR
1432 pr_err("Invalid queue enabled by amdgpu: %d\n", i);
1433 break;
1434 }
1435
d09f85d5
YZ
1436 res.queue_mask |= 1ull
1437 << amdgpu_queue_mask_bit_to_set_resource_bit(
56c5977e 1438 dqm->dev->adev, i);
d0b63bb3 1439 }
d9848e14
OZ
1440 res.gws_mask = ~0ull;
1441 res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
64c7f8cf 1442
79775b62
KR
1443 pr_debug("Scheduling resources:\n"
1444 "vmid mask: 0x%8X\n"
1445 "queue mask: 0x%8llX\n",
64c7f8cf
BG
1446 res.vmid_mask, res.queue_mask);
1447
9af5379c 1448 return pm_send_set_resources(&dqm->packet_mgr, &res);
64c7f8cf
BG
1449}
1450
1451static int initialize_cpsch(struct device_queue_manager *dqm)
1452{
50e2fc36
AJ
1453 uint64_t num_sdma_queues;
1454 uint64_t num_xgmi_sdma_queues;
1455
79775b62 1456 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 1457
efeaed4d 1458 mutex_init(&dqm->lock_hidden);
64c7f8cf 1459 INIT_LIST_HEAD(&dqm->queues);
81b820b3 1460 dqm->active_queue_count = dqm->processes_count = 0;
b42902f4 1461 dqm->active_cp_queue_count = 0;
b8020b03 1462 dqm->gws_queue_count = 0;
64c7f8cf 1463 dqm->active_runlist = false;
50e2fc36
AJ
1464
1465 num_sdma_queues = get_num_sdma_queues(dqm);
1466 if (num_sdma_queues >= BITS_PER_TYPE(dqm->sdma_bitmap))
1467 dqm->sdma_bitmap = ULLONG_MAX;
1468 else
1469 dqm->sdma_bitmap = (BIT_ULL(num_sdma_queues) - 1);
1470
cc009e61
MJ
1471 dqm->sdma_bitmap &= ~(get_reserved_sdma_queues_bitmap(dqm));
1472 pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap);
1473
50e2fc36
AJ
1474 num_xgmi_sdma_queues = get_num_xgmi_sdma_queues(dqm);
1475 if (num_xgmi_sdma_queues >= BITS_PER_TYPE(dqm->xgmi_sdma_bitmap))
1476 dqm->xgmi_sdma_bitmap = ULLONG_MAX;
1477 else
1478 dqm->xgmi_sdma_bitmap = (BIT_ULL(num_xgmi_sdma_queues) - 1);
64c7f8cf 1479
73ea648d
SL
1480 INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
1481
bfd5e378 1482 return 0;
64c7f8cf
BG
1483}
1484
1485static int start_cpsch(struct device_queue_manager *dqm)
1486{
64c7f8cf
BG
1487 int retval;
1488
64c7f8cf
BG
1489 retval = 0;
1490
4f942aae 1491 dqm_lock(dqm);
64c7f8cf 1492
cc009e61
MJ
1493 if (!dqm->dev->shared_resources.enable_mes) {
1494 retval = pm_init(&dqm->packet_mgr, dqm);
1495 if (retval)
1496 goto fail_packet_manager_init;
64c7f8cf 1497
cc009e61
MJ
1498 retval = set_sched_resources(dqm);
1499 if (retval)
1500 goto fail_set_sched_resources;
1501 }
79775b62 1502 pr_debug("Allocating fence memory\n");
64c7f8cf
BG
1503
1504 /* allocate fence memory on the gart */
a86aa3ca
OG
1505 retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
1506 &dqm->fence_mem);
64c7f8cf 1507
4eacc26b 1508 if (retval)
64c7f8cf
BG
1509 goto fail_allocate_vidmem;
1510
b010affe 1511 dqm->fence_addr = (uint64_t *)dqm->fence_mem->cpu_ptr;
64c7f8cf 1512 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
2249d558
AL
1513
1514 init_interrupts(dqm);
1515
73ea648d
SL
1516 /* clear hang status when driver try to start the hw scheduler */
1517 dqm->is_hws_hang = false;
09c34e8d 1518 dqm->is_resetting = false;
2c99a547 1519 dqm->sched_running = true;
cc009e61
MJ
1520 if (!dqm->dev->shared_resources.enable_mes)
1521 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
efeaed4d 1522 dqm_unlock(dqm);
64c7f8cf
BG
1523
1524 return 0;
1525fail_allocate_vidmem:
1526fail_set_sched_resources:
cc009e61
MJ
1527 if (!dqm->dev->shared_resources.enable_mes)
1528 pm_uninit(&dqm->packet_mgr, false);
64c7f8cf 1529fail_packet_manager_init:
4f942aae 1530 dqm_unlock(dqm);
64c7f8cf
BG
1531 return retval;
1532}
1533
1534static int stop_cpsch(struct device_queue_manager *dqm)
1535{
c2a77fde
FK
1536 bool hanging;
1537
efeaed4d 1538 dqm_lock(dqm);
c96cb659 1539 if (!dqm->sched_running) {
1540 dqm_unlock(dqm);
1541 return 0;
1542 }
1543
cc009e61
MJ
1544 if (!dqm->is_hws_hang) {
1545 if (!dqm->dev->shared_resources.enable_mes)
1546 unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false);
1547 else
1548 remove_all_queues_mes(dqm);
1549 }
1550
c2a77fde 1551 hanging = dqm->is_hws_hang || dqm->is_resetting;
2c99a547 1552 dqm->sched_running = false;
64c7f8cf 1553
cc009e61
MJ
1554 if (!dqm->dev->shared_resources.enable_mes)
1555 pm_release_ib(&dqm->packet_mgr);
087d7641 1556
a86aa3ca 1557 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
cc009e61
MJ
1558 if (!dqm->dev->shared_resources.enable_mes)
1559 pm_uninit(&dqm->packet_mgr, hanging);
4f942aae 1560 dqm_unlock(dqm);
64c7f8cf
BG
1561
1562 return 0;
1563}
1564
1565static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
1566 struct kernel_queue *kq,
1567 struct qcm_process_device *qpd)
1568{
efeaed4d 1569 dqm_lock(dqm);
b8cbab04 1570 if (dqm->total_queue_count >= max_num_of_queues_per_device) {
79775b62 1571 pr_warn("Can't create new kernel queue because %d queues were already created\n",
b8cbab04 1572 dqm->total_queue_count);
efeaed4d 1573 dqm_unlock(dqm);
b8cbab04
OG
1574 return -EPERM;
1575 }
1576
1577 /*
1578 * Unconditionally increment this counter, regardless of the queue's
1579 * type or whether the queue is active.
1580 */
1581 dqm->total_queue_count++;
1582 pr_debug("Total of %d queues are accountable so far\n",
1583 dqm->total_queue_count);
1584
64c7f8cf 1585 list_add(&kq->list, &qpd->priv_queue_list);
ab4d51d4 1586 increment_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1587 qpd->is_debug = true;
c4744e24 1588 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
efeaed4d 1589 dqm_unlock(dqm);
64c7f8cf
BG
1590
1591 return 0;
1592}
1593
1594static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
1595 struct kernel_queue *kq,
1596 struct qcm_process_device *qpd)
1597{
efeaed4d 1598 dqm_lock(dqm);
64c7f8cf 1599 list_del(&kq->list);
ab4d51d4 1600 decrement_queue_count(dqm, qpd, kq->queue);
64c7f8cf 1601 qpd->is_debug = false;
c4744e24 1602 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
b8cbab04
OG
1603 /*
1604 * Unconditionally decrement this counter, regardless of the queue's
1605 * type.
1606 */
8b58f261 1607 dqm->total_queue_count--;
b8cbab04
OG
1608 pr_debug("Total of %d queues are accountable so far\n",
1609 dqm->total_queue_count);
efeaed4d 1610 dqm_unlock(dqm);
64c7f8cf
BG
1611}
1612
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd,
			const struct kfd_criu_queue_priv_data *qd,
			const void *restore_mqd, const void *restore_ctl_stack)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q, qd ? &qd->sdma_id : NULL);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q, qd ? &qd->doorbell_id : NULL);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	if (qd)
		mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr,
				     &q->properties, restore_mqd, restore_ctl_stack,
				     qd->ctl_stack_size);
	else
		mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				  &q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, qpd, q);

		if (!dqm->dev->shared_resources.enable_mes) {
			retval = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		} else {
			retval = add_queue_mes(dqm, q, qpd);
			if (retval)
				goto cleanup_queue;
		}
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

cleanup_queue:
	qpd->queue_count--;
	list_del(&q->list);
	if (q->properties.is_active)
		decrement_queue_count(dqm, qpd, q);
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	dqm_unlock(dqm);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

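/*
 * Poll a fence location in memory until it reaches the expected value or the
 * timeout expires. On timeout the loop can spin forever (halt_if_hws_hang)
 * so CP/firmware state stays intact for debugging.
 */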
int amdkfd_fence_wait_timeout(uint64_t *fence_addr,
			      uint64_t fence_value,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

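/*
 * Build and submit a runlist covering all known processes and queues. This
 * is a no-op while the scheduler is stopped, when there is nothing to map,
 * or when a runlist is already active.
 */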
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (!dqm->sched_running)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packet_mgr, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

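/*
 * Preempt the queues selected by the filter and wait for the CP to confirm:
 * a fence write is requested through the packet manager and polled with
 * amdkfd_fence_wait_timeout(). The HIQ MQD doorbell field is then checked to
 * catch preemptions that the firmware silently abandoned.
 */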
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param, bool reset)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;

	if (!dqm->sched_running)
		return 0;
	if (dqm->is_hws_hang || dqm->is_resetting)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packet_mgr, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
					   queue_preemption_timeout_ms);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		kfd_hws_hang(dqm);
		return retval;
	}

	/* In the current MEC firmware implementation, if a compute queue
	 * doesn't respond to the preemption request in time, the HIQ will
	 * abandon the unmap request without returning any timeout error
	 * to the driver. Instead, MEC firmware logs the doorbell of the
	 * unresponsive compute queue in the HIQ.MQD.queue_doorbell_id fields.
	 * To make sure the queue unmap was successful, the driver needs to
	 * check those fields.
	 */
	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ];
	if (mqd_mgr->read_doorbell_id(dqm->packet_mgr.priv_queue->queue->mqd)) {
		pr_err("HIQ MQD's queue_doorbell_id0 is not 0, Queue preemption time out\n");
		while (halt_if_hws_hang)
			schedule();
		return -ETIME;
	}

	pm_release_ib(&dqm->packet_mgr);
	dqm->active_runlist = false;

	return retval;
}

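/*
 * Reset the queues of a single process, identified by PASID, by unmapping
 * them with the reset flag set.
 */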
/* only for compute queue */
static int reset_queues_cpsch(struct device_queue_manager *dqm,
			uint16_t pasid)
{
	int retval;

	dqm_lock(dqm);

	retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID,
			pasid, true);

	dqm_unlock(dqm);
	return retval;
}

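/*
 * Re-run the scheduler: unmap the queues selected by the filter, then map
 * everything that should currently be active back onto the hardware.
 */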
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param, false);
	if (retval)
		return retval;

	return map_queues_cpsch(dqm);
}

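/*
 * Destroy a user-mode queue under the CP scheduler: sample the SDMA activity
 * counter if applicable, unlink the queue, preempt it (or remove it through
 * MES) and free the MQD outside the DQM lock.
 */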
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
						 &sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * Error: we currently do not allow destroying a queue
		 * that belongs to a process under debug.
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		deallocate_sdma_queue(dqm, q);
		pdd->sdma_past_activity_counter += sdma_val;
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		if (!dqm->dev->shared_resources.enable_mes) {
			decrement_queue_count(dqm, qpd, q);
			retval = execute_queues_cpsch(dqm,
					KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
			if (retval == -ETIME)
				qpd->reset_wavefronts = true;
		} else {
			retval = remove_queue_mes(dqm, q, qpd);
		}
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

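/*
 * Worked example with illustrative values: alternate_aperture_base =
 * 0x200000000 and alternate_aperture_size = 0x20000 give base = 0x200000000
 * and limit = 0x20001FFFF. Both satisfy the APE1_FIXED_BITS_MASK checks, so
 * after the >> 16 conversion below sh_mem_ape1_base = 0x20000 and
 * sh_mem_ape1_limit = 0x20001.
 */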
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				    struct qcm_process_device *qpd,
				    enum cache_policy default_policy,
				    enum cache_policy alternate_policy,
				    void __user *alternate_aperture_base,
				    uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 * SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}

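/*
 * Process teardown for the no-HWS path: destroy every remaining user-mode
 * queue of the process, then unregister the process from the DQM.
 */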
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	while (!list_empty(&qpd->queues_list)) {
		struct mqd_manager *mqd_mgr;
		int ret;

		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
		dqm_unlock(dqm);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
		dqm_lock(dqm);
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

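/*
 * Copy the control stack of an inactive, CWSR-enabled compute queue to user
 * space. The copy itself runs outside the DQM lock.
 */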
static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;

	dqm_lock(dqm);

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled ||
	    !mqd_mgr->get_wave_state) {
		dqm_unlock(dqm);
		return -EINVAL;
	}

	dqm_unlock(dqm);

	/*
	 * get_wave_state is outside the dqm lock to prevent circular locking
	 * and the queue should be protected against destruction by the process
	 * lock.
	 */
	return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);
}

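/*
 * Queue checkpoint helpers (used together with the restore path in
 * create_queue_cpsch()): report how much space an MQD and control stack
 * need, and copy both out of an inactive, CWSR-enabled queue.
 */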
static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
				      const struct queue *q,
				      u32 *mqd_size,
				      u32 *ctl_stack_size)
{
	struct mqd_manager *mqd_mgr;
	enum KFD_MQD_TYPE mqd_type =
			get_mqd_type_from_queue_type(q->properties.type);

	dqm_lock(dqm);
	mqd_mgr = dqm->mqd_mgrs[mqd_type];
	*mqd_size = mqd_mgr->mqd_size;
	*ctl_stack_size = 0;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
		mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);

	dqm_unlock(dqm);
}

static int checkpoint_mqd(struct device_queue_manager *dqm,
			  const struct queue *q,
			  void *mqd,
			  void *ctl_stack)
{
	struct mqd_manager *mqd_mgr;
	int r = 0;
	enum KFD_MQD_TYPE mqd_type =
			get_mqd_type_from_queue_type(q->properties.type);

	dqm_lock(dqm);

	if (q->properties.is_active || !q->device->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[mqd_type];
	if (!mqd_mgr->checkpoint_mqd) {
		r = -EOPNOTSUPP;
		goto dqm_unlock;
	}

	mqd_mgr->checkpoint_mqd(mqd_mgr, q->mqd, mqd, ctl_stack);

dqm_unlock:
	dqm_unlock(dqm);
	return r;
}

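/*
 * Process teardown for the HWS path: drop the process's kernel and user-mode
 * queues from the scheduler (runlist or MES), unregister the process, reset
 * wavefronts if a preemption timed out, and free the MQDs outside the DQM
 * lock.
 */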
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
	bool found = false;

	retval = 0;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		decrement_queue_count(dqm, qpd, kq->queue);
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			deallocate_sdma_queue(dqm, q);
		else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
			deallocate_sdma_queue(dqm, q);

		if (q->properties.is_active) {
			decrement_queue_count(dqm, qpd, q);

			if (dqm->dev->shared_resources.enable_mes) {
				retval = remove_queue_mes(dqm, q, qpd);
				if (retval)
					pr_err("Failed to remove queue %d\n",
						q->properties.queue_id);
			}
		}

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	if (!dqm->dev->shared_resources.enable_mes)
		retval = execute_queues_cpsch(dqm, filter, 0);

	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* Lastly, free mqd resources.
	 * Do free_mqd() after dqm_unlock to avoid circular locking.
	 */
	while (!list_empty(&qpd->queues_list)) {
		q = list_first_entry(&qpd->queues_list, struct queue, list);
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
		qpd->queue_count--;
		dqm_unlock(dqm);
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
		dqm_lock(dqm);
	}
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int init_mqd_managers(struct device_queue_manager *dqm)
{
	int i, j;
	struct mqd_manager *mqd_mgr;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!mqd_mgr) {
			pr_err("mqd manager [%d] initialization failed\n", i);
			goto out_free;
		}
		dqm->mqd_mgrs[i] = mqd_mgr;
	}

	return 0;

out_free:
	for (j = 0; j < i; j++) {
		kfree(dqm->mqd_mgrs[j]);
		dqm->mqd_mgrs[j] = NULL;
	}

	return -ENOMEM;
}

/* Allocate one HIQ MQD (HWS) and all SDMA MQDs in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
	int retval;
	struct kfd_dev *dev = dqm->dev;
	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
		get_num_all_sdma_engines(dqm) *
		dev->device_info.num_sdma_queues_per_engine +
		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;

	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size,
		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
		(void *)&(mem_obj->cpu_ptr), false);

	return retval;
}

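/*
 * Create and initialize a device queue manager: pick the scheduling policy,
 * wire up the cpsch or nocpsch ops table, select the ASIC-specific callbacks
 * and allocate the MQD managers plus the shared HIQ/SDMA MQD buffer.
 */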
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->adev->asic_type) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		dqm->ops.reset_queues = reset_queues_cpsch;
		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
		dqm->ops.checkpoint_mqd = checkpoint_mqd;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.pre_reset = pre_reset;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		dqm->ops.get_wave_state = get_wave_state;
		dqm->ops.get_queue_checkpoint_info = get_queue_checkpoint_info;
		dqm->ops.checkpoint_mqd = checkpoint_mqd;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->adev->asic_type) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	default:
		if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
			device_queue_manager_init_v11(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 1, 1))
			device_queue_manager_init_v10_navi10(&dqm->asic_ops);
		else if (KFD_GC_VERSION(dev) >= IP_VERSION(9, 0, 1))
			device_queue_manager_init_v9(&dqm->asic_ops);
		else {
			WARN(1, "Unexpected ASIC family %u",
			     dev->adev->asic_type);
			goto out_free;
		}
	}

	if (init_mqd_managers(dqm))
		goto out_free;

	if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd trunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->adev, mqd->gtt_mem);
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}

int kfd_dqm_evict_pasid(struct device_queue_manager *dqm, u32 pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	amdgpu_amdkfd_gpu_reset(dqm->dev->adev);
}

#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

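/*
 * debugfs dump of the hardware queue descriptors: the HIQ, every CP queue
 * enabled in cp_queue_bitmap, and all SDMA RLC queues.
 */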
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	if (!dqm->sched_running) {
		seq_puts(m, " Device is stopped\n");
		return 0;
	}

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev,
					KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
					&dump, &n_regs);
	if (!r) {
		seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
			   KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
			   KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
			   KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.cp_queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->adev, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info.num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->adev, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

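/*
 * debugfs helper that triggers an HWS hang through the packet manager and
 * then re-runs the scheduling path, to exercise the driver's hang handling.
 */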
int dqm_debugfs_hang_hws(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	r = pm_debugfs_hang_hws(&dqm->packet_mgr);
	if (r) {
		dqm_unlock(dqm);
		return r;
	}
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif