drm/amdkfd: Add some eviction debugging code
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);
static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines;
}

static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
{
	return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

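/*
 * active_queue_count tracks every active queue regardless of type, while
 * active_cp_queue_count only tracks CP-style queues (COMPUTE and DIQ), so
 * SDMA and XGMI SDMA queues affect the former but not the latter.
 */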
static void increment_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
{
	dqm->active_queue_count++;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count++;
}

static void decrement_queue_count(struct device_queue_manager *dqm,
			enum kfd_queue_type type)
{
	dqm->active_queue_count--;
	if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
		dqm->active_cp_queue_count--;
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for a SDMA engine is 512.
		 */
		uint32_t *idx_offset =
			dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
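		/*
		 * Worked example with hypothetical values: assuming
		 * idx_offset[engine] == 0x200 and
		 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET == 512, sdma_queue_id
		 * 0, 1, 2 and 3 map to doorbell ids 0x200, 0x200 + 512,
		 * 0x201 and 0x201 + 512: even queue ids land in the low
		 * bank and odd ones in the mirrored bank 512 entries away.
		 */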
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
						  q->doorbell_id);

	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		pr_err("no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
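	/*
	 * qpd->evicted is the per-process eviction reference count maintained
	 * by evict_process_queues_*()/restore_process_queues_*(), so any
	 * non-zero value means the new queue must start out evicted as well.
	 */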

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		increment_queue_count(dqm, q->properties.type);

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
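			/*
			 * allocated_queues[pipe] is a bitmask of free HQD
			 * slots on that pipe: ffs() picks the lowest free
			 * slot, clearing the bit marks it busy, and
			 * deallocate_hqd() sets it again on release.
			 */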
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
	else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	if (!retval)
		pdd->sdma_past_activity_counter += sdma_val;
	dqm_unlock(dqm);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		increment_queue_count(dqm, q->properties.type);
	else if (!q->properties.is_active && prev_active)
		decrement_queue_count(dqm, q->properties.type);

	if (q->gws && !q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count++;
			pdd->qpd.mapped_gws_queue = true;
		}
		q->properties.is_gws = true;
	} else if (!q->gws && q->properties.is_gws) {
		if (q->properties.is_active) {
			dqm->gws_queue_count--;
			pdd->qpd.mapped_gws_queue = false;
		}
		q->properties.is_gws = false;
	}

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}

		if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
			continue;

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}

out:
	dqm_unlock(dqm);
	return ret;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		decrement_queue_count(dqm, q->properties.type);
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
		if (q->properties.is_gws) {
			dqm->gws_queue_count++;
			qpd->mapped_gws_queue = true;
		}

		if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
			continue;

		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID 0x%x queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		increment_queue_count(dqm, q->properties.type);
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.cp_queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));

	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
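	/*
	 * The two bitmaps above start with the n least significant bits set
	 * (~0ULL >> (64 - n)), one free bit per SDMA queue;
	 * allocate_sdma_queue() clears a bit when a queue is handed out and
	 * deallocate_sdma_queue() sets it again on release.
	 */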

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	pr_info("SW scheduler is used");
	init_interrupts(dqm);

	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		return pm_init(&dqm->packets, dqm);
	dqm->sched_running = true;

	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
		pm_uninit(&dqm->packets, false);
	dqm->sched_running = false;

	return 0;
}

static void pre_reset(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	dqm->is_resetting = true;
	dqm_unlock(dqm);
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (dqm->sdma_bitmap == 0) {
			pr_err("No more SDMA queue to allocate\n");
			return -ENOMEM;
		}

		bit = __ffs64(dqm->sdma_bitmap);
		dqm->sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		q->properties.sdma_engine_id = q->sdma_id %
				get_num_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_sdma_engines(dqm);
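		/*
		 * Queues are striped round-robin across the PCIe-optimized
		 * engines: sdma_id % num_engines picks the engine and
		 * sdma_id / num_engines the queue slot on it. With two
		 * engines, for example, sdma_id 0..3 map to (engine 0,
		 * queue 0), (engine 1, queue 0), (engine 0, queue 1) and
		 * (engine 1, queue 1).
		 */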
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (dqm->xgmi_sdma_bitmap == 0) {
			pr_err("No more XGMI SDMA queue to allocate\n");
			return -ENOMEM;
		}
		bit = __ffs64(dqm->xgmi_sdma_bitmap);
		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs. The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_xgmi_sdma_engines(dqm);
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= 1ull
			<< amdgpu_queue_mask_bit_to_set_resource_bit(
				(struct amdgpu_device *)dqm->dev->kgd, i);
	}
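	/*
	 * res.queue_mask has room for 64 queues: the loop above only hands
	 * first-MEC queues to the HWS, and the WARN_ON fires if amdgpu ever
	 * enables a CP queue with an index of 64 or higher.
	 */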
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->active_cp_queue_count = 0;
	dqm->gws_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when driver try to start the hw scheduler */
	dqm->is_hws_hang = false;
	dqm->is_resetting = false;
	dqm->sched_running = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	bool hanging;

	dqm_lock(dqm);
	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;
	dqm_unlock(dqm);

	pm_release_ib(&dqm->packets);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets, hanging);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	increment_queue_count(dqm, kq->queue->properties.type);
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	decrement_queue_count(dqm, kq->queue->properties.type);
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.is_active) {
		increment_queue_count(dqm, q->properties.type);

		execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

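	/*
	 * Busy-wait (yielding with schedule()) until the GPU writes
	 * fence_value to *fence_addr, giving up after timeout_ms. If
	 * halt_if_hws_hang is set, the thread is parked here instead so the
	 * CP state stays intact for firmware debugging.
	 */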
	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (!dqm->sched_running)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->sched_running)
		return 0;
	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
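	/*
	 * Fence protocol: the fence is reset to KFD_FENCE_INIT, then the
	 * query-status packet asks the CP to write KFD_FENCE_COMPLETED once
	 * the preceding unmap packets have been processed; the wait below
	 * polls for that value and flags a HWS hang if it never arrives.
	 */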
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				queue_preemption_timeout_ms);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		/* It's possible we're detecting a HWS hang in the
		 * middle of a GPU reset. No need to schedule another
		 * reset in this case.
		 */
		if (!dqm->is_resetting)
			schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval)
		return retval;

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	uint64_t sdma_val = 0;
	struct kfd_process_device *pdd = qpd_to_pdd(qpd);

	/* Get the SDMA queue stats */
	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = read_sdma_queue_counter((uint64_t __user *)q->properties.read_ptr,
							&sdma_val);
		if (retval)
			pr_err("Failed to read SDMA queue counter for queue: %d\n",
				q->properties.queue_id);
	}

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error: we currently don't allow destroying a queue
		 * of a debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if ((q->properties.type == KFD_QUEUE_TYPE_SDMA) ||
	    (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		deallocate_sdma_queue(dqm, q);
		pdd->sdma_past_activity_counter += sdma_val;
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		decrement_queue_count(dqm, q->properties.type);
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
		if (q->properties.is_gws) {
			dqm->gws_queue_count--;
			qpd->mapped_gws_queue = false;
		}
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 * SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;
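		/*
		 * Worked example with hypothetical values: base =
		 * 0x100000000 and alternate_aperture_size = 0x10000 give
		 * limit = 0x10000ffff; both pass the fixed-bits checks below
		 * and are programmed as sh_mem_ape1_base = 0x10000 and
		 * sh_mem_ape1_limit = 0x10000 after the >> 16 conversion.
		 */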
1561
ab7c1648
KR
1562 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1563 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1564 retval = false;
64c7f8cf 1565 goto out;
ab7c1648 1566 }
64c7f8cf
BG
1567
1568 qpd->sh_mem_ape1_base = base >> 16;
1569 qpd->sh_mem_ape1_limit = limit >> 16;
1570 }
1571
bfd5e378 1572 retval = dqm->asic_ops.set_cache_memory_policy(
a22fc854
BG
1573 dqm,
1574 qpd,
1575 default_policy,
1576 alternate_policy,
1577 alternate_aperture_base,
1578 alternate_aperture_size);
64c7f8cf 1579
d146c5a7 1580 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
64c7f8cf
BG
1581 program_sh_mem_settings(dqm, qpd);
1582
79775b62 1583 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
64c7f8cf
BG
1584 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1585 qpd->sh_mem_ape1_limit);
1586
64c7f8cf 1587out:
efeaed4d 1588 dqm_unlock(dqm);
ab7c1648 1589 return retval;
64c7f8cf
BG
1590}
1591
d7b9bd22
FK
1592static int set_trap_handler(struct device_queue_manager *dqm,
1593 struct qcm_process_device *qpd,
1594 uint64_t tba_addr,
1595 uint64_t tma_addr)
1596{
1597 uint64_t *tma;
1598
1599 if (dqm->dev->cwsr_enabled) {
1600 /* Jump from CWSR trap handler to user trap */
1601 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1602 tma[0] = tba_addr;
1603 tma[1] = tma_addr;
1604 } else {
1605 qpd->tba_addr = tba_addr;
1606 qpd->tma_addr = tma_addr;
1607 }
1608
1609 return 0;
1610}
1611
9fd3f1bf
FK
1612static int process_termination_nocpsch(struct device_queue_manager *dqm,
1613 struct qcm_process_device *qpd)
1614{
1615 struct queue *q, *next;
1616 struct device_process_node *cur, *next_dpn;
1617 int retval = 0;
32cce8bc 1618 bool found = false;
9fd3f1bf 1619
efeaed4d 1620 dqm_lock(dqm);
9fd3f1bf
FK
1621
1622 /* Clear all user mode queues */
1623 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1624 int ret;
1625
1626 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1627 if (ret)
1628 retval = ret;
1629 }
1630
1631 /* Unregister process */
1632 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1633 if (qpd == cur->qpd) {
1634 list_del(&cur->list);
1635 kfree(cur);
1636 dqm->processes_count--;
32cce8bc 1637 found = true;
9fd3f1bf
FK
1638 break;
1639 }
1640 }
1641
efeaed4d 1642 dqm_unlock(dqm);
32cce8bc
FK
1643
1644 /* Outside the DQM lock because under the DQM lock we can't do
1645 * reclaim or take other locks that others hold while reclaiming.
1646 */
1647 if (found)
1648 kfd_dec_compute_active(dqm->dev);
1649
9fd3f1bf
FK
1650 return retval;
1651}
1652
5df099e8
JC
1653static int get_wave_state(struct device_queue_manager *dqm,
1654 struct queue *q,
1655 void __user *ctl_stack,
1656 u32 *ctl_stack_used_size,
1657 u32 *save_area_used_size)
1658{
4e6c6fc1 1659 struct mqd_manager *mqd_mgr;
5df099e8
JC
1660 int r;
1661
1662 dqm_lock(dqm);
1663
1664 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1665 q->properties.is_active || !q->device->cwsr_enabled) {
1666 r = -EINVAL;
1667 goto dqm_unlock;
1668 }
1669
d7c0b047 1670 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
5df099e8 1671
4e6c6fc1 1672 if (!mqd_mgr->get_wave_state) {
5df099e8
JC
1673 r = -EINVAL;
1674 goto dqm_unlock;
1675 }
1676
4e6c6fc1
YZ
1677 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1678 ctl_stack_used_size, save_area_used_size);
5df099e8
JC
1679
1680dqm_unlock:
1681 dqm_unlock(dqm);
1682 return r;
1683}
9fd3f1bf
FK
1684
1685static int process_termination_cpsch(struct device_queue_manager *dqm,
1686 struct qcm_process_device *qpd)
1687{
1688 int retval;
1689 struct queue *q, *next;
1690 struct kernel_queue *kq, *kq_next;
8d5f3552 1691 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
1692 struct device_process_node *cur, *next_dpn;
1693 enum kfd_unmap_queues_filter filter =
1694 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
32cce8bc 1695 bool found = false;
9fd3f1bf
FK
1696
1697 retval = 0;
1698
efeaed4d 1699 dqm_lock(dqm);
9fd3f1bf
FK
1700
1701 /* Clean all kernel queues */
1702 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1703 list_del(&kq->list);
b42902f4 1704 decrement_queue_count(dqm, kq->queue->properties.type);
9fd3f1bf
FK
1705 qpd->is_debug = false;
1706 dqm->total_queue_count--;
1707 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1708 }
1709
1710 /* Clear all user mode queues */
1711 list_for_each_entry(q, &qpd->queues_list, list) {
c7637c95 1712 if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
1b4670f6 1713 deallocate_sdma_queue(dqm, q);
c7637c95 1714 else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
1b4670f6 1715 deallocate_sdma_queue(dqm, q);
9fd3f1bf 1716
b8020b03 1717 if (q->properties.is_active) {
b42902f4 1718 decrement_queue_count(dqm, q->properties.type);
b8020b03
JG
1719 if (q->properties.is_gws) {
1720 dqm->gws_queue_count--;
1721 qpd->mapped_gws_queue = false;
1722 }
1723 }
9fd3f1bf
FK
1724
1725 dqm->total_queue_count--;
1726 }
1727
1728 /* Unregister process */
1729 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1730 if (qpd == cur->qpd) {
1731 list_del(&cur->list);
1732 kfree(cur);
1733 dqm->processes_count--;
32cce8bc 1734 found = true;
9fd3f1bf
FK
1735 break;
1736 }
1737 }
1738
1739 retval = execute_queues_cpsch(dqm, filter, 0);
73ea648d 1740 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
9fd3f1bf
FK
1741		pr_warn("Resetting wavefronts (cpsch) on dev %p\n", dqm->dev);
1742 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1743 qpd->reset_wavefronts = false;
1744 }
1745
89cd9d23
PY
1746 dqm_unlock(dqm);
1747
32cce8bc
FK
1748 /* Outside the DQM lock because under the DQM lock we can't do
1749 * reclaim or take other locks that others hold while reclaiming.
1750 */
1751 if (found)
1752 kfd_dec_compute_active(dqm->dev);
1753
89cd9d23 1754 /* Lastly, free mqd resources.
8636e53c 1755 * Do free_mqd() after dqm_unlock to avoid circular locking.
89cd9d23 1756 */
9fd3f1bf 1757 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
fdfa090b
OZ
1758 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1759 q->properties.type)];
9fd3f1bf 1760 list_del(&q->list);
bc920fd4 1761 qpd->queue_count--;
8636e53c 1762 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
9fd3f1bf
FK
1763 }
1764
9fd3f1bf
FK
1765 return retval;
1766}
1767
fdfa090b
OZ
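/*
 * Create one MQD manager per MQD type via the ASIC-specific initializer;
 * on failure, free the managers that were already created.
 */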
1768static int init_mqd_managers(struct device_queue_manager *dqm)
1769{
1770 int i, j;
1771 struct mqd_manager *mqd_mgr;
1772
1773 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1774 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1775 if (!mqd_mgr) {
1776 pr_err("mqd manager [%d] initialization failed\n", i);
1777 goto out_free;
1778 }
1779 dqm->mqd_mgrs[i] = mqd_mgr;
1780 }
1781
1782 return 0;
1783
1784out_free:
1785 for (j = 0; j < i; j++) {
1786 kfree(dqm->mqd_mgrs[j]);
1787 dqm->mqd_mgrs[j] = NULL;
1788 }
1789
1790 return -ENOMEM;
1791}
11614c36
OZ
1792
1793/* Allocate one hiq mqd (HWS) and all SDMA mqds in one contiguous chunk */
1794static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1795{
1796 int retval;
1797 struct kfd_dev *dev = dqm->dev;
1798 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1799 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
c7637c95 1800 get_num_all_sdma_engines(dqm) *
11614c36
OZ
1801 dev->device_info->num_sdma_queues_per_engine +
1802 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1803
1804 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1805 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
f2cc50ce 1806 (void *)&(mem_obj->cpu_ptr), false);
11614c36
OZ
1807
1808 return retval;
1809}
1810
64c7f8cf
BG
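/*
 * Create and initialize a device queue manager: pick the scheduling
 * policy (HWS is forced off on Hawaii and Tonga), wire up the ops table
 * for that policy, hook in the ASIC-specific asic_ops, and set up the
 * MQD managers and the HIQ/SDMA MQD buffer.
 */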
1811struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1812{
1813 struct device_queue_manager *dqm;
1814
79775b62 1815 pr_debug("Loading device queue manager\n");
a22fc854 1816
dbf56ab1 1817 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
64c7f8cf
BG
1818 if (!dqm)
1819 return NULL;
1820
d146c5a7
FK
1821 switch (dev->device_info->asic_family) {
1822 /* HWS is not available on Hawaii. */
1823 case CHIP_HAWAII:
1824 /* HWS depends on CWSR for timely dequeue. CWSR is not
1825 * available on Tonga.
1826 *
1827 * FIXME: This argument also applies to Kaveri.
1828 */
1829 case CHIP_TONGA:
1830 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1831 break;
1832 default:
1833 dqm->sched_policy = sched_policy;
1834 break;
1835 }
1836
64c7f8cf 1837 dqm->dev = dev;
d146c5a7 1838 switch (dqm->sched_policy) {
64c7f8cf
BG
1839 case KFD_SCHED_POLICY_HWS:
1840 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1841 /* initialize dqm for cp scheduling */
45c9a5e4
OG
1842 dqm->ops.create_queue = create_queue_cpsch;
1843 dqm->ops.initialize = initialize_cpsch;
1844 dqm->ops.start = start_cpsch;
1845 dqm->ops.stop = stop_cpsch;
09c34e8d 1846 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
1847 dqm->ops.destroy_queue = destroy_queue_cpsch;
1848 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
1849 dqm->ops.register_process = register_process;
1850 dqm->ops.unregister_process = unregister_process;
1851 dqm->ops.uninitialize = uninitialize;
45c9a5e4
OG
1852 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1853 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1854 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
d7b9bd22 1855 dqm->ops.set_trap_handler = set_trap_handler;
9fd3f1bf 1856 dqm->ops.process_termination = process_termination_cpsch;
26103436
FK
1857 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1858 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
5df099e8 1859 dqm->ops.get_wave_state = get_wave_state;
64c7f8cf
BG
1860 break;
1861 case KFD_SCHED_POLICY_NO_HWS:
1862 /* initialize dqm for no cp scheduling */
45c9a5e4
OG
1863 dqm->ops.start = start_nocpsch;
1864 dqm->ops.stop = stop_nocpsch;
09c34e8d 1865 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
1866 dqm->ops.create_queue = create_queue_nocpsch;
1867 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1868 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
1869 dqm->ops.register_process = register_process;
1870 dqm->ops.unregister_process = unregister_process;
45c9a5e4 1871 dqm->ops.initialize = initialize_nocpsch;
58dcd5bf 1872 dqm->ops.uninitialize = uninitialize;
45c9a5e4 1873 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
d7b9bd22 1874 dqm->ops.set_trap_handler = set_trap_handler;
9fd3f1bf 1875 dqm->ops.process_termination = process_termination_nocpsch;
26103436
FK
1876 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1877 dqm->ops.restore_process_queues =
1878 restore_process_queues_nocpsch;
5df099e8 1879 dqm->ops.get_wave_state = get_wave_state;
64c7f8cf
BG
1880 break;
1881 default:
d146c5a7 1882 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
32fa8219 1883 goto out_free;
64c7f8cf
BG
1884 }
1885
a22fc854
BG
1886 switch (dev->device_info->asic_family) {
1887 case CHIP_CARRIZO:
bfd5e378 1888 device_queue_manager_init_vi(&dqm->asic_ops);
300dec95
OG
1889 break;
1890
a22fc854 1891 case CHIP_KAVERI:
bfd5e378 1892 device_queue_manager_init_cik(&dqm->asic_ops);
300dec95 1893 break;
97672cbe
FK
1894
1895 case CHIP_HAWAII:
1896 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1897 break;
1898
1899 case CHIP_TONGA:
1900 case CHIP_FIJI:
1901 case CHIP_POLARIS10:
1902 case CHIP_POLARIS11:
846a44d7 1903 case CHIP_POLARIS12:
ed81cd6e 1904 case CHIP_VEGAM:
97672cbe
FK
1905 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1906 break;
bed4f110
FK
1907
1908 case CHIP_VEGA10:
846a44d7 1909 case CHIP_VEGA12:
22a3a294 1910 case CHIP_VEGA20:
bed4f110 1911 case CHIP_RAVEN:
5a959a89 1912 case CHIP_RENOIR:
49adcf8a 1913 case CHIP_ARCTURUS:
bed4f110
FK
1914 device_queue_manager_init_v9(&dqm->asic_ops);
1915 break;
14328aa5 1916 case CHIP_NAVI10:
0e94b564 1917 case CHIP_NAVI12:
8099ae40 1918 case CHIP_NAVI14:
3a2f0c81 1919 case CHIP_SIENNA_CICHLID:
de89b2e4 1920 case CHIP_NAVY_FLOUNDER:
14328aa5
PC
1921 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1922 break;
e596b903
YZ
1923 default:
1924 WARN(1, "Unexpected ASIC family %u",
1925 dev->device_info->asic_family);
1926 goto out_free;
a22fc854
BG
1927 }
1928
fdfa090b
OZ
1929 if (init_mqd_managers(dqm))
1930 goto out_free;
1931
11614c36
OZ
1932 if (allocate_hiq_sdma_mqd(dqm)) {
1933		pr_err("Failed to allocate hiq sdma mqd chunk buffer\n");
1934 goto out_free;
1935 }
1936
32fa8219
FK
1937 if (!dqm->ops.initialize(dqm))
1938 return dqm;
64c7f8cf 1939
32fa8219
FK
1940out_free:
1941 kfree(dqm);
1942 return NULL;
64c7f8cf
BG
1943}
1944
7fd5a6fb
Y
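/* Free the GTT memory that backs the HIQ and SDMA MQDs. */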
1945static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1946 struct kfd_mem_obj *mqd)
11614c36
OZ
1947{
1948	WARN(!mqd, "No hiq sdma mqd chunk to free");
1949
1950 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1951}
1952
64c7f8cf
BG
1953void device_queue_manager_uninit(struct device_queue_manager *dqm)
1954{
45c9a5e4 1955 dqm->ops.uninitialize(dqm);
11614c36 1956 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
64c7f8cf
BG
1957 kfree(dqm);
1958}
851a645e 1959
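/*
 * Called in response to a GPU VM fault: look up the faulting process by
 * PASID and evict its queues on this device.
 */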
2640c3fa 1960int kfd_process_vm_fault(struct device_queue_manager *dqm,
1961 unsigned int pasid)
1962{
1963 struct kfd_process_device *pdd;
1964 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1965 int ret = 0;
1966
1967 if (!p)
1968 return -EINVAL;
8a491bb3 1969 WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
2640c3fa 1970 pdd = kfd_get_process_device_data(dqm->dev, p);
1971 if (pdd)
1972 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1973 kfd_unref_process(p);
1974
1975 return ret;
1976}
1977
73ea648d
SL
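/* Deferred work that handles a hardware exception by resetting the GPU. */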
1978static void kfd_process_hw_exception(struct work_struct *work)
1979{
1980 struct device_queue_manager *dqm = container_of(work,
1981 struct device_queue_manager, hw_exception_work);
5b87245f 1982 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
73ea648d
SL
1983}
1984
851a645e
FK
1985#if defined(CONFIG_DEBUG_FS)
1986
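/*
 * Print a register dump, packing up to eight values per line and
 * starting a new line (prefixed with the register offset) whenever the
 * register addresses stop being contiguous.
 * Example line (offsets and values are illustrative only):
 *   00003508: 00000000 00000001 00000002 00000003
 */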
1987static void seq_reg_dump(struct seq_file *m,
1988 uint32_t (*dump)[2], uint32_t n_regs)
1989{
1990 uint32_t i, count;
1991
1992 for (i = 0, count = 0; i < n_regs; i++) {
1993 if (count == 0 ||
1994 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1995 seq_printf(m, "%s %08x: %08x",
1996 i ? "\n" : "",
1997 dump[i][0], dump[i][1]);
1998 count = 7;
1999 } else {
2000 seq_printf(m, " %08x", dump[i][1]);
2001 count--;
2002 }
2003 }
2004
2005 seq_puts(m, "\n");
2006}
2007
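/*
 * debugfs: dump the HQD registers of the HIQ, of every KFD-owned CP
 * pipe/queue slot, and of every SDMA RLC queue.
 */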
2008int dqm_debugfs_hqds(struct seq_file *m, void *data)
2009{
2010 struct device_queue_manager *dqm = data;
2011 uint32_t (*dump)[2], n_regs;
2012 int pipe, queue;
2013 int r = 0;
2014
2c99a547
PY
2015 if (!dqm->sched_running) {
2016		seq_puts(m, " Device is stopped\n");
2017
2018 return 0;
2019 }
2020
24f48a42 2021 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
14328aa5
PC
2022 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
2023 &dump, &n_regs);
24f48a42
OZ
2024 if (!r) {
2025 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
14328aa5
PC
2026 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
2027 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
2028 KFD_CIK_HIQ_QUEUE);
24f48a42
OZ
2029 seq_reg_dump(m, dump, n_regs);
2030
2031 kfree(dump);
2032 }
2033
851a645e
FK
2034 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
2035 int pipe_offset = pipe * get_queues_per_pipe(dqm);
2036
2037 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
2038 if (!test_bit(pipe_offset + queue,
e6945304 2039 dqm->dev->shared_resources.cp_queue_bitmap))
851a645e
FK
2040 continue;
2041
2042 r = dqm->dev->kfd2kgd->hqd_dump(
2043 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2044 if (r)
2045 break;
2046
2047 seq_printf(m, " CP Pipe %d, Queue %d\n",
2048 pipe, queue);
2049 seq_reg_dump(m, dump, n_regs);
2050
2051 kfree(dump);
2052 }
2053 }
2054
c7637c95 2055 for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
d5094189
SL
2056 for (queue = 0;
2057 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2058 queue++) {
851a645e
FK
2059 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2060 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2061 if (r)
2062 break;
2063
2064 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2065 pipe, queue);
2066 seq_reg_dump(m, dump, n_regs);
2067
2068 kfree(dump);
2069 }
2070 }
2071
2072 return r;
2073}
2074
a29ec470
SL
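/*
 * debugfs helper: mark the runlist as active and re-execute all queues
 * so that a fresh runlist is built and submitted.
 */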
2075int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2076{
2077 int r = 0;
2078
2079 dqm_lock(dqm);
2080 dqm->active_runlist = true;
2081 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2082 dqm_unlock(dqm);
2083
2084 return r;
2085}
2086
851a645e 2087#endif