drm/amdkfd: Avoid ambiguity by indicating it's cp queue
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

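/*
 * The device queue manager (DQM) tracks all user mode compute and SDMA
 * queues of a device. Depending on the scheduling policy it either hands
 * the queues to the CP hardware scheduler as a runlist (the *_cpsch
 * functions) or programs the hardware queue descriptors directly from the
 * driver (the *_nocpsch functions).
 */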
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
				  unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);
static void kfd_process_hw_exception(struct work_struct *work);

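/*
 * An MQD (memory queue descriptor) holds a queue's complete hardware state
 * in memory so that it can be loaded into and saved from a hardware queue
 * slot. SDMA queues use their own MQD format; all other queue types are
 * handled by the CP MQD manager.
 */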
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.cp_queue_bitmap))
			return true;
	return false;
}

unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbell, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
		uint32_t *idx_offset =
				dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
						  q->doorbell_id);

	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int allocated_vmid = -1, i;

	for (i = dqm->dev->vm_info.first_vmid_kfd;
			i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
		if (!dqm->vmid_pasid[i]) {
			allocated_vmid = i;
			break;
		}
	}

	if (allocated_vmid < 0) {
		pr_err("no more vmid to allocate\n");
		return -ENOSPC;
	}

	pr_debug("vmid allocated: %d\n", allocated_vmid);

	dqm->vmid_pasid[allocated_vmid] = q->process->pasid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);

	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
				qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
	dqm->vmid_pasid[qpd->vmid] = 0;

	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	print_queue(q);

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (q->properties.is_active) {
		if (!dqm->sched_running) {
			WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
			goto add_queue_to_list;
		}

		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

add_queue_to_list:
	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->active_queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}

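/*
 * dqm->allocated_queues[pipe] is a bitmap of free HQD slots on that pipe
 * (a set bit means the slot is available). allocate_hqd() scans the pipes
 * round-robin, starting at next_pipe_to_allocate, and claims the first
 * free slot it finds.
 */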
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm->xgmi_sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	if (!dqm->sched_running) {
		WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
		return 0;
	}

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->active_queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {

		if (!dqm->sched_running) {
			WARN_ONCE(1, "Update non-HWS queue while stopped\n");
			goto out_unlock;
		}

		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->active_queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->active_queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->active_queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

595 struct qcm_process_device *qpd)
596{
597 struct queue *q;
8d5f3552 598 struct mqd_manager *mqd_mgr;
26103436 599 struct kfd_process_device *pdd;
bb2d2128 600 int retval, ret = 0;
26103436 601
efeaed4d 602 dqm_lock(dqm);
26103436
FK
603 if (qpd->evicted++ > 0) /* already evicted, do nothing */
604 goto out;
605
606 pdd = qpd_to_pdd(qpd);
6027b1bf 607 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
608 pdd->process->pasid);
609
bb2d2128
FK
610 /* Mark all queues as evicted. Deactivate all active queues on
611 * the qpd.
612 */
26103436 613 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 614 q->properties.is_evicted = true;
26103436
FK
615 if (!q->properties.is_active)
616 continue;
bb2d2128 617
fdfa090b
OZ
618 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
619 q->properties.type)];
26103436 620 q->properties.is_active = false;
81b820b3 621 dqm->active_queue_count--;
2c99a547
PY
622
623 if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
624 continue;
625
8d5f3552 626 retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
26103436
FK
627 KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
628 KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
bb2d2128
FK
629 if (retval && !ret)
630 /* Return the first error, but keep going to
631 * maintain a consistent eviction state
632 */
633 ret = retval;
26103436
FK
634 }
635
636out:
efeaed4d 637 dqm_unlock(dqm);
bb2d2128 638 return ret;
26103436
FK
639}
640
641static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
642 struct qcm_process_device *qpd)
643{
644 struct queue *q;
645 struct kfd_process_device *pdd;
646 int retval = 0;
647
efeaed4d 648 dqm_lock(dqm);
26103436
FK
649 if (qpd->evicted++ > 0) /* already evicted, do nothing */
650 goto out;
651
652 pdd = qpd_to_pdd(qpd);
6027b1bf 653 pr_info_ratelimited("Evicting PASID 0x%x queues\n",
26103436
FK
654 pdd->process->pasid);
655
bb2d2128
FK
656 /* Mark all queues as evicted. Deactivate all active queues on
657 * the qpd.
658 */
26103436 659 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128 660 q->properties.is_evicted = true;
26103436
FK
661 if (!q->properties.is_active)
662 continue;
bb2d2128 663
26103436 664 q->properties.is_active = false;
81b820b3 665 dqm->active_queue_count--;
26103436
FK
666 }
667 retval = execute_queues_cpsch(dqm,
668 qpd->is_debug ?
669 KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
670 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
671
672out:
efeaed4d 673 dqm_unlock(dqm);
26103436
FK
674 return retval;
675}
676
677static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
678 struct qcm_process_device *qpd)
679{
1b19aa5a 680 struct mm_struct *mm = NULL;
26103436 681 struct queue *q;
8d5f3552 682 struct mqd_manager *mqd_mgr;
26103436 683 struct kfd_process_device *pdd;
e715c6d0 684 uint64_t pd_base;
bb2d2128 685 int retval, ret = 0;
26103436
FK
686
687 pdd = qpd_to_pdd(qpd);
688 /* Retrieve PD base */
5b87245f 689 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
26103436 690
efeaed4d 691 dqm_lock(dqm);
26103436
FK
692 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
693 goto out;
694 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
695 qpd->evicted--;
696 goto out;
697 }
698
6027b1bf 699 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
700 pdd->process->pasid);
701
702 /* Update PD Base in QPD */
703 qpd->page_table_base = pd_base;
e715c6d0 704 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
705
706 if (!list_empty(&qpd->queues_list)) {
707 dqm->dev->kfd2kgd->set_vm_context_page_table_base(
708 dqm->dev->kgd,
709 qpd->vmid,
710 qpd->page_table_base);
711 kfd_flush_tlb(pdd);
712 }
713
1b19aa5a
FK
714 /* Take a safe reference to the mm_struct, which may otherwise
715 * disappear even while the kfd_process is still referenced.
716 */
717 mm = get_task_mm(pdd->process->lead_thread);
718 if (!mm) {
bb2d2128 719 ret = -EFAULT;
1b19aa5a
FK
720 goto out;
721 }
722
bb2d2128
FK
723 /* Remove the eviction flags. Activate queues that are not
724 * inactive for other reasons.
725 */
26103436 726 list_for_each_entry(q, &qpd->queues_list, list) {
bb2d2128
FK
727 q->properties.is_evicted = false;
728 if (!QUEUE_IS_ACTIVE(q->properties))
26103436 729 continue;
bb2d2128 730
fdfa090b
OZ
731 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
732 q->properties.type)];
26103436 733 q->properties.is_active = true;
81b820b3 734 dqm->active_queue_count++;
2c99a547
PY
735
736 if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
737 continue;
738
8d5f3552 739 retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
1b19aa5a 740 q->queue, &q->properties, mm);
bb2d2128
FK
741 if (retval && !ret)
742 /* Return the first error, but keep going to
743 * maintain a consistent eviction state
744 */
745 ret = retval;
26103436
FK
746 }
747 qpd->evicted = 0;
748out:
1b19aa5a
FK
749 if (mm)
750 mmput(mm);
efeaed4d 751 dqm_unlock(dqm);
bb2d2128 752 return ret;
26103436
FK
753}
754
755static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
756 struct qcm_process_device *qpd)
757{
758 struct queue *q;
759 struct kfd_process_device *pdd;
e715c6d0 760 uint64_t pd_base;
26103436
FK
761 int retval = 0;
762
763 pdd = qpd_to_pdd(qpd);
764 /* Retrieve PD base */
5b87245f 765 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
26103436 766
efeaed4d 767 dqm_lock(dqm);
26103436
FK
768 if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
769 goto out;
770 if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
771 qpd->evicted--;
772 goto out;
773 }
774
6027b1bf 775 pr_info_ratelimited("Restoring PASID 0x%x queues\n",
26103436
FK
776 pdd->process->pasid);
777
778 /* Update PD Base in QPD */
779 qpd->page_table_base = pd_base;
e715c6d0 780 pr_debug("Updated PD address to 0x%llx\n", pd_base);
26103436
FK
781
782 /* activate all active queues on the qpd */
783 list_for_each_entry(q, &qpd->queues_list, list) {
26103436 784 q->properties.is_evicted = false;
bb2d2128
FK
785 if (!QUEUE_IS_ACTIVE(q->properties))
786 continue;
787
26103436 788 q->properties.is_active = true;
81b820b3 789 dqm->active_queue_count++;
26103436
FK
790 }
791 retval = execute_queues_cpsch(dqm,
792 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
bb2d2128 793 qpd->evicted = 0;
26103436 794out:
efeaed4d 795 dqm_unlock(dqm);
26103436
FK
796 return retval;
797}
798
58dcd5bf 799static int register_process(struct device_queue_manager *dqm,
64c7f8cf
BG
800 struct qcm_process_device *qpd)
801{
802 struct device_process_node *n;
403575c4 803 struct kfd_process_device *pdd;
e715c6d0 804 uint64_t pd_base;
a22fc854 805 int retval;
64c7f8cf 806
dbf56ab1 807 n = kzalloc(sizeof(*n), GFP_KERNEL);
64c7f8cf
BG
808 if (!n)
809 return -ENOMEM;
810
811 n->qpd = qpd;
812
403575c4
FK
813 pdd = qpd_to_pdd(qpd);
814 /* Retrieve PD base */
5b87245f 815 pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
403575c4 816
efeaed4d 817 dqm_lock(dqm);
64c7f8cf
BG
818 list_add(&n->list, &dqm->queues);
819
403575c4
FK
820 /* Update PD Base in QPD */
821 qpd->page_table_base = pd_base;
e715c6d0 822 pr_debug("Updated PD address to 0x%llx\n", pd_base);
403575c4 823
bfd5e378 824 retval = dqm->asic_ops.update_qpd(dqm, qpd);
a22fc854 825
f756e631 826 dqm->processes_count++;
64c7f8cf 827
efeaed4d 828 dqm_unlock(dqm);
64c7f8cf 829
32cce8bc
FK
830 /* Outside the DQM lock because under the DQM lock we can't do
831 * reclaim or take other locks that others hold while reclaiming.
832 */
833 kfd_inc_compute_active(dqm->dev);
834
a22fc854 835 return retval;
64c7f8cf
BG
836}
837
58dcd5bf 838static int unregister_process(struct device_queue_manager *dqm,
64c7f8cf
BG
839 struct qcm_process_device *qpd)
840{
841 int retval;
842 struct device_process_node *cur, *next;
843
1e5ec956
OG
844 pr_debug("qpd->queues_list is %s\n",
845 list_empty(&qpd->queues_list) ? "empty" : "not empty");
64c7f8cf
BG
846
847 retval = 0;
efeaed4d 848 dqm_lock(dqm);
64c7f8cf
BG
849
850 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
851 if (qpd == cur->qpd) {
852 list_del(&cur->list);
f5d896bb 853 kfree(cur);
f756e631 854 dqm->processes_count--;
64c7f8cf
BG
855 goto out;
856 }
857 }
858 /* qpd not found in dqm list */
859 retval = 1;
860out:
efeaed4d 861 dqm_unlock(dqm);
32cce8bc
FK
862
863 /* Outside the DQM lock because under the DQM lock we can't do
864 * reclaim or take other locks that others hold while reclaiming.
865 */
866 if (!retval)
867 kfd_dec_compute_active(dqm->dev);
868
64c7f8cf
BG
869 return retval;
870}
871
872static int
873set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
874 unsigned int vmid)
875{
cea405b1 876 return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
deb99d7c 877 dqm->dev->kgd, pasid, vmid);
64c7f8cf
BG
878}
879
2249d558
AL
880static void init_interrupts(struct device_queue_manager *dqm)
881{
882 unsigned int i;
883
d0b63bb3
AR
884 for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
885 if (is_pipe_enabled(dqm, 0, i))
886 dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
2249d558
AL
887}
888
64c7f8cf
BG
889static int initialize_nocpsch(struct device_queue_manager *dqm)
890{
86194cf8 891 int pipe, queue;
64c7f8cf 892
79775b62 893 pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
64c7f8cf 894
ab7c1648
KR
895 dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
896 sizeof(unsigned int), GFP_KERNEL);
897 if (!dqm->allocated_queues)
898 return -ENOMEM;
899
efeaed4d 900 mutex_init(&dqm->lock_hidden);
64c7f8cf 901 INIT_LIST_HEAD(&dqm->queues);
81b820b3 902 dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
bcea3081 903 dqm->sdma_queue_count = 0;
1b4670f6 904 dqm->xgmi_sdma_queue_count = 0;
64c7f8cf 905
86194cf8
FK
906 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
907 int pipe_offset = pipe * get_queues_per_pipe(dqm);
908
909 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
910 if (test_bit(pipe_offset + queue,
e6945304 911 dqm->dev->shared_resources.cp_queue_bitmap))
86194cf8
FK
912 dqm->allocated_queues[pipe] |= 1 << queue;
913 }
64c7f8cf 914
d9d4623c
YZ
915 memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
916
35cdc81b
OZ
917 dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
918 dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
64c7f8cf 919
64c7f8cf
BG
920 return 0;
921}
922
58dcd5bf 923static void uninitialize(struct device_queue_manager *dqm)
64c7f8cf 924{
6f9d54fd
OG
925 int i;
926
81b820b3 927 WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
64c7f8cf
BG
928
929 kfree(dqm->allocated_queues);
6f9d54fd 930 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
8d5f3552 931 kfree(dqm->mqd_mgrs[i]);
efeaed4d 932 mutex_destroy(&dqm->lock_hidden);
64c7f8cf
BG
933}
934
935static int start_nocpsch(struct device_queue_manager *dqm)
936{
52055039 937 pr_info("SW scheduler is used");
2249d558 938 init_interrupts(dqm);
424b5442
YZ
939
940 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
941 return pm_init(&dqm->packets, dqm);
2c99a547
PY
942 dqm->sched_running = true;
943
424b5442 944 return 0;
64c7f8cf
BG
945}
946
947static int stop_nocpsch(struct device_queue_manager *dqm)
948{
424b5442 949 if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
c2a77fde 950 pm_uninit(&dqm->packets, false);
2c99a547
PY
951 dqm->sched_running = false;
952
64c7f8cf
BG
953 return 0;
954}
955
09c34e8d
FK
956static void pre_reset(struct device_queue_manager *dqm)
957{
958 dqm_lock(dqm);
959 dqm->is_resetting = true;
960 dqm_unlock(dqm);
961}
962
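/*
 * SDMA queues are allocated out of per-device bitmaps (sdma_bitmap for
 * PCIe-optimized engines, xgmi_sdma_bitmap for XGMI-optimized ones). The
 * bit index is the device-wide SDMA queue id; the engine and per-engine
 * queue id are derived from it below.
 */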
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (dqm->sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->sdma_bitmap);
		dqm->sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		q->properties.sdma_engine_id = q->sdma_id %
				get_num_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_sdma_engines(dqm);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (dqm->xgmi_sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->xgmi_sdma_bitmap);
		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		/* sdma_engine_id is sdma id including
		 * both PCIe-optimized SDMAs and XGMI-
		 * optimized SDMAs. The calculation below
		 * assumes the first N engines are always
		 * PCIe-optimized ones
		 */
		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_xgmi_sdma_engines(dqm);
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
	}
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
		"vmid mask: 0x%8X\n"
		"queue mask: 0x%8llX\n",
		res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->active_queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->xgmi_sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	dqm->is_resetting = false;
	dqm->sched_running = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	bool hanging;

	dqm_lock(dqm);
	if (!dqm->is_hws_hang)
		unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	hanging = dqm->is_hws_hang || dqm->is_resetting;
	dqm->sched_running = false;
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets, hanging);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->active_queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->active_queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;

	if (q->properties.is_active) {
		dqm->active_queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
	int i, retval = 0;

	for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
		dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
		if (retval)
			return retval;
	}
	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (!dqm->sched_running)
		return 0;
	if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
		return 0;
	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

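/*
 * Unmapping is a handshake with the CP firmware: after sending the unmap
 * packet, the driver writes KFD_FENCE_INIT to the fence location, asks CP
 * to write KFD_FENCE_COMPLETED there once preemption is done, and then
 * polls it for up to queue_preemption_timeout_ms. A timeout is treated as
 * a HWS hang.
 */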
ac30c783 1338/* dqm->lock mutex has to be locked before calling this function */
7da2bcf8 1339static int unmap_queues_cpsch(struct device_queue_manager *dqm,
4465f466
YZ
1340 enum kfd_unmap_queues_filter filter,
1341 uint32_t filter_param)
64c7f8cf 1342{
9fd3f1bf 1343 int retval = 0;
64c7f8cf 1344
2c99a547
PY
1345 if (!dqm->sched_running)
1346 return 0;
73ea648d
SL
1347 if (dqm->is_hws_hang)
1348 return -EIO;
991ca8ee 1349 if (!dqm->active_runlist)
ac30c783 1350 return retval;
bcea3081 1351
1b4670f6
OZ
1352 pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
1353 dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
bcea3081 1354
1b4670f6 1355 if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
065e4bdf 1356 unmap_sdma_queues(dqm);
bcea3081 1357
64c7f8cf 1358 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
4465f466 1359 filter, filter_param, false, 0);
4eacc26b 1360 if (retval)
ac30c783 1361 return retval;
64c7f8cf
BG
1362
1363 *dqm->fence_addr = KFD_FENCE_INIT;
1364 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
1365 KFD_FENCE_COMPLETED);
1366 /* should be timed out */
c3447e81 1367 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
14328aa5 1368 queue_preemption_timeout_ms);
09c34e8d
FK
1369 if (retval) {
1370 pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
1371 dqm->is_hws_hang = true;
1372 /* It's possible we're detecting a HWS hang in the
1373 * middle of a GPU reset. No need to schedule another
1374 * reset in this case.
1375 */
1376 if (!dqm->is_resetting)
1377 schedule_work(&dqm->hw_exception_work);
ac30c783 1378 return retval;
09c34e8d 1379 }
9fd3f1bf 1380
64c7f8cf
BG
1381 pm_release_ib(&dqm->packets);
1382 dqm->active_runlist = false;
1383
64c7f8cf
BG
1384 return retval;
1385}
1386
ac30c783 1387/* dqm->lock mutex has to be locked before calling this function */
c4744e24
YZ
1388static int execute_queues_cpsch(struct device_queue_manager *dqm,
1389 enum kfd_unmap_queues_filter filter,
1390 uint32_t filter_param)
64c7f8cf
BG
1391{
1392 int retval;
1393
73ea648d
SL
1394 if (dqm->is_hws_hang)
1395 return -EIO;
c4744e24 1396 retval = unmap_queues_cpsch(dqm, filter, filter_param);
09c34e8d 1397 if (retval)
ac30c783 1398 return retval;
64c7f8cf 1399
60a00956 1400 return map_queues_cpsch(dqm);
64c7f8cf
BG
1401}
1402
1403static int destroy_queue_cpsch(struct device_queue_manager *dqm,
1404 struct qcm_process_device *qpd,
1405 struct queue *q)
1406{
1407 int retval;
8d5f3552 1408 struct mqd_manager *mqd_mgr;
992839ad 1409
64c7f8cf
BG
1410 retval = 0;
1411
1412 /* remove queue from list to prevent rescheduling after preemption */
efeaed4d 1413 dqm_lock(dqm);
992839ad
YS
1414
1415 if (qpd->is_debug) {
1416 /*
1417 * error, currently we do not allow to destroy a queue
1418 * of a currently debugged process
1419 */
1420 retval = -EBUSY;
1421 goto failed_try_destroy_debugged_queue;
1422
1423 }
1424
fdfa090b
OZ
1425 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1426 q->properties.type)];
64c7f8cf 1427
ef568db7
FK
1428 deallocate_doorbell(qpd, q);
1429
e139cd2a 1430 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
bcea3081 1431 dqm->sdma_queue_count--;
1b4670f6
OZ
1432 deallocate_sdma_queue(dqm, q);
1433 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1434 dqm->xgmi_sdma_queue_count--;
1435 deallocate_sdma_queue(dqm, q);
e139cd2a 1436 }
bcea3081 1437
64c7f8cf 1438 list_del(&q->list);
bc920fd4 1439 qpd->queue_count--;
40a526dc 1440 if (q->properties.is_active) {
81b820b3 1441 dqm->active_queue_count--;
40a526dc 1442 retval = execute_queues_cpsch(dqm,
9fd3f1bf 1443 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
40a526dc
YZ
1444 if (retval == -ETIME)
1445 qpd->reset_wavefronts = true;
1446 }
64c7f8cf 1447
b8cbab04
OG
1448 /*
1449 * Unconditionally decrement this counter, regardless of the queue's
1450 * type
1451 */
1452 dqm->total_queue_count--;
1453 pr_debug("Total of %d queues are accountable so far\n",
1454 dqm->total_queue_count);
64c7f8cf 1455
efeaed4d 1456 dqm_unlock(dqm);
64c7f8cf 1457
8636e53c
OZ
1458 /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
1459 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
89cd9d23 1460
9e827224 1461 return retval;
64c7f8cf 1462
992839ad
YS
1463failed_try_destroy_debugged_queue:
1464
efeaed4d 1465 dqm_unlock(dqm);
64c7f8cf
BG
1466 return retval;
1467}
1468
1469/*
1470 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
1471 * stay in user mode.
1472 */
1473#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
1474/* APE1 limit is inclusive and 64K aligned. */
1475#define APE1_LIMIT_ALIGNMENT 0xFFFF
1476
1477static bool set_cache_memory_policy(struct device_queue_manager *dqm,
1478 struct qcm_process_device *qpd,
1479 enum cache_policy default_policy,
1480 enum cache_policy alternate_policy,
1481 void __user *alternate_aperture_base,
1482 uint64_t alternate_aperture_size)
1483{
bed4f110
FK
1484 bool retval = true;
1485
1486 if (!dqm->asic_ops.set_cache_memory_policy)
1487 return retval;
64c7f8cf 1488
efeaed4d 1489 dqm_lock(dqm);
64c7f8cf
BG
1490
1491 if (alternate_aperture_size == 0) {
1492 /* base > limit disables APE1 */
1493 qpd->sh_mem_ape1_base = 1;
1494 qpd->sh_mem_ape1_limit = 0;
1495 } else {
1496 /*
1497 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
1498 * SH_MEM_APE1_BASE[31:0], 0x0000 }
1499 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
1500 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
1501 * Verify that the base and size parameters can be
1502 * represented in this format and convert them.
1503 * Additionally restrict APE1 to user-mode addresses.
1504 */
1505
1506 uint64_t base = (uintptr_t)alternate_aperture_base;
1507 uint64_t limit = base + alternate_aperture_size - 1;
1508
ab7c1648
KR
1509 if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
1510 (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
1511 retval = false;
64c7f8cf 1512 goto out;
ab7c1648 1513 }
64c7f8cf
BG
1514
1515 qpd->sh_mem_ape1_base = base >> 16;
1516 qpd->sh_mem_ape1_limit = limit >> 16;
1517 }
1518
bfd5e378 1519 retval = dqm->asic_ops.set_cache_memory_policy(
a22fc854
BG
1520 dqm,
1521 qpd,
1522 default_policy,
1523 alternate_policy,
1524 alternate_aperture_base,
1525 alternate_aperture_size);
64c7f8cf 1526
d146c5a7 1527 if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
64c7f8cf
BG
1528 program_sh_mem_settings(dqm, qpd);
1529
79775b62 1530 pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
64c7f8cf
BG
1531 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
1532 qpd->sh_mem_ape1_limit);
1533
64c7f8cf 1534out:
efeaed4d 1535 dqm_unlock(dqm);
ab7c1648 1536 return retval;
64c7f8cf
BG
1537}
1538
d7b9bd22
FK
1539static int set_trap_handler(struct device_queue_manager *dqm,
1540 struct qcm_process_device *qpd,
1541 uint64_t tba_addr,
1542 uint64_t tma_addr)
1543{
1544 uint64_t *tma;
1545
1546 if (dqm->dev->cwsr_enabled) {
1547 /* Jump from CWSR trap handler to user trap */
1548 tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
1549 tma[0] = tba_addr;
1550 tma[1] = tma_addr;
1551 } else {
1552 qpd->tba_addr = tba_addr;
1553 qpd->tma_addr = tma_addr;
1554 }
1555
1556 return 0;
1557}
1558
9fd3f1bf
FK
1559static int process_termination_nocpsch(struct device_queue_manager *dqm,
1560 struct qcm_process_device *qpd)
1561{
1562 struct queue *q, *next;
1563 struct device_process_node *cur, *next_dpn;
1564 int retval = 0;
32cce8bc 1565 bool found = false;
9fd3f1bf 1566
efeaed4d 1567 dqm_lock(dqm);
9fd3f1bf
FK
1568
1569 /* Clear all user mode queues */
1570 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
1571 int ret;
1572
1573 ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
1574 if (ret)
1575 retval = ret;
1576 }
1577
1578 /* Unregister process */
1579 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1580 if (qpd == cur->qpd) {
1581 list_del(&cur->list);
1582 kfree(cur);
1583 dqm->processes_count--;
32cce8bc 1584 found = true;
9fd3f1bf
FK
1585 break;
1586 }
1587 }
1588
efeaed4d 1589 dqm_unlock(dqm);
32cce8bc
FK
1590
1591 /* Outside the DQM lock because under the DQM lock we can't do
1592 * reclaim or take other locks that others hold while reclaiming.
1593 */
1594 if (found)
1595 kfd_dec_compute_active(dqm->dev);
1596
9fd3f1bf
FK
1597 return retval;
1598}
1599
5df099e8
JC
1600static int get_wave_state(struct device_queue_manager *dqm,
1601 struct queue *q,
1602 void __user *ctl_stack,
1603 u32 *ctl_stack_used_size,
1604 u32 *save_area_used_size)
1605{
4e6c6fc1 1606 struct mqd_manager *mqd_mgr;
5df099e8
JC
1607 int r;
1608
1609 dqm_lock(dqm);
1610
1611 if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
1612 q->properties.is_active || !q->device->cwsr_enabled) {
1613 r = -EINVAL;
1614 goto dqm_unlock;
1615 }
1616
d7c0b047 1617 mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
5df099e8 1618
4e6c6fc1 1619 if (!mqd_mgr->get_wave_state) {
5df099e8
JC
1620 r = -EINVAL;
1621 goto dqm_unlock;
1622 }
1623
4e6c6fc1
YZ
1624 r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
1625 ctl_stack_used_size, save_area_used_size);
5df099e8
JC
1626
1627dqm_unlock:
1628 dqm_unlock(dqm);
1629 return r;
1630}
9fd3f1bf
FK
1631
1632static int process_termination_cpsch(struct device_queue_manager *dqm,
1633 struct qcm_process_device *qpd)
1634{
1635 int retval;
1636 struct queue *q, *next;
1637 struct kernel_queue *kq, *kq_next;
8d5f3552 1638 struct mqd_manager *mqd_mgr;
9fd3f1bf
FK
1639 struct device_process_node *cur, *next_dpn;
1640 enum kfd_unmap_queues_filter filter =
1641 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
32cce8bc 1642 bool found = false;
9fd3f1bf
FK
1643
1644 retval = 0;
1645
efeaed4d 1646 dqm_lock(dqm);
9fd3f1bf
FK
1647
1648 /* Clean all kernel queues */
1649 list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
1650 list_del(&kq->list);
81b820b3 1651 dqm->active_queue_count--;
9fd3f1bf
FK
1652 qpd->is_debug = false;
1653 dqm->total_queue_count--;
1654 filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
1655 }
1656
1657 /* Clear all user mode queues */
1658 list_for_each_entry(q, &qpd->queues_list, list) {
72a01d23 1659 if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
9fd3f1bf 1660 dqm->sdma_queue_count--;
1b4670f6
OZ
1661 deallocate_sdma_queue(dqm, q);
1662 } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
1663 dqm->xgmi_sdma_queue_count--;
1664 deallocate_sdma_queue(dqm, q);
72a01d23 1665 }
9fd3f1bf
FK
1666
1667 if (q->properties.is_active)
81b820b3 1668 dqm->active_queue_count--;
9fd3f1bf
FK
1669
1670 dqm->total_queue_count--;
1671 }
1672
1673 /* Unregister process */
1674 list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
1675 if (qpd == cur->qpd) {
1676 list_del(&cur->list);
1677 kfree(cur);
1678 dqm->processes_count--;
32cce8bc 1679 found = true;
9fd3f1bf
FK
1680 break;
1681 }
1682 }
1683
1684 retval = execute_queues_cpsch(dqm, filter, 0);
73ea648d 1685 if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
9fd3f1bf
FK
1686 pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
1687 dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
1688 qpd->reset_wavefronts = false;
1689 }
1690
89cd9d23
PY
1691 dqm_unlock(dqm);
1692
32cce8bc
FK
1693 /* Outside the DQM lock because under the DQM lock we can't do
1694 * reclaim or take other locks that others hold while reclaiming.
1695 */
1696 if (found)
1697 kfd_dec_compute_active(dqm->dev);
1698
89cd9d23 1699 /* Lastly, free mqd resources.
8636e53c 1700 * Do free_mqd() after dqm_unlock to avoid circular locking.
89cd9d23 1701 */
9fd3f1bf 1702 list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
fdfa090b
OZ
1703 mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
1704 q->properties.type)];
9fd3f1bf 1705 list_del(&q->list);
bc920fd4 1706 qpd->queue_count--;
8636e53c 1707 mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
9fd3f1bf
FK
1708 }
1709
9fd3f1bf
FK
1710 return retval;
1711}
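/*
 * Ordering summary (restating the function above, no new behavior implied):
 *
 *	dqm_lock(dqm);
 *	...drop kernel queues, release SDMA resources, fix up counters,
 *	   unregister the process from dqm->queues...
 *	execute_queues_cpsch(dqm, filter, 0);	// rebuild runlist without this process
 *	dqm_unlock(dqm);
 *	kfd_dec_compute_active(dqm->dev);	// only if the process was registered
 *	mqd_mgr->free_mqd(...);			// per queue, outside the lock,
 *						// since freeing may trigger reclaim
 */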
1712
fdfa090b
OZ
1713static int init_mqd_managers(struct device_queue_manager *dqm)
1714{
1715 int i, j;
1716 struct mqd_manager *mqd_mgr;
1717
1718 for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
1719 mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
1720 if (!mqd_mgr) {
1721 pr_err("mqd manager [%d] initialization failed\n", i);
1722 goto out_free;
1723 }
1724 dqm->mqd_mgrs[i] = mqd_mgr;
1725 }
1726
1727 return 0;
1728
1729out_free:
1730 for (j = 0; j < i; j++) {
1731 kfree(dqm->mqd_mgrs[j]);
1732 dqm->mqd_mgrs[j] = NULL;
1733 }
1734
1735 return -ENOMEM;
1736}
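/*
 * Illustrative only: once init_mqd_managers() has run, the rest of this
 * file looks a queue's MQD manager up by type, e.g. (pattern taken from
 * process_termination_cpsch() above):
 *
 *	struct mqd_manager *mqd_mgr =
 *		dqm->mqd_mgrs[get_mqd_type_from_queue_type(q->properties.type)];
 */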
11614c36
OZ
1737
 1738/* Allocate one hiq mqd (HWS) and all SDMA mqds in one contiguous chunk */
1739static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
1740{
1741 int retval;
1742 struct kfd_dev *dev = dqm->dev;
1743 struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
1744 uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
40a9592a
OZ
1745 (dev->device_info->num_sdma_engines +
1746 dev->device_info->num_xgmi_sdma_engines) *
11614c36
OZ
1747 dev->device_info->num_sdma_queues_per_engine +
1748 dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
1749
1750 retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
1751 &(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
1752 (void *)&(mem_obj->cpu_ptr), true);
1753
1754 return retval;
1755}
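/*
 * Worked example of the size computed above, with made-up numbers: on a
 * hypothetical device with 2 SDMA engines, 0 XGMI SDMA engines, 8 SDMA
 * queues per engine, a 512-byte SDMA MQD and a 512-byte HIQ MQD, the
 * chunk would be 512 * (2 + 0) * 8 + 512 = 8704 bytes. Actual MQD sizes
 * come from the per-ASIC MQD managers.
 */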
1756
64c7f8cf
BG
1757struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1758{
1759 struct device_queue_manager *dqm;
1760
79775b62 1761 pr_debug("Loading device queue manager\n");
a22fc854 1762
dbf56ab1 1763 dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
64c7f8cf
BG
1764 if (!dqm)
1765 return NULL;
1766
d146c5a7
FK
1767 switch (dev->device_info->asic_family) {
1768 /* HWS is not available on Hawaii. */
1769 case CHIP_HAWAII:
1770 /* HWS depends on CWSR for timely dequeue. CWSR is not
1771 * available on Tonga.
1772 *
1773 * FIXME: This argument also applies to Kaveri.
1774 */
1775 case CHIP_TONGA:
1776 dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
1777 break;
1778 default:
1779 dqm->sched_policy = sched_policy;
1780 break;
1781 }
1782
64c7f8cf 1783 dqm->dev = dev;
d146c5a7 1784 switch (dqm->sched_policy) {
64c7f8cf
BG
1785 case KFD_SCHED_POLICY_HWS:
1786 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1787 /* initialize dqm for cp scheduling */
45c9a5e4
OG
1788 dqm->ops.create_queue = create_queue_cpsch;
1789 dqm->ops.initialize = initialize_cpsch;
1790 dqm->ops.start = start_cpsch;
1791 dqm->ops.stop = stop_cpsch;
09c34e8d 1792 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
1793 dqm->ops.destroy_queue = destroy_queue_cpsch;
1794 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
1795 dqm->ops.register_process = register_process;
1796 dqm->ops.unregister_process = unregister_process;
1797 dqm->ops.uninitialize = uninitialize;
45c9a5e4
OG
1798 dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
1799 dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
1800 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
d7b9bd22 1801 dqm->ops.set_trap_handler = set_trap_handler;
9fd3f1bf 1802 dqm->ops.process_termination = process_termination_cpsch;
26103436
FK
1803 dqm->ops.evict_process_queues = evict_process_queues_cpsch;
1804 dqm->ops.restore_process_queues = restore_process_queues_cpsch;
5df099e8 1805 dqm->ops.get_wave_state = get_wave_state;
64c7f8cf
BG
1806 break;
1807 case KFD_SCHED_POLICY_NO_HWS:
1808 /* initialize dqm for no cp scheduling */
45c9a5e4
OG
1809 dqm->ops.start = start_nocpsch;
1810 dqm->ops.stop = stop_nocpsch;
09c34e8d 1811 dqm->ops.pre_reset = pre_reset;
45c9a5e4
OG
1812 dqm->ops.create_queue = create_queue_nocpsch;
1813 dqm->ops.destroy_queue = destroy_queue_nocpsch;
1814 dqm->ops.update_queue = update_queue;
58dcd5bf
YZ
1815 dqm->ops.register_process = register_process;
1816 dqm->ops.unregister_process = unregister_process;
45c9a5e4 1817 dqm->ops.initialize = initialize_nocpsch;
58dcd5bf 1818 dqm->ops.uninitialize = uninitialize;
45c9a5e4 1819 dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
d7b9bd22 1820 dqm->ops.set_trap_handler = set_trap_handler;
9fd3f1bf 1821 dqm->ops.process_termination = process_termination_nocpsch;
26103436
FK
1822 dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
1823 dqm->ops.restore_process_queues =
1824 restore_process_queues_nocpsch;
5df099e8 1825 dqm->ops.get_wave_state = get_wave_state;
64c7f8cf
BG
1826 break;
1827 default:
d146c5a7 1828 pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
32fa8219 1829 goto out_free;
64c7f8cf
BG
1830 }
1831
a22fc854
BG
1832 switch (dev->device_info->asic_family) {
1833 case CHIP_CARRIZO:
bfd5e378 1834 device_queue_manager_init_vi(&dqm->asic_ops);
300dec95
OG
1835 break;
1836
a22fc854 1837 case CHIP_KAVERI:
bfd5e378 1838 device_queue_manager_init_cik(&dqm->asic_ops);
300dec95 1839 break;
97672cbe
FK
1840
1841 case CHIP_HAWAII:
1842 device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
1843 break;
1844
1845 case CHIP_TONGA:
1846 case CHIP_FIJI:
1847 case CHIP_POLARIS10:
1848 case CHIP_POLARIS11:
846a44d7 1849 case CHIP_POLARIS12:
ed81cd6e 1850 case CHIP_VEGAM:
97672cbe
FK
1851 device_queue_manager_init_vi_tonga(&dqm->asic_ops);
1852 break;
bed4f110
FK
1853
1854 case CHIP_VEGA10:
846a44d7 1855 case CHIP_VEGA12:
22a3a294 1856 case CHIP_VEGA20:
bed4f110 1857 case CHIP_RAVEN:
5a959a89 1858 case CHIP_RENOIR:
49adcf8a 1859 case CHIP_ARCTURUS:
bed4f110
FK
1860 device_queue_manager_init_v9(&dqm->asic_ops);
1861 break;
14328aa5 1862 case CHIP_NAVI10:
0e94b564 1863 case CHIP_NAVI12:
8099ae40 1864 case CHIP_NAVI14:
14328aa5
PC
1865 device_queue_manager_init_v10_navi10(&dqm->asic_ops);
1866 break;
e596b903
YZ
1867 default:
1868 WARN(1, "Unexpected ASIC family %u",
1869 dev->device_info->asic_family);
1870 goto out_free;
a22fc854
BG
1871 }
1872
fdfa090b
OZ
1873 if (init_mqd_managers(dqm))
1874 goto out_free;
1875
11614c36
OZ
1876 if (allocate_hiq_sdma_mqd(dqm)) {
 1877 pr_err("Failed to allocate hiq sdma mqd chunk buffer\n");
1878 goto out_free;
1879 }
1880
32fa8219
FK
1881 if (!dqm->ops.initialize(dqm))
1882 return dqm;
64c7f8cf 1883
32fa8219
FK
1884out_free:
1885 kfree(dqm);
1886 return NULL;
64c7f8cf
BG
1887}
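/*
 * Illustrative pairing only (hypothetical caller, not taken from this file):
 *
 *	struct device_queue_manager *dqm = device_queue_manager_init(dev);
 *
 *	if (!dqm)
 *		return -ENOMEM;		// hypothetical error handling
 *	...
 *	device_queue_manager_uninit(dqm);
 */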
1888
7fd5a6fb
Y
1889static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
1890 struct kfd_mem_obj *mqd)
11614c36
OZ
1891{
 1892 WARN(!mqd, "No hiq sdma mqd chunk to free");
1893
1894 amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
1895}
1896
64c7f8cf
BG
1897void device_queue_manager_uninit(struct device_queue_manager *dqm)
1898{
45c9a5e4 1899 dqm->ops.uninitialize(dqm);
11614c36 1900 deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
64c7f8cf
BG
1901 kfree(dqm);
1902}
851a645e 1903
2640c3fa 1904int kfd_process_vm_fault(struct device_queue_manager *dqm,
1905 unsigned int pasid)
1906{
1907 struct kfd_process_device *pdd;
1908 struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
1909 int ret = 0;
1910
1911 if (!p)
1912 return -EINVAL;
1913 pdd = kfd_get_process_device_data(dqm->dev, p);
1914 if (pdd)
1915 ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
1916 kfd_unref_process(p);
1917
1918 return ret;
1919}
1920
73ea648d
SL
1921static void kfd_process_hw_exception(struct work_struct *work)
1922{
1923 struct device_queue_manager *dqm = container_of(work,
1924 struct device_queue_manager, hw_exception_work);
5b87245f 1925 amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
73ea648d
SL
1926}
1927
851a645e
FK
1928#if defined(CONFIG_DEBUG_FS)
1929
1930static void seq_reg_dump(struct seq_file *m,
1931 uint32_t (*dump)[2], uint32_t n_regs)
1932{
1933 uint32_t i, count;
1934
1935 for (i = 0, count = 0; i < n_regs; i++) {
1936 if (count == 0 ||
1937 dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
1938 seq_printf(m, "%s %08x: %08x",
1939 i ? "\n" : "",
1940 dump[i][0], dump[i][1]);
1941 count = 7;
1942 } else {
1943 seq_printf(m, " %08x", dump[i][1]);
1944 count--;
1945 }
1946 }
1947
1948 seq_puts(m, "\n");
1949}
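/*
 * Illustrative output only (register values are made up): for consecutive
 * registers, each row starts with the offset of its first register followed
 * by up to eight values, e.g.
 *
 *	 00001000: 00000001 00000000 deadbeef 00000010 ...
 *
 * A gap in the register offsets starts a new row.
 */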
1950
1951int dqm_debugfs_hqds(struct seq_file *m, void *data)
1952{
1953 struct device_queue_manager *dqm = data;
1954 uint32_t (*dump)[2], n_regs;
1955 int pipe, queue;
1956 int r = 0;
1957
2c99a547
PY
1958 if (!dqm->sched_running) {
 1959 seq_puts(m, " Device is stopped\n");
1960
1961 return 0;
1962 }
1963
24f48a42 1964 r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
14328aa5
PC
1965 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
1966 &dump, &n_regs);
24f48a42
OZ
1967 if (!r) {
1968 seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
14328aa5
PC
1969 KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
1970 KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
1971 KFD_CIK_HIQ_QUEUE);
24f48a42
OZ
1972 seq_reg_dump(m, dump, n_regs);
1973
1974 kfree(dump);
1975 }
1976
851a645e
FK
1977 for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
1978 int pipe_offset = pipe * get_queues_per_pipe(dqm);
1979
1980 for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
1981 if (!test_bit(pipe_offset + queue,
e6945304 1982 dqm->dev->shared_resources.cp_queue_bitmap))
851a645e
FK
1983 continue;
1984
1985 r = dqm->dev->kfd2kgd->hqd_dump(
1986 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
1987 if (r)
1988 break;
1989
1990 seq_printf(m, " CP Pipe %d, Queue %d\n",
1991 pipe, queue);
1992 seq_reg_dump(m, dump, n_regs);
1993
1994 kfree(dump);
1995 }
1996 }
1997
c4bb16e0
OZ
1998 for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
1999 get_num_xgmi_sdma_engines(dqm); pipe++) {
d5094189
SL
2000 for (queue = 0;
2001 queue < dqm->dev->device_info->num_sdma_queues_per_engine;
2002 queue++) {
851a645e
FK
2003 r = dqm->dev->kfd2kgd->hqd_sdma_dump(
2004 dqm->dev->kgd, pipe, queue, &dump, &n_regs);
2005 if (r)
2006 break;
2007
2008 seq_printf(m, " SDMA Engine %d, RLC %d\n",
2009 pipe, queue);
2010 seq_reg_dump(m, dump, n_regs);
2011
2012 kfree(dump);
2013 }
2014 }
2015
2016 return r;
2017}
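/*
 * Usage note (hedged): this handler typically backs a read-only debugfs
 * dump, commonly visible as /sys/kernel/debug/kfd/hqds (exact path depends
 * on the debugfs mount). It prints the HIQ, every CP queue enabled in
 * cp_queue_bitmap, and every SDMA/XGMI-SDMA RLC queue on the device.
 */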
2018
a29ec470
SL
2019int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
2020{
2021 int r = 0;
2022
2023 dqm_lock(dqm);
2024 dqm->active_runlist = true;
2025 r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
2026 dqm_unlock(dqm);
2027
2028 return r;
2029}
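/*
 * Behavior note (hedged, restating the code above): setting active_runlist
 * before calling execute_queues_cpsch() with the ALL_QUEUES filter makes the
 * unmap step treat a runlist as mapped, so the runlist is torn down and
 * resubmitted from scratch when this debugfs hook is triggered.
 */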
2030
851a645e 2031#endif