drm/amdkfd: Add GPUVM virtual address space to PDD
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

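/*
 * Note: with CIK_HPD_EOP_BYTES_LOG2 == 11, CIK_HPD_EOP_BYTES works out to
 * 1 << 11 = 2048 bytes of EOP space per pipe.
 */
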
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

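/*
 * VMID management on the no-HWS path: VMIDs are handed out from
 * dqm->vmid_bitmap, offset by dev->vm_info.first_vmid_kfd. A process holds
 * its VMID for as long as it has queues on this device: the VMID is
 * allocated when the first queue is created (see create_queue_nocpsch())
 * and released again once qpd->queues_list becomes empty.
 */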
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

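/*
 * HQD slots are tracked per pipe in dqm->allocated_queues[]. allocate_hqd()
 * below walks the pipes round-robin, starting at dqm->next_pipe_to_allocate,
 * and grabs the lowest free queue bit on the first enabled pipe that still
 * has room ("horizontal" allocation across pipes).
 */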
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_hqd;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
			       q->process->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid asynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	mqd = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	mutex_lock(&dqm->lock);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	mutex_unlock(&dqm->lock);

	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	mutex_lock(&dqm->lock);
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto out_unlock;
	}

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
		retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
				       &q->properties, q->process->mm);

out_unlock:
	mutex_unlock(&dqm->lock);
	return retval;
}

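/*
 * MQD managers are created lazily, one per MQD type, and cached in
 * dqm->mqds[], so repeated lookups for the same type reuse the same manager.
 */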
static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (!mqd)
			pr_err("mqd manager is NULL");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

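/*
 * register_process() pulls the process page directory base out of the
 * per-device GPUVM address space (pdd->vm) and caches it in
 * qpd->page_table_base; allocate_vmid() later programs that base into the
 * VM context once a VMID has been assigned.
 */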
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint32_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->sdma_bitmap) - 1;
	dqm->sdma_bitmap &= ~(1 << bit);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= CIK_SDMA_QUEUES)
		return;
	dqm->sdma_bitmap |= (1 << sdma_queue_id);
}

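/*
 * SDMA queues come from the single dqm->sdma_bitmap; the allocated id is
 * then split into an engine/queue pair:
 *   sdma_queue_id  = sdma_id / CIK_SDMA_QUEUES_PER_ENGINE
 *   sdma_engine_id = sdma_id % CIK_SDMA_QUEUES_PER_ENGINE
 * (see create_sdma_queue_nocpsch() below and create_queue_cpsch()), so
 * consecutive allocations are spread across the engine/queue slots.
 */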
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd;
	int retval;

	mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
	q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_sdma_queue;

	retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	mutex_lock(&dqm->lock);
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	mutex_lock(&dqm->lock);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		mutex_unlock(&dqm->lock);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	mutex_lock(&dqm->lock);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	mutex_unlock(&dqm->lock);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	retval = 0;

	mutex_lock(&dqm->lock);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
		q->properties.sdma_queue_id =
			q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
		q->properties.sdma_engine_id =
			q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
	}
	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd) {
		retval = -ENOMEM;
		goto out;
	}

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		schedule();
	}

	return 0;
}

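/*
 * Fence protocol used by unmap_queues_cpsch(): the fence word is reset to
 * KFD_FENCE_INIT, a query-status packet asks the scheduler firmware to write
 * KFD_FENCE_COMPLETED once preemption has finished, and
 * amdkfd_fence_wait_timeout() above polls the word until it flips or
 * QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS expires (returning -ETIME).
 */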
static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool preempt_all_queues;

	preempt_all_queues = false;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	mutex_unlock(&dqm->lock);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

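/*
 * Worked example (illustrative values only): an APE1 aperture at base
 * 1ULL << 44 with a 64 KiB size gives limit = (1ULL << 44) + 0xFFFF. Both
 * values pass the APE1_FIXED_BITS_MASK checks in
 * set_cache_memory_policy() below (the base has zero low 16 bits, the
 * inclusive limit ends in 0xFFFF, and neither touches bits 47..63), and
 * after the >> 16 conversion SH_MEM_APE1_BASE and SH_MEM_APE1_LIMIT both
 * become 0x10000000.
 */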
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval;

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}

static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;

	mutex_lock(&dqm->lock);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	mutex_unlock(&dqm->lock);
	return retval;
}


static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;

	retval = 0;

	mutex_lock(&dqm->lock);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
			dqm->sdma_queue_count--;

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if (retval || qpd->reset_wavefronts) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	/* lastly, free mqd resources */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd) {
			retval = -ENOMEM;
			goto out;
		}
		list_del(&q->list);
		qpd->queue_count--;
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.get_mqd_manager = get_mqd_manager;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	kfree(dqm);
}

#if defined(CONFIG_DEBUG_FS)

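/*
 * seq_reg_dump() pretty-prints a register dump as "address: value value ...",
 * packing up to eight consecutive registers per line and starting a new line
 * (with its address) whenever the register offsets stop being contiguous.
 */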
static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}

int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " CP Pipe %d, Queue %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
		for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, " SDMA Engine %d, RLC %d\n",
				   pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}

#endif