/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id);

static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		/* For SDMA queues on SOC15 with 8-byte doorbells, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
		uint32_t *idx_offset =
				dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_doorbell_id_to_offset(dev, q->process,
					  q->doorbell_id);

	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

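/*
 * Allocate a hardware VMID from the KFD-reserved range for a process's
 * first queue, program the PASID<->VMID mapping and aperture settings,
 * and point the VM context at the process page table.
 */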
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

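/*
 * Create a user mode queue without the HW scheduler: a VMID is allocated
 * on the process's first queue, then the compute or SDMA path programs
 * the HQD directly.
 */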
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

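/*
 * Reserve an HQD slot for the queue, scanning the per-pipe free masks
 * starting at next_pipe_to_allocate so queues spread round-robin across
 * the enabled pipes.
 */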
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	if (WARN(q->process->mm != current->mm,
		 "should only run in user thread"))
		retval = -EFAULT;
	else
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
					   &q->properties, current->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}

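/*
 * Apply changed queue properties to the MQD. Under the HW scheduler the
 * queue is unmapped first and a new runlist is submitted afterwards;
 * without it, an active queue is drained from its HQD and reloaded.
 */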
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (pdd->qpd.evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd_mgr;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd_mgr = dqm->mqd_mgrs[type];
	if (!mqd_mgr) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(type, dqm->dev);
		if (!mqd_mgr)
			pr_err("mqd manager is NULL");
		dqm->mqd_mgrs[type] = mqd_mgr;
	}

	return mqd_mgr;
}

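/*
 * Evict a process's queues: only active queues are taken off the hardware
 * and marked evicted, so a later restore has to reload just the queues
 * that were actually running.
 */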
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot evict queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval)
			goto out;
		dqm->queue_count--;
	}

out:
	dqm_unlock(dqm);
	return retval;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}

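/*
 * Restore a process's queues once its eviction count drops to zero. The
 * page directory base is re-read first because the process's page tables
 * may have moved while the queues were evicted.
 */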
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		retval = -EFAULT;
		goto out;
	}

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot restore queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				       q->queue, &q->properties, mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (!retval)
		qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;
	kfd_inc_compute_active(dqm->dev);

	dqm_unlock(dqm);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			kfd_dec_compute_active(dqm->dev);
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	pm_uninit(&dqm->packets);
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = __ffs64(dqm->sdma_bitmap);
	dqm->sdma_bitmap &= ~(1ULL << bit);
	*sdma_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_id)
{
	if (sdma_id >= get_num_sdma_queues(dqm))
		return;
	dqm->sdma_bitmap |= (1ULL << sdma_id);
}

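/*
 * An allocated sdma_id encodes both the engine and the per-engine queue:
 * sdma_id % num_engines selects the engine and sdma_id / num_engines the
 * queue on it, so consecutive allocations alternate between engines.
 */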
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
	q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
				NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1ULL << get_num_sdma_queues(dqm)) - 1;

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out;
		q->properties.sdma_queue_id =
			q->sdma_id / get_num_sdma_engines(dqm);
		q->properties.sdma_engine_id =
			q->sdma_id % get_num_sdma_engines(dqm);
		pr_debug("SDMA id is: %d\n", q->sdma_id);
		pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
		pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	/* Do init_mqd before dqm_lock(dqm) to avoid circular locking order:
	 * lock(dqm) -> bo::reserve
	 */
	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);
	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	dqm_lock(dqm);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q->sdma_id);
out:
	return retval;
}

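/*
 * Busy-wait (rescheduling between polls) until the CP writes fence_value
 * to the GART fence location, or until timeout_ms elapses; the caller
 * primes *fence_addr and issues a query-status packet beforehand.
 */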
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is : %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	return map_queues_cpsch(dqm);
}

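/*
 * Destroy a user mode queue under the HW scheduler: unlink the queue and,
 * if it was active, preempt it by resubmitting the runlist without it.
 */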
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow destroying a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto failed;
	}

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do uninit_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}

d7b9bd22 FK |
1499 | static int set_trap_handler(struct device_queue_manager *dqm, |
1500 | struct qcm_process_device *qpd, | |
1501 | uint64_t tba_addr, | |
1502 | uint64_t tma_addr) | |
1503 | { | |
1504 | uint64_t *tma; | |
1505 | ||
1506 | if (dqm->dev->cwsr_enabled) { | |
1507 | /* Jump from CWSR trap handler to user trap */ | |
1508 | tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET); | |
1509 | tma[0] = tba_addr; | |
1510 | tma[1] = tma_addr; | |
1511 | } else { | |
1512 | qpd->tba_addr = tba_addr; | |
1513 | qpd->tma_addr = tma_addr; | |
1514 | } | |
1515 | ||
1516 | return 0; | |
1517 | } | |
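/*
 * Note on the CWSR path above: the first two quadwords of the trap
 * memory area (TMA) at qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET hold the
 * user-mode TBA/TMA pointers, which the CWSR trap handler reads when
 * forwarding a trap to the user-mode handler.
 */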
1518 | ||
9fd3f1bf FK |
1519 | static int process_termination_nocpsch(struct device_queue_manager *dqm, |
1520 | struct qcm_process_device *qpd) | |
1521 | { | |
1522 | struct queue *q, *next; | |
1523 | struct device_process_node *cur, *next_dpn; | |
1524 | int retval = 0; | |
1525 | ||
efeaed4d | 1526 | dqm_lock(dqm); |
9fd3f1bf FK |
1527 | |
1528 | /* Clear all user mode queues */ | |
1529 | list_for_each_entry_safe(q, next, &qpd->queues_list, list) { | |
1530 | int ret; | |
1531 | ||
1532 | ret = destroy_queue_nocpsch_locked(dqm, qpd, q); | |
1533 | if (ret) | |
1534 | retval = ret; | |
1535 | } | |
1536 | ||
1537 | /* Unregister process */ | |
1538 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { | |
1539 | if (qpd == cur->qpd) { | |
1540 | list_del(&cur->list); | |
1541 | kfree(cur); | |
1542 | dqm->processes_count--; | |
f756e631 | 1543 | kfd_dec_compute_active(dqm->dev); |
9fd3f1bf FK |
1544 | break; |
1545 | } | |
1546 | } | |
1547 | ||
efeaed4d | 1548 | dqm_unlock(dqm); |
9fd3f1bf FK |
1549 | return retval; |
1550 | } | |
1551 | ||
5df099e8 JC |
1552 | static int get_wave_state(struct device_queue_manager *dqm, |
1553 | struct queue *q, | |
1554 | void __user *ctl_stack, | |
1555 | u32 *ctl_stack_used_size, | |
1556 | u32 *save_area_used_size) | |
1557 | { | |
4e6c6fc1 | 1558 | struct mqd_manager *mqd_mgr; |
5df099e8 JC |
1559 | int r; |
1560 | ||
1561 | dqm_lock(dqm); | |
1562 | ||
1563 | if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || | |
1564 | q->properties.is_active || !q->device->cwsr_enabled) { | |
1565 | r = -EINVAL; | |
1566 | goto dqm_unlock; | |
1567 | } | |
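/*
 * Rationale for the check above: the control stack and save area are
 * only written out when CWSR preempts the queue, so a consistent wave
 * state exists only for an inactive, CWSR-enabled compute queue.
 */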
1568 | ||
4e6c6fc1 YZ |
1569 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); |
1570 | if (!mqd_mgr) { | |
5df099e8 JC |
1571 | r = -ENOMEM; |
1572 | goto dqm_unlock; | |
1573 | } | |
1574 | ||
4e6c6fc1 | 1575 | if (!mqd_mgr->get_wave_state) { |
5df099e8 JC |
1576 | r = -EINVAL; |
1577 | goto dqm_unlock; | |
1578 | } | |
1579 | ||
4e6c6fc1 YZ |
1580 | r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, |
1581 | ctl_stack_used_size, save_area_used_size); | |
5df099e8 JC |
1582 | |
1583 | dqm_unlock: | |
1584 | dqm_unlock(dqm); | |
1585 | return r; | |
1586 | } | |
9fd3f1bf FK |
1587 | |
1588 | static int process_termination_cpsch(struct device_queue_manager *dqm, | |
1589 | struct qcm_process_device *qpd) | |
1590 | { | |
1591 | int retval; | |
1592 | struct queue *q, *next; | |
1593 | struct kernel_queue *kq, *kq_next; | |
8d5f3552 | 1594 | struct mqd_manager *mqd_mgr; |
9fd3f1bf FK |
1595 | struct device_process_node *cur, *next_dpn; |
1596 | enum kfd_unmap_queues_filter filter = | |
1597 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; | |
1598 | ||
1599 | retval = 0; | |
1600 | ||
efeaed4d | 1601 | dqm_lock(dqm); |
9fd3f1bf FK |
1602 | |
1603 | /* Clean all kernel queues */ | |
1604 | list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { | |
1605 | list_del(&kq->list); | |
1606 | dqm->queue_count--; | |
1607 | qpd->is_debug = false; | |
1608 | dqm->total_queue_count--; | |
1609 | filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; | |
1610 | } | |
1611 | ||
1612 | /* Clear all user mode queues */ | |
1613 | list_for_each_entry(q, &qpd->queues_list, list) { | |
72a01d23 | 1614 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { |
9fd3f1bf | 1615 | dqm->sdma_queue_count--; |
72a01d23 FK |
1616 | deallocate_sdma_queue(dqm, q->sdma_id); |
1617 | } | |
9fd3f1bf FK |
1618 | |
1619 | if (q->properties.is_active) | |
1620 | dqm->queue_count--; | |
1621 | ||
1622 | dqm->total_queue_count--; | |
1623 | } | |
1624 | ||
1625 | /* Unregister process */ | |
1626 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { | |
1627 | if (qpd == cur->qpd) { | |
1628 | list_del(&cur->list); | |
1629 | kfree(cur); | |
1630 | dqm->processes_count--; | |
f756e631 | 1631 | kfd_dec_compute_active(dqm->dev); |
9fd3f1bf FK |
1632 | break; |
1633 | } | |
1634 | } | |
1635 | ||
1636 | retval = execute_queues_cpsch(dqm, filter, 0); | |
73ea648d | 1637 | if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { |
9fd3f1bf FK |
1638 | pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); |
1639 | dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process); | |
1640 | qpd->reset_wavefronts = false; | |
1641 | } | |
1642 | ||
89cd9d23 PY |
1643 | dqm_unlock(dqm); |
1644 | ||
1645 | /* Lastly, free mqd resources. | |
1646 | * Do uninit_mqd() after dqm_unlock to avoid circular locking. | |
1647 | */ | |
9fd3f1bf | 1648 | list_for_each_entry_safe(q, next, &qpd->queues_list, list) { |
8d5f3552 | 1649 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, |
9fd3f1bf | 1650 | get_mqd_type_from_queue_type(q->properties.type)); |
8d5f3552 | 1651 | if (!mqd_mgr) { |
9fd3f1bf FK |
1652 | retval = -ENOMEM; |
1653 | goto out; | |
1654 | } | |
1655 | list_del(&q->list); | |
bc920fd4 | 1656 | qpd->queue_count--; |
8d5f3552 | 1657 | mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
9fd3f1bf FK |
1658 | } |
1659 | ||
1660 | out: | |
9fd3f1bf FK |
1661 | return retval; |
1662 | } | |
1663 | ||
64c7f8cf BG |
1664 | struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) |
1665 | { | |
1666 | struct device_queue_manager *dqm; | |
1667 | ||
79775b62 | 1668 | pr_debug("Loading device queue manager\n"); |
a22fc854 | 1669 | |
dbf56ab1 | 1670 | dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); |
64c7f8cf BG |
1671 | if (!dqm) |
1672 | return NULL; | |
1673 | ||
d146c5a7 FK |
1674 | switch (dev->device_info->asic_family) { |
1675 | /* HWS is not available on Hawaii. */ | |
1676 | case CHIP_HAWAII: | |
1677 | /* HWS depends on CWSR for timely dequeue. CWSR is not | |
1678 | * available on Tonga. | |
1679 | * | |
1680 | * FIXME: This argument also applies to Kaveri. | |
1681 | */ | |
1682 | case CHIP_TONGA: | |
1683 | dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; | |
1684 | break; | |
1685 | default: | |
1686 | dqm->sched_policy = sched_policy; | |
1687 | break; | |
1688 | } | |
1689 | ||
64c7f8cf | 1690 | dqm->dev = dev; |
d146c5a7 | 1691 | switch (dqm->sched_policy) { |
64c7f8cf BG |
1692 | case KFD_SCHED_POLICY_HWS: |
1693 | case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: | |
1694 | /* initialize dqm for cp scheduling */ | |
45c9a5e4 OG |
1695 | dqm->ops.create_queue = create_queue_cpsch; |
1696 | dqm->ops.initialize = initialize_cpsch; | |
1697 | dqm->ops.start = start_cpsch; | |
1698 | dqm->ops.stop = stop_cpsch; | |
1699 | dqm->ops.destroy_queue = destroy_queue_cpsch; | |
1700 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1701 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1702 | dqm->ops.register_process = register_process; | |
1703 | dqm->ops.unregister_process = unregister_process; | |
1704 | dqm->ops.uninitialize = uninitialize; | |
45c9a5e4 OG |
1705 | dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; |
1706 | dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; | |
1707 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; | |
d7b9bd22 | 1708 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1709 | dqm->ops.process_termination = process_termination_cpsch; |
26103436 FK |
1710 | dqm->ops.evict_process_queues = evict_process_queues_cpsch; |
1711 | dqm->ops.restore_process_queues = restore_process_queues_cpsch; | |
5df099e8 | 1712 | dqm->ops.get_wave_state = get_wave_state; |
64c7f8cf BG |
1713 | break; |
1714 | case KFD_SCHED_POLICY_NO_HWS: | |
1715 | /* initialize dqm for no cp scheduling */ | |
45c9a5e4 OG |
1716 | dqm->ops.start = start_nocpsch; |
1717 | dqm->ops.stop = stop_nocpsch; | |
1718 | dqm->ops.create_queue = create_queue_nocpsch; | |
1719 | dqm->ops.destroy_queue = destroy_queue_nocpsch; | |
1720 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1721 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1722 | dqm->ops.register_process = register_process; | |
1723 | dqm->ops.unregister_process = unregister_process; | |
45c9a5e4 | 1724 | dqm->ops.initialize = initialize_nocpsch; |
58dcd5bf | 1725 | dqm->ops.uninitialize = uninitialize; |
45c9a5e4 | 1726 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; |
d7b9bd22 | 1727 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1728 | dqm->ops.process_termination = process_termination_nocpsch; |
26103436 FK |
1729 | dqm->ops.evict_process_queues = evict_process_queues_nocpsch; |
1730 | dqm->ops.restore_process_queues = | |
1731 | restore_process_queues_nocpsch; | |
5df099e8 | 1732 | dqm->ops.get_wave_state = get_wave_state; |
64c7f8cf BG |
1733 | break; |
1734 | default: | |
d146c5a7 | 1735 | pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); |
32fa8219 | 1736 | goto out_free; |
64c7f8cf BG |
1737 | } |
1738 | ||
a22fc854 BG |
1739 | switch (dev->device_info->asic_family) { |
1740 | case CHIP_CARRIZO: | |
bfd5e378 | 1741 | device_queue_manager_init_vi(&dqm->asic_ops); |
300dec95 OG |
1742 | break; |
1743 | ||
a22fc854 | 1744 | case CHIP_KAVERI: |
bfd5e378 | 1745 | device_queue_manager_init_cik(&dqm->asic_ops); |
300dec95 | 1746 | break; |
97672cbe FK |
1747 | |
1748 | case CHIP_HAWAII: | |
1749 | device_queue_manager_init_cik_hawaii(&dqm->asic_ops); | |
1750 | break; | |
1751 | ||
1752 | case CHIP_TONGA: | |
1753 | case CHIP_FIJI: | |
1754 | case CHIP_POLARIS10: | |
1755 | case CHIP_POLARIS11: | |
846a44d7 | 1756 | case CHIP_POLARIS12: |
97672cbe FK |
1757 | device_queue_manager_init_vi_tonga(&dqm->asic_ops); |
1758 | break; | |
bed4f110 FK |
1759 | |
1760 | case CHIP_VEGA10: | |
846a44d7 | 1761 | case CHIP_VEGA12: |
22a3a294 | 1762 | case CHIP_VEGA20: |
bed4f110 FK |
1763 | case CHIP_RAVEN: |
1764 | device_queue_manager_init_v9(&dqm->asic_ops); | |
1765 | break; | |
e596b903 YZ |
1766 | default: |
1767 | WARN(1, "Unexpected ASIC family %u", | |
1768 | dev->device_info->asic_family); | |
1769 | goto out_free; | |
a22fc854 BG |
1770 | } |
1771 | ||
32fa8219 FK |
1772 | if (!dqm->ops.initialize(dqm)) |
1773 | return dqm; | |
64c7f8cf | 1774 | |
32fa8219 FK |
1775 | out_free: |
1776 | kfree(dqm); | |
1777 | return NULL; | |
64c7f8cf BG |
1778 | } |
1779 | ||
1780 | void device_queue_manager_uninit(struct device_queue_manager *dqm) | |
1781 | { | |
45c9a5e4 | 1782 | dqm->ops.uninitialize(dqm); |
64c7f8cf BG |
1783 | kfree(dqm); |
1784 | } | |
851a645e | 1785 | |
2640c3fa | 1786 | int kfd_process_vm_fault(struct device_queue_manager *dqm, |
1787 | unsigned int pasid) | |
1788 | { | |
1789 | struct kfd_process_device *pdd; | |
1790 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); | |
1791 | int ret = 0; | |
1792 | ||
1793 | if (!p) | |
1794 | return -EINVAL; | |
1795 | pdd = kfd_get_process_device_data(dqm->dev, p); | |
1796 | if (pdd) | |
1797 | ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); | |
1798 | kfd_unref_process(p); | |
1799 | ||
1800 | return ret; | |
1801 | } | |
1802 | ||
73ea648d SL |
1803 | static void kfd_process_hw_exception(struct work_struct *work) |
1804 | { | |
1805 | struct device_queue_manager *dqm = container_of(work, | |
1806 | struct device_queue_manager, hw_exception_work); | |
5b87245f | 1807 | amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); |
73ea648d SL |
1808 | } |
1809 | ||
851a645e FK |
1810 | #if defined(CONFIG_DEBUG_FS) |
1811 | ||
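/*
 * Pretty-printer for register dumps: each output line begins with the
 * register offset of its first entry followed by up to eight dword
 * values, and a new line starts whenever the register offsets stop
 * being contiguous.  Illustrative output (offsets and values are
 * made-up placeholders):
 *
 *   00003320: 00000001 00000000 c0310800 00000000
 *   00003340: 00000041 00000000
 */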
1812 | static void seq_reg_dump(struct seq_file *m, | |
1813 | uint32_t (*dump)[2], uint32_t n_regs) | |
1814 | { | |
1815 | uint32_t i, count; | |
1816 | ||
1817 | for (i = 0, count = 0; i < n_regs; i++) { | |
1818 | if (count == 0 || | |
1819 | dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { | |
1820 | seq_printf(m, "%s %08x: %08x", | |
1821 | i ? "\n" : "", | |
1822 | dump[i][0], dump[i][1]); | |
1823 | count = 7; | |
1824 | } else { | |
1825 | seq_printf(m, " %08x", dump[i][1]); | |
1826 | count--; | |
1827 | } | |
1828 | } | |
1829 | ||
1830 | seq_puts(m, "\n"); | |
1831 | } | |
1832 | ||
1833 | int dqm_debugfs_hqds(struct seq_file *m, void *data) | |
1834 | { | |
1835 | struct device_queue_manager *dqm = data; | |
1836 | uint32_t (*dump)[2], n_regs; | |
1837 | int pipe, queue; | |
1838 | int r = 0; | |
1839 | ||
24f48a42 OZ |
1840 | r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, |
1841 | KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); | |
1842 | if (!r) { | |
1843 | seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", | |
1844 | KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, | |
1845 | KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), | |
1846 | KFD_CIK_HIQ_QUEUE); | |
1847 | seq_reg_dump(m, dump, n_regs); | |
1848 | ||
1849 | kfree(dump); | |
1850 | } | |
1851 | ||
851a645e FK |
1852 | for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { |
1853 | int pipe_offset = pipe * get_queues_per_pipe(dqm); | |
1854 | ||
1855 | for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { | |
1856 | if (!test_bit(pipe_offset + queue, | |
1857 | dqm->dev->shared_resources.queue_bitmap)) | |
1858 | continue; | |
1859 | ||
1860 | r = dqm->dev->kfd2kgd->hqd_dump( | |
1861 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1862 | if (r) | |
1863 | break; | |
1864 | ||
1865 | seq_printf(m, " CP Pipe %d, Queue %d\n", | |
1866 | pipe, queue); | |
1867 | seq_reg_dump(m, dump, n_regs); | |
1868 | ||
1869 | kfree(dump); | |
1870 | } | |
1871 | } | |
1872 | ||
98bb9222 | 1873 | for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { |
d5094189 SL |
1874 | for (queue = 0; |
1875 | queue < dqm->dev->device_info->num_sdma_queues_per_engine; | |
1876 | queue++) { | |
851a645e FK |
1877 | r = dqm->dev->kfd2kgd->hqd_sdma_dump( |
1878 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1879 | if (r) | |
1880 | break; | |
1881 | ||
1882 | seq_printf(m, " SDMA Engine %d, RLC %d\n", | |
1883 | pipe, queue); | |
1884 | seq_reg_dump(m, dump, n_regs); | |
1885 | ||
1886 | kfree(dump); | |
1887 | } | |
1888 | } | |
1889 | ||
1890 | return r; | |
1891 | } | |
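/*
 * The dump above is normally reached through debugfs; assuming debugfs
 * is mounted at its usual location, something like:
 *
 *   cat /sys/kernel/debug/kfd/hqds
 *
 * prints the HIQ, the active CP queues and the SDMA RLC queues in the
 * format produced by seq_reg_dump().
 */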
1892 | ||
a29ec470 SL |
1893 | int dqm_debugfs_execute_queues(struct device_queue_manager *dqm) |
1894 | { | |
1895 | int r = 0; | |
1896 | ||
1897 | dqm_lock(dqm); | |
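/* Pretend a runlist is active so that execute_queues_cpsch() goes
 * through a full unmap of all queues before mapping them again.
 */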
1898 | dqm->active_runlist = true; | |
1899 | r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); | |
1900 | dqm_unlock(dqm); | |
1901 | ||
1902 | return r; | |
1903 | } | |
1904 | ||
851a645e | 1905 | #endif |