/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
					unsigned int sdma_queue_id);

static void kfd_process_hw_exception(struct work_struct *work);

static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}

static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
				dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

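/*
 * SDMA queue accounting: the allocatable pool is engines x queues-per-engine.
 * For example, a device reporting two SDMA engines with two queues each
 * exposes four flat SDMA queue IDs (both counts are per-ASIC values taken
 * from device_info above).
 */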
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		/* For SDMA queues on SOC15, use static doorbell
		 * assignments based on the engine and queue.
		 */
		q->doorbell_id = dev->shared_resources.sdma_doorbell
			[q->properties.sdma_engine_id]
			[q->properties.sdma_queue_id];
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available\n");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_doorbell_id_to_offset(dev, q->process,
					q->doorbell_id);

	return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}

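/*
 * KFD VMIDs come from a small bitmap: bit i in dqm->vmid_bitmap stands for
 * hardware VMID (first_vmid_kfd + i). If, say, first_vmid_kfd is 8, then
 * clearing bit 0 below hands VMID 8 to the process.
 */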
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	return 0;
}

static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	int retval;

	print_queue(q);

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		retval = create_compute_queue_nocpsch(dqm, q, qpd);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		retval = create_sdma_queue_nocpsch(dqm, q, qpd);
	else
		retval = -EINVAL;

	if (retval) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		goto out_unlock;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

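/*
 * HQD slots are handed out round-robin across pipes: the search starts at
 * next_pipe_to_allocate and takes the first free queue bit on an enabled
 * pipe, so consecutive allocations land on different pipes ("horizontal"
 * allocation) and spread load across the compute hardware.
 */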
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval)
		return retval;

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);

	dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	if (!q->properties.is_active)
		return 0;

	if (WARN(q->process->mm != current->mm,
		 "should only run in user thread"))
		retval = -EFAULT;
	else
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
					&q->properties, current->mm);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	deallocate_hqd(dqm, q);

	return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
		get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr)
		return -ENOMEM;

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}

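/*
 * An MQD may only be rewritten while its queue is off the hardware: with
 * HWS (cpsch) the runlist is unmapped first, without HWS the single HQD is
 * drained via destroy_mqd() before update_mqd() and a reload.
 */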
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_unlock;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (pdd->qpd.evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						q->pipe, q->queue,
						&q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}

static struct mqd_manager *get_mqd_manager(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd_mgr;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	pr_debug("mqd type %d\n", type);

	mqd_mgr = dqm->mqd_mgrs[type];
	if (!mqd_mgr) {
		mqd_mgr = mqd_manager_init(type, dqm->dev);
		if (!mqd_mgr)
			pr_err("mqd manager is NULL\n");
		dqm->mqd_mgrs[type] = mqd_mgr;
	}

	return mqd_mgr;
}

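/*
 * Eviction is reference counted: qpd->evicted counts nested evictions and
 * only the first one actually pulls queues off the hardware; the restore
 * paths below decrement it and only reactivate queues once it hits zero.
 */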
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot evict queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval)
			goto out;
		dqm->queue_count--;
	}

out:
	dqm_unlock(dqm);
	return retval;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Deactivate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_active)
			continue;
		q->properties.is_evicted = true;
		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		retval = -EFAULT;
		goto out;
	}

	/* Reactivate all evicted queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
		if (!mqd_mgr) { /* should not be here */
			pr_err("Cannot restore queue, mqd mgr is NULL\n");
			retval = -ENOMEM;
			goto out;
		}
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				q->queue, &q->properties, mm);
		if (retval)
			goto out;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return retval;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* Reactivate all evicted queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (!q->properties.is_evicted)
			continue;
		q->properties.is_evicted = false;
		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	if (!retval)
		qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}

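/*
 * (Un)registration doubles as a power hint: the first KFD process takes
 * the GPU out of "compute idle" and removing the last one puts it back,
 * via the amdgpu_amdkfd_set_compute_idle() calls below.
 */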
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	if (dqm->processes_count++ == 0)
		amdgpu_amdkfd_set_compute_idle(dqm->dev->kgd, false);

	dqm_unlock(dqm);

	return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			if (--dqm->processes_count == 0)
				amdgpu_amdkfd_set_compute_idle(
					dqm->dev->kgd, true);
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 :
		(uint32_t)pasid |
		ATC_VMID_PASID_MAPPING_VALID;

	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid_mapping,
						vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

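/*
 * initialize_nocpsch builds one free-queue bitmap per pipe from the
 * queue_bitmap that amdgpu shares with KFD; allocate_hqd() later hands out
 * HQD slots from these per-pipe bitmaps.
 */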
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

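	/*
	 * Seed the allocator bitmaps: (1 << n) - 1 sets the n low bits, so,
	 * for example, vmid_num_kfd == 8 yields 0xff, i.e. eight free VMIDs.
	 */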
	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;

	return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	pm_uninit(&dqm->packets);
	return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int *sdma_queue_id)
{
	int bit;

	if (dqm->sdma_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->sdma_bitmap) - 1;
	dqm->sdma_bitmap &= ~(1 << bit);
	*sdma_queue_id = bit;

	return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				unsigned int sdma_queue_id)
{
	if (sdma_queue_id >= get_num_sdma_queues(dqm))
		return;
	dqm->sdma_bitmap |= (1 << sdma_queue_id);
}

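/*
 * A flat sdma_id is split into an (engine, queue) pair by the modulo and
 * division below, striping consecutive IDs across engines: with two SDMA
 * engines, for example, sdma_id 3 becomes queue 1 on engine 1.
 */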
static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
	if (!mqd_mgr)
		return -ENOMEM;

	retval = allocate_sdma_queue(dqm, &q->sdma_id);
	if (retval)
		return retval;

	q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
	q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	pr_debug("SDMA id is: %d\n", q->sdma_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
				NULL);
	if (retval)
		goto out_uninit_mqd;

	return 0;

out_uninit_mqd:
	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	deallocate_sdma_queue(dqm, q->sdma_id);

	return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

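/*
 * set_sched_resources() hands the HW scheduler firmware its VMID set and a
 * 64-bit mask of MEC queues it may use; only first-MEC queues are granted,
 * and a queue index beyond 64 would overflow the mask (see the WARN below).
 */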
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		retval = allocate_sdma_queue(dqm, &q->sdma_id);
		if (retval)
			goto out_unlock;
		q->properties.sdma_queue_id =
			q->sdma_id / get_num_sdma_engines(dqm);
		q->properties.sdma_engine_id =
			q->sdma_id % get_num_sdma_engines(dqm);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));

	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	/*
	 * Eviction state logic: we only mark active queues as evicted
	 * to avoid the overhead of restoring inactive queues later
	 */
	if (qpd->evicted)
		q->properties.is_evicted = (q->properties.queue_size > 0 &&
					    q->properties.queue_percent > 0 &&
					    q->properties.queue_address != 0);

	dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval)
		goto out_deallocate_doorbell;

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
	dqm_unlock(dqm);

	return retval;
}

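/*
 * Poll a fence slot in GART until the scheduler firmware writes the
 * expected value, calling schedule() between reads; on timeout the
 * halt_if_hws_hang option parks the thread so CP state is preserved
 * for firmware debugging.
 */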
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
				unsigned int sdma_engine)
{
	return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
			sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}

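/*
 * Unmapping uses a fence handshake to confirm preemption: the driver seeds
 * the fence slot with KFD_FENCE_INIT, asks the firmware to write
 * KFD_FENCE_COMPLETED once the queues are off the hardware, then waits on
 * the slot with a timeout via amdkfd_fence_wait_timeout().
 */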
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count is: %u\n",
		dqm->sdma_queue_count);

	if (dqm->sdma_queue_count > 0) {
		unmap_sdma_queues(dqm, 0);
		unmap_sdma_queues(dqm, 1);
	}

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* the fence wait must time out if the preemption did not complete */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	return map_queues_cpsch(dqm);
}

static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error, currently we do not allow to destroy a queue
		 * of a currently debugged process
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;

	}

	mqd_mgr = dqm->ops.get_mqd_manager(dqm,
			get_mqd_type_from_queue_type(q->properties.type));
	if (!mqd_mgr) {
		retval = -ENOMEM;
		goto failed;
	}

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q->sdma_id);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	return retval;

failed:
failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

1437 | static bool set_cache_memory_policy(struct device_queue_manager *dqm, | |
1438 | struct qcm_process_device *qpd, | |
1439 | enum cache_policy default_policy, | |
1440 | enum cache_policy alternate_policy, | |
1441 | void __user *alternate_aperture_base, | |
1442 | uint64_t alternate_aperture_size) | |
1443 | { | |
bed4f110 FK |
1444 | bool retval = true; |
1445 | ||
1446 | if (!dqm->asic_ops.set_cache_memory_policy) | |
1447 | return retval; | |
64c7f8cf | 1448 | |
efeaed4d | 1449 | dqm_lock(dqm); |
64c7f8cf BG |
1450 | |
1451 | if (alternate_aperture_size == 0) { | |
1452 | /* base > limit disables APE1 */ | |
1453 | qpd->sh_mem_ape1_base = 1; | |
1454 | qpd->sh_mem_ape1_limit = 0; | |
1455 | } else { | |
1456 | /* | |
1457 | * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]}, | |
1458 | * SH_MEM_APE1_BASE[31:0], 0x0000 } | |
1459 | * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]}, | |
1460 | * SH_MEM_APE1_LIMIT[31:0], 0xFFFF } | |
1461 | * Verify that the base and size parameters can be | |
1462 | * represented in this format and convert them. | |
1463 | * Additionally restrict APE1 to user-mode addresses. | |
1464 | */ | |
1465 | ||
1466 | uint64_t base = (uintptr_t)alternate_aperture_base; | |
1467 | uint64_t limit = base + alternate_aperture_size - 1; | |
1468 | ||
ab7c1648 KR |
1469 | if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 || |
1470 | (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) { | |
1471 | retval = false; | |
64c7f8cf | 1472 | goto out; |
ab7c1648 | 1473 | } |
64c7f8cf BG |
1474 | |
1475 | qpd->sh_mem_ape1_base = base >> 16; | |
1476 | qpd->sh_mem_ape1_limit = limit >> 16; | |
1477 | } | |
1478 | ||
bfd5e378 | 1479 | retval = dqm->asic_ops.set_cache_memory_policy( |
a22fc854 BG |
1480 | dqm, |
1481 | qpd, | |
1482 | default_policy, | |
1483 | alternate_policy, | |
1484 | alternate_aperture_base, | |
1485 | alternate_aperture_size); | |
64c7f8cf | 1486 | |
d146c5a7 | 1487 | if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0)) |
64c7f8cf BG |
1488 | program_sh_mem_settings(dqm, qpd); |
1489 | ||
79775b62 | 1490 | pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n", |
64c7f8cf BG |
1491 | qpd->sh_mem_config, qpd->sh_mem_ape1_base, |
1492 | qpd->sh_mem_ape1_limit); | |
1493 | ||
64c7f8cf | 1494 | out: |
efeaed4d | 1495 | dqm_unlock(dqm); |
ab7c1648 | 1496 | return retval; |
64c7f8cf BG |
1497 | } |
1498 | ||
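For reference, the base/limit packing above is pure arithmetic and can be exercised in isolation. A minimal userspace sketch follows; the helper name and the sample addresses are illustrative, and only the mask, the alignment constant, and the >>16 packing come from the code above.

/*
 * Standalone sketch of the APE1 validation and packing performed by
 * set_cache_memory_policy() above. Helper name and sample addresses
 * in main() are illustrative, not part of the driver.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
#define APE1_LIMIT_ALIGNMENT 0xFFFFULL

/* The driver handles size == 0 separately (it disables APE1). */
static bool ape1_encode(uint64_t base, uint64_t size,
                        uint32_t *reg_base, uint32_t *reg_limit)
{
        uint64_t limit = base + size - 1;

        /* Base: low 16 bits zero, high bits zero (stay in user mode).
         * Limit: low 16 bits all ones, high bits zero. */
        if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
            (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
                return false;

        *reg_base = (uint32_t)(base >> 16);     /* SH_MEM_APE1_BASE */
        *reg_limit = (uint32_t)(limit >> 16);   /* SH_MEM_APE1_LIMIT */
        return true;
}

int main(void)
{
        uint32_t b, l;

        /* 64K-aligned base with a 64K-multiple size is accepted. */
        if (ape1_encode(0x200000000ULL, 0x20000ULL, &b, &l))
                printf("base reg 0x%08x, limit reg 0x%08x\n", b, l);

        /* An unaligned size is rejected: the limit does not end in FFFFh. */
        if (!ape1_encode(0x200000000ULL, 0x12345ULL, &b, &l))
                printf("unaligned size rejected\n");
        return 0;
}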
d7b9bd22 FK |
1499 | static int set_trap_handler(struct device_queue_manager *dqm, |
1500 | struct qcm_process_device *qpd, | |
1501 | uint64_t tba_addr, | |
1502 | uint64_t tma_addr) | |
1503 | { | |
1504 | uint64_t *tma; | |
1505 | ||
1506 | if (dqm->dev->cwsr_enabled) { | |
1507 | /* Jump from CWSR trap handler to user trap */ | |
1508 | tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET); | |
1509 | tma[0] = tba_addr; | |
1510 | tma[1] = tma_addr; | |
1511 | } else { | |
1512 | qpd->tba_addr = tba_addr; | |
1513 | qpd->tma_addr = tma_addr; | |
1514 | } | |
1515 | ||
1516 | return 0; | |
1517 | } | |
1518 | ||
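The two stores above define a tiny fixed layout: word 0 of the TMA area holds the user trap-handler entry address (TBA) and word 1 its trap-memory pointer (TMA), where the first-level CWSR trap handler picks them up. A userspace sketch of that layout; the struct, the buffer, and the 4096-byte offset are illustrative assumptions (the real offset is KFD_CWSR_TMA_OFFSET).

/*
 * Illustrative layout of the two 64-bit slots written by
 * set_trap_handler() above; the driver indexes a raw uint64_t array.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct cwsr_user_trap_slots {
        uint64_t tba;   /* tma[0]: user trap handler entry (TBA) */
        uint64_t tma;   /* tma[1]: user trap handler memory (TMA) */
};

int main(void)
{
        /* Stand-in for the area at qpd->cwsr_kaddr; 4096 is only an
         * assumed value for KFD_CWSR_TMA_OFFSET. */
        static uint8_t cwsr_area[8192];
        size_t tma_offset = 4096;

        struct cwsr_user_trap_slots *s =
                (struct cwsr_user_trap_slots *)(cwsr_area + tma_offset);

        s->tba = 0x400000;      /* user trap handler code address */
        s->tma = 0x500000;      /* user trap handler memory address */

        printf("tba=%#llx tma=%#llx\n",
               (unsigned long long)s->tba, (unsigned long long)s->tma);
        return 0;
}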
9fd3f1bf FK |
1519 | static int process_termination_nocpsch(struct device_queue_manager *dqm, |
1520 | struct qcm_process_device *qpd) | |
1521 | { | |
1522 | struct queue *q, *next; | |
1523 | struct device_process_node *cur, *next_dpn; | |
1524 | int retval = 0; | |
1525 | ||
efeaed4d | 1526 | dqm_lock(dqm); |
9fd3f1bf FK |
1527 | |
1528 | /* Clear all user mode queues */ | |
1529 | list_for_each_entry_safe(q, next, &qpd->queues_list, list) { | |
1530 | int ret; | |
1531 | ||
1532 | ret = destroy_queue_nocpsch_locked(dqm, qpd, q); | |
1533 | if (ret) | |
1534 | retval = ret; | |
1535 | } | |
1536 | ||
1537 | /* Unregister process */ | |
1538 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { | |
1539 | if (qpd == cur->qpd) { | |
1540 | list_del(&cur->list); | |
1541 | kfree(cur); | |
1542 | dqm->processes_count--; | |
1543 | break; | |
1544 | } | |
1545 | } | |
1546 | ||
efeaed4d | 1547 | dqm_unlock(dqm); |
9fd3f1bf FK |
1548 | return retval; |
1549 | } | |
1550 | ||
5df099e8 JC |
1551 | static int get_wave_state(struct device_queue_manager *dqm, |
1552 | struct queue *q, | |
1553 | void __user *ctl_stack, | |
1554 | u32 *ctl_stack_used_size, | |
1555 | u32 *save_area_used_size) | |
1556 | { | |
1557 | struct mqd_manager *mqd_mgr; |
1558 | int r; | |
1559 | ||
1560 | dqm_lock(dqm); | |
1561 | ||
1562 | if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || | |
1563 | q->properties.is_active || !q->device->cwsr_enabled) { | |
1564 | r = -EINVAL; | |
1565 | goto dqm_unlock; | |
1566 | } | |
1567 | ||
1568 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE); |
1569 | if (!mqd_mgr) { |
1570 | r = -ENOMEM; |
1571 | goto dqm_unlock; |
1572 | } |
1573 | ||
1574 | if (!mqd_mgr->get_wave_state) { |
1575 | r = -EINVAL; | |
1576 | goto dqm_unlock; | |
1577 | } | |
1578 | ||
1579 | r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, ctl_stack_used_size, |
1580 | save_area_used_size); | |
1581 | ||
1582 | dqm_unlock: | |
1583 | dqm_unlock(dqm); | |
1584 | return r; | |
1585 | } | |
9fd3f1bf FK |
1586 | |
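get_wave_state() only succeeds for an inactive compute queue on a CWSR-enabled device; it copies the control stack out of the CWSR save area. Userspace reaches it through /dev/kfd; the sketch below assumes the AMDKFD_IOC_GET_QUEUE_WAVE_STATE ioctl and argument struct from the uapi kfd_ioctl.h of this kernel generation, so treat the exact field names as assumptions.

/*
 * Hedged userspace sketch: read the control stack of an inactive
 * compute queue. Ioctl name and struct fields are assumed to match
 * the uapi kfd_ioctl.h shipped with this driver version.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "kfd_ioctl.h"

static int dump_ctl_stack(int kfd_fd, uint32_t queue_id, size_t buf_size)
{
        struct kfd_ioctl_get_queue_wave_state_args args = {0};
        void *buf = malloc(buf_size);

        if (!buf)
                return -1;
        args.queue_id = queue_id;
        args.ctl_stack_address = (uint64_t)(uintptr_t)buf;

        /* Fails with EINVAL if the queue is still active or the device
         * has CWSR disabled, mirroring the checks in get_wave_state(). */
        if (ioctl(kfd_fd, AMDKFD_IOC_GET_QUEUE_WAVE_STATE, &args) < 0) {
                perror("AMDKFD_IOC_GET_QUEUE_WAVE_STATE");
                free(buf);
                return -1;
        }

        printf("ctl stack: %u bytes used, save area: %u bytes used\n",
               args.ctl_stack_used_size, args.save_area_used_size);
        free(buf);
        return 0;
}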
1587 | static int process_termination_cpsch(struct device_queue_manager *dqm, | |
1588 | struct qcm_process_device *qpd) | |
1589 | { | |
1590 | int retval; | |
1591 | struct queue *q, *next; | |
1592 | struct kernel_queue *kq, *kq_next; | |
8d5f3552 | 1593 | struct mqd_manager *mqd_mgr; |
9fd3f1bf FK |
1594 | struct device_process_node *cur, *next_dpn; |
1595 | enum kfd_unmap_queues_filter filter = | |
1596 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; | |
1597 | ||
1598 | retval = 0; | |
1599 | ||
efeaed4d | 1600 | dqm_lock(dqm); |
9fd3f1bf FK |
1601 | |
1602 | /* Clean all kernel queues */ | |
1603 | list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { | |
1604 | list_del(&kq->list); | |
1605 | dqm->queue_count--; | |
1606 | qpd->is_debug = false; | |
1607 | dqm->total_queue_count--; | |
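/* Kernel queues (e.g. the debugger's DIQ) are mapped as static queues; the DYNAMIC_QUEUES filter would leave them mapped, so escalate to ALL_QUEUES */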
1608 | filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; | |
1609 | } | |
1610 | ||
1611 | /* Clear all user mode queues */ | |
1612 | list_for_each_entry(q, &qpd->queues_list, list) { | |
72a01d23 | 1613 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { |
9fd3f1bf | 1614 | dqm->sdma_queue_count--; |
72a01d23 FK |
1615 | deallocate_sdma_queue(dqm, q->sdma_id); |
1616 | } | |
9fd3f1bf FK |
1617 | |
1618 | if (q->properties.is_active) | |
1619 | dqm->queue_count--; | |
1620 | ||
1621 | dqm->total_queue_count--; | |
1622 | } | |
1623 | ||
1624 | /* Unregister process */ | |
1625 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { | |
1626 | if (qpd == cur->qpd) { | |
1627 | list_del(&cur->list); | |
1628 | kfree(cur); | |
1629 | dqm->processes_count--; | |
1630 | break; | |
1631 | } | |
1632 | } | |
1633 | ||
1634 | retval = execute_queues_cpsch(dqm, filter, 0); | |
73ea648d | 1635 | if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { |
9fd3f1bf FK |
1636 | pr_warn("Resetting wavefronts (cpsch) on dev %p\n", dqm->dev); |
1637 | dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process); | |
1638 | qpd->reset_wavefronts = false; | |
1639 | } | |
1640 | ||
1641 | /* lastly, free mqd resources */ | |
1642 | list_for_each_entry_safe(q, next, &qpd->queues_list, list) { | |
8d5f3552 | 1643 | mqd_mgr = dqm->ops.get_mqd_manager(dqm, |
9fd3f1bf | 1644 | get_mqd_type_from_queue_type(q->properties.type)); |
8d5f3552 | 1645 | if (!mqd_mgr) { |
9fd3f1bf FK |
1646 | retval = -ENOMEM; |
1647 | goto out; | |
1648 | } | |
1649 | list_del(&q->list); | |
bc920fd4 | 1650 | qpd->queue_count--; |
8d5f3552 | 1651 | mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); |
9fd3f1bf FK |
1652 | } |
1653 | ||
1654 | out: | |
efeaed4d | 1655 | dqm_unlock(dqm); |
9fd3f1bf FK |
1656 | return retval; |
1657 | } | |
1658 | ||
64c7f8cf BG |
1659 | struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) |
1660 | { | |
1661 | struct device_queue_manager *dqm; | |
1662 | ||
79775b62 | 1663 | pr_debug("Loading device queue manager\n"); |
a22fc854 | 1664 | |
dbf56ab1 | 1665 | dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); |
64c7f8cf BG |
1666 | if (!dqm) |
1667 | return NULL; | |
1668 | ||
d146c5a7 FK |
1669 | switch (dev->device_info->asic_family) { |
1670 | /* HWS is not available on Hawaii. */ | |
1671 | case CHIP_HAWAII: | |
1672 | /* HWS depends on CWSR for timely dequeue. CWSR is not | |
1673 | * available on Tonga. | |
1674 | * | |
1675 | * FIXME: This argument also applies to Kaveri. | |
1676 | */ | |
1677 | case CHIP_TONGA: | |
1678 | dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; | |
1679 | break; | |
1680 | default: | |
1681 | dqm->sched_policy = sched_policy; | |
1682 | break; | |
1683 | } | |
1684 | ||
64c7f8cf | 1685 | dqm->dev = dev; |
d146c5a7 | 1686 | switch (dqm->sched_policy) { |
64c7f8cf BG |
1687 | case KFD_SCHED_POLICY_HWS: |
1688 | case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: | |
1689 | /* initialize dqm for cp scheduling */ | |
45c9a5e4 OG |
1690 | dqm->ops.create_queue = create_queue_cpsch; |
1691 | dqm->ops.initialize = initialize_cpsch; | |
1692 | dqm->ops.start = start_cpsch; | |
1693 | dqm->ops.stop = stop_cpsch; | |
1694 | dqm->ops.destroy_queue = destroy_queue_cpsch; | |
1695 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1696 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1697 | dqm->ops.register_process = register_process; | |
1698 | dqm->ops.unregister_process = unregister_process; | |
1699 | dqm->ops.uninitialize = uninitialize; | |
45c9a5e4 OG |
1700 | dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; |
1701 | dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; | |
1702 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; | |
d7b9bd22 | 1703 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1704 | dqm->ops.process_termination = process_termination_cpsch; |
26103436 FK |
1705 | dqm->ops.evict_process_queues = evict_process_queues_cpsch; |
1706 | dqm->ops.restore_process_queues = restore_process_queues_cpsch; | |
5df099e8 | 1707 | dqm->ops.get_wave_state = get_wave_state; |
64c7f8cf BG |
1708 | break; |
1709 | case KFD_SCHED_POLICY_NO_HWS: | |
1710 | /* initialize dqm for no cp scheduling */ | |
45c9a5e4 OG |
1711 | dqm->ops.start = start_nocpsch; |
1712 | dqm->ops.stop = stop_nocpsch; | |
1713 | dqm->ops.create_queue = create_queue_nocpsch; | |
1714 | dqm->ops.destroy_queue = destroy_queue_nocpsch; | |
1715 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1716 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1717 | dqm->ops.register_process = register_process; | |
1718 | dqm->ops.unregister_process = unregister_process; | |
45c9a5e4 | 1719 | dqm->ops.initialize = initialize_nocpsch; |
58dcd5bf | 1720 | dqm->ops.uninitialize = uninitialize; |
45c9a5e4 | 1721 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; |
d7b9bd22 | 1722 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1723 | dqm->ops.process_termination = process_termination_nocpsch; |
26103436 FK |
1724 | dqm->ops.evict_process_queues = evict_process_queues_nocpsch; |
1725 | dqm->ops.restore_process_queues = | |
1726 | restore_process_queues_nocpsch; | |
5df099e8 | 1727 | dqm->ops.get_wave_state = get_wave_state; |
64c7f8cf BG |
1728 | break; |
1729 | default: | |
d146c5a7 | 1730 | pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); |
32fa8219 | 1731 | goto out_free; |
64c7f8cf BG |
1732 | } |
1733 | ||
a22fc854 BG |
1734 | switch (dev->device_info->asic_family) { |
1735 | case CHIP_CARRIZO: | |
bfd5e378 | 1736 | device_queue_manager_init_vi(&dqm->asic_ops); |
300dec95 OG |
1737 | break; |
1738 | ||
a22fc854 | 1739 | case CHIP_KAVERI: |
bfd5e378 | 1740 | device_queue_manager_init_cik(&dqm->asic_ops); |
300dec95 | 1741 | break; |
97672cbe FK |
1742 | |
1743 | case CHIP_HAWAII: | |
1744 | device_queue_manager_init_cik_hawaii(&dqm->asic_ops); | |
1745 | break; | |
1746 | ||
1747 | case CHIP_TONGA: | |
1748 | case CHIP_FIJI: | |
1749 | case CHIP_POLARIS10: | |
1750 | case CHIP_POLARIS11: | |
1751 | device_queue_manager_init_vi_tonga(&dqm->asic_ops); | |
1752 | break; | |
bed4f110 FK |
1753 | |
1754 | case CHIP_VEGA10: | |
22a3a294 | 1755 | case CHIP_VEGA20: |
bed4f110 FK |
1756 | case CHIP_RAVEN: |
1757 | device_queue_manager_init_v9(&dqm->asic_ops); | |
1758 | break; | |
e596b903 YZ |
1759 | default: |
1760 | WARN(1, "Unexpected ASIC family %u", | |
1761 | dev->device_info->asic_family); | |
1762 | goto out_free; | |
a22fc854 BG |
1763 | } |
1764 | ||
32fa8219 FK |
1765 | if (!dqm->ops.initialize(dqm)) |
1766 | return dqm; | |
64c7f8cf | 1767 | |
32fa8219 FK |
1768 | out_free: |
1769 | kfree(dqm); | |
1770 | return NULL; | |
64c7f8cf BG |
1771 | } |
1772 | ||
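device_queue_manager_init() fills two independent dispatch tables: dqm->ops varies with the scheduling policy (HWS vs. no-HWS) while dqm->asic_ops varies with the ASIC family, so either axis can change without touching the other. A minimal sketch of that shape, with every name invented for illustration:

/*
 * Minimal sketch of the two-axis dispatch set up above: one ops table
 * keyed on scheduling policy, one on ASIC family. Every name here is
 * invented; only the pattern matches the driver.
 */
#include <stdio.h>

struct policy_ops { void (*create_queue)(void); };      /* like dqm->ops */
struct hw_ops     { void (*init_sh_mem)(void); };       /* like dqm->asic_ops */

struct mini_dqm {
        struct policy_ops ops;          /* cpsch vs. nocpsch variants */
        struct hw_ops asic_ops;         /* cik/vi/v9 variants */
};

static void create_queue_hws(void) { puts("create queue via HWS runlist"); }
static void init_sh_mem_gfx9(void) { puts("GFXv9 SH_MEM defaults"); }

int main(void)
{
        struct mini_dqm dqm = {
                .ops      = { .create_queue = create_queue_hws },
                .asic_ops = { .init_sh_mem = init_sh_mem_gfx9 },
        };

        /* Callers go through the tables only, so the policy and ASIC
         * selections stay independent of each other. */
        dqm.ops.create_queue();
        dqm.asic_ops.init_sh_mem();
        return 0;
}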
1773 | void device_queue_manager_uninit(struct device_queue_manager *dqm) | |
1774 | { | |
45c9a5e4 | 1775 | dqm->ops.uninitialize(dqm); |
64c7f8cf BG |
1776 | kfree(dqm); |
1777 | } | |
851a645e | 1778 | |
2640c3fa | 1779 | int kfd_process_vm_fault(struct device_queue_manager *dqm, |
1780 | unsigned int pasid) | |
1781 | { | |
1782 | struct kfd_process_device *pdd; | |
1783 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); | |
1784 | int ret = 0; | |
1785 | ||
1786 | if (!p) | |
1787 | return -EINVAL; | |
1788 | pdd = kfd_get_process_device_data(dqm->dev, p); | |
1789 | if (pdd) | |
1790 | ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); | |
1791 | kfd_unref_process(p); | |
1792 | ||
1793 | return ret; | |
1794 | } | |
1795 | ||
73ea648d SL |
1796 | static void kfd_process_hw_exception(struct work_struct *work) |
1797 | { | |
1798 | struct device_queue_manager *dqm = container_of(work, | |
1799 | struct device_queue_manager, hw_exception_work); | |
5b87245f | 1800 | amdgpu_amdkfd_gpu_reset(dqm->dev->kgd); |
73ea648d SL |
1801 | } |
1802 | ||
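kfd_process_hw_exception() runs from a work item so that the GPU reset, which may sleep, never executes in the context that detects the HWS hang (typically under the DQM lock). A kernel-style sketch of the defer-via-work-item pattern, with demo_* names standing in for the driver's:

/*
 * Kernel-style sketch of deferring a sleeping operation out of a
 * locked context via a work item. All demo_* names are invented;
 * only the pattern mirrors hw_exception_work above.
 */
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_dqm {
        struct mutex lock;
        bool is_hws_hang;
        struct work_struct hw_exception_work;
};

static void demo_hw_exception(struct work_struct *work)
{
        struct demo_dqm *dqm =
                container_of(work, struct demo_dqm, hw_exception_work);

        /* May sleep: safe here because we are no longer under
         * dqm->lock. The real handler calls the amdgpu GPU reset. */
        (void)dqm;
}

static void demo_init(struct demo_dqm *dqm)
{
        mutex_init(&dqm->lock);
        INIT_WORK(&dqm->hw_exception_work, demo_hw_exception);
}

/* Called with dqm->lock held when a preemption times out. */
static void demo_detect_hang(struct demo_dqm *dqm)
{
        dqm->is_hws_hang = true;
        schedule_work(&dqm->hw_exception_work); /* returns immediately */
}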
851a645e FK |
1803 | #if defined(CONFIG_DEBUG_FS) |
1804 | ||
1805 | static void seq_reg_dump(struct seq_file *m, | |
1806 | uint32_t (*dump)[2], uint32_t n_regs) | |
1807 | { | |
1808 | uint32_t i, count; | |
1809 | ||
1810 | for (i = 0, count = 0; i < n_regs; i++) { | |
1811 | if (count == 0 || | |
1812 | dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { | |
1813 | seq_printf(m, "%s %08x: %08x", | |
1814 | i ? "\n" : "", | |
1815 | dump[i][0], dump[i][1]); | |
1816 | count = 7; | |
1817 | } else { | |
1818 | seq_printf(m, " %08x", dump[i][1]); | |
1819 | count--; | |
1820 | } | |
1821 | } | |
1822 | ||
1823 | seq_puts(m, "\n"); | |
1824 | } | |
1825 | ||
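seq_reg_dump() coalesces runs of consecutive register offsets: a new output line starts whenever the run breaks or after eight values (the first plus count = 7 more). A userspace reproduction of the same grouping with made-up offsets and values; on kernels with CONFIG_DEBUG_FS the real dumps typically appear under /sys/kernel/debug/kfd/.

/*
 * Userspace reproduction of seq_reg_dump()'s grouping: up to eight
 * values per line, new line when the offsets stop being consecutive.
 * Register offsets and values below are made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

static void reg_dump(uint32_t (*dump)[2], uint32_t n_regs)
{
        uint32_t i, count;

        for (i = 0, count = 0; i < n_regs; i++) {
                if (count == 0 ||
                    dump[i - 1][0] + sizeof(uint32_t) != dump[i][0]) {
                        printf("%s %08x: %08x", i ? "\n" : "",
                               dump[i][0], dump[i][1]);
                        count = 7;
                } else {
                        printf(" %08x", dump[i][1]);
                        count--;
                }
        }
        printf("\n");
}

int main(void)
{
        /* Two consecutive registers, then a gap, then one more. */
        uint32_t dump[][2] = {
                { 0x1000, 0xdeadbeef },
                { 0x1004, 0xcafef00d },
                { 0x2000, 0x00000001 },
        };

        reg_dump(dump, 3);      /* prints two lines of output */
        return 0;
}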
1826 | int dqm_debugfs_hqds(struct seq_file *m, void *data) | |
1827 | { | |
1828 | struct device_queue_manager *dqm = data; | |
1829 | uint32_t (*dump)[2], n_regs; | |
1830 | int pipe, queue; | |
1831 | int r = 0; | |
1832 | ||
24f48a42 OZ |
1833 | r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, |
1834 | KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); | |
1835 | if (!r) { | |
1836 | seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", | |
1837 | KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, | |
1838 | KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), | |
1839 | KFD_CIK_HIQ_QUEUE); | |
1840 | seq_reg_dump(m, dump, n_regs); | |
1841 | ||
1842 | kfree(dump); | |
1843 | } | |
1844 | ||
851a645e FK |
1845 | for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { |
1846 | int pipe_offset = pipe * get_queues_per_pipe(dqm); | |
1847 | ||
1848 | for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { | |
1849 | if (!test_bit(pipe_offset + queue, | |
1850 | dqm->dev->shared_resources.queue_bitmap)) | |
1851 | continue; | |
1852 | ||
1853 | r = dqm->dev->kfd2kgd->hqd_dump( | |
1854 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1855 | if (r) | |
1856 | break; | |
1857 | ||
1858 | seq_printf(m, " CP Pipe %d, Queue %d\n", | |
1859 | pipe, queue); | |
1860 | seq_reg_dump(m, dump, n_regs); | |
1861 | ||
1862 | kfree(dump); | |
1863 | } | |
1864 | } | |
1865 | ||
98bb9222 | 1866 | for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { |
d5094189 SL |
1867 | for (queue = 0; |
1868 | queue < dqm->dev->device_info->num_sdma_queues_per_engine; | |
1869 | queue++) { | |
851a645e FK |
1870 | r = dqm->dev->kfd2kgd->hqd_sdma_dump( |
1871 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1872 | if (r) | |
1873 | break; | |
1874 | ||
1875 | seq_printf(m, " SDMA Engine %d, RLC %d\n", | |
1876 | pipe, queue); | |
1877 | seq_reg_dump(m, dump, n_regs); | |
1878 | ||
1879 | kfree(dump); | |
1880 | } | |
1881 | } | |
1882 | ||
1883 | return r; | |
1884 | } | |
1885 | ||
a29ec470 SL |
1886 | int dqm_debugfs_execute_queues(struct device_queue_manager *dqm) |
1887 | { | |
1888 | int r = 0; | |
1889 | ||
1890 | dqm_lock(dqm); | |
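/* Setting active_runlist forces unmap_queues_cpsch() to issue a real unmap even when no runlist is thought to be active, so execute_queues_cpsch() rebuilds from a clean slate */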
1891 | dqm->active_runlist = true; | |
1892 | r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); | |
1893 | dqm_unlock(dqm); | |
1894 | ||
1895 | return r; | |
1896 | } | |
1897 | ||
851a645e | 1898 | #endif |