/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
                                  unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
                                enum kfd_unmap_queues_filter filter,
                                uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                              enum kfd_unmap_queues_filter filter,
                              uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
                                     struct queue *q,
                                     struct qcm_process_device *qpd);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                  unsigned int sdma_queue_id);

static void kfd_process_hw_exception(struct work_struct *work);

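/* SDMA queues are backed by the SDMA MQD manager; every other queue type
 * is handled by the CP MQD manager.
 */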
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
        if (type == KFD_QUEUE_TYPE_SDMA)
                return KFD_MQD_TYPE_SDMA;
        return KFD_MQD_TYPE_CP;
}

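/* A pipe is usable by KFD if at least one of its queues is set in the
 * queue bitmap shared with amdgpu.
 */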
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
        int i;
        int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
                + pipe * dqm->dev->shared_resources.num_queue_per_pipe;

        /* queue is available for KFD usage if bit is 1 */
        for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
                if (test_bit(pipe_offset + i,
                             dqm->dev->shared_resources.queue_bitmap))
                        return true;
        return false;
}

unsigned int get_queues_num(struct device_queue_manager *dqm)
{
        return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
                             KGD_MAX_QUEUES);
}

unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
        return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
        return dqm->dev->shared_resources.num_pipe_per_mec;
}

void program_sh_mem_settings(struct device_queue_manager *dqm,
                             struct qcm_process_device *qpd)
{
        return dqm->dev->kfd2kgd->program_sh_mem_settings(
                                                dqm->dev->kgd, qpd->vmid,
                                                qpd->sh_mem_config,
                                                qpd->sh_mem_ape1_base,
                                                qpd->sh_mem_ape1_limit,
                                                qpd->sh_mem_bases);
}

static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
        struct kfd_dev *dev = qpd->dqm->dev;

        if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
                /* On pre-SOC15 chips we need to use the queue ID to
                 * preserve the user mode ABI.
                 */
                q->doorbell_id = q->properties.queue_id;
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                /* For SDMA queues on SOC15, use static doorbell
                 * assignments based on the engine and queue.
                 */
                q->doorbell_id = dev->shared_resources.sdma_doorbell
                        [q->properties.sdma_engine_id]
                        [q->properties.sdma_queue_id];
        } else {
                /* For CP queues on SOC15 reserve a free doorbell ID */
                unsigned int found;

                found = find_first_zero_bit(qpd->doorbell_bitmap,
                                            KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
                if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
                        pr_debug("No doorbells available");
                        return -EBUSY;
                }
                set_bit(found, qpd->doorbell_bitmap);
                q->doorbell_id = found;
        }

        q->properties.doorbell_off =
                kfd_doorbell_id_to_offset(dev, q->process,
                                          q->doorbell_id);

        return 0;
}

static void deallocate_doorbell(struct qcm_process_device *qpd,
                                struct queue *q)
{
        unsigned int old;
        struct kfd_dev *dev = qpd->dqm->dev;

        if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
            q->properties.type == KFD_QUEUE_TYPE_SDMA)
                return;

        old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
        WARN_ON(!old);
}

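/* Bind a VMID to the process on creation of its first queue: take a free
 * bit from vmid_bitmap, program the PASID-to-VMID mapping and the page
 * table base, then flush the TLB so no stale translations survive.
 */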
static int allocate_vmid(struct device_queue_manager *dqm,
                         struct qcm_process_device *qpd,
                         struct queue *q)
{
        int bit, allocated_vmid;

        if (dqm->vmid_bitmap == 0)
                return -ENOMEM;

        bit = ffs(dqm->vmid_bitmap) - 1;
        dqm->vmid_bitmap &= ~(1 << bit);

        allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
        pr_debug("vmid allocation %d\n", allocated_vmid);
        qpd->vmid = allocated_vmid;
        q->properties.vmid = allocated_vmid;

        set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
        program_sh_mem_settings(dqm, qpd);

        /* qpd->page_table_base is set earlier when register_process()
         * is called, i.e. when the first queue is created.
         */
        dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
                        qpd->vmid,
                        qpd->page_table_base);
        /* invalidate the VM context after pasid and vmid mapping is set up */
        kfd_flush_tlb(qpd_to_pdd(qpd));

        return 0;
}

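/* Submit a release_mem packet through an indirect buffer to flush the
 * texture cache; used where the CP does not flush TC at dequeue (GFX v7).
 */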
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
                                       struct qcm_process_device *qpd)
{
        const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
        int ret;

        if (!qpd->ib_kaddr)
                return -ENOMEM;

        ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
        if (ret)
                return ret;

        return kdev->kfd2kgd->submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
                                qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
                                pmf->release_mem_size / sizeof(uint32_t));
}

static void deallocate_vmid(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd,
                            struct queue *q)
{
        int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

        /* On GFX v7, CP doesn't flush TC at dequeue */
        if (q->device->device_info->asic_family == CHIP_HAWAII)
                if (flush_texture_cache_nocpsch(q->device, qpd))
                        pr_err("Failed to flush TC\n");

        kfd_flush_tlb(qpd_to_pdd(qpd));

        /* Release the vmid mapping */
        set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

        dqm->vmid_bitmap |= (1 << bit);
        qpd->vmid = 0;
        q->properties.vmid = 0;
}

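/* Create a user mode queue without the HW scheduler: allocate a VMID if
 * this is the process's first queue, then dispatch to the compute or SDMA
 * specific path and update the queue accounting.
 */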
static int create_queue_nocpsch(struct device_queue_manager *dqm,
                                struct queue *q,
                                struct qcm_process_device *qpd)
{
        int retval;

        print_queue(q);

        dqm_lock(dqm);

        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
                                dqm->total_queue_count);
                retval = -EPERM;
                goto out_unlock;
        }

        if (list_empty(&qpd->queues_list)) {
                retval = allocate_vmid(dqm, qpd, q);
                if (retval)
                        goto out_unlock;
        }
        q->properties.vmid = qpd->vmid;
        /*
         * Eviction state logic: we only mark active queues as evicted
         * to avoid the overhead of restoring inactive queues later
         */
        if (qpd->evicted)
                q->properties.is_evicted = (q->properties.queue_size > 0 &&
                                            q->properties.queue_percent > 0 &&
                                            q->properties.queue_address != 0);

        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;

        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
                retval = create_compute_queue_nocpsch(dqm, q, qpd);
        else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                retval = create_sdma_queue_nocpsch(dqm, q, qpd);
        else
                retval = -EINVAL;

        if (retval) {
                if (list_empty(&qpd->queues_list))
                        deallocate_vmid(dqm, qpd, q);
                goto out_unlock;
        }

        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
        if (q->properties.is_active)
                dqm->queue_count++;

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                dqm->sdma_queue_count++;

        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

out_unlock:
        dqm_unlock(dqm);
        return retval;
}

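/* Reserve an HQD slot, scanning pipes round-robin from
 * next_pipe_to_allocate so queues spread horizontally across pipes.
 */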
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
        bool set;
        int pipe, bit, i;

        set = false;

        for (pipe = dqm->next_pipe_to_allocate, i = 0;
                        i < get_pipes_per_mec(dqm);
                        pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

                if (!is_pipe_enabled(dqm, 0, pipe))
                        continue;

                if (dqm->allocated_queues[pipe] != 0) {
                        bit = ffs(dqm->allocated_queues[pipe]) - 1;
                        dqm->allocated_queues[pipe] &= ~(1 << bit);
                        q->pipe = pipe;
                        q->queue = bit;
                        set = true;
                        break;
                }
        }

        if (!set)
                return -EBUSY;

        pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
        /* horizontal hqd allocation */
        dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

        return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
                                  struct queue *q)
{
        dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}

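/* Compute queue creation in no-HWS mode: grab an HQD slot and a doorbell,
 * initialize the MQD, and load it onto the HQD if the queue is active.
 */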
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
                                        struct queue *q,
                                        struct qcm_process_device *qpd)
{
        int retval;
        struct mqd_manager *mqd;

        mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
        if (!mqd)
                return -ENOMEM;

        retval = allocate_hqd(dqm, q);
        if (retval)
                return retval;

        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_hqd;

        retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;

        pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
                        q->pipe, q->queue);

        dqm->dev->kfd2kgd->set_scratch_backing_va(
                        dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

        if (!q->properties.is_active)
                return 0;

        retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
                               q->process->mm);
        if (retval)
                goto out_uninit_mqd;

        return 0;

out_uninit_mqd:
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
out_deallocate_hqd:
        deallocate_hqd(dqm, q);

        return retval;
}

/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
                                struct qcm_process_device *qpd,
                                struct queue *q)
{
        int retval;
        struct mqd_manager *mqd;

        mqd = dqm->ops.get_mqd_manager(dqm,
                get_mqd_type_from_queue_type(q->properties.type));
        if (!mqd)
                return -ENOMEM;

        if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
                deallocate_hqd(dqm, q);
        } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                dqm->sdma_queue_count--;
                deallocate_sdma_queue(dqm, q->sdma_id);
        } else {
                pr_debug("q->properties.type %d is invalid\n",
                                q->properties.type);
                return -EINVAL;
        }
        dqm->total_queue_count--;

        deallocate_doorbell(qpd, q);

        retval = mqd->destroy_mqd(mqd, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
                                KFD_UNMAP_LATENCY_MS,
                                q->pipe, q->queue);
        if (retval == -ETIME)
                qpd->reset_wavefronts = true;

        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

        list_del(&q->list);
        if (list_empty(&qpd->queues_list)) {
                if (qpd->reset_wavefronts) {
                        pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
                                        dqm->dev);
                        /* dbgdev_wave_reset_wavefronts has to be called before
                         * deallocate_vmid(), i.e. when vmid is still in use.
                         */
                        dbgdev_wave_reset_wavefronts(dqm->dev,
                                        qpd->pqm->process);
                        qpd->reset_wavefronts = false;
                }

                deallocate_vmid(dqm, qpd, q);
        }
        qpd->queue_count--;
        if (q->properties.is_active)
                dqm->queue_count--;

        return retval;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
                                struct qcm_process_device *qpd,
                                struct queue *q)
{
        int retval;

        dqm_lock(dqm);
        retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
        dqm_unlock(dqm);

        return retval;
}

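/* Update a queue's MQD and the active-queue accounting. With the HW
 * scheduler the queue is unmapped before the MQD is touched and the
 * runlist is remapped afterwards; in no-HWS mode a previously active
 * queue is destroyed on its HQD and, if still active, reloaded.
 */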
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
        int retval;
        struct mqd_manager *mqd;
        struct kfd_process_device *pdd;
        bool prev_active = false;

        dqm_lock(dqm);
        pdd = kfd_get_process_device_data(q->device, q->process);
        if (!pdd) {
                retval = -ENODEV;
                goto out_unlock;
        }
        mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
        if (!mqd) {
                retval = -ENOMEM;
                goto out_unlock;
        }
        /*
         * Eviction state logic: we only mark active queues as evicted
         * to avoid the overhead of restoring inactive queues later
         */
        if (pdd->qpd.evicted)
                q->properties.is_evicted = (q->properties.queue_size > 0 &&
                                            q->properties.queue_percent > 0 &&
                                            q->properties.queue_address != 0);

        /* Save previous activity state for counters */
        prev_active = q->properties.is_active;

        /* Make sure the queue is unmapped before updating the MQD */
        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
                retval = unmap_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval) {
                        pr_err("unmap queue failed\n");
                        goto out_unlock;
                }
        } else if (prev_active &&
                   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                    q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
                retval = mqd->destroy_mqd(mqd, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval) {
                        pr_err("destroy mqd failed\n");
                        goto out_unlock;
                }
        }

        retval = mqd->update_mqd(mqd, q->mqd, &q->properties);

        /*
         * check active state vs. the previous state and modify
         * counter accordingly. map_queues_cpsch uses the
         * dqm->queue_count to determine whether a new runlist must be
         * uploaded.
         */
        if (q->properties.is_active && !prev_active)
                dqm->queue_count++;
        else if (!q->properties.is_active && prev_active)
                dqm->queue_count--;

        if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
                retval = map_queues_cpsch(dqm);
        else if (q->properties.is_active &&
                 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
                  q->properties.type == KFD_QUEUE_TYPE_SDMA))
                retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
                                       &q->properties, q->process->mm);

out_unlock:
        dqm_unlock(dqm);
        return retval;
}

static struct mqd_manager *get_mqd_manager(
                struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
        struct mqd_manager *mqd;

        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
                return NULL;

        pr_debug("mqd type %d\n", type);

        mqd = dqm->mqds[type];
        if (!mqd) {
                mqd = mqd_manager_init(type, dqm->dev);
                if (!mqd)
                        pr_err("mqd manager is NULL");
                dqm->mqds[type] = mqd;
        }

        return mqd;
}

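/* Evict all active queues of a process. qpd->evicted acts as a reference
 * count, so nested evictions deactivate the queues only once.
 */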
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
{
        struct queue *q;
        struct mqd_manager *mqd;
        struct kfd_process_device *pdd;
        int retval = 0;

        dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;

        pdd = qpd_to_pdd(qpd);
        pr_info_ratelimited("Evicting PASID %u queues\n",
                            pdd->process->pasid);

        /* deactivate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_active)
                        continue;
                mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
                if (!mqd) { /* should not be here */
                        pr_err("Cannot evict queue, mqd mgr is NULL\n");
                        retval = -ENOMEM;
                        goto out;
                }
                q->properties.is_evicted = true;
                q->properties.is_active = false;
                retval = mqd->destroy_mqd(mqd, q->mqd,
                                KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
                                KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
                if (retval)
                        goto out;
                dqm->queue_count--;
        }

out:
        dqm_unlock(dqm);
        return retval;
}

static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
                                      struct qcm_process_device *qpd)
{
        struct queue *q;
        struct kfd_process_device *pdd;
        int retval = 0;

        dqm_lock(dqm);
        if (qpd->evicted++ > 0) /* already evicted, do nothing */
                goto out;

        pdd = qpd_to_pdd(qpd);
        pr_info_ratelimited("Evicting PASID %u queues\n",
                            pdd->process->pasid);

        /* deactivate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_active)
                        continue;
                q->properties.is_evicted = true;
                q->properties.is_active = false;
                dqm->queue_count--;
        }
        retval = execute_queues_cpsch(dqm,
                                qpd->is_debug ?
                                KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
        dqm_unlock(dqm);
        return retval;
}

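/* Restore a process's evicted queues once the eviction refcount drops to
 * zero. The page-directory base is re-read from the KGD first because it
 * may have changed while the process was evicted.
 */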
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                          struct qcm_process_device *qpd)
{
        struct queue *q;
        struct mqd_manager *mqd;
        struct kfd_process_device *pdd;
        uint32_t pd_base;
        int retval = 0;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
                qpd->evicted--;
                goto out;
        }

        pr_info_ratelimited("Restoring PASID %u queues\n",
                            pdd->process->pasid);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;
        pr_debug("Updated PD address to 0x%08x\n", pd_base);

        if (!list_empty(&qpd->queues_list)) {
                dqm->dev->kfd2kgd->set_vm_context_page_table_base(
                                dqm->dev->kgd,
                                qpd->vmid,
                                qpd->page_table_base);
                kfd_flush_tlb(pdd);
        }

        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
                        continue;
                mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
                if (!mqd) { /* should not be here */
                        pr_err("Cannot restore queue, mqd mgr is NULL\n");
                        retval = -ENOMEM;
                        goto out;
                }
                q->properties.is_evicted = false;
                q->properties.is_active = true;
                retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
                                       q->queue, &q->properties,
                                       q->process->mm);
                if (retval)
                        goto out;
                dqm->queue_count++;
        }
        qpd->evicted = 0;
out:
        dqm_unlock(dqm);
        return retval;
}

static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
                                        struct qcm_process_device *qpd)
{
        struct queue *q;
        struct kfd_process_device *pdd;
        uint32_t pd_base;
        int retval = 0;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
                goto out;
        if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
                qpd->evicted--;
                goto out;
        }

        pr_info_ratelimited("Restoring PASID %u queues\n",
                            pdd->process->pasid);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;
        pr_debug("Updated PD address to 0x%08x\n", pd_base);

        /* activate all active queues on the qpd */
        list_for_each_entry(q, &qpd->queues_list, list) {
                if (!q->properties.is_evicted)
                        continue;
                q->properties.is_evicted = false;
                q->properties.is_active = true;
                dqm->queue_count++;
        }
        retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        if (!retval)
                qpd->evicted = 0;
out:
        dqm_unlock(dqm);
        return retval;
}

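/* Register a process with this DQM: add it to the dqm->queues list, cache
 * its page-directory base in the QPD, and run the per-ASIC QPD update.
 */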
static int register_process(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd)
{
        struct device_process_node *n;
        struct kfd_process_device *pdd;
        uint32_t pd_base;
        int retval;

        n = kzalloc(sizeof(*n), GFP_KERNEL);
        if (!n)
                return -ENOMEM;

        n->qpd = qpd;

        pdd = qpd_to_pdd(qpd);
        /* Retrieve PD base */
        pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);

        dqm_lock(dqm);
        list_add(&n->list, &dqm->queues);

        /* Update PD Base in QPD */
        qpd->page_table_base = pd_base;

        retval = dqm->asic_ops.update_qpd(dqm, qpd);

        dqm->processes_count++;

        dqm_unlock(dqm);

        return retval;
}

static int unregister_process(struct device_queue_manager *dqm,
                              struct qcm_process_device *qpd)
{
        int retval;
        struct device_process_node *cur, *next;

        pr_debug("qpd->queues_list is %s\n",
                        list_empty(&qpd->queues_list) ? "empty" : "not empty");

        retval = 0;
        dqm_lock(dqm);

        list_for_each_entry_safe(cur, next, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        dqm->processes_count--;
                        goto out;
                }
        }
        /* qpd not found in dqm list */
        retval = 1;
out:
        dqm_unlock(dqm);
        return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
                       unsigned int vmid)
{
        uint32_t pasid_mapping;

        pasid_mapping = (pasid == 0) ? 0 :
                (uint32_t)pasid |
                ATC_VMID_PASID_MAPPING_VALID;

        return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
                                                dqm->dev->kgd, pasid_mapping,
                                                vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
        unsigned int i;

        for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
                if (is_pipe_enabled(dqm, 0, i))
                        dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}

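/* Build the per-pipe HQD allocation masks from the queue bitmap shared
 * with amdgpu, and seed the VMID and SDMA queue bitmaps.
 */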
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
        int pipe, queue;

        pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

        dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
                                        sizeof(unsigned int), GFP_KERNEL);
        if (!dqm->allocated_queues)
                return -ENOMEM;

        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->queue_count = dqm->next_pipe_to_allocate = 0;
        dqm->sdma_queue_count = 0;

        for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
                int pipe_offset = pipe * get_queues_per_pipe(dqm);

                for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
                        if (test_bit(pipe_offset + queue,
                                     dqm->dev->shared_resources.queue_bitmap))
                                dqm->allocated_queues[pipe] |= 1 << queue;
        }

        dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
        dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

        return 0;
}

static void uninitialize(struct device_queue_manager *dqm)
{
        int i;

        WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

        kfree(dqm->allocated_queues);
        for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
                kfree(dqm->mqds[i]);
        mutex_destroy(&dqm->lock_hidden);
        kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
        init_interrupts(dqm);
        return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
        pm_uninit(&dqm->packets);
        return 0;
}

static int allocate_sdma_queue(struct device_queue_manager *dqm,
                               unsigned int *sdma_queue_id)
{
        int bit;

        if (dqm->sdma_bitmap == 0)
                return -ENOMEM;

        bit = ffs(dqm->sdma_bitmap) - 1;
        dqm->sdma_bitmap &= ~(1 << bit);
        *sdma_queue_id = bit;

        return 0;
}

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
                                  unsigned int sdma_queue_id)
{
        if (sdma_queue_id >= CIK_SDMA_QUEUES)
                return;
        dqm->sdma_bitmap |= (1 << sdma_queue_id);
}

static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
                                     struct queue *q,
                                     struct qcm_process_device *qpd)
{
        struct mqd_manager *mqd;
        int retval;

        mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
        if (!mqd)
                return -ENOMEM;

        retval = allocate_sdma_queue(dqm, &q->sdma_id);
        if (retval)
                return retval;

        q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
        q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;

        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_sdma_queue;

        pr_debug("SDMA id is: %d\n", q->sdma_id);
        pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
        pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);

        dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
        retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;

        retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
        if (retval)
                goto out_uninit_mqd;

        return 0;

out_uninit_mqd:
        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
        deallocate_sdma_queue(dqm, q->sdma_id);

        return retval;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

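/* Hand the HW scheduler its resources: the KFD VMID mask and a 64-bit
 * mask of the first-MEC queues it may use.
 */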
static int set_sched_resources(struct device_queue_manager *dqm)
{
        int i, mec;
        struct scheduling_resources res;

        res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

        res.queue_mask = 0;
        for (i = 0; i < KGD_MAX_QUEUES; ++i) {
                mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
                        / dqm->dev->shared_resources.num_pipe_per_mec;

                if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
                        continue;

                /* only acquire queues from the first MEC */
                if (mec > 0)
                        continue;

                /* This situation may be hit in the future if a new HW
                 * generation exposes more than 64 queues. If so, the
                 * definition of res.queue_mask needs updating
                 */
                if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
                        pr_err("Invalid queue enabled by amdgpu: %d\n", i);
                        break;
                }

                res.queue_mask |= (1ull << i);
        }
        res.gws_mask = res.oac_mask = res.gds_heap_base =
                        res.gds_heap_size = 0;

        pr_debug("Scheduling resources:\n"
                        "vmid mask: 0x%8X\n"
                        "queue mask: 0x%8llX\n",
                        res.vmid_mask, res.queue_mask);

        return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
        pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

        mutex_init(&dqm->lock_hidden);
        INIT_LIST_HEAD(&dqm->queues);
        dqm->queue_count = dqm->processes_count = 0;
        dqm->sdma_queue_count = 0;
        dqm->active_runlist = false;
        dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;

        INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

        return 0;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
        int retval;

        retval = 0;

        retval = pm_init(&dqm->packets, dqm);
        if (retval)
                goto fail_packet_manager_init;

        retval = set_sched_resources(dqm);
        if (retval)
                goto fail_set_sched_resources;

        pr_debug("Allocating fence memory\n");

        /* allocate fence memory on the gart */
        retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
                                        &dqm->fence_mem);

        if (retval)
                goto fail_allocate_vidmem;

        dqm->fence_addr = dqm->fence_mem->cpu_ptr;
        dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

        init_interrupts(dqm);

        dqm_lock(dqm);
        /* clear hang status when the driver tries to start the hw scheduler */
        dqm->is_hws_hang = false;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        dqm_unlock(dqm);

        return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
        pm_uninit(&dqm->packets);
fail_packet_manager_init:
        return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
        dqm_lock(dqm);
        unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        dqm_unlock(dqm);

        kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
        pm_uninit(&dqm->packets);

        return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                     struct kernel_queue *kq,
                                     struct qcm_process_device *qpd)
{
        dqm_lock(dqm);
        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new kernel queue because %d queues were already created\n",
                                dqm->total_queue_count);
                dqm_unlock(dqm);
                return -EPERM;
        }

        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        list_add(&kq->list, &qpd->priv_queue_list);
        dqm->queue_count++;
        qpd->is_debug = true;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        dqm_unlock(dqm);

        return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
                                       struct kernel_queue *kq,
                                       struct qcm_process_device *qpd)
{
        dqm_lock(dqm);
        list_del(&kq->list);
        dqm->queue_count--;
        qpd->is_debug = false;
        execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type.
         */
        dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);
        dqm_unlock(dqm);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                              struct qcm_process_device *qpd)
{
        int retval;
        struct mqd_manager *mqd;

        retval = 0;

        dqm_lock(dqm);

        if (dqm->total_queue_count >= max_num_of_queues_per_device) {
                pr_warn("Can't create new usermode queue because %d queues were already created\n",
                                dqm->total_queue_count);
                retval = -EPERM;
                goto out_unlock;
        }

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                retval = allocate_sdma_queue(dqm, &q->sdma_id);
                if (retval)
                        goto out_unlock;
                q->properties.sdma_queue_id =
                        q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
                q->properties.sdma_engine_id =
                        q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
        }

        retval = allocate_doorbell(qpd, q);
        if (retval)
                goto out_deallocate_sdma_queue;

        mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));

        if (!mqd) {
                retval = -ENOMEM;
                goto out_deallocate_doorbell;
        }
        /*
         * Eviction state logic: we only mark active queues as evicted
         * to avoid the overhead of restoring inactive queues later
         */
        if (qpd->evicted)
                q->properties.is_evicted = (q->properties.queue_size > 0 &&
                                            q->properties.queue_percent > 0 &&
                                            q->properties.queue_address != 0);

        dqm->asic_ops.init_sdma_vm(dqm, q, qpd);

        q->properties.tba_addr = qpd->tba_addr;
        q->properties.tma_addr = qpd->tma_addr;
        retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval)
                goto out_deallocate_doorbell;

        list_add(&q->list, &qpd->queues_list);
        qpd->queue_count++;
        if (q->properties.is_active) {
                dqm->queue_count++;
                retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
        }

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                dqm->sdma_queue_count++;
        /*
         * Unconditionally increment this counter, regardless of the queue's
         * type or whether the queue is active.
         */
        dqm->total_queue_count++;

        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        dqm_unlock(dqm);
        return retval;

out_deallocate_doorbell:
        deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
        if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
                deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
        dqm_unlock(dqm);

        return retval;
}

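/* Poll (yielding the CPU between reads) until the scheduler writes
 * fence_value to fence_addr, or the timeout expires.
 */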
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
                              unsigned int fence_value,
                              unsigned int timeout_ms)
{
        unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

        while (*fence_addr != fence_value) {
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("qcm fence wait loop timeout expired\n");
                        return -ETIME;
                }
                schedule();
        }

        return 0;
}

static int unmap_sdma_queues(struct device_queue_manager *dqm,
                             unsigned int sdma_engine)
{
        return pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
                        KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false,
                        sdma_engine);
}

/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
        int retval;

        if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
                return 0;

        if (dqm->active_runlist)
                return 0;

        retval = pm_send_runlist(&dqm->packets, &dqm->queues);
        if (retval) {
                pr_err("failed to execute runlist\n");
                return retval;
        }
        dqm->active_runlist = true;

        return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
                              enum kfd_unmap_queues_filter filter,
                              uint32_t filter_param)
{
        int retval = 0;

        if (dqm->is_hws_hang)
                return -EIO;
        if (!dqm->active_runlist)
                return retval;

        pr_debug("Before destroying queues, sdma queue count is : %u\n",
                dqm->sdma_queue_count);

        if (dqm->sdma_queue_count > 0) {
                unmap_sdma_queues(dqm, 0);
                unmap_sdma_queues(dqm, 1);
        }

        retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
                        filter, filter_param, false, 0);
        if (retval)
                return retval;

        *dqm->fence_addr = KFD_FENCE_INIT;
        pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
                                KFD_FENCE_COMPLETED);
        /* this fence wait may time out */
        retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
                                QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
        if (retval)
                return retval;

        pm_release_ib(&dqm->packets);
        dqm->active_runlist = false;

        return retval;
}

/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
                                enum kfd_unmap_queues_filter filter,
                                uint32_t filter_param)
{
        int retval;

        if (dqm->is_hws_hang)
                return -EIO;
        retval = unmap_queues_cpsch(dqm, filter, filter_param);
        if (retval) {
                pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
                dqm->is_hws_hang = true;
                schedule_work(&dqm->hw_exception_work);
                return retval;
        }

        return map_queues_cpsch(dqm);
}

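/* Destroy a user queue under the HW scheduler: remove it from the process
 * list, re-execute the runlist without it, then release its MQD and HW
 * resources. Queues of a process under debug cannot be destroyed.
 */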
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
                               struct qcm_process_device *qpd,
                               struct queue *q)
{
        int retval;
        struct mqd_manager *mqd;
        bool preempt_all_queues;

        preempt_all_queues = false;

        retval = 0;

        /* remove queue from list to prevent rescheduling after preemption */
        dqm_lock(dqm);

        if (qpd->is_debug) {
                /*
                 * error, we currently do not allow destroying a queue
                 * of a process that is being debugged
                 */
                retval = -EBUSY;
                goto failed_try_destroy_debugged_queue;

        }

        mqd = dqm->ops.get_mqd_manager(dqm,
                        get_mqd_type_from_queue_type(q->properties.type));
        if (!mqd) {
                retval = -ENOMEM;
                goto failed;
        }

        deallocate_doorbell(qpd, q);

        if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
                dqm->sdma_queue_count--;
                deallocate_sdma_queue(dqm, q->sdma_id);
        }

        list_del(&q->list);
        qpd->queue_count--;
        if (q->properties.is_active) {
                dqm->queue_count--;
                retval = execute_queues_cpsch(dqm,
                                KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
                if (retval == -ETIME)
                        qpd->reset_wavefronts = true;
        }

        mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

        /*
         * Unconditionally decrement this counter, regardless of the queue's
         * type
         */
        dqm->total_queue_count--;
        pr_debug("Total of %d queues are accountable so far\n",
                        dqm->total_queue_count);

        dqm_unlock(dqm);

        return retval;

failed:
failed_try_destroy_debugged_queue:

        dqm_unlock(dqm);
        return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

static bool set_cache_memory_policy(struct device_queue_manager *dqm,
                                    struct qcm_process_device *qpd,
                                    enum cache_policy default_policy,
                                    enum cache_policy alternate_policy,
                                    void __user *alternate_aperture_base,
                                    uint64_t alternate_aperture_size)
{
        bool retval = true;

        if (!dqm->asic_ops.set_cache_memory_policy)
                return retval;

        dqm_lock(dqm);

        if (alternate_aperture_size == 0) {
                /* base > limit disables APE1 */
                qpd->sh_mem_ape1_base = 1;
                qpd->sh_mem_ape1_limit = 0;
        } else {
                /*
                 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
                 *                      SH_MEM_APE1_BASE[31:0], 0x0000 }
                 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
                 *                      SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
                 * Verify that the base and size parameters can be
                 * represented in this format and convert them.
                 * Additionally restrict APE1 to user-mode addresses.
                 */

                uint64_t base = (uintptr_t)alternate_aperture_base;
                uint64_t limit = base + alternate_aperture_size - 1;

                if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
                    (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
                        retval = false;
                        goto out;
                }

                qpd->sh_mem_ape1_base = base >> 16;
                qpd->sh_mem_ape1_limit = limit >> 16;
        }

        retval = dqm->asic_ops.set_cache_memory_policy(
                        dqm,
                        qpd,
                        default_policy,
                        alternate_policy,
                        alternate_aperture_base,
                        alternate_aperture_size);

        if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
                program_sh_mem_settings(dqm, qpd);

        pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
                qpd->sh_mem_config, qpd->sh_mem_ape1_base,
                qpd->sh_mem_ape1_limit);

out:
        dqm_unlock(dqm);
        return retval;
}

static int set_trap_handler(struct device_queue_manager *dqm,
                            struct qcm_process_device *qpd,
                            uint64_t tba_addr,
                            uint64_t tma_addr)
{
        uint64_t *tma;

        if (dqm->dev->cwsr_enabled) {
                /* Jump from CWSR trap handler to user trap */
                tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
                tma[0] = tba_addr;
                tma[1] = tma_addr;
        } else {
                qpd->tba_addr = tba_addr;
                qpd->tma_addr = tma_addr;
        }

        return 0;
}

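/* Process teardown in no-HWS mode: destroy every user mode queue the
 * process still owns on this device, then drop it from the DQM list.
 */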
static int process_termination_nocpsch(struct device_queue_manager *dqm,
                struct qcm_process_device *qpd)
{
        struct queue *q, *next;
        struct device_process_node *cur, *next_dpn;
        int retval = 0;

        dqm_lock(dqm);

        /* Clear all user mode queues */
        list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
                int ret;

                ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
                if (ret)
                        retval = ret;
        }

        /* Unregister process */
        list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
                if (qpd == cur->qpd) {
                        list_del(&cur->list);
                        kfree(cur);
                        dqm->processes_count--;
                        break;
                }
        }

        dqm_unlock(dqm);
        return retval;
}

1510 | static int process_termination_cpsch(struct device_queue_manager *dqm, | |
1511 | struct qcm_process_device *qpd) | |
1512 | { | |
1513 | int retval; | |
1514 | struct queue *q, *next; | |
1515 | struct kernel_queue *kq, *kq_next; | |
1516 | struct mqd_manager *mqd; | |
1517 | struct device_process_node *cur, *next_dpn; | |
1518 | enum kfd_unmap_queues_filter filter = | |
1519 | KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES; | |
1520 | ||
1521 | retval = 0; | |
1522 | ||
efeaed4d | 1523 | dqm_lock(dqm); |
9fd3f1bf FK |
1524 | |
1525 | /* Clean up all kernel queues */ | |
1526 | list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) { | |
1527 | list_del(&kq->list); | |
1528 | dqm->queue_count--; | |
1529 | qpd->is_debug = false; | |
1530 | dqm->total_queue_count--; | |
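		/*
		 * A kernel (debug) queue was attached; switch to the
		 * ALL_QUEUES filter so the unmap below removes it too,
		 * since DYNAMIC_QUEUES only covers user queues.
		 */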
1531 | filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES; | |
1532 | } | |
1533 | ||
1534 | /* Clear all user mode queues */ | |
1535 | list_for_each_entry(q, &qpd->queues_list, list) { | |
72a01d23 | 1536 | if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { |
9fd3f1bf | 1537 | dqm->sdma_queue_count--; |
72a01d23 FK |
1538 | deallocate_sdma_queue(dqm, q->sdma_id); |
1539 | } | |
9fd3f1bf FK |
1540 | |
1541 | if (q->properties.is_active) | |
1542 | dqm->queue_count--; | |
1543 | ||
1544 | dqm->total_queue_count--; | |
1545 | } | |
1546 | ||
1547 | /* Unregister process */ | |
1548 | list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) { | |
1549 | if (qpd == cur->qpd) { | |
1550 | list_del(&cur->list); | |
1551 | kfree(cur); | |
1552 | dqm->processes_count--; | |
1553 | break; | |
1554 | } | |
1555 | } | |
1556 | ||
1557 | retval = execute_queues_cpsch(dqm, filter, 0); | |
73ea648d | 1558 | if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { |
9fd3f1bf FK |
1559 | pr_warn("Resetting wavefronts (cpsch) on dev %p\n", dqm->dev); | |
1560 | dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process); | |
1561 | qpd->reset_wavefronts = false; | |
1562 | } | |
1563 | ||
1564 | /* Lastly, free the MQD resources */ | |
1565 | list_for_each_entry_safe(q, next, &qpd->queues_list, list) { | |
1566 | mqd = dqm->ops.get_mqd_manager(dqm, | |
1567 | get_mqd_type_from_queue_type(q->properties.type)); | |
1568 | if (!mqd) { | |
1569 | retval = -ENOMEM; | |
1570 | goto out; | |
1571 | } | |
1572 | list_del(&q->list); | |
bc920fd4 | 1573 | qpd->queue_count--; |
9fd3f1bf FK |
1574 | mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); |
1575 | } | |
1576 | ||
1577 | out: | |
efeaed4d | 1578 | dqm_unlock(dqm); |
9fd3f1bf FK |
1579 | return retval; |
1580 | } | |
1581 | ||
64c7f8cf BG |
1582 | struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) |
1583 | { | |
1584 | struct device_queue_manager *dqm; | |
1585 | ||
79775b62 | 1586 | pr_debug("Loading device queue manager\n"); |
a22fc854 | 1587 | |
dbf56ab1 | 1588 | dqm = kzalloc(sizeof(*dqm), GFP_KERNEL); |
64c7f8cf BG |
1589 | if (!dqm) |
1590 | return NULL; | |
1591 | ||
d146c5a7 FK |
1592 | switch (dev->device_info->asic_family) { |
1593 | /* HWS is not available on Hawaii. */ | |
1594 | case CHIP_HAWAII: | |
1595 | /* HWS depends on CWSR for timely dequeue. CWSR is not | |
1596 | * available on Tonga. | |
1597 | * | |
1598 | * FIXME: This argument also applies to Kaveri. | |
1599 | */ | |
1600 | case CHIP_TONGA: | |
1601 | dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS; | |
1602 | break; | |
1603 | default: | |
1604 | dqm->sched_policy = sched_policy; | |
1605 | break; | |
1606 | } | |
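	/*
	 * Note: sched_policy above is the module-wide default selected via
	 * the kfd "sched_policy" module parameter; Hawaii and Tonga override
	 * it because HWS cannot be relied on there, as the comments above
	 * explain.
	 */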
1607 | ||
64c7f8cf | 1608 | dqm->dev = dev; |
d146c5a7 | 1609 | switch (dqm->sched_policy) { |
64c7f8cf BG |
1610 | case KFD_SCHED_POLICY_HWS: |
1611 | case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: | |
1612 | /* initialize dqm for cp scheduling */ | |
45c9a5e4 OG |
1613 | dqm->ops.create_queue = create_queue_cpsch; |
1614 | dqm->ops.initialize = initialize_cpsch; | |
1615 | dqm->ops.start = start_cpsch; | |
1616 | dqm->ops.stop = stop_cpsch; | |
1617 | dqm->ops.destroy_queue = destroy_queue_cpsch; | |
1618 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1619 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1620 | dqm->ops.register_process = register_process; | |
1621 | dqm->ops.unregister_process = unregister_process; | |
1622 | dqm->ops.uninitialize = uninitialize; | |
45c9a5e4 OG |
1623 | dqm->ops.create_kernel_queue = create_kernel_queue_cpsch; |
1624 | dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch; | |
1625 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; | |
d7b9bd22 | 1626 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1627 | dqm->ops.process_termination = process_termination_cpsch; |
26103436 FK |
1628 | dqm->ops.evict_process_queues = evict_process_queues_cpsch; |
1629 | dqm->ops.restore_process_queues = restore_process_queues_cpsch; | |
64c7f8cf BG |
1630 | break; |
1631 | case KFD_SCHED_POLICY_NO_HWS: | |
1632 | /* initialize dqm for no cp scheduling */ | |
45c9a5e4 OG |
1633 | dqm->ops.start = start_nocpsch; |
1634 | dqm->ops.stop = stop_nocpsch; | |
1635 | dqm->ops.create_queue = create_queue_nocpsch; | |
1636 | dqm->ops.destroy_queue = destroy_queue_nocpsch; | |
1637 | dqm->ops.update_queue = update_queue; | |
58dcd5bf YZ |
1638 | dqm->ops.get_mqd_manager = get_mqd_manager; |
1639 | dqm->ops.register_process = register_process; | |
1640 | dqm->ops.unregister_process = unregister_process; | |
45c9a5e4 | 1641 | dqm->ops.initialize = initialize_nocpsch; |
58dcd5bf | 1642 | dqm->ops.uninitialize = uninitialize; |
45c9a5e4 | 1643 | dqm->ops.set_cache_memory_policy = set_cache_memory_policy; |
d7b9bd22 | 1644 | dqm->ops.set_trap_handler = set_trap_handler; |
9fd3f1bf | 1645 | dqm->ops.process_termination = process_termination_nocpsch; |
26103436 FK |
1646 | dqm->ops.evict_process_queues = evict_process_queues_nocpsch; |
1647 | dqm->ops.restore_process_queues = | |
1648 | restore_process_queues_nocpsch; | |
64c7f8cf BG |
1649 | break; |
1650 | default: | |
d146c5a7 | 1651 | pr_err("Invalid scheduling policy %d\n", dqm->sched_policy); |
32fa8219 | 1652 | goto out_free; |
64c7f8cf BG |
1653 | } |
1654 | ||
a22fc854 BG |
1655 | switch (dev->device_info->asic_family) { |
1656 | case CHIP_CARRIZO: | |
bfd5e378 | 1657 | device_queue_manager_init_vi(&dqm->asic_ops); |
300dec95 OG |
1658 | break; |
1659 | ||
a22fc854 | 1660 | case CHIP_KAVERI: |
bfd5e378 | 1661 | device_queue_manager_init_cik(&dqm->asic_ops); |
300dec95 | 1662 | break; |
97672cbe FK |
1663 | |
1664 | case CHIP_HAWAII: | |
1665 | device_queue_manager_init_cik_hawaii(&dqm->asic_ops); | |
1666 | break; | |
1667 | ||
1668 | case CHIP_TONGA: | |
1669 | case CHIP_FIJI: | |
1670 | case CHIP_POLARIS10: | |
1671 | case CHIP_POLARIS11: | |
1672 | device_queue_manager_init_vi_tonga(&dqm->asic_ops); | |
1673 | break; | |
bed4f110 FK |
1674 | |
1675 | case CHIP_VEGA10: | |
1676 | case CHIP_RAVEN: | |
1677 | device_queue_manager_init_v9(&dqm->asic_ops); | |
1678 | break; | |
e596b903 YZ |
1679 | default: |
1680 | WARN(1, "Unexpected ASIC family %u", | |
1681 | dev->device_info->asic_family); | |
1682 | goto out_free; | |
a22fc854 BG |
1683 | } |
1684 | ||
32fa8219 FK |
1685 | if (!dqm->ops.initialize(dqm)) |
1686 | return dqm; | |
64c7f8cf | 1687 | |
32fa8219 FK |
1688 | out_free: |
1689 | kfree(dqm); | |
1690 | return NULL; | |
64c7f8cf BG |
1691 | } |
1692 | ||
1693 | void device_queue_manager_uninit(struct device_queue_manager *dqm) | |
1694 | { | |
45c9a5e4 | 1695 | dqm->ops.uninitialize(dqm); |
64c7f8cf BG |
1696 | kfree(dqm); |
1697 | } | |
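/*
 * Minimal usage sketch, with a hypothetical caller name: this mirrors how
 * the device initialization path is expected to pair init and uninit.
 */
static int example_dqm_setup(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	dqm = device_queue_manager_init(dev);
	if (!dqm)
		return -ENOMEM;

	/* ... queues are created and scheduled through dqm->ops ... */

	device_queue_manager_uninit(dqm);
	return 0;
}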
851a645e | 1698 | |
2640c3fa | 1699 | int kfd_process_vm_fault(struct device_queue_manager *dqm, |
1700 | unsigned int pasid) | |
1701 | { | |
1702 | struct kfd_process_device *pdd; | |
1703 | struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); | |
1704 | int ret = 0; | |
1705 | ||
1706 | if (!p) | |
1707 | return -EINVAL; | |
1708 | pdd = kfd_get_process_device_data(dqm->dev, p); | |
1709 | if (pdd) | |
1710 | ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd); | |
1711 | kfd_unref_process(p); | |
1712 | ||
1713 | return ret; | |
1714 | } | |
1715 | ||
73ea648d SL |
1716 | static void kfd_process_hw_exception(struct work_struct *work) |
1717 | { | |
1718 | struct device_queue_manager *dqm = container_of(work, | |
1719 | struct device_queue_manager, hw_exception_work); | |
1720 | dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd); | |
1721 | } | |
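/*
 * kfd_process_hw_exception() above is scheduled via dqm->hw_exception_work
 * when the HW scheduler is detected as hung (see dqm->is_hws_hang);
 * recovery itself is delegated to the kgd layer, which resets the GPU.
 */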
1722 | ||
851a645e FK |
1723 | #if defined(CONFIG_DEBUG_FS) |
1724 | ||
1725 | static void seq_reg_dump(struct seq_file *m, | |
1726 | uint32_t (*dump)[2], uint32_t n_regs) | |
1727 | { | |
1728 | uint32_t i, count; | |
1729 | ||
1730 | for (i = 0, count = 0; i < n_regs; i++) { | |
1731 | if (count == 0 || | |
1732 | dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) { | |
1733 | seq_printf(m, "%s %08x: %08x", | |
1734 | i ? "\n" : "", | |
1735 | dump[i][0], dump[i][1]); | |
1736 | count = 7; | |
1737 | } else { | |
1738 | seq_printf(m, " %08x", dump[i][1]); | |
1739 | count--; | |
1740 | } | |
1741 | } | |
1742 | ||
1743 | seq_puts(m, "\n"); | |
1744 | } | |
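/*
 * Example of the resulting output (hypothetical register values): up to
 * eight registers with contiguous byte offsets are grouped per line, and
 * a new line starts whenever the offsets jump:
 *
 *  00000800: 00000001 00000000 00000000 00000000 00000000 00000000 00000000 00000000
 *  00001000: 00000002
 */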
1745 | ||
1746 | int dqm_debugfs_hqds(struct seq_file *m, void *data) | |
1747 | { | |
1748 | struct device_queue_manager *dqm = data; | |
1749 | uint32_t (*dump)[2], n_regs; | |
1750 | int pipe, queue; | |
1751 | int r = 0; | |
1752 | ||
24f48a42 OZ |
1753 | r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, |
1754 | KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); | |
1755 | if (!r) { | |
1756 | seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", | |
1757 | KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, | |
1758 | KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), | |
1759 | KFD_CIK_HIQ_QUEUE); | |
1760 | seq_reg_dump(m, dump, n_regs); | |
1761 | ||
1762 | kfree(dump); | |
1763 | } | |
1764 | ||
851a645e FK |
1765 | for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { |
1766 | int pipe_offset = pipe * get_queues_per_pipe(dqm); | |
1767 | ||
1768 | for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { | |
1769 | if (!test_bit(pipe_offset + queue, | |
1770 | dqm->dev->shared_resources.queue_bitmap)) | |
1771 | continue; | |
1772 | ||
1773 | r = dqm->dev->kfd2kgd->hqd_dump( | |
1774 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1775 | if (r) | |
1776 | break; | |
1777 | ||
1778 | seq_printf(m, " CP Pipe %d, Queue %d\n", | |
1779 | pipe, queue); | |
1780 | seq_reg_dump(m, dump, n_regs); | |
1781 | ||
1782 | kfree(dump); | |
1783 | } | |
1784 | } | |
1785 | ||
1786 | for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) { | |
1787 | for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) { | |
1788 | r = dqm->dev->kfd2kgd->hqd_sdma_dump( | |
1789 | dqm->dev->kgd, pipe, queue, &dump, &n_regs); | |
1790 | if (r) | |
1791 | break; | |
1792 | ||
1793 | seq_printf(m, " SDMA Engine %d, RLC %d\n", | |
1794 | pipe, queue); | |
1795 | seq_reg_dump(m, dump, n_regs); | |
1796 | ||
1797 | kfree(dump); | |
1798 | } | |
1799 | } | |
1800 | ||
1801 | return r; | |
1802 | } | |
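/*
 * A hypothetical registration sketch (not part of this file; the real
 * hook-up lives in kfd_debugfs.c): dqm_debugfs_hqds() follows the
 * single_open() seq_file convention, taking the dqm pointer as its
 * private data.
 */
static int dqm_debugfs_hqds_open(struct inode *inode, struct file *file)
{
	return single_open(file, dqm_debugfs_hqds, inode->i_private);
}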
1803 | ||
1804 | #endif |