/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef KFD_PRIV_H_INCLUDED
#define KFD_PRIV_H_INCLUDED

#include <linux/hashtable.h>
#include <linux/mmu_notifier.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kfd_ioctl.h>
#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/seq_file.h>
#include <linux/kref.h>
#include <kgd_kfd_interface.h>

#include "amd_shared.h"

#define KFD_MAX_RING_ENTRY_SIZE 8

#define KFD_SYSFS_FILE_MODE 0444

/* GPU ID hash width in bits */
#define KFD_GPU_ID_HASH_WIDTH 16

/* Use upper bits of mmap offset to store KFD driver specific information.
 * BITS[63:62] - Encode MMAP type
 * BITS[61:46] - Encode gpu_id. To identify to which GPU the offset belongs to
 * BITS[45:0]  - MMAP offset value
 *
 * NOTE: struct vm_area_struct.vm_pgoff uses offset in pages. Hence, these
 * defines are w.r.t. PAGE_SIZE
 */
#define KFD_MMAP_TYPE_SHIFT     (62 - PAGE_SHIFT)
#define KFD_MMAP_TYPE_MASK      (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_DOORBELL  (0x3ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_EVENTS    (0x2ULL << KFD_MMAP_TYPE_SHIFT)
#define KFD_MMAP_TYPE_RESERVED_MEM      (0x1ULL << KFD_MMAP_TYPE_SHIFT)

#define KFD_MMAP_GPU_ID_SHIFT (46 - PAGE_SHIFT)
#define KFD_MMAP_GPU_ID_MASK (((1ULL << KFD_GPU_ID_HASH_WIDTH) - 1) \
                                << KFD_MMAP_GPU_ID_SHIFT)
#define KFD_MMAP_GPU_ID(gpu_id) ((((uint64_t)gpu_id) << KFD_MMAP_GPU_ID_SHIFT)\
                                & KFD_MMAP_GPU_ID_MASK)
#define KFD_MMAP_GPU_ID_GET(offset) ((offset & KFD_MMAP_GPU_ID_MASK) \
                                >> KFD_MMAP_GPU_ID_SHIFT)

#define KFD_MMAP_OFFSET_VALUE_MASK (0x3FFFFFFFFFFFULL >> PAGE_SHIFT)
#define KFD_MMAP_OFFSET_VALUE_GET(offset) (offset & KFD_MMAP_OFFSET_VALUE_MASK)
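
/*
 * Illustrative example (not part of the driver): composing and decoding a
 * doorbell mmap offset for a hypothetical gpu_id. Since vm_pgoff counts
 * pages, these macros already operate on page-granular offsets.
 *
 *      unsigned long pgoff = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(gpu_id);
 *      ...
 *      if ((pgoff & KFD_MMAP_TYPE_MASK) == KFD_MMAP_TYPE_DOORBELL)
 *              gpu_id = KFD_MMAP_GPU_ID_GET(pgoff);
 */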

/*
 * When working with the cp scheduler we should assign the HIQ manually or via
 * the radeon driver to a fixed hqd slot; here are the fixed HIQ hqd slot
 * definitions for Kaveri. In Kaveri only the first ME's queues participate
 * in cp scheduling, so with that in mind we set the HIQ slot in the
 * second ME.
 */
#define KFD_CIK_HIQ_PIPE 4
#define KFD_CIK_HIQ_QUEUE 0

/* Macro for allocating structures */
#define kfd_alloc_struct(ptr_to_struct) \
        ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
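
/*
 * Illustrative usage (an assumption, not from this file): the macro derives
 * both the allocation size and the cast from the pointer itself, so the call
 * site never repeats the type name:
 *
 *      struct kfd_process_device *pdd = kfd_alloc_struct(pdd);
 *
 *      if (!pdd)
 *              return -ENOMEM;
 */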

#define KFD_MAX_NUM_OF_PROCESSES 512
#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024

/*
 * Size of the per-process TBA+TMA buffer: 2 pages
 *
 * The first page is the TBA used for the CWSR ISA code. The second
 * page is used as TMA for daisy-chaining a user-mode trap handler.
 */
#define KFD_CWSR_TBA_TMA_SIZE (PAGE_SIZE * 2)
#define KFD_CWSR_TMA_OFFSET PAGE_SIZE
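
/*
 * Layout sketch (an assumption based on the sizes above): the TMA starts one
 * page above the TBA inside the same two-page buffer, so a process's trap
 * handler addresses can be derived as:
 *
 *      qpd->tba_addr = qpd->cwsr_base;
 *      qpd->tma_addr = qpd->tba_addr + KFD_CWSR_TMA_OFFSET;
 */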

/*
 * Kernel module parameter to specify maximum number of supported queues per
 * device
 */
extern int max_num_of_queues_per_device;

#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE                \
        (KFD_MAX_NUM_OF_PROCESSES *                     \
                        KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
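
/*
 * Worked out: 512 processes * 1024 queues each = 524,288 queues is the
 * theoretical per-device ceiling; the module parameter defaults to the much
 * smaller KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT (4096).
 */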

#define KFD_KERNEL_QUEUE_SIZE 2048

/* Kernel module parameter to specify the scheduling policy */
extern int sched_policy;

/*
 * Kernel module parameter to specify the maximum process
 * number per HW scheduler
 */
extern int hws_max_conc_proc;

extern int cwsr_enable;

/*
 * Kernel module parameter to specify whether to send sigterm to HSA process on
 * unhandled exception
 */
extern int send_sigterm;

/*
 * Kernel module parameter to simulate a large-BAR machine on machines that
 * do not have large BAR enabled.
 */
extern int debug_largebar;

/*
 * Ignore CRAT table during KFD initialization, can be used to work around
 * broken CRAT tables on some AMD systems
 */
extern int ignore_crat;

/*
 * Set sh_mem_config.retry_disable on Vega10
 */
extern int vega10_noretry;

/**
 * enum kfd_sched_policy
 *
 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
 * scheduling. In this scheduling mode we're using the firmware code to
 * schedule the user mode queues and kernel queues such as HIQ and DIQ.
 * The HIQ queue is used as a special queue that dispatches the configuration
 * to the cp and the list of user mode queues that are currently running.
 * The DIQ queue is a debugging queue that dispatches debugging commands to the
 * firmware.
 * In this scheduling mode the user mode queue oversubscription feature is
 * enabled.
 *
 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above but with the
 * oversubscription feature disabled.
 *
 * @KFD_SCHED_POLICY_NO_HWS: no H/W scheduling policy is a mode which directly
 * sets the command processor registers and sets the queues "manually". This
 * mode is used *ONLY* for debugging purposes.
 *
 */
enum kfd_sched_policy {
        KFD_SCHED_POLICY_HWS = 0,
        KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
        KFD_SCHED_POLICY_NO_HWS
};

enum cache_policy {
        cache_policy_coherent,
        cache_policy_noncoherent
};

#define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10)

struct kfd_event_interrupt_class {
        bool (*interrupt_isr)(struct kfd_dev *dev,
                        const uint32_t *ih_ring_entry);
        void (*interrupt_wq)(struct kfd_dev *dev,
                        const uint32_t *ih_ring_entry);
};

struct kfd_device_info {
        enum amd_asic_type asic_family;
        const struct kfd_event_interrupt_class *event_interrupt_class;
        unsigned int max_pasid_bits;
        unsigned int max_no_of_hqd;
        unsigned int doorbell_size;
        size_t ih_ring_entry_size;
        uint8_t num_of_watch_points;
        uint16_t mqd_size_aligned;
        bool supports_cwsr;
        bool needs_iommu_device;
        bool needs_pci_atomics;
};

struct kfd_mem_obj {
        uint32_t range_start;
        uint32_t range_end;
        uint64_t gpu_addr;
        uint32_t *cpu_ptr;
        void *gtt_mem;
};

struct kfd_vmid_info {
        uint32_t first_vmid_kfd;
        uint32_t last_vmid_kfd;
        uint32_t vmid_num_kfd;
};

struct kfd_dev {
        struct kgd_dev *kgd;

        const struct kfd_device_info *device_info;
        struct pci_dev *pdev;

        unsigned int id;                /* topology stub index */

        phys_addr_t doorbell_base;      /* Start of actual doorbells used by
                                         * KFD. It is aligned for mapping
                                         * into user mode
                                         */
        size_t doorbell_id_offset;      /* Doorbell offset (from KFD doorbell
                                         * to HW doorbell, GFX reserved some
                                         * at the start)
                                         */
        u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
                                           * page used by kernel queue
                                           */

        struct kgd2kfd_shared_resources shared_resources;
        struct kfd_vmid_info vm_info;

        const struct kfd2kgd_calls *kfd2kgd;
        struct mutex doorbell_mutex;
        DECLARE_BITMAP(doorbell_available_index,
                        KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

        void *gtt_mem;
        uint64_t gtt_start_gpu_addr;
        void *gtt_start_cpu_ptr;
        void *gtt_sa_bitmap;
        struct mutex gtt_sa_lock;
        unsigned int gtt_sa_chunk_size;
        unsigned int gtt_sa_num_of_chunks;

        /* Interrupts */
        struct kfifo ih_fifo;
        struct workqueue_struct *ih_wq;
        struct work_struct interrupt_work;
        spinlock_t interrupt_lock;

        /* QCM Device instance */
        struct device_queue_manager *dqm;

        bool init_complete;
        /*
         * Interrupts of interest to KFD are copied
         * from the HW ring into a SW ring.
         */
        bool interrupts_active;

        /* Debug manager */
        struct kfd_dbgmgr *dbgmgr;

        /* Maximum process number mapped to HW scheduler */
        unsigned int max_proc_per_quantum;

        /* CWSR */
        bool cwsr_enabled;
        const void *cwsr_isa;
        unsigned int cwsr_isa_size;
};

/* KGD2KFD callbacks */
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
                        struct pci_dev *pdev, const struct kfd2kgd_calls *f2g);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
                        const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);

enum kfd_mempool {
        KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
        KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
        KFD_MEMPOOL_FRAMEBUFFER = 3,
};

/* Character device interface */
int kfd_chardev_init(void);
void kfd_chardev_exit(void);
struct device *kfd_chardev(void);

/**
 * enum kfd_unmap_queues_filter
 *
 * @KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE: Preempts single queue.
 *
 * @KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES: Preempts all queues in the
 * running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES: Preempts all non-static queues
 * in the running queues list.
 *
 * @KFD_UNMAP_QUEUES_FILTER_BY_PASID: Preempts queues that belong to a
 * specific process.
 *
 */
enum kfd_unmap_queues_filter {
        KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE,
        KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
        KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES,
        KFD_UNMAP_QUEUES_FILTER_BY_PASID
};

/**
 * enum kfd_queue_type
 *
 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
 *
 * @KFD_QUEUE_TYPE_SDMA: SDMA user mode queue type.
 *
 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
 *
 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
 */
enum kfd_queue_type {
        KFD_QUEUE_TYPE_COMPUTE,
        KFD_QUEUE_TYPE_SDMA,
        KFD_QUEUE_TYPE_HIQ,
        KFD_QUEUE_TYPE_DIQ
};

enum kfd_queue_format {
        KFD_QUEUE_FORMAT_PM4,
        KFD_QUEUE_FORMAT_AQL
};

/**
 * struct queue_properties
 *
 * @type: The queue type.
 *
 * @queue_id: Queue identifier.
 *
 * @queue_address: Queue ring buffer address.
 *
 * @queue_size: Queue ring buffer size.
 *
 * @priority: Defines the queue priority relative to other queues in the
 * process.
 * This is just an indication and HW scheduling may override the priority as
 * necessary while keeping the relative prioritization.
 * The priority granularity is from 0 to f, where f is the highest priority.
 * Currently all queues are initialized with the highest priority.
 *
 * @queue_percent: This field is partially implemented and currently a zero in
 * this field defines that the queue is inactive.
 *
 * @read_ptr: User space address which points to the number of dwords the
 * cp has read from the ring buffer. This field is updated automatically by
 * the H/W.
 *
 * @write_ptr: Defines the number of dwords written to the ring buffer.
 *
 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
 * buffer. This field should mirror write_ptr and the user should update it
 * after updating the write_ptr.
 *
 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
 *
 * @is_interop: Defines if this is an interop queue. An interop queue can
 * access both graphics and compute resources.
 *
 * @is_evicted: Defines if the queue is evicted. Only active queues
 * are evicted, rendering them inactive.
 *
 * @is_active: Defines if the queue is active or not. @is_active and
 * @is_evicted are protected by the DQM lock.
 *
 * @vmid: If the scheduling mode is no cp scheduling, this field defines the
 * vmid of the queue.
 *
 * This structure represents the queue properties for each queue, no matter
 * whether it's a user mode or kernel mode queue.
 *
 */
struct queue_properties {
        enum kfd_queue_type type;
        enum kfd_queue_format format;
        unsigned int queue_id;
        uint64_t queue_address;
        uint64_t queue_size;
        uint32_t priority;
        uint32_t queue_percent;
        uint32_t *read_ptr;
        uint32_t *write_ptr;
        void __iomem *doorbell_ptr;
        uint32_t doorbell_off;
        bool is_interop;
        bool is_evicted;
        bool is_active;
        /* Not relevant for user mode queues in cp scheduling */
        unsigned int vmid;
        /* Relevant only for sdma queues*/
        uint32_t sdma_engine_id;
        uint32_t sdma_queue_id;
        uint32_t sdma_vm_addr;
        /* Relevant only for VI */
        uint64_t eop_ring_buffer_address;
        uint32_t eop_ring_buffer_size;
        uint64_t ctx_save_restore_area_address;
        uint32_t ctx_save_restore_area_size;
        uint32_t ctl_stack_size;
        uint64_t tba_addr;
        uint64_t tma_addr;
};

/**
 * struct queue
 *
 * @list: Queue linked list.
 *
 * @mqd: The queue MQD.
 *
 * @mqd_mem_obj: The MQD local gpu memory object.
 *
 * @gart_mqd_addr: The MQD gart mc address.
 *
 * @properties: The queue properties.
 *
 * @mec: Used only in no cp scheduling mode and identifies the micro engine
 * id that the queue should execute on.
 *
 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe
 * id.
 *
 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
 *
 * @process: The kfd process that created this queue.
 *
 * @device: The kfd device that created this queue.
 *
 * This structure represents user mode compute queues.
 * It contains all the necessary data to handle such queues.
 *
 */

struct queue {
        struct list_head list;
        void *mqd;
        struct kfd_mem_obj *mqd_mem_obj;
        uint64_t gart_mqd_addr;
        struct queue_properties properties;

        uint32_t mec;
        uint32_t pipe;
        uint32_t queue;

        unsigned int sdma_id;
        unsigned int doorbell_id;

        struct kfd_process      *process;
        struct kfd_dev          *device;
};

/*
 * Please read the kfd_mqd_manager.h description.
 */
enum KFD_MQD_TYPE {
        KFD_MQD_TYPE_COMPUTE = 0,       /* for no cp scheduling */
        KFD_MQD_TYPE_HIQ,               /* for hiq */
        KFD_MQD_TYPE_CP,                /* for cp queues and diq */
        KFD_MQD_TYPE_SDMA,              /* for sdma queues */
        KFD_MQD_TYPE_MAX
};

struct scheduling_resources {
        unsigned int vmid_mask;
        enum kfd_queue_type type;
        uint64_t queue_mask;
        uint64_t gws_mask;
        uint32_t oac_mask;
        uint32_t gds_heap_base;
        uint32_t gds_heap_size;
};

488 | ||
489 | struct process_queue_manager { | |
490 | /* data */ | |
491 | struct kfd_process *process; | |
241f24f8 BG |
492 | struct list_head queues; |
493 | unsigned long *queue_slot_bitmap; | |
494 | }; | |
495 | ||
496 | struct qcm_process_device { | |
497 | /* The Device Queue Manager that owns this data */ | |
498 | struct device_queue_manager *dqm; | |
499 | struct process_queue_manager *pqm; | |
241f24f8 BG |
500 | /* Queues list */ |
501 | struct list_head queues_list; | |
502 | struct list_head priv_queue_list; | |
503 | ||
504 | unsigned int queue_count; | |
505 | unsigned int vmid; | |
506 | bool is_debug; | |
26103436 | 507 | unsigned int evicted; /* eviction counter, 0=active */ |
9fd3f1bf FK |
508 | |
509 | /* This flag tells if we should reset all wavefronts on | |
510 | * process termination | |
511 | */ | |
512 | bool reset_wavefronts; | |
513 | ||
241f24f8 BG |
514 | /* |
515 | * All the memory management data should be here too | |
516 | */ | |
517 | uint64_t gds_context_area; | |
518 | uint32_t sh_mem_config; | |
519 | uint32_t sh_mem_bases; | |
520 | uint32_t sh_mem_ape1_base; | |
521 | uint32_t sh_mem_ape1_limit; | |
522 | uint32_t page_table_base; | |
523 | uint32_t gds_size; | |
524 | uint32_t num_gws; | |
525 | uint32_t num_oac; | |
6a1c9510 | 526 | uint32_t sh_hidden_private_base; |
373d7080 FK |
527 | |
528 | /* CWSR memory */ | |
529 | void *cwsr_kaddr; | |
d01994c2 | 530 | uint64_t cwsr_base; |
373d7080 FK |
531 | uint64_t tba_addr; |
532 | uint64_t tma_addr; | |
d01994c2 FK |
533 | |
534 | /* IB memory */ | |
535 | uint64_t ib_base; | |
552764b6 | 536 | void *ib_kaddr; |
ef568db7 FK |
537 | |
538 | /* doorbell resources per process per device */ | |
539 | unsigned long *doorbell_bitmap; | |
241f24f8 BG |
540 | }; |
541 | ||
26103436 FK |
542 | /* KFD Memory Eviction */ |
543 | ||
544 | /* Approx. wait time before attempting to restore evicted BOs */ | |
545 | #define PROCESS_RESTORE_TIME_MS 100 | |
546 | /* Approx. back off time if restore fails due to lack of memory */ | |
547 | #define PROCESS_BACK_OFF_TIME_MS 100 | |
548 | /* Approx. time before evicting the process again */ | |
549 | #define PROCESS_ACTIVE_TIME_MS 10 | |
550 | ||
int kgd2kfd_quiesce_mm(struct mm_struct *mm);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
                                               struct dma_fence *fence);

/* 8 byte handle containing GPU ID in the most significant 4 bytes and
 * idr_handle in the least significant 4 bytes
 */
#define MAKE_HANDLE(gpu_id, idr_handle) \
        (((uint64_t)(gpu_id) << 32) + idr_handle)
#define GET_GPU_ID(handle) (handle >> 32)
#define GET_IDR_HANDLE(handle) (handle & 0xFFFFFFFF)
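
/*
 * Illustrative round trip (not part of the driver): packing and unpacking a
 * 64-bit handle from a gpu_id and an IDR handle:
 *
 *      uint64_t handle = MAKE_HANDLE(gpu_id, idr_handle);
 *
 *      WARN_ON(GET_GPU_ID(handle) != gpu_id);
 *      WARN_ON(GET_IDR_HANDLE(handle) != idr_handle);
 */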

enum kfd_pdd_bound {
        PDD_UNBOUND = 0,
        PDD_BOUND,
        PDD_BOUND_SUSPENDED,
};

/* Data that is per-process-per device. */
struct kfd_process_device {
        /*
         * List of all per-device data for a process.
         * Starts from kfd_process.per_device_data.
         */
        struct list_head per_device_list;

        /* The device that owns this data. */
        struct kfd_dev *dev;

        /* The process that owns this kfd_process_device. */
        struct kfd_process *process;

        /* per-process-per device QCM data structure */
        struct qcm_process_device qpd;

        /*Apertures*/
        uint64_t lds_base;
        uint64_t lds_limit;
        uint64_t gpuvm_base;
        uint64_t gpuvm_limit;
        uint64_t scratch_base;
        uint64_t scratch_limit;

        /* VM context for GPUVM allocations */
        struct file *drm_file;
        void *vm;

        /* GPUVM allocations storage */
        struct idr alloc_idr;

        /* Flag that tells whether the pdd has already been dequeued from the
         * dqm. This is used to prevent dev->dqm->ops.process_termination()
         * from being called twice when it has already been called in the
         * IOMMU callback function.
         */
        bool already_dequeued;

        /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
        enum kfd_pdd_bound bound;
};

#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)

/* Process data */
struct kfd_process {
        /*
         * kfd_process are stored in an mm_struct*->kfd_process*
         * hash table (kfd_processes in kfd_process.c)
         */
        struct hlist_node kfd_processes;

        /*
         * Opaque pointer to mm_struct. We don't hold a reference to
         * it so it should never be dereferenced from here. This is
         * only used for looking up processes by their mm.
         */
        void *mm;

        struct kref ref;
        struct work_struct release_work;

        struct mutex mutex;

        /*
         * In any process, the thread that started main() is the lead
         * thread and outlives the rest.
         * It is here because amd_iommu_bind_pasid wants a task_struct.
         * It can also be used for safely getting a reference to the
         * mm_struct of the process.
         */
        struct task_struct *lead_thread;

        /* We want to receive a notification when the mm_struct is destroyed */
        struct mmu_notifier mmu_notifier;

        /* Use for delayed freeing of kfd_process structure */
        struct rcu_head rcu;

        unsigned int pasid;
        unsigned int doorbell_index;

        /*
         * List of kfd_process_device structures,
         * one for each device the process is using.
         */
        struct list_head per_device_data;

        struct process_queue_manager pqm;

        /*Is the user space process 32 bit?*/
        bool is_32bit_user_mode;

        /* Event-related data */
        struct mutex event_mutex;
        /* Event ID allocator and lookup */
        struct idr event_idr;
        /* Event page */
        struct kfd_signal_page *signal_page;
        size_t signal_mapped_size;
        size_t signal_event_count;
        bool signal_event_limit_reached;

        /* Information used for memory eviction */
        void *kgd_process_info;
        /* Eviction fence that is attached to all the BOs of this process. The
         * fence will be triggered during eviction and a new one will be
         * created during restore.
         */
        struct dma_fence *ef;

        /* Work items for evicting and restoring BOs */
        struct delayed_work eviction_work;
        struct delayed_work restore_work;
        /* seqno of the last scheduled eviction */
        unsigned int last_eviction_seqno;
        /* Approx. the last timestamp (in jiffies) when the process was
         * restored after an eviction
         */
        unsigned long last_restore_timestamp;
};

#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
extern DECLARE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
extern struct srcu_struct kfd_processes_srcu;
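
/*
 * Illustrative lookup sketch (an assumption modeled on kfd_process.c, not a
 * definitive implementation): readers walk the table under SRCU so that
 * process teardown can free entries only after a grace period:
 *
 *      struct kfd_process *p, *found = NULL;
 *      int idx = srcu_read_lock(&kfd_processes_srcu);
 *
 *      hash_for_each_possible_rcu(kfd_processes_table, p, kfd_processes,
 *                                 (uintptr_t)mm)
 *              if (p->mm == mm) {
 *                      found = p;
 *                      break;
 *              }
 *      srcu_read_unlock(&kfd_processes_srcu, idx);
 */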

/**
 * Ioctl function type.
 *
 * \param filep pointer to file structure.
 * \param p amdkfd process pointer.
 * \param data pointer to arg that was copied from user.
 */
typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
                                void *data);

struct amdkfd_ioctl_desc {
        unsigned int cmd;
        int flags;
        amdkfd_ioctl_t *func;
        unsigned int cmd_drv;
        const char *name;
};

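/*
 * Illustrative table entry (hypothetical names; the real dispatch table lives
 * in kfd_chardev.c): each ioctl is described once and dispatched through its
 * amdkfd_ioctl_t handler:
 *
 *      static const struct amdkfd_ioctl_desc example_desc = {
 *              .cmd = AMDKFD_IOC_GET_VERSION,
 *              .func = kfd_ioctl_get_version,
 *              .name = "AMDKFD_IOC_GET_VERSION",
 *      };
 */
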
int kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(struct file *filep);
struct kfd_process *kfd_get_process(const struct task_struct *);
struct kfd_process *kfd_lookup_process_by_pasid(unsigned int pasid);
struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm);
void kfd_unref_process(struct kfd_process *p);
int kfd_process_evict_queues(struct kfd_process *p);
int kfd_process_restore_queues(struct kfd_process *p);
void kfd_suspend_all_processes(void);
int kfd_resume_all_processes(void);

int kfd_process_device_init_vm(struct kfd_process_device *pdd,
                               struct file *drm_file);
struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                                                struct kfd_process *p);
struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
                                                struct kfd_process *p);
struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
                                                        struct kfd_process *p);

int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                          struct vm_area_struct *vma);

/* KFD process API for creating and translating handles */
int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
                                        void *mem);
void *kfd_process_device_translate_handle(struct kfd_process_device *p,
                                        int handle);
void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
                                        int handle);

/* Process device data iterator */
struct kfd_process_device *kfd_get_first_process_device_data(
                                                        struct kfd_process *p);
struct kfd_process_device *kfd_get_next_process_device_data(
                                                struct kfd_process *p,
                                                struct kfd_process_device *pdd);
bool kfd_has_process_device_data(struct kfd_process *p);
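
/*
 * Illustrative iteration (an assumption mirroring typical callers): walking
 * every per-device data struct of a process, where do_something() is a
 * placeholder:
 *
 *      struct kfd_process_device *pdd;
 *
 *      for (pdd = kfd_get_first_process_device_data(p); pdd;
 *           pdd = kfd_get_next_process_device_data(p, pdd))
 *              do_something(pdd);
 */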

/* PASIDs */
int kfd_pasid_init(void);
void kfd_pasid_exit(void);
bool kfd_set_pasid_limit(unsigned int new_limit);
unsigned int kfd_get_pasid_limit(void);
unsigned int kfd_pasid_alloc(void);
void kfd_pasid_free(unsigned int pasid);

/* Doorbells */
size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
int kfd_doorbell_init(struct kfd_dev *kfd);
void kfd_doorbell_fini(struct kfd_dev *kfd);
int kfd_doorbell_mmap(struct kfd_dev *dev, struct kfd_process *process,
                      struct vm_area_struct *vma);
void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
                                        unsigned int *doorbell_off);
void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
u32 read_kernel_doorbell(u32 __iomem *db);
void write_kernel_doorbell(void __iomem *db, u32 value);
void write_kernel_doorbell64(void __iomem *db, u64 value);
unsigned int kfd_doorbell_id_to_offset(struct kfd_dev *kfd,
                                        struct kfd_process *process,
                                        unsigned int doorbell_id);
phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
                                        struct kfd_process *process);
int kfd_alloc_process_doorbells(struct kfd_process *process);
void kfd_free_process_doorbells(struct kfd_process *process);

/* GTT Sub-Allocator */

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
                        struct kfd_mem_obj **mem_obj);

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj);

extern struct device *kfd_device;

/* Topology */
int kfd_topology_init(void);
void kfd_topology_shutdown(void);
int kfd_topology_add_device(struct kfd_dev *gpu);
int kfd_topology_remove_device(struct kfd_dev *gpu);
struct kfd_topology_device *kfd_topology_device_by_proximity_domain(
                                                uint32_t proximity_domain);
struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev);
int kfd_numa_node_to_apic_id(int numa_node_id);

/* Interrupts */
int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);

/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);

/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);

/* Queue Context Management */
int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
                                        struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_cik_hawaii(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev);
struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
                struct kfd_dev *dev);
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
                                        enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);

/* Process Queue Manager */
struct process_queue_node {
        struct queue *q;
        struct kernel_queue *kq;
        struct list_head process_queue_list;
};

void kfd_process_dequeue_from_device(struct kfd_process_device *pdd);
void kfd_process_dequeue_from_all_devices(struct kfd_process *p);
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
void pqm_uninit(struct process_queue_manager *pqm);
int pqm_create_queue(struct process_queue_manager *pqm,
                        struct kfd_dev *dev,
                        struct file *f,
                        struct queue_properties *properties,
                        unsigned int *qid);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
                        struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
                                                unsigned int qid);

int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
                              unsigned int fence_value,
                              unsigned int timeout_ms);

/* Packet Manager */

#define KFD_FENCE_COMPLETED (100)
#define KFD_FENCE_INIT   (10)

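/*
 * Illustrative preemption-fence flow (an assumption modeled on the DQM
 * unmap path): the fence word starts at KFD_FENCE_INIT, the scheduler is
 * asked to write KFD_FENCE_COMPLETED once the queues are unmapped, and the
 * driver then polls for that value:
 *
 *      *fence_addr = KFD_FENCE_INIT;
 *      pm_send_query_status(pm, fence_gpu_addr, KFD_FENCE_COMPLETED);
 *      retval = amdkfd_fence_wait_timeout(fence_addr, KFD_FENCE_COMPLETED,
 *                                         timeout_ms);
 */
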
struct packet_manager {
        struct device_queue_manager *dqm;
        struct kernel_queue *priv_queue;
        struct mutex lock;
        bool allocated;
        struct kfd_mem_obj *ib_buffer_obj;
        unsigned int ib_size_bytes;

        const struct packet_manager_funcs *pmf;
};

struct packet_manager_funcs {
        /* Support ASIC-specific packet formats for PM4 packets */
        int (*map_process)(struct packet_manager *pm, uint32_t *buffer,
                        struct qcm_process_device *qpd);
        int (*runlist)(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain);
        int (*set_resources)(struct packet_manager *pm, uint32_t *buffer,
                        struct scheduling_resources *res);
        int (*map_queues)(struct packet_manager *pm, uint32_t *buffer,
                        struct queue *q, bool is_static);
        int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer,
                        enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter mode,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine);
        int (*query_status)(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t fence_address, uint32_t fence_value);
        int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer);

        /* Packet sizes */
        int map_process_size;
        int runlist_size;
        int set_resources_size;
        int map_queues_size;
        int unmap_queues_size;
        int query_status_size;
        int release_mem_size;
};

extern const struct packet_manager_funcs kfd_vi_pm_funcs;
extern const struct packet_manager_funcs kfd_v9_pm_funcs;
64c7f8cf BG |
917 | int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); |
918 | void pm_uninit(struct packet_manager *pm); | |
919 | int pm_send_set_resources(struct packet_manager *pm, | |
920 | struct scheduling_resources *res); | |
921 | int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues); | |
922 | int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address, | |
923 | uint32_t fence_value); | |
924 | ||
925 | int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type, | |
7da2bcf8 | 926 | enum kfd_unmap_queues_filter mode, |
64c7f8cf BG |
927 | uint32_t filter_param, bool reset, |
928 | unsigned int sdma_engine); | |
929 | ||
241f24f8 BG |
930 | void pm_release_ib(struct packet_manager *pm); |
931 | ||
454150b1 FK |
932 | /* Following PM funcs can be shared among VI and AI */ |
933 | unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); | |
934 | int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer, | |
935 | struct scheduling_resources *res); | |
936 | ||
19f6d2a6 | 937 | uint64_t kfd_get_number_elems(struct kfd_dev *kfd); |
19f6d2a6 | 938 | |
/* Events */
extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
extern const struct kfd_event_interrupt_class event_interrupt_class_v9;

extern const struct kfd_device_global_init_class device_global_init_class_cik;

void kfd_event_init_process(struct kfd_process *p);
void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
                       bool all, uint32_t user_timeout_ms,
                       uint32_t *wait_result);
void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
                                uint32_t valid_id_bits);
void kfd_signal_iommu_event(struct kfd_dev *dev,
                unsigned int pasid, unsigned long address,
                bool is_write_requested, bool is_execute_requested);
void kfd_signal_hw_exception_event(unsigned int pasid);
int kfd_set_event(struct kfd_process *p, uint32_t event_id);
int kfd_reset_event(struct kfd_process *p, uint32_t event_id);
int kfd_event_page_set(struct kfd_process *p, void *kernel_address,
                       uint64_t size);
int kfd_event_create(struct file *devkfd, struct kfd_process *p,
                     uint32_t event_type, bool auto_reset, uint32_t node_id,
                     uint32_t *event_id, uint32_t *event_trigger_data,
                     uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);

void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
                                struct kfd_vm_fault_info *info);

void kfd_flush_tlb(struct kfd_process_device *pdd);

int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);

/* Debugfs */
#if defined(CONFIG_DEBUG_FS)

void kfd_debugfs_init(void);
void kfd_debugfs_fini(void);
int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data);
int pqm_debugfs_mqds(struct seq_file *m, void *data);
int kfd_debugfs_hqds_by_device(struct seq_file *m, void *data);
int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);

#else

static inline void kfd_debugfs_init(void) {}
static inline void kfd_debugfs_fini(void) {}

#endif

#endif