Commit | Line | Data |
---|---|---|
130e0371 OG |
1 | /* |
2 | * Copyright 2014 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | */ | |
22 | ||
23 | /* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */ | |
24 | ||
25 | #ifndef AMDGPU_AMDKFD_H_INCLUDED | |
26 | #define AMDGPU_AMDKFD_H_INCLUDED | |
27 | ||
8abc1eb2 | 28 | #include <linux/list.h> |
130e0371 | 29 | #include <linux/types.h> |
7420f482 | 30 | #include <linux/mm.h> |
9bf5b9eb | 31 | #include <linux/kthread.h> |
5ae0283e | 32 | #include <linux/workqueue.h> |
f95f51a4 | 33 | #include <linux/mmu_notifier.h> |
610dab11 | 34 | #include <linux/memremap.h> |
130e0371 | 35 | #include <kgd_kfd_interface.h> |
18192001 | 36 | #include <drm/drm_client.h> |
a46a2cd1 FK |
37 | #include "amdgpu_sync.h" |
38 | #include "amdgpu_vm.h" | |
1c77527a | 39 | #include "amdgpu_xcp.h" |
130e0371 | 40 | |
/* Sum of VRAM sizes across all KFD-capable devices — TODO confirm units/use */
extern uint64_t amdgpu_amdkfd_total_mem_size;

/* Flavors of TLB invalidation requested after GPU page-table updates */
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;
50 | ||
/* How a per-GPU attachment maps the underlying BO (see kfd_mem_attachment) */
enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};
57 | ||
/*
 * One GPU's view of a kgd_mem buffer: how a single device maps the BO
 * (see enum kfd_mem_attachment_type) into its VM.
 */
struct kfd_mem_attachment {
	struct list_head list;		/* entry in kgd_mem->attachments */
	enum kfd_mem_attachment_type type;
	bool is_mapped;			/* mapped into this device's page tables */
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;	/* device this attachment belongs to */
	uint64_t va;			/* GPU virtual address of the mapping */
	uint64_t pte_flags;		/* PTE flags used for this mapping */
};
67 | ||
/*
 * A KFD buffer object. A single kgd_mem may be attached to several GPUs;
 * each per-device mapping is a struct kfd_mem_attachment on @attachments.
 */
struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;	/* presumably the HMM range for userptr BOs — verify */
	struct list_head attachments;	/* list of struct kfd_mem_attachment */
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;	/* number of active GPU mappings */
	uint64_t va;			/* virtual address requested by KFD */

	uint32_t alloc_flags;

	/* NOTE(review): looks like a userptr invalidation counter — confirm */
	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	uint32_t gem_handle;		/* presumably from amdgpu_kfd_dev.client — verify */
	bool aql_queue;
	bool is_imported;		/* imported (e.g. via dmabuf) rather than allocated here */
};
91 | ||
/* KFD Memory Eviction */
/* Eviction fence attached to a process's BOs; created by
 * amdgpu_amdkfd_fence_create().
 */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;		/* address space of the owning process */
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;	/* set for SVM range BOs, else NULL — TODO confirm */
};
100 | ||
/* Per-device state that amdgpu keeps on behalf of amdkfd */
struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	/* VRAM usage accounting, one slot per XCP partition */
	int64_t vram_used[MAX_XCP];
	uint64_t vram_used_aligned[MAX_XCP];
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;

	/* Client for KFD BO GEM handle allocations */
	struct drm_client_dev client;
};
114 | ||
/* GPU engines whose firmware version can be queried via
 * amdgpu_amdkfd_get_fw_version() and targeted by amdgpu_amdkfd_submit_ib().
 */
enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};
126 | ||
d8d019cc | 127 | |
/* Per-process bookkeeping shared across all GPUs used by one KFD process */
struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;		/* count of BOs evicted by MMU notifiers — TODO confirm */
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;	/* set via amdgpu_amdkfd_block_mmu_notifications() */
};
151 | ||
/* Module-level init/teardown of the amdgpu<->amdkfd interface */
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

/* Per-device lifecycle, power management and interrupt forwarding */
void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
/* Submit an indirect buffer on the given engine under @vmid */
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
				enum kgd_engine_type engine,
				uint32_t vmid, uint64_t gpu_addr,
				uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

/* GPU-reset coordination between amdgpu and amdkfd */
int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					int queue_bit);

/* Create an eviction fence (see struct amdgpu_amdkfd_fence) */
struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
						struct mm_struct *mm,
						struct svm_range_bo *svm_bo);

int amdgpu_amdkfd_drm_client_create(struct amdgpu_device *adev);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
#else
/* No-op stubs so callers still build when KFD support is compiled out */
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
			unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
#endif
/* Shared API */
/* GTT/GWS allocation helpers and device property queries used by amdkfd */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
				void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id);
/* XGMI/PCIe topology queries between two devices */
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst);
130e0371 | 255 | |
/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 *
 * Evaluates to true when the read succeeded and (dst) was written.
 * The kthread_use_mm() branch lets a kernel thread read from a foreign
 * process's address space.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
278 | ||
/* GPUVM API */
/* Recover the amdgpu_vm embedded in the amdgpu_fpriv behind a DRM file's
 * driver_priv pointer ((void *)drm_priv is a struct drm_file *).
 */
#define drm_priv_to_vm(drm_priv)				\
	(&((struct amdgpu_fpriv *)				\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
283 | ||
/* Process VM acquire/release and BO alloc/map/unmap on behalf of KFD.
 * drm_priv arguments are struct drm_file pointers (see drm_priv_to_vm()).
 */
int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					struct amdgpu_vm *avm,
					void **process_info,
					struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence __rcu **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf_fd(struct amdgpu_device *adev, int fd,
					 uint64_t va, void *drm_priv,
					 struct kgd_mem **mem, uint64_t *size,
					 uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
						  bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
/* Reserve/release space against the per-partition memory limit accounting */
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
		uint64_t size, u32 alloc_flag, int8_t xcp_id);

u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);
340 | ||
/* Memory partition id (mem_id) backing an XCP, or -1 when the device is
 * not partitioned / xcp_id is invalid.
 */
#define KFD_XCP_MEM_ID(adev, xcp_id) \
	((adev)->xcp_mgr && (xcp_id) >= 0 ?\
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
346 | ||
4c6ce75f | 347 | |
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when GEM object is released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
/* No-op stubs when KFD support is compiled out */
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif
84b4dd3f PY |
377 | |
#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
#else
/* SVM page migration disabled: report success without registering a pagemap */
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
#endif
387 | ||
/* KGD2KFD callbacks */
/* Entry points amdgpu calls into amdkfd. When CONFIG_HSA_AMD is off they
 * collapse to stubs; kgd2kfd_init() then fails with -ENOENT so no KFD
 * device is ever probed.
 */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
						struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}
#endif
130e0371 | 479 | #endif /* AMDGPU_AMDKFD_H_INCLUDED */ |