/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_OBJECT_H__
#define __AMDGPU_OBJECT_H__

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_res_cursor.h"

#ifdef CONFIG_MMU_NOTIFIER
#include <linux/mmu_notifier.h>
#endif

#define AMDGPU_BO_INVALID_OFFSET	LONG_MAX
#define AMDGPU_BO_MAX_PLACEMENTS	3

/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_CREATE_USERPTR_BO	(1ULL << 63)

#define to_amdgpu_bo_user(abo) container_of((abo), struct amdgpu_bo_user, bo)
#define to_amdgpu_bo_vm(abo) container_of((abo), struct amdgpu_bo_vm, bo)

struct amdgpu_bo_param {
	unsigned long			size;
	int				byte_align;
	u32				bo_ptr_size;
	u32				domain;
	u32				preferred_domain;
	u64				flags;
	enum ttm_bo_type		type;
	bool				no_wait_gpu;
	struct dma_resv			*resv;
	void				(*destroy)(struct ttm_buffer_object *bo);
	/* xcp partition number plus 1, 0 means any partition */
	int8_t				xcp_id_plus1;
};
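/*
 * Example (editor's illustrative sketch, not part of the driver API surface):
 * a caller typically fills an amdgpu_bo_param on the stack and hands it to
 * amdgpu_bo_create(). The local names below (bp, bo, adev, size, r) are
 * hypothetical.
 *
 *	struct amdgpu_bo_param bp = {
 *		.size		= size,
 *		.byte_align	= PAGE_SIZE,
 *		.bo_ptr_size	= sizeof(struct amdgpu_bo),
 *		.domain		= AMDGPU_GEM_DOMAIN_GTT,
 *		.type		= ttm_bo_type_kernel,
 *		.resv		= NULL,
 *	};
 *	struct amdgpu_bo *bo;
 *	int r = amdgpu_bo_create(adev, &bp, &bo);
 */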
/* bo virtual addresses in a vm */
struct amdgpu_bo_va_mapping {
	struct amdgpu_bo_va		*bo_va;
	struct list_head		list;
	struct rb_node			rb;
	uint64_t			start;
	uint64_t			last;
	uint64_t			__subtree_last;
	uint64_t			offset;
	uint64_t			flags;
};

/* User space allocated BO in a VM */
struct amdgpu_bo_va {
	struct amdgpu_vm_bo_base	base;

	/* protected by bo being reserved */
	unsigned			ref_count;

	/* all other members protected by the VM PD being reserved */
	struct dma_fence		*last_pt_update;

	/* mappings for this bo_va */
	struct list_head		invalids;
	struct list_head		valids;

	/* If the mappings are cleared or filled */
	bool				cleared;

	bool				is_xgmi;

	/*
	 * protected by vm reservation lock
	 * if non-zero, cannot unmap from GPU because user queues may still
	 * access it
	 */
	unsigned int			queue_refcount;
};

struct amdgpu_bo {
	/* Protected by tbo.reserved */
	u32				preferred_domains;
	u32				allowed_domains;
	struct ttm_place		placements[AMDGPU_BO_MAX_PLACEMENTS];
	struct ttm_placement		placement;
	struct ttm_buffer_object	tbo;
	struct ttm_bo_kmap_obj		kmap;
	u64				flags;
	/* per VM structure for page tables and with virtual addresses */
	struct amdgpu_vm_bo_base	*vm_bo;
	/* Constant after initialization */
	struct amdgpu_bo		*parent;

#ifdef CONFIG_MMU_NOTIFIER
	struct mmu_interval_notifier	notifier;
#endif
	struct kgd_mem			*kfd_bo;

	/*
	 * For GPUs with spatial partitioning, the xcp partition number; -1
	 * means any partition. For other ASICs without spatial partitioning,
	 * always 0 for memory accounting.
	 */
	int8_t				xcp_id;
};

struct amdgpu_bo_user {
	struct amdgpu_bo		bo;
	u64				tiling_flags;
	u64				metadata_flags;
	void				*metadata;
	u32				metadata_size;
};

struct amdgpu_bo_vm {
	struct amdgpu_bo		bo;
	struct amdgpu_vm_bo_base	entries[];
};

static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
{
	return container_of(tbo, struct amdgpu_bo, tbo);
}

/**
 * amdgpu_mem_type_to_domain - return domain corresponding to mem_type
 * @mem_type: ttm memory type
 *
 * Returns corresponding domain of the ttm mem_type
 */
static inline unsigned amdgpu_mem_type_to_domain(u32 mem_type)
{
	switch (mem_type) {
	case TTM_PL_VRAM:
		return AMDGPU_GEM_DOMAIN_VRAM;
	case TTM_PL_TT:
		return AMDGPU_GEM_DOMAIN_GTT;
	case TTM_PL_SYSTEM:
		return AMDGPU_GEM_DOMAIN_CPU;
	case AMDGPU_PL_GDS:
		return AMDGPU_GEM_DOMAIN_GDS;
	case AMDGPU_PL_GWS:
		return AMDGPU_GEM_DOMAIN_GWS;
	case AMDGPU_PL_OA:
		return AMDGPU_GEM_DOMAIN_OA;
	case AMDGPU_PL_DOORBELL:
		return AMDGPU_GEM_DOMAIN_DOORBELL;
	default:
		break;
	}
	return 0;
}

/**
 * amdgpu_bo_reserve - reserve bo
 * @bo: bo structure
 * @no_intr: don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(adev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo)
{
	ttm_bo_unreserve(&bo->tbo);
}
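/*
 * Example (editor's illustrative sketch): the usual pattern is to reserve the
 * BO, operate on it, then unreserve it. The local names (bo, ptr, r) are
 * hypothetical.
 *
 *	r = amdgpu_bo_reserve(bo, false);
 *	if (unlikely(r))
 *		return r;
 *	r = amdgpu_bo_kmap(bo, &ptr);
 *	amdgpu_bo_unreserve(bo);
 *	if (r)
 *		return r;
 */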
static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size;
}

static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
{
	return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE;
}

static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
	return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}

/**
 * amdgpu_bo_mmap_offset - return mmap offset of bo
 * @bo: amdgpu object for which we query the offset
 *
 * Returns mmap offset of the object.
 */
static inline u64 amdgpu_bo_mmap_offset(struct amdgpu_bo *bo)
{
	return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
}

/**
 * amdgpu_bo_explicit_sync - return whether the bo is explicitly synced
 * @bo: pointer to a buffer object
 */
static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}

/**
 * amdgpu_bo_encrypted - test if the BO is encrypted
 * @bo: pointer to a buffer object
 *
 * Return true if the buffer object is encrypted, false otherwise.
 */
static inline bool amdgpu_bo_encrypted(struct amdgpu_bo *bo)
{
	return bo->flags & AMDGPU_GEM_CREATE_ENCRYPTED;
}

bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);

int amdgpu_bo_create(struct amdgpu_device *adev,
		     struct amdgpu_bo_param *bp,
		     struct amdgpu_bo **bo_ptr);
int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
			      unsigned long size, int align,
			      u32 domain, struct amdgpu_bo **bo_ptr,
			      u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_kernel(struct amdgpu_device *adev,
			    unsigned long size, int align,
			    u32 domain, struct amdgpu_bo **bo_ptr,
			    u64 *gpu_addr, void **cpu_addr);
int amdgpu_bo_create_isp_user(struct amdgpu_device *adev,
			      struct dma_buf *dbuf, u32 domain,
			      struct amdgpu_bo **bo,
			      u64 *gpu_addr);
int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
			       uint64_t offset, uint64_t size,
			       struct amdgpu_bo **bo_ptr, void **cpu_addr);
int amdgpu_bo_create_user(struct amdgpu_device *adev,
			  struct amdgpu_bo_param *bp,
			  struct amdgpu_bo_user **ubo_ptr);
int amdgpu_bo_create_vm(struct amdgpu_device *adev,
			struct amdgpu_bo_param *bp,
			struct amdgpu_bo_vm **ubo_ptr);
void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
			   void **cpu_addr);
void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo);
int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr);
void *amdgpu_bo_kptr(struct amdgpu_bo *bo);
void amdgpu_bo_kunmap(struct amdgpu_bo *bo);
struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo);
void amdgpu_bo_unref(struct amdgpu_bo **bo);
int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags);
int amdgpu_bo_set_metadata(struct amdgpu_bo *bo, void *metadata,
			   uint32_t metadata_size, uint64_t flags);
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
			   size_t buffer_size, uint32_t *metadata_size,
			   uint64_t *flags);
void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_resource *new_mem);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		     bool shared);
int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
			     enum amdgpu_sync_mode sync_mode, void *owner,
			     bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo);
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
					uint32_t domain);
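/*
 * Example (editor's illustrative sketch): amdgpu_bo_create_kernel() and
 * amdgpu_bo_free_kernel() are normally used as a pair for driver-internal
 * buffers. The local names (adev, bo, gpu_addr, cpu_addr, r) are hypothetical.
 *
 *	struct amdgpu_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_addr;
 *
 *	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
 *				    AMDGPU_GEM_DOMAIN_VRAM, &bo,
 *				    &gpu_addr, &cpu_addr);
 *	if (r)
 *		return r;
 *	...
 *	amdgpu_bo_free_kernel(&bo, &gpu_addr, &cpu_addr);
 */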
/*
 * sub allocation
 */
static inline struct amdgpu_sa_manager *
to_amdgpu_sa_manager(struct drm_suballoc_manager *manager)
{
	return container_of(manager, struct amdgpu_sa_manager, base);
}

static inline uint64_t amdgpu_sa_bo_gpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->gpu_addr +
		drm_suballoc_soffset(sa_bo);
}

static inline void *amdgpu_sa_bo_cpu_addr(struct drm_suballoc *sa_bo)
{
	return to_amdgpu_sa_manager(sa_bo->manager)->cpu_ptr +
		drm_suballoc_soffset(sa_bo);
}

int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain);
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager);
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct drm_suballoc **sa_bo,
		     unsigned int size);
void amdgpu_sa_bo_free(struct drm_suballoc **sa_bo,
		       struct dma_fence *fence);
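/*
 * Example (editor's illustrative sketch): sub-allocations are taken from an
 * initialized amdgpu_sa_manager and released against a fence once the GPU is
 * done with them. The local names (sa_manager, sa_bo, fence, r) are
 * hypothetical.
 *
 *	struct drm_suballoc *sa_bo;
 *
 *	r = amdgpu_sa_bo_new(sa_manager, &sa_bo, 256);
 *	if (r)
 *		return r;
 *	... use amdgpu_sa_bo_gpu_addr(sa_bo) / amdgpu_sa_bo_cpu_addr(sa_bo) ...
 *	amdgpu_sa_bo_free(&sa_bo, fence);
 */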
#if defined(CONFIG_DEBUG_FS)
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m);
u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m);
#endif
void amdgpu_debugfs_sa_init(struct amdgpu_device *adev);

bool amdgpu_bo_support_uswc(u64 bo_flags);

#endif