Commit | Line | Data |
---|---|---|
81629cba AD |
1 | /* amdgpu_drm.h -- Public header for the amdgpu driver -*- linux-c -*- |
2 | * | |
3 | * Copyright 2000 Precision Insight, Inc., Cedar Park, Texas. | |
4 | * Copyright 2000 VA Linux Systems, Inc., Fremont, California. | |
5 | * Copyright 2002 Tungsten Graphics, Inc., Cedar Park, Texas. | |
6 | * Copyright 2014 Advanced Micro Devices, Inc. | |
7 | * | |
8 | * Permission is hereby granted, free of charge, to any person obtaining a | |
9 | * copy of this software and associated documentation files (the "Software"), | |
10 | * to deal in the Software without restriction, including without limitation | |
11 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
12 | * and/or sell copies of the Software, and to permit persons to whom the | |
13 | * Software is furnished to do so, subject to the following conditions: | |
14 | * | |
15 | * The above copyright notice and this permission notice shall be included in | |
16 | * all copies or substantial portions of the Software. | |
17 | * | |
18 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
19 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
20 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
21 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
22 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
23 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
24 | * OTHER DEALINGS IN THE SOFTWARE. | |
25 | * | |
26 | * Authors: | |
27 | * Kevin E. Martin <martin@valinux.com> | |
28 | * Gareth Hughes <gareth@valinux.com> | |
29 | * Keith Whitwell <keith@tungstengraphics.com> | |
30 | */ | |
31 | ||
32 | #ifndef __AMDGPU_DRM_H__ | |
33 | #define __AMDGPU_DRM_H__ | |
34 | ||
b3fcf36a | 35 | #include "drm.h" |
81629cba | 36 | |
cfa7152f EV |
37 | #if defined(__cplusplus) |
38 | extern "C" { | |
39 | #endif | |
40 | ||
/* amdgpu ioctl command numbers, offsets from DRM_COMMAND_BASE.
 * Note: 0x0a-0x0f are unused in this range; DRM_AMDGPU_GEM_OP
 * historically starts at 0x10.
 */
#define DRM_AMDGPU_GEM_CREATE		0x00
#define DRM_AMDGPU_GEM_MMAP		0x01
#define DRM_AMDGPU_CTX			0x02
#define DRM_AMDGPU_BO_LIST		0x03
#define DRM_AMDGPU_CS			0x04
#define DRM_AMDGPU_INFO			0x05
#define DRM_AMDGPU_GEM_METADATA		0x06
#define DRM_AMDGPU_GEM_WAIT_IDLE	0x07
#define DRM_AMDGPU_GEM_VA		0x08
#define DRM_AMDGPU_WAIT_CS		0x09
#define DRM_AMDGPU_GEM_OP		0x10
#define DRM_AMDGPU_GEM_USERPTR		0x11
#define DRM_AMDGPU_WAIT_FENCES		0x12
#define DRM_AMDGPU_VM			0x13
#define DRM_AMDGPU_FENCE_TO_HANDLE	0x14
#define DRM_AMDGPU_SCHED		0x15

/* Full ioctl request codes; the argument type names the struct/union
 * defined later in this header.
 */
#define DRM_IOCTL_AMDGPU_GEM_CREATE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_CREATE, union drm_amdgpu_gem_create)
#define DRM_IOCTL_AMDGPU_GEM_MMAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_MMAP, union drm_amdgpu_gem_mmap)
#define DRM_IOCTL_AMDGPU_CTX		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CTX, union drm_amdgpu_ctx)
#define DRM_IOCTL_AMDGPU_BO_LIST	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_BO_LIST, union drm_amdgpu_bo_list)
#define DRM_IOCTL_AMDGPU_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_CS, union drm_amdgpu_cs)
#define DRM_IOCTL_AMDGPU_INFO		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_INFO, struct drm_amdgpu_info)
#define DRM_IOCTL_AMDGPU_GEM_METADATA	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_METADATA, struct drm_amdgpu_gem_metadata)
#define DRM_IOCTL_AMDGPU_GEM_WAIT_IDLE	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_WAIT_IDLE, union drm_amdgpu_gem_wait_idle)
#define DRM_IOCTL_AMDGPU_GEM_VA		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_VA, struct drm_amdgpu_gem_va)
#define DRM_IOCTL_AMDGPU_WAIT_CS	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_CS, union drm_amdgpu_wait_cs)
#define DRM_IOCTL_AMDGPU_GEM_OP		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_OP, struct drm_amdgpu_gem_op)
#define DRM_IOCTL_AMDGPU_GEM_USERPTR	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_GEM_USERPTR, struct drm_amdgpu_gem_userptr)
#define DRM_IOCTL_AMDGPU_WAIT_FENCES	DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_WAIT_FENCES, union drm_amdgpu_wait_fences)
#define DRM_IOCTL_AMDGPU_VM		DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_VM, union drm_amdgpu_vm)
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED		DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
/**
 * DOC: memory domains
 *
 * %AMDGPU_GEM_DOMAIN_CPU	System memory that is not GPU accessible.
 * Memory in this pool could be swapped out to disk if there is pressure.
 *
 * %AMDGPU_GEM_DOMAIN_GTT	GPU accessible system memory, mapped into the
 * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
 * pages of system memory, allowing the GPU to access system memory in a
 * linearized fashion.
 *
 * %AMDGPU_GEM_DOMAIN_VRAM	Local video memory. For APUs, it is memory
 * carved out by the BIOS.
 *
 * %AMDGPU_GEM_DOMAIN_GDS	Global on-chip data storage used to share data
 * across shader threads.
 *
 * %AMDGPU_GEM_DOMAIN_GWS	Global wave sync, used to synchronize the
 * execution of all the waves on a device.
 *
 * %AMDGPU_GEM_DOMAIN_OA	Ordered append, used by 3D or Compute engines
 * for appending data.
 */
#define AMDGPU_GEM_DOMAIN_CPU		0x1
#define AMDGPU_GEM_DOMAIN_GTT		0x2
#define AMDGPU_GEM_DOMAIN_VRAM		0x4
#define AMDGPU_GEM_DOMAIN_GDS		0x8
#define AMDGPU_GEM_DOMAIN_GWS		0x10
#define AMDGPU_GEM_DOMAIN_OA		0x20
#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
					 AMDGPU_GEM_DOMAIN_GTT | \
					 AMDGPU_GEM_DOMAIN_VRAM | \
					 AMDGPU_GEM_DOMAIN_GDS | \
					 AMDGPU_GEM_DOMAIN_GWS | \
					 AMDGPU_GEM_DOMAIN_OA)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
/* Flag that CPU access will not work, this VRAM domain is invisible */
#define AMDGPU_GEM_CREATE_NO_CPU_ACCESS		(1 << 1)
/* Flag that USWC attributes should be used for GTT */
#define AMDGPU_GEM_CREATE_CPU_GTT_USWC		(1 << 2)
/* Flag that the memory should be in VRAM and cleared */
#define AMDGPU_GEM_CREATE_VRAM_CLEARED		(1 << 3)
/* Flag that a shadow BO (in GTT) should be created while allocating the VRAM BO */
#define AMDGPU_GEM_CREATE_SHADOW		(1 << 4)
/* Flag that allocating the BO should use linear VRAM */
#define AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS	(1 << 5)
/* Flag that BO is always valid in this VM */
#define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
 * for the second page onward should be set to NC.
 */
#define AMDGPU_GEM_CREATE_MQD_GFX9		(1 << 8)
81629cba AD |
132 | struct drm_amdgpu_gem_create_in { |
133 | /** the requested memory size */ | |
2ce9dde0 | 134 | __u64 bo_size; |
81629cba | 135 | /** physical start_addr alignment in bytes for some HW requirements */ |
2ce9dde0 | 136 | __u64 alignment; |
81629cba | 137 | /** the requested memory domains */ |
2ce9dde0 | 138 | __u64 domains; |
81629cba | 139 | /** allocation flags */ |
2ce9dde0 | 140 | __u64 domain_flags; |
81629cba AD |
141 | }; |
142 | ||
143 | struct drm_amdgpu_gem_create_out { | |
144 | /** returned GEM object handle */ | |
2ce9dde0 MR |
145 | __u32 handle; |
146 | __u32 _pad; | |
81629cba AD |
147 | }; |
148 | ||
149 | union drm_amdgpu_gem_create { | |
150 | struct drm_amdgpu_gem_create_in in; | |
151 | struct drm_amdgpu_gem_create_out out; | |
152 | }; | |
153 | ||
154 | /** Opcode to create new residency list. */ | |
155 | #define AMDGPU_BO_LIST_OP_CREATE 0 | |
156 | /** Opcode to destroy previously created residency list */ | |
157 | #define AMDGPU_BO_LIST_OP_DESTROY 1 | |
158 | /** Opcode to update resource information in the list */ | |
159 | #define AMDGPU_BO_LIST_OP_UPDATE 2 | |
160 | ||
161 | struct drm_amdgpu_bo_list_in { | |
162 | /** Type of operation */ | |
2ce9dde0 | 163 | __u32 operation; |
81629cba | 164 | /** Handle of list or 0 if we want to create one */ |
2ce9dde0 | 165 | __u32 list_handle; |
81629cba | 166 | /** Number of BOs in list */ |
2ce9dde0 | 167 | __u32 bo_number; |
81629cba | 168 | /** Size of each element describing BO */ |
2ce9dde0 | 169 | __u32 bo_info_size; |
81629cba | 170 | /** Pointer to array describing BOs */ |
2ce9dde0 | 171 | __u64 bo_info_ptr; |
81629cba AD |
172 | }; |
173 | ||
174 | struct drm_amdgpu_bo_list_entry { | |
175 | /** Handle of BO */ | |
2ce9dde0 | 176 | __u32 bo_handle; |
81629cba | 177 | /** New (if specified) BO priority to be used during migration */ |
2ce9dde0 | 178 | __u32 bo_priority; |
81629cba AD |
179 | }; |
180 | ||
181 | struct drm_amdgpu_bo_list_out { | |
182 | /** Handle of resource list */ | |
2ce9dde0 MR |
183 | __u32 list_handle; |
184 | __u32 _pad; | |
81629cba AD |
185 | }; |
186 | ||
187 | union drm_amdgpu_bo_list { | |
188 | struct drm_amdgpu_bo_list_in in; | |
189 | struct drm_amdgpu_bo_list_out out; | |
190 | }; | |
191 | ||
/* context related */
#define AMDGPU_CTX_OP_ALLOC_CTX		1
#define AMDGPU_CTX_OP_FREE_CTX		2
#define AMDGPU_CTX_OP_QUERY_STATE	3
#define AMDGPU_CTX_OP_QUERY_STATE2	4

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET		0
/* the context caused it */
#define AMDGPU_CTX_GUILTY_RESET		1
/* some other context caused it */
#define AMDGPU_CTX_INNOCENT_RESET	2
/* unknown cause */
#define AMDGPU_CTX_UNKNOWN_RESET	3

/* indicate gpu reset occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_RESET		(1<<0)
/* indicate vram lost occurred after ctx created */
#define AMDGPU_CTX_QUERY2_FLAGS_VRAMLOST	(1<<1)
/* indicate some job from this context once caused a gpu hang */
#define AMDGPU_CTX_QUERY2_FLAGS_GUILTY		(1<<2)
/* indicate some errors are detected by RAS */
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_CE		(1<<3)
#define AMDGPU_CTX_QUERY2_FLAGS_RAS_UE		(1<<4)

/* Context priority level */
#define AMDGPU_CTX_PRIORITY_UNSET	-2048
#define AMDGPU_CTX_PRIORITY_VERY_LOW	-1023
#define AMDGPU_CTX_PRIORITY_LOW		-512
#define AMDGPU_CTX_PRIORITY_NORMAL	0
/* Selecting a priority above NORMAL requires CAP_SYS_NICE or DRM_MASTER */
#define AMDGPU_CTX_PRIORITY_HIGH	512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH	1023
c2636dc5 | 225 | |
81629cba | 226 | struct drm_amdgpu_ctx_in { |
675da0dd | 227 | /** AMDGPU_CTX_OP_* */ |
2ce9dde0 | 228 | __u32 op; |
675da0dd | 229 | /** For future use, no flags defined so far */ |
2ce9dde0 MR |
230 | __u32 flags; |
231 | __u32 ctx_id; | |
c2636dc5 | 232 | __s32 priority; |
81629cba AD |
233 | }; |
234 | ||
235 | union drm_amdgpu_ctx_out { | |
236 | struct { | |
2ce9dde0 MR |
237 | __u32 ctx_id; |
238 | __u32 _pad; | |
81629cba AD |
239 | } alloc; |
240 | ||
241 | struct { | |
675da0dd | 242 | /** For future use, no flags defined so far */ |
2ce9dde0 | 243 | __u64 flags; |
d94aed5a | 244 | /** Number of resets caused by this context so far. */ |
2ce9dde0 | 245 | __u32 hangs; |
d94aed5a | 246 | /** Reset status since the last call of the ioctl. */ |
2ce9dde0 | 247 | __u32 reset_status; |
81629cba AD |
248 | } state; |
249 | }; | |
250 | ||
251 | union drm_amdgpu_ctx { | |
252 | struct drm_amdgpu_ctx_in in; | |
253 | union drm_amdgpu_ctx_out out; | |
254 | }; | |
255 | ||
cfbcacf4 CZ |
256 | /* vm ioctl */ |
257 | #define AMDGPU_VM_OP_RESERVE_VMID 1 | |
258 | #define AMDGPU_VM_OP_UNRESERVE_VMID 2 | |
259 | ||
260 | struct drm_amdgpu_vm_in { | |
261 | /** AMDGPU_VM_OP_* */ | |
262 | __u32 op; | |
263 | __u32 flags; | |
264 | }; | |
265 | ||
266 | struct drm_amdgpu_vm_out { | |
267 | /** For future use, no flags defined so far */ | |
268 | __u64 flags; | |
269 | }; | |
270 | ||
271 | union drm_amdgpu_vm { | |
272 | struct drm_amdgpu_vm_in in; | |
273 | struct drm_amdgpu_vm_out out; | |
274 | }; | |
275 | ||
52c6a62c AR |
276 | /* sched ioctl */ |
277 | #define AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE 1 | |
b5bb37ed | 278 | #define AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE 2 |
52c6a62c AR |
279 | |
280 | struct drm_amdgpu_sched_in { | |
281 | /* AMDGPU_SCHED_OP_* */ | |
282 | __u32 op; | |
283 | __u32 fd; | |
284 | __s32 priority; | |
b5bb37ed | 285 | __u32 ctx_id; |
52c6a62c AR |
286 | }; |
287 | ||
288 | union drm_amdgpu_sched { | |
289 | struct drm_amdgpu_sched_in in; | |
290 | }; | |
291 | ||
81629cba AD |
292 | /* |
293 | * This is not a reliable API and you should expect it to fail for any | |
294 | * number of reasons and have fallback path that do not use userptr to | |
295 | * perform any operation. | |
296 | */ | |
297 | #define AMDGPU_GEM_USERPTR_READONLY (1 << 0) | |
298 | #define AMDGPU_GEM_USERPTR_ANONONLY (1 << 1) | |
299 | #define AMDGPU_GEM_USERPTR_VALIDATE (1 << 2) | |
300 | #define AMDGPU_GEM_USERPTR_REGISTER (1 << 3) | |
301 | ||
302 | struct drm_amdgpu_gem_userptr { | |
2ce9dde0 MR |
303 | __u64 addr; |
304 | __u64 size; | |
675da0dd | 305 | /* AMDGPU_GEM_USERPTR_* */ |
2ce9dde0 | 306 | __u32 flags; |
675da0dd | 307 | /* Resulting GEM handle */ |
2ce9dde0 | 308 | __u32 handle; |
81629cba AD |
309 | }; |
310 | ||
/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT			0
#define AMDGPU_TILING_ARRAY_MODE_MASK			0xf
#define AMDGPU_TILING_PIPE_CONFIG_SHIFT			4
#define AMDGPU_TILING_PIPE_CONFIG_MASK			0x1f
#define AMDGPU_TILING_TILE_SPLIT_SHIFT			9
#define AMDGPU_TILING_TILE_SPLIT_MASK			0x7
#define AMDGPU_TILING_MICRO_TILE_MODE_SHIFT		12
#define AMDGPU_TILING_MICRO_TILE_MODE_MASK		0x7
#define AMDGPU_TILING_BANK_WIDTH_SHIFT			15
#define AMDGPU_TILING_BANK_WIDTH_MASK			0x3
#define AMDGPU_TILING_BANK_HEIGHT_SHIFT			17
#define AMDGPU_TILING_BANK_HEIGHT_MASK			0x3
#define AMDGPU_TILING_MACRO_TILE_ASPECT_SHIFT		19
#define AMDGPU_TILING_MACRO_TILE_ASPECT_MASK		0x3
#define AMDGPU_TILING_NUM_BANKS_SHIFT			21
#define AMDGPU_TILING_NUM_BANKS_MASK			0x3

/* GFX9 and later: */
#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT		0
#define AMDGPU_TILING_SWIZZLE_MODE_MASK			0x1f
#define AMDGPU_TILING_DCC_OFFSET_256B_SHIFT		5
#define AMDGPU_TILING_DCC_OFFSET_256B_MASK		0xFFFFFF
#define AMDGPU_TILING_DCC_PITCH_MAX_SHIFT		29
#define AMDGPU_TILING_DCC_PITCH_MAX_MASK		0x3FFF
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_SHIFT		43
#define AMDGPU_TILING_DCC_INDEPENDENT_64B_MASK		0x1

/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
	(((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
	(((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
81629cba AD |
345 | |
346 | #define AMDGPU_GEM_METADATA_OP_SET_METADATA 1 | |
347 | #define AMDGPU_GEM_METADATA_OP_GET_METADATA 2 | |
348 | ||
349 | /** The same structure is shared for input/output */ | |
350 | struct drm_amdgpu_gem_metadata { | |
675da0dd | 351 | /** GEM Object handle */ |
2ce9dde0 | 352 | __u32 handle; |
675da0dd | 353 | /** Do we want get or set metadata */ |
2ce9dde0 | 354 | __u32 op; |
81629cba | 355 | struct { |
675da0dd | 356 | /** For future use, no flags defined so far */ |
2ce9dde0 | 357 | __u64 flags; |
675da0dd | 358 | /** family specific tiling info */ |
2ce9dde0 MR |
359 | __u64 tiling_info; |
360 | __u32 data_size_bytes; | |
361 | __u32 data[64]; | |
81629cba AD |
362 | } data; |
363 | }; | |
364 | ||
365 | struct drm_amdgpu_gem_mmap_in { | |
675da0dd | 366 | /** the GEM object handle */ |
2ce9dde0 MR |
367 | __u32 handle; |
368 | __u32 _pad; | |
81629cba AD |
369 | }; |
370 | ||
371 | struct drm_amdgpu_gem_mmap_out { | |
675da0dd | 372 | /** mmap offset from the vma offset manager */ |
2ce9dde0 | 373 | __u64 addr_ptr; |
81629cba AD |
374 | }; |
375 | ||
376 | union drm_amdgpu_gem_mmap { | |
377 | struct drm_amdgpu_gem_mmap_in in; | |
378 | struct drm_amdgpu_gem_mmap_out out; | |
379 | }; | |
380 | ||
381 | struct drm_amdgpu_gem_wait_idle_in { | |
675da0dd | 382 | /** GEM object handle */ |
2ce9dde0 | 383 | __u32 handle; |
675da0dd | 384 | /** For future use, no flags defined so far */ |
2ce9dde0 | 385 | __u32 flags; |
675da0dd | 386 | /** Absolute timeout to wait */ |
2ce9dde0 | 387 | __u64 timeout; |
81629cba AD |
388 | }; |
389 | ||
390 | struct drm_amdgpu_gem_wait_idle_out { | |
675da0dd | 391 | /** BO status: 0 - BO is idle, 1 - BO is busy */ |
2ce9dde0 | 392 | __u32 status; |
675da0dd | 393 | /** Returned current memory domain */ |
2ce9dde0 | 394 | __u32 domain; |
81629cba AD |
395 | }; |
396 | ||
397 | union drm_amdgpu_gem_wait_idle { | |
398 | struct drm_amdgpu_gem_wait_idle_in in; | |
399 | struct drm_amdgpu_gem_wait_idle_out out; | |
400 | }; | |
401 | ||
402 | struct drm_amdgpu_wait_cs_in { | |
d7b1eeb2 ML |
403 | /* Command submission handle |
404 | * handle equals 0 means none to wait for | |
080b24eb | 405 | * handle equals ~0ull means wait for the latest sequence number |
d7b1eeb2 | 406 | */ |
2ce9dde0 | 407 | __u64 handle; |
675da0dd | 408 | /** Absolute timeout to wait */ |
2ce9dde0 MR |
409 | __u64 timeout; |
410 | __u32 ip_type; | |
411 | __u32 ip_instance; | |
412 | __u32 ring; | |
413 | __u32 ctx_id; | |
81629cba AD |
414 | }; |
415 | ||
416 | struct drm_amdgpu_wait_cs_out { | |
675da0dd | 417 | /** CS status: 0 - CS completed, 1 - CS still busy */ |
2ce9dde0 | 418 | __u64 status; |
81629cba AD |
419 | }; |
420 | ||
421 | union drm_amdgpu_wait_cs { | |
422 | struct drm_amdgpu_wait_cs_in in; | |
423 | struct drm_amdgpu_wait_cs_out out; | |
424 | }; | |
425 | ||
eef18a82 JZ |
426 | struct drm_amdgpu_fence { |
427 | __u32 ctx_id; | |
428 | __u32 ip_type; | |
429 | __u32 ip_instance; | |
430 | __u32 ring; | |
431 | __u64 seq_no; | |
432 | }; | |
433 | ||
434 | struct drm_amdgpu_wait_fences_in { | |
435 | /** This points to uint64_t * which points to fences */ | |
436 | __u64 fences; | |
437 | __u32 fence_count; | |
438 | __u32 wait_all; | |
439 | __u64 timeout_ns; | |
440 | }; | |
441 | ||
442 | struct drm_amdgpu_wait_fences_out { | |
443 | __u32 status; | |
444 | __u32 first_signaled; | |
445 | }; | |
446 | ||
447 | union drm_amdgpu_wait_fences { | |
448 | struct drm_amdgpu_wait_fences_in in; | |
449 | struct drm_amdgpu_wait_fences_out out; | |
450 | }; | |
451 | ||
675da0dd CK |
452 | #define AMDGPU_GEM_OP_GET_GEM_CREATE_INFO 0 |
453 | #define AMDGPU_GEM_OP_SET_PLACEMENT 1 | |
454 | ||
81629cba AD |
455 | /* Sets or returns a value associated with a buffer. */ |
456 | struct drm_amdgpu_gem_op { | |
675da0dd | 457 | /** GEM object handle */ |
2ce9dde0 | 458 | __u32 handle; |
675da0dd | 459 | /** AMDGPU_GEM_OP_* */ |
2ce9dde0 | 460 | __u32 op; |
675da0dd | 461 | /** Input or return value */ |
2ce9dde0 | 462 | __u64 value; |
81629cba AD |
463 | }; |
464 | ||
81629cba AD |
465 | #define AMDGPU_VA_OP_MAP 1 |
466 | #define AMDGPU_VA_OP_UNMAP 2 | |
dc54d3d1 | 467 | #define AMDGPU_VA_OP_CLEAR 3 |
80f95c57 | 468 | #define AMDGPU_VA_OP_REPLACE 4 |
81629cba | 469 | |
fc220f65 CK |
470 | /* Delay the page table update till the next CS */ |
471 | #define AMDGPU_VM_DELAY_UPDATE (1 << 0) | |
472 | ||
81629cba AD |
473 | /* Mapping flags */ |
474 | /* readable mapping */ | |
475 | #define AMDGPU_VM_PAGE_READABLE (1 << 1) | |
476 | /* writable mapping */ | |
477 | #define AMDGPU_VM_PAGE_WRITEABLE (1 << 2) | |
478 | /* executable mapping, new for VI */ | |
479 | #define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3) | |
b85891bd JZ |
480 | /* partially resident texture */ |
481 | #define AMDGPU_VM_PAGE_PRT (1 << 4) | |
66e02bc3 AX |
482 | /* MTYPE flags use bit 5 to 8 */ |
483 | #define AMDGPU_VM_MTYPE_MASK (0xf << 5) | |
484 | /* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */ | |
485 | #define AMDGPU_VM_MTYPE_DEFAULT (0 << 5) | |
486 | /* Use NC MTYPE instead of default MTYPE */ | |
487 | #define AMDGPU_VM_MTYPE_NC (1 << 5) | |
488 | /* Use WC MTYPE instead of default MTYPE */ | |
489 | #define AMDGPU_VM_MTYPE_WC (2 << 5) | |
490 | /* Use CC MTYPE instead of default MTYPE */ | |
491 | #define AMDGPU_VM_MTYPE_CC (3 << 5) | |
492 | /* Use UC MTYPE instead of default MTYPE */ | |
493 | #define AMDGPU_VM_MTYPE_UC (4 << 5) | |
81629cba | 494 | |
34b5f6a6 | 495 | struct drm_amdgpu_gem_va { |
675da0dd | 496 | /** GEM object handle */ |
2ce9dde0 MR |
497 | __u32 handle; |
498 | __u32 _pad; | |
675da0dd | 499 | /** AMDGPU_VA_OP_* */ |
2ce9dde0 | 500 | __u32 operation; |
675da0dd | 501 | /** AMDGPU_VM_PAGE_* */ |
2ce9dde0 | 502 | __u32 flags; |
675da0dd | 503 | /** va address to assign . Must be correctly aligned.*/ |
2ce9dde0 | 504 | __u64 va_address; |
675da0dd | 505 | /** Specify offset inside of BO to assign. Must be correctly aligned.*/ |
2ce9dde0 | 506 | __u64 offset_in_bo; |
675da0dd | 507 | /** Specify mapping size. Must be correctly aligned. */ |
2ce9dde0 | 508 | __u64 map_size; |
81629cba AD |
509 | }; |
510 | ||
/* HW IP (engine) types a CS can target */
#define AMDGPU_HW_IP_GFX		0
#define AMDGPU_HW_IP_COMPUTE		1
#define AMDGPU_HW_IP_DMA		2
#define AMDGPU_HW_IP_UVD		3
#define AMDGPU_HW_IP_VCE		4
#define AMDGPU_HW_IP_UVD_ENC		5
#define AMDGPU_HW_IP_VCN_DEC		6
#define AMDGPU_HW_IP_VCN_ENC		7
#define AMDGPU_HW_IP_VCN_JPEG		8
#define AMDGPU_HW_IP_NUM		9

#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT	1

/* CS chunk identifiers (drm_amdgpu_cs_chunk.chunk_id) */
#define AMDGPU_CHUNK_ID_IB		0x01
#define AMDGPU_CHUNK_ID_FENCE		0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES	0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN	0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT	0x05
#define AMDGPU_CHUNK_ID_BO_HANDLES	0x06
#define AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES	0x07
675da0dd | 531 | |
81629cba | 532 | struct drm_amdgpu_cs_chunk { |
2ce9dde0 MR |
533 | __u32 chunk_id; |
534 | __u32 length_dw; | |
535 | __u64 chunk_data; | |
81629cba AD |
536 | }; |
537 | ||
538 | struct drm_amdgpu_cs_in { | |
539 | /** Rendering context id */ | |
2ce9dde0 | 540 | __u32 ctx_id; |
81629cba | 541 | /** Handle of resource list associated with CS */ |
2ce9dde0 MR |
542 | __u32 bo_list_handle; |
543 | __u32 num_chunks; | |
544 | __u32 _pad; | |
545 | /** this points to __u64 * which point to cs chunks */ | |
546 | __u64 chunks; | |
81629cba AD |
547 | }; |
548 | ||
549 | struct drm_amdgpu_cs_out { | |
2ce9dde0 | 550 | __u64 handle; |
81629cba AD |
551 | }; |
552 | ||
553 | union drm_amdgpu_cs { | |
675da0dd CK |
554 | struct drm_amdgpu_cs_in in; |
555 | struct drm_amdgpu_cs_out out; | |
81629cba AD |
556 | }; |
557 | ||
/* Specify flags to be used for IB */

/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE	(1<<0)

/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)

/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
#define AMDGPU_IB_FLAG_PREEMPT (1<<2)

/* The IB fence should do the L2 writeback but not invalidate any shader
 * caches (L2/vL1/sL1/I$). */
#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)

/* Set GDS_COMPUTE_MAX_WAVE_ID = DEFAULT before PACKET3_INDIRECT_BUFFER.
 * This will reset wave ID counters for the IB.
 */
#define AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID (1 << 4)
577 | ||
81629cba | 578 | struct drm_amdgpu_cs_chunk_ib { |
2ce9dde0 | 579 | __u32 _pad; |
675da0dd | 580 | /** AMDGPU_IB_FLAG_* */ |
2ce9dde0 | 581 | __u32 flags; |
675da0dd | 582 | /** Virtual address to begin IB execution */ |
2ce9dde0 | 583 | __u64 va_start; |
675da0dd | 584 | /** Size of submission */ |
2ce9dde0 | 585 | __u32 ib_bytes; |
675da0dd | 586 | /** HW IP to submit to */ |
2ce9dde0 | 587 | __u32 ip_type; |
675da0dd | 588 | /** HW IP index of the same type to submit to */ |
2ce9dde0 | 589 | __u32 ip_instance; |
675da0dd | 590 | /** Ring index to submit to */ |
2ce9dde0 | 591 | __u32 ring; |
81629cba AD |
592 | }; |
593 | ||
2b48d323 | 594 | struct drm_amdgpu_cs_chunk_dep { |
2ce9dde0 MR |
595 | __u32 ip_type; |
596 | __u32 ip_instance; | |
597 | __u32 ring; | |
598 | __u32 ctx_id; | |
599 | __u64 handle; | |
2b48d323 CK |
600 | }; |
601 | ||
81629cba | 602 | struct drm_amdgpu_cs_chunk_fence { |
2ce9dde0 MR |
603 | __u32 handle; |
604 | __u32 offset; | |
81629cba AD |
605 | }; |
606 | ||
660e8558 DA |
607 | struct drm_amdgpu_cs_chunk_sem { |
608 | __u32 handle; | |
609 | }; | |
610 | ||
7ca24cf2 MO |
611 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ 0 |
612 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD 1 | |
613 | #define AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD 2 | |
614 | ||
615 | union drm_amdgpu_fence_to_handle { | |
616 | struct { | |
617 | struct drm_amdgpu_fence fence; | |
618 | __u32 what; | |
56e0349f | 619 | __u32 pad; |
7ca24cf2 MO |
620 | } in; |
621 | struct { | |
622 | __u32 handle; | |
623 | } out; | |
624 | }; | |
625 | ||
81629cba AD |
626 | struct drm_amdgpu_cs_chunk_data { |
627 | union { | |
628 | struct drm_amdgpu_cs_chunk_ib ib_data; | |
629 | struct drm_amdgpu_cs_chunk_fence fence_data; | |
630 | }; | |
631 | }; | |
632 | ||
/**
 * Query h/w info: Flag that this is integrated (a.k.a. fusion) GPU
 *
 */
#define AMDGPU_IDS_FLAGS_FUSION		0x1
#define AMDGPU_IDS_FLAGS_PREEMPTION	0x2
81629cba AD |
639 | |
640 | /* indicate if acceleration can be working */ | |
641 | #define AMDGPU_INFO_ACCEL_WORKING 0x00 | |
642 | /* get the crtc_id from the mode object id? */ | |
643 | #define AMDGPU_INFO_CRTC_FROM_ID 0x01 | |
644 | /* query hw IP info */ | |
645 | #define AMDGPU_INFO_HW_IP_INFO 0x02 | |
646 | /* query hw IP instance count for the specified type */ | |
647 | #define AMDGPU_INFO_HW_IP_COUNT 0x03 | |
648 | /* timestamp for GL_ARB_timer_query */ | |
649 | #define AMDGPU_INFO_TIMESTAMP 0x05 | |
650 | /* Query the firmware version */ | |
651 | #define AMDGPU_INFO_FW_VERSION 0x0e | |
652 | /* Subquery id: Query VCE firmware version */ | |
653 | #define AMDGPU_INFO_FW_VCE 0x1 | |
654 | /* Subquery id: Query UVD firmware version */ | |
655 | #define AMDGPU_INFO_FW_UVD 0x2 | |
656 | /* Subquery id: Query GMC firmware version */ | |
657 | #define AMDGPU_INFO_FW_GMC 0x03 | |
658 | /* Subquery id: Query GFX ME firmware version */ | |
659 | #define AMDGPU_INFO_FW_GFX_ME 0x04 | |
660 | /* Subquery id: Query GFX PFP firmware version */ | |
661 | #define AMDGPU_INFO_FW_GFX_PFP 0x05 | |
662 | /* Subquery id: Query GFX CE firmware version */ | |
663 | #define AMDGPU_INFO_FW_GFX_CE 0x06 | |
664 | /* Subquery id: Query GFX RLC firmware version */ | |
665 | #define AMDGPU_INFO_FW_GFX_RLC 0x07 | |
666 | /* Subquery id: Query GFX MEC firmware version */ | |
667 | #define AMDGPU_INFO_FW_GFX_MEC 0x08 | |
668 | /* Subquery id: Query SMC firmware version */ | |
669 | #define AMDGPU_INFO_FW_SMC 0x0a | |
670 | /* Subquery id: Query SDMA firmware version */ | |
671 | #define AMDGPU_INFO_FW_SDMA 0x0b | |
6a7ed07e HR |
672 | /* Subquery id: Query PSP SOS firmware version */ |
673 | #define AMDGPU_INFO_FW_SOS 0x0c | |
674 | /* Subquery id: Query PSP ASD firmware version */ | |
675 | #define AMDGPU_INFO_FW_ASD 0x0d | |
3ac952b1 AD |
676 | /* Subquery id: Query VCN firmware version */ |
677 | #define AMDGPU_INFO_FW_VCN 0x0e | |
621a6318 HR |
678 | /* Subquery id: Query GFX RLC SRLC firmware version */ |
679 | #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f | |
680 | /* Subquery id: Query GFX RLC SRLG firmware version */ | |
681 | #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10 | |
682 | /* Subquery id: Query GFX RLC SRLS firmware version */ | |
683 | #define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11 | |
4d11b4b2 DF |
684 | /* Subquery id: Query DMCU firmware version */ |
685 | #define AMDGPU_INFO_FW_DMCU 0x12 | |
9b9ca62d | 686 | #define AMDGPU_INFO_FW_TA 0x13 |
81629cba AD |
687 | /* number of bytes moved for TTM migration */ |
688 | #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f | |
689 | /* the used VRAM size */ | |
690 | #define AMDGPU_INFO_VRAM_USAGE 0x10 | |
691 | /* the used GTT size */ | |
692 | #define AMDGPU_INFO_GTT_USAGE 0x11 | |
/* Information about GDS, etc. resource configuration */
#define AMDGPU_INFO_GDS_CONFIG			0x13
/* Query information about VRAM and GTT domains */
#define AMDGPU_INFO_VRAM_GTT			0x14
/* Query information about register in MMR address space */
#define AMDGPU_INFO_READ_MMR_REG		0x15
/* Query information about device: rev id, family, etc. */
#define AMDGPU_INFO_DEV_INFO			0x16
/* visible vram usage */
#define AMDGPU_INFO_VIS_VRAM_USAGE		0x17
/* number of TTM buffer evictions */
#define AMDGPU_INFO_NUM_EVICTIONS		0x18
/* Query memory usage about VRAM and GTT domains */
#define AMDGPU_INFO_MEMORY			0x19
/* Query vce clock table */
#define AMDGPU_INFO_VCE_CLOCK_TABLE		0x1A
/* Query vbios related information */
#define AMDGPU_INFO_VBIOS			0x1B
	/* Subquery id: Query vbios size */
	#define AMDGPU_INFO_VBIOS_SIZE		0x1
	/* Subquery id: Query vbios image */
	#define AMDGPU_INFO_VBIOS_IMAGE		0x2
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES			0x1C
/* Query sensor related information */
#define AMDGPU_INFO_SENSOR			0x1D
	/* Subquery id: Query GPU shader clock */
	#define AMDGPU_INFO_SENSOR_GFX_SCLK		0x1
	/* Subquery id: Query GPU memory clock */
	#define AMDGPU_INFO_SENSOR_GFX_MCLK		0x2
	/* Subquery id: Query GPU temperature */
	#define AMDGPU_INFO_SENSOR_GPU_TEMP		0x3
	/* Subquery id: Query GPU load */
	#define AMDGPU_INFO_SENSOR_GPU_LOAD		0x4
	/* Subquery id: Query average GPU power */
	#define AMDGPU_INFO_SENSOR_GPU_AVG_POWER	0x5
	/* Subquery id: Query northbridge voltage */
	#define AMDGPU_INFO_SENSOR_VDDNB		0x6
	/* Subquery id: Query graphics voltage */
	#define AMDGPU_INFO_SENSOR_VDDGFX		0x7
	/* Subquery id: Query GPU stable pstate shader clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_SCLK	0x8
	/* Subquery id: Query GPU stable pstate memory clock */
	#define AMDGPU_INFO_SENSOR_STABLE_PSTATE_GFX_MCLK	0x9
/* Number of VRAM page faults on CPU access. */
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS	0x1E
/* Number of times the VRAM contents were lost (e.g. after a GPU reset) */
#define AMDGPU_INFO_VRAM_LOST_COUNTER		0x1F
/* query ras mask of enabled features */
#define AMDGPU_INFO_RAS_ENABLED_FEATURES	0x20
742 | ||
/* Bitmask values returned by AMDGPU_INFO_RAS_ENABLED_FEATURES,
 * one bit per IP block with RAS (Reliability, Availability,
 * Serviceability) support enabled.
 */
/* RAS MASK: UMC (VRAM) */
#define AMDGPU_INFO_RAS_ENABLED_UMC	(1 << 0)
/* RAS MASK: SDMA */
#define AMDGPU_INFO_RAS_ENABLED_SDMA	(1 << 1)
/* RAS MASK: GFX */
#define AMDGPU_INFO_RAS_ENABLED_GFX	(1 << 2)
/* RAS MASK: MMHUB */
#define AMDGPU_INFO_RAS_ENABLED_MMHUB	(1 << 3)
/* RAS MASK: ATHUB */
#define AMDGPU_INFO_RAS_ENABLED_ATHUB	(1 << 4)
/* RAS MASK: PCIE */
#define AMDGPU_INFO_RAS_ENABLED_PCIE	(1 << 5)
/* RAS MASK: HDP */
#define AMDGPU_INFO_RAS_ENABLED_HDP	(1 << 6)
/* RAS MASK: XGMI */
#define AMDGPU_INFO_RAS_ENABLED_XGMI	(1 << 7)
/* RAS MASK: DF */
#define AMDGPU_INFO_RAS_ENABLED_DF	(1 << 8)
/* RAS MASK: SMN */
#define AMDGPU_INFO_RAS_ENABLED_SMN	(1 << 9)
/* RAS MASK: SEM */
#define AMDGPU_INFO_RAS_ENABLED_SEM	(1 << 10)
/* RAS MASK: MP0 */
#define AMDGPU_INFO_RAS_ENABLED_MP0	(1 << 11)
/* RAS MASK: MP1 */
#define AMDGPU_INFO_RAS_ENABLED_MP1	(1 << 12)
/* RAS MASK: FUSE */
#define AMDGPU_INFO_RAS_ENABLED_FUSE	(1 << 13)
81629cba AD |
771 | |
/* Layout of the "instance" field for AMDGPU_INFO_READ_MMR_REG:
 * shader engine (SE) index in bits [7:0], shader array (SH) index
 * in bits [15:8].
 */
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT	0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK	0xff
#define AMDGPU_INFO_MMR_SH_INDEX_SHIFT	8
#define AMDGPU_INFO_MMR_SH_INDEX_MASK	0xff
000cab9a HR |
777 | struct drm_amdgpu_query_fw { |
778 | /** AMDGPU_INFO_FW_* */ | |
779 | __u32 fw_type; | |
780 | /** | |
781 | * Index of the IP if there are more IPs of | |
782 | * the same type. | |
783 | */ | |
784 | __u32 ip_instance; | |
785 | /** | |
786 | * Index of the engine. Whether this is used depends | |
787 | * on the firmware type. (e.g. MEC, SDMA) | |
788 | */ | |
789 | __u32 index; | |
790 | __u32 _pad; | |
791 | }; | |
792 | ||
81629cba AD |
793 | /* Input structure for the INFO ioctl */ |
794 | struct drm_amdgpu_info { | |
795 | /* Where the return value will be stored */ | |
2ce9dde0 | 796 | __u64 return_pointer; |
81629cba AD |
797 | /* The size of the return value. Just like "size" in "snprintf", |
798 | * it limits how many bytes the kernel can write. */ | |
2ce9dde0 | 799 | __u32 return_size; |
81629cba | 800 | /* The query request id. */ |
2ce9dde0 | 801 | __u32 query; |
81629cba AD |
802 | |
803 | union { | |
804 | struct { | |
2ce9dde0 MR |
805 | __u32 id; |
806 | __u32 _pad; | |
81629cba AD |
807 | } mode_crtc; |
808 | ||
809 | struct { | |
810 | /** AMDGPU_HW_IP_* */ | |
2ce9dde0 | 811 | __u32 type; |
81629cba | 812 | /** |
675da0dd CK |
813 | * Index of the IP if there are more IPs of the same |
814 | * type. Ignored by AMDGPU_INFO_HW_IP_COUNT. | |
81629cba | 815 | */ |
2ce9dde0 | 816 | __u32 ip_instance; |
81629cba AD |
817 | } query_hw_ip; |
818 | ||
819 | struct { | |
2ce9dde0 | 820 | __u32 dword_offset; |
675da0dd | 821 | /** number of registers to read */ |
2ce9dde0 MR |
822 | __u32 count; |
823 | __u32 instance; | |
675da0dd | 824 | /** For future use, no flags defined so far */ |
2ce9dde0 | 825 | __u32 flags; |
81629cba AD |
826 | } read_mmr_reg; |
827 | ||
000cab9a | 828 | struct drm_amdgpu_query_fw query_fw; |
40ee5888 EQ |
829 | |
830 | struct { | |
831 | __u32 type; | |
832 | __u32 offset; | |
833 | } vbios_info; | |
5ebbac4b AD |
834 | |
835 | struct { | |
836 | __u32 type; | |
837 | } sensor_info; | |
81629cba AD |
838 | }; |
839 | }; | |
840 | ||
841 | struct drm_amdgpu_info_gds { | |
842 | /** GDS GFX partition size */ | |
2ce9dde0 | 843 | __u32 gds_gfx_partition_size; |
81629cba | 844 | /** GDS compute partition size */ |
2ce9dde0 | 845 | __u32 compute_partition_size; |
81629cba | 846 | /** total GDS memory size */ |
2ce9dde0 | 847 | __u32 gds_total_size; |
81629cba | 848 | /** GWS size per GFX partition */ |
2ce9dde0 | 849 | __u32 gws_per_gfx_partition; |
81629cba | 850 | /** GSW size per compute partition */ |
2ce9dde0 | 851 | __u32 gws_per_compute_partition; |
81629cba | 852 | /** OA size per GFX partition */ |
2ce9dde0 | 853 | __u32 oa_per_gfx_partition; |
81629cba | 854 | /** OA size per compute partition */ |
2ce9dde0 MR |
855 | __u32 oa_per_compute_partition; |
856 | __u32 _pad; | |
81629cba AD |
857 | }; |
858 | ||
859 | struct drm_amdgpu_info_vram_gtt { | |
2ce9dde0 MR |
860 | __u64 vram_size; |
861 | __u64 vram_cpu_accessible_size; | |
862 | __u64 gtt_size; | |
81629cba AD |
863 | }; |
864 | ||
e0adf6c8 JZ |
865 | struct drm_amdgpu_heap_info { |
866 | /** max. physical memory */ | |
867 | __u64 total_heap_size; | |
868 | ||
869 | /** Theoretical max. available memory in the given heap */ | |
870 | __u64 usable_heap_size; | |
871 | ||
872 | /** | |
873 | * Number of bytes allocated in the heap. This includes all processes | |
874 | * and private allocations in the kernel. It changes when new buffers | |
875 | * are allocated, freed, and moved. It cannot be larger than | |
876 | * heap_size. | |
877 | */ | |
878 | __u64 heap_usage; | |
879 | ||
880 | /** | |
881 | * Theoretical possible max. size of buffer which | |
882 | * could be allocated in the given heap | |
883 | */ | |
884 | __u64 max_allocation; | |
9f6163e7 JZ |
885 | }; |
886 | ||
e0adf6c8 JZ |
887 | struct drm_amdgpu_memory_info { |
888 | struct drm_amdgpu_heap_info vram; | |
889 | struct drm_amdgpu_heap_info cpu_accessible_vram; | |
890 | struct drm_amdgpu_heap_info gtt; | |
cfa32556 JZ |
891 | }; |
892 | ||
81629cba | 893 | struct drm_amdgpu_info_firmware { |
2ce9dde0 MR |
894 | __u32 ver; |
895 | __u32 feature; | |
81629cba AD |
896 | }; |
897 | ||
/* Values reported in drm_amdgpu_info_device.vram_type. */
#define AMDGPU_VRAM_TYPE_UNKNOWN 0
#define AMDGPU_VRAM_TYPE_GDDR1 1
#define AMDGPU_VRAM_TYPE_DDR2  2
#define AMDGPU_VRAM_TYPE_GDDR3 3
#define AMDGPU_VRAM_TYPE_GDDR4 4
#define AMDGPU_VRAM_TYPE_GDDR5 5
#define AMDGPU_VRAM_TYPE_HBM   6
#define AMDGPU_VRAM_TYPE_DDR3  7
#define AMDGPU_VRAM_TYPE_DDR4  8
81c59f54 | 907 | |
81629cba AD |
908 | struct drm_amdgpu_info_device { |
909 | /** PCI Device ID */ | |
2ce9dde0 | 910 | __u32 device_id; |
81629cba | 911 | /** Internal chip revision: A0, A1, etc.) */ |
2ce9dde0 MR |
912 | __u32 chip_rev; |
913 | __u32 external_rev; | |
81629cba | 914 | /** Revision id in PCI Config space */ |
2ce9dde0 MR |
915 | __u32 pci_rev; |
916 | __u32 family; | |
917 | __u32 num_shader_engines; | |
918 | __u32 num_shader_arrays_per_engine; | |
675da0dd | 919 | /* in KHz */ |
2ce9dde0 MR |
920 | __u32 gpu_counter_freq; |
921 | __u64 max_engine_clock; | |
922 | __u64 max_memory_clock; | |
81629cba | 923 | /* cu information */ |
2ce9dde0 | 924 | __u32 cu_active_number; |
dbfe85ea | 925 | /* NOTE: cu_ao_mask is INVALID, DON'T use it */ |
2ce9dde0 MR |
926 | __u32 cu_ao_mask; |
927 | __u32 cu_bitmap[4][4]; | |
81629cba | 928 | /** Render backend pipe mask. One render backend is CB+DB. */ |
2ce9dde0 MR |
929 | __u32 enabled_rb_pipes_mask; |
930 | __u32 num_rb_pipes; | |
931 | __u32 num_hw_gfx_contexts; | |
932 | __u32 _pad; | |
933 | __u64 ids_flags; | |
81629cba | 934 | /** Starting virtual address for UMDs. */ |
2ce9dde0 | 935 | __u64 virtual_address_offset; |
02b70c8c | 936 | /** The maximum virtual address */ |
2ce9dde0 | 937 | __u64 virtual_address_max; |
81629cba | 938 | /** Required alignment of virtual addresses. */ |
2ce9dde0 | 939 | __u32 virtual_address_alignment; |
81629cba | 940 | /** Page table entry - fragment size */ |
2ce9dde0 MR |
941 | __u32 pte_fragment_size; |
942 | __u32 gart_page_size; | |
a101a899 | 943 | /** constant engine ram size*/ |
2ce9dde0 | 944 | __u32 ce_ram_size; |
cab6d57c | 945 | /** video memory type info*/ |
2ce9dde0 | 946 | __u32 vram_type; |
81c59f54 | 947 | /** video memory bit width*/ |
2ce9dde0 | 948 | __u32 vram_bit_width; |
fa92754e | 949 | /* vce harvesting instance */ |
2ce9dde0 | 950 | __u32 vce_harvest_config; |
df6e2c4a JZ |
951 | /* gfx double offchip LDS buffers */ |
952 | __u32 gc_double_offchip_lds_buf; | |
bce23e00 AD |
953 | /* NGG Primitive Buffer */ |
954 | __u64 prim_buf_gpu_addr; | |
955 | /* NGG Position Buffer */ | |
956 | __u64 pos_buf_gpu_addr; | |
957 | /* NGG Control Sideband */ | |
958 | __u64 cntl_sb_buf_gpu_addr; | |
959 | /* NGG Parameter Cache */ | |
960 | __u64 param_buf_gpu_addr; | |
408bfe7c JZ |
961 | __u32 prim_buf_size; |
962 | __u32 pos_buf_size; | |
963 | __u32 cntl_sb_buf_size; | |
964 | __u32 param_buf_size; | |
965 | /* wavefront size*/ | |
966 | __u32 wave_front_size; | |
967 | /* shader visible vgprs*/ | |
968 | __u32 num_shader_visible_vgprs; | |
969 | /* CU per shader array*/ | |
970 | __u32 num_cu_per_sh; | |
971 | /* number of tcc blocks*/ | |
972 | __u32 num_tcc_blocks; | |
973 | /* gs vgt table depth*/ | |
974 | __u32 gs_vgt_table_depth; | |
975 | /* gs primitive buffer depth*/ | |
976 | __u32 gs_prim_buffer_depth; | |
977 | /* max gs wavefront per vgt*/ | |
978 | __u32 max_gs_waves_per_vgt; | |
979 | __u32 _pad1; | |
dbfe85ea FC |
980 | /* always on cu bitmap */ |
981 | __u32 cu_ao_bitmap[4][4]; | |
5b565e0e CK |
982 | /** Starting high virtual address for UMDs. */ |
983 | __u64 high_va_offset; | |
984 | /** The maximum high virtual address */ | |
985 | __u64 high_va_max; | |
81629cba AD |
986 | }; |
987 | ||
988 | struct drm_amdgpu_info_hw_ip { | |
989 | /** Version of h/w IP */ | |
2ce9dde0 MR |
990 | __u32 hw_ip_version_major; |
991 | __u32 hw_ip_version_minor; | |
81629cba | 992 | /** Capabilities */ |
2ce9dde0 | 993 | __u64 capabilities_flags; |
71062f43 | 994 | /** command buffer address start alignment*/ |
2ce9dde0 | 995 | __u32 ib_start_alignment; |
71062f43 | 996 | /** command buffer size alignment*/ |
2ce9dde0 | 997 | __u32 ib_size_alignment; |
81629cba | 998 | /** Bitmask of available rings. Bit 0 means ring 0, etc. */ |
2ce9dde0 MR |
999 | __u32 available_rings; |
1000 | __u32 _pad; | |
81629cba AD |
1001 | }; |
1002 | ||
44879b62 AN |
1003 | struct drm_amdgpu_info_num_handles { |
1004 | /** Max handles as supported by firmware for UVD */ | |
1005 | __u32 uvd_max_handles; | |
1006 | /** Handles currently in use for UVD */ | |
1007 | __u32 uvd_used_handles; | |
1008 | }; | |
1009 | ||
/* Number of entries in drm_amdgpu_info_vce_clock_table. */
#define AMDGPU_VCE_CLOCK_TABLE_ENTRIES 6
1011 | ||
1012 | struct drm_amdgpu_info_vce_clock_table_entry { | |
1013 | /** System clock */ | |
1014 | __u32 sclk; | |
1015 | /** Memory clock */ | |
1016 | __u32 mclk; | |
1017 | /** VCE clock */ | |
1018 | __u32 eclk; | |
1019 | __u32 pad; | |
1020 | }; | |
1021 | ||
1022 | struct drm_amdgpu_info_vce_clock_table { | |
1023 | struct drm_amdgpu_info_vce_clock_table_entry entries[AMDGPU_VCE_CLOCK_TABLE_ENTRIES]; | |
1024 | __u32 num_valid_entries; | |
1025 | __u32 pad; | |
1026 | }; | |
1027 | ||
/*
 * Supported GPU families
 */
#define AMDGPU_FAMILY_UNKNOWN	0
#define AMDGPU_FAMILY_SI	110 /* Hainan, Oland, Verde, Pitcairn, Tahiti */
#define AMDGPU_FAMILY_CI	120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV	125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI	130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ	135 /* Carrizo, Stoney */
#define AMDGPU_FAMILY_AI	141 /* Vega10 */
#define AMDGPU_FAMILY_RV	142 /* Raven */
81629cba | 1039 | |
cfa7152f EV |
1040 | #if defined(__cplusplus) |
1041 | } | |
1042 | #endif | |
1043 | ||
81629cba | 1044 | #endif |