/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gfx.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
#include "amdgpu_bo_list.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_SG_THRESHOLD		(256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB	3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS	3000
#define AMDGPU_MAX_USEC_TIMEOUT		100000	/* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT	(HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE		16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS	32
#define AMDGPUFB_CONN_LIMIT		4
#define AMDGPU_BIOS_NUM_SCRATCH		16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES	2

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA		0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX		(1 << 0)
#define AMDGPU_RESET_COMPUTE		(1 << 1)
#define AMDGPU_RESET_DMA		(1 << 2)
#define AMDGPU_RESET_CP			(1 << 3)
#define AMDGPU_RESET_GRBM		(1 << 4)
#define AMDGPU_RESET_DMA1		(1 << 5)
#define AMDGPU_RESET_RLC		(1 << 6)
#define AMDGPU_RESET_SEM		(1 << 7)
#define AMDGPU_RESET_IH			(1 << 8)
#define AMDGPU_RESET_VMC		(1 << 9)
#define AMDGPU_RESET_MC			(1 << 10)
#define AMDGPU_RESET_DISPLAY		(1 << 11)
#define AMDGPU_RESET_UVD		(1 << 12)
#define AMDGPU_RESET_VCE		(1 << 13)
#define AMDGPU_RESET_VCE1		(1 << 14)

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH		128
#define CIK_CURSOR_HEIGHT		128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
struct amdgpu_atif;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);
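
/*
 * Illustrative only (not part of this header): a caller typically gates
 * a block and then waits for it to go idle, e.g.
 *
 *	amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
 *					       AMD_CG_STATE_GATE);
 *	r = amdgpu_device_ip_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GFX);
 */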

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);

/* provided by hw blocks that can move/clear data, e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};
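
/*
 * Sketch of how the copy callback above is typically driven (assumed
 * call site; the buffer_funcs pointer is assumed to live in struct
 * amdgpu_mman):
 *
 *	adev->mman.buffer_funcs->emit_copy_buffer(&job->ibs[0],
 *						  src_addr, dst_addr,
 *						  byte_count);
 */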

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * GEM.
 */

#define AMDGPU_GEM_DOMAIN_MAX		0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end, i.e. total_size - (last_object_offset + last_object_size)
 * >= alloc_size.  If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
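
/*
 * A minimal sketch of the "room at the end" check described above
 * (illustration only; names follow struct amdgpu_sa_manager below):
 *
 *	if (sa_manager->size - last->eoffset >= aligned_alloc_size)
 *		... place the new sub-object right after the last one ...
 *	else
 *		... wait on fences in flist[] until enough room is freed ...
 */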

#define AMDGPU_SA_NUM_FENCE_LISTS	32

struct amdgpu_sa_manager {
	wait_queue_head_t	wq;
	struct amdgpu_bo	*bo;
	struct list_head	*hole;
	struct list_head	flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head	olist;
	unsigned		size;
	uint64_t		gpu_addr;
	void			*cpu_ptr;
	uint32_t		domain;
	uint32_t		align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head		olist;
	struct list_head		flist;
	struct amdgpu_sa_manager	*manager;
	unsigned			soffset;
	unsigned			eoffset;
	struct dma_fence		*fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ			= 0x000,
	AMDGPU_DOORBELL_HIQ			= 0x001,
	AMDGPU_DOORBELL_DIQ			= 0x002,
	AMDGPU_DOORBELL_MEC_RING0		= 0x010,
	AMDGPU_DOORBELL_MEC_RING1		= 0x011,
	AMDGPU_DOORBELL_MEC_RING2		= 0x012,
	AMDGPU_DOORBELL_MEC_RING3		= 0x013,
	AMDGPU_DOORBELL_MEC_RING4		= 0x014,
	AMDGPU_DOORBELL_MEC_RING5		= 0x015,
	AMDGPU_DOORBELL_MEC_RING6		= 0x016,
	AMDGPU_DOORBELL_MEC_RING7		= 0x017,
	AMDGPU_DOORBELL_GFX_RING0		= 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0		= 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1		= 0x1E1,
	AMDGPU_DOORBELL_IH			= 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT		= 0x3FF,
	AMDGPU_DOORBELL_INVALID			= 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t	base;
	resource_size_t	size;
	u32 __iomem	*ptr;
	u32		num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};
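
/*
 * Note: doorbell writes normally go through the WDOORBELL32()/
 * WDOORBELL64() helpers defined later in this file, which index into
 * the mapping above; a ring with a doorbell assigned might kick it
 * like this (sketch only, ring fields assumed):
 *
 *	if (ring->use_doorbell)
 *		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
 */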

/*
 * 64-bit doorbells; offsets are in QWORDs, occupying 2KB of doorbell space
 */
typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
{
	/*
	 * All compute related doorbells (kiq, hiq, diq, traditional compute
	 * queues, user queues) should be located in a contiguous range so
	 * that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover this
	 * range.  Compute related doorbells are allocated from 0x00 to 0x8a.
	 */

	/* kernel scheduling */
	AMDGPU_DOORBELL64_KIQ			= 0x00,

	/* HSA interface queue and debug queue */
	AMDGPU_DOORBELL64_HIQ			= 0x01,
	AMDGPU_DOORBELL64_DIQ			= 0x02,

	/* Compute engines */
	AMDGPU_DOORBELL64_MEC_RING0		= 0x03,
	AMDGPU_DOORBELL64_MEC_RING1		= 0x04,
	AMDGPU_DOORBELL64_MEC_RING2		= 0x05,
	AMDGPU_DOORBELL64_MEC_RING3		= 0x06,
	AMDGPU_DOORBELL64_MEC_RING4		= 0x07,
	AMDGPU_DOORBELL64_MEC_RING5		= 0x08,
	AMDGPU_DOORBELL64_MEC_RING6		= 0x09,
	AMDGPU_DOORBELL64_MEC_RING7		= 0x0a,

	/* User queue doorbell range (128 doorbells) */
	AMDGPU_DOORBELL64_USERQUEUE_START	= 0x0b,
	AMDGPU_DOORBELL64_USERQUEUE_END		= 0x8a,

	/* Graphics engine */
	AMDGPU_DOORBELL64_GFX_RING0		= 0x8b,

	/*
	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
	 * Graphics voltage island aperture 1;
	 * the default non-graphics QWORD index is 0xF0 - 0xFF inclusive.
	 */

	/* sDMA engines */
	AMDGPU_DOORBELL64_sDMA_ENGINE0		= 0xF0,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0	= 0xF1,
	AMDGPU_DOORBELL64_sDMA_ENGINE1		= 0xF2,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1	= 0xF3,

	/* Interrupt handler */
	AMDGPU_DOORBELL64_IH			= 0xF4,	/* For legacy interrupt ring buffer */
	AMDGPU_DOORBELL64_IH_RING1		= 0xF5,	/* For page migration request log */
	AMDGPU_DOORBELL64_IH_RING2		= 0xF6,	/* For page migration translation/invalidation log */

	/* VCN engines use 32-bit doorbells */
	AMDGPU_DOORBELL64_VCN0_1		= 0xF8,	/* lower 32 bits for VCN0, upper 32 bits for VCN1 */
	AMDGPU_DOORBELL64_VCN2_3		= 0xF9,
	AMDGPU_DOORBELL64_VCN4_5		= 0xFA,
	AMDGPU_DOORBELL64_VCN6_7		= 0xFB,

	/* The UVD/VCE doorbell assignments overlap with VCN as they are
	 * mutually exclusive.  A VCE engine's doorbell is 32 bit, so two
	 * VCE rings share one QWORD.
	 */
	AMDGPU_DOORBELL64_UVD_RING0_1		= 0xF8,
	AMDGPU_DOORBELL64_UVD_RING2_3		= 0xF9,
	AMDGPU_DOORBELL64_UVD_RING4_5		= 0xFA,
	AMDGPU_DOORBELL64_UVD_RING6_7		= 0xFB,

	AMDGPU_DOORBELL64_VCE_RING0_1		= 0xFC,
	AMDGPU_DOORBELL64_VCE_RING2_3		= 0xFD,
	AMDGPU_DOORBELL64_VCE_RING4_5		= 0xFE,
	AMDGPU_DOORBELL64_VCE_RING6_7		= 0xFF,

	AMDGPU_DOORBELL64_MAX_ASSIGNMENT	= 0xFF,
	AMDGPU_DOORBELL64_INVALID		= 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work		flip_work;
	struct work_struct		unpin_work;
	struct amdgpu_device		*adev;
	int				crtc_id;
	u32				target_vblank;
	uint64_t			base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo		*old_abo;
	struct dma_fence		*excl;
	unsigned			shared_count;
	struct dma_fence		**shared;
	struct dma_fence_cb		cb;
	bool				async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo	*sa_bo;
	uint32_t		length_dw;
	uint64_t		gpu_addr;
	uint32_t		*ptr;
	uint32_t		flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

/*
 * Queue manager
 */
struct amdgpu_queue_mapper {
	int		hw_ip;
	struct mutex	lock;
	/* protected by lock */
	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
};

struct amdgpu_queue_mgr {
	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
};

int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring);
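
/*
 * Typical use (sketch only, e.g. from the CS ioctl): translate a
 * user-visible (hw_ip, instance, ring) triple into a kernel ring:
 *
 *	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, AMDGPU_HW_IP_GFX,
 *				 0, ring_idx, &out_ring);
 */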

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t		sequence;
	struct dma_fence	**fences;
	struct drm_sched_entity	entity;
};

struct amdgpu_ctx {
	struct kref		refcount;
	struct amdgpu_device	*adev;
	struct amdgpu_queue_mgr queue_mgr;
	unsigned		reset_counter;
	unsigned		reset_counter_query;
	uint32_t		vram_lost_counter;
	spinlock_t		ring_lock;
	struct dma_fence	**fences;
	struct amdgpu_ctx_ring	rings[AMDGPU_MAX_RINGS];
	bool			preamble_presented;
	enum drm_sched_priority init_priority;
	enum drm_sched_priority override_priority;
	struct mutex		lock;
	atomic_t		guilty;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device	*adev;
	struct mutex		lock;
	/* protected by lock */
	struct idr		ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);

int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
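
/*
 * Contexts are reference counted; a lookup must be balanced with a put
 * (illustration only):
 *
 *	struct amdgpu_ctx *ctx = amdgpu_ctx_get(fpriv, handle);
 *
 *	if (ctx) {
 *		... use ctx ...
 *		amdgpu_ctx_put(ctx);
 *	}
 */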

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm	vm;
	struct amdgpu_bo_va	*prt_va;
	struct amdgpu_bo_va	*csa_va;
	struct mutex		bo_list_lock;
	struct idr		bo_list_handles;
	struct amdgpu_ctx_mgr	ctx_mgr;
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t	chunk_id;
	uint32_t	length_dw;
	void		*kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device	*adev;
	struct drm_file		*filp;
	struct amdgpu_ctx	*ctx;

	/* chunks */
	unsigned		nchunks;
	struct amdgpu_cs_chunk	*chunks;

	/* scheduler job object */
	struct amdgpu_job	*job;
	struct amdgpu_ring	*ring;

	/* buffer objects */
	struct ww_acquire_ctx		ticket;
	struct amdgpu_bo_list		*bo_list;
	struct amdgpu_mn		*mn;
	struct amdgpu_bo_list_entry	vm_pd;
	struct list_head		validated;
	struct dma_fence		*fence;
	uint64_t			bytes_moved_threshold;
	uint64_t			bytes_moved_vis_threshold;
	uint64_t			bytes_moved;
	uint64_t			bytes_moved_vis;
	struct amdgpu_bo_list_entry	*evictable;

	/* user fence */
	struct amdgpu_bo_list_entry	uf_entry;

	unsigned			num_post_dep_syncobjs;
	struct drm_syncobj		**post_dep_syncobjs;
};

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}
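
/*
 * e.g. rewriting one dword of the first IB while patching a command
 * stream (values made up for illustration):
 *
 *	uint32_t header = amdgpu_get_ib_value(p, 0, 0);
 *
 *	amdgpu_set_ib_value(p, 0, 0, header | 0x1);
 */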

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo	*wb_obj;
	volatile uint32_t	*wb;
	uint64_t		gpu_addr;
	u32			num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long		used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
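
/*
 * Illustrative writeback usage (sketch only): reserve a slot, hand its
 * GPU address to an engine, read the value back through the CPU
 * mapping, then release the slot:
 *
 *	u32 wb;
 *
 *	if (!amdgpu_device_wb_get(adev, &wb)) {
 *		uint64_t gpu_addr = adev->wb.gpu_addr + (wb * 4);
 *		uint32_t val = adev->wb.wb[wb];
 *
 *		amdgpu_device_wb_free(adev, wb);
 *	}
 */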

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	bool			burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src	trap_irq_1;
#endif
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	int			num_instances;
	uint32_t		srbm_soft_reset;
};

/*
 * Firmware
 */
enum amdgpu_firmware_load_type {
	AMDGPU_FW_LOAD_DIRECT = 0,
	AMDGPU_FW_LOAD_SMU,
	AMDGPU_FW_LOAD_PSP,
};

struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	enum amdgpu_firmware_load_type load_type;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
	unsigned int max_ucodes;
	/* firmware is loaded by psp instead of smu from vega10 onwards */
	const struct amdgpu_psp_funcs *funcs;
	struct amdgpu_bo *rbuf;
	struct mutex mutex;

	/* gpu info firmware data pointer */
	const struct firmware *gpu_info_fw;

	void *fw_buf_ptr;
	uint64_t fw_buf_mc;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);


/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo	*robj;
	volatile uint32_t	*ptr;
	u64			gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
	u64 start_offset;
	u64 size;
	struct amdgpu_bo *reserved_bo;
	void *va;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);

/*
 * amdgpu nbio functions
 */
struct nbio_hdp_flush_reg {
	u32 ref_and_mask_cp0;
	u32 ref_and_mask_cp1;
	u32 ref_and_mask_cp2;
	u32 ref_and_mask_cp3;
	u32 ref_and_mask_cp4;
	u32 ref_and_mask_cp5;
	u32 ref_and_mask_cp6;
	u32 ref_and_mask_cp7;
	u32 ref_and_mask_cp8;
	u32 ref_and_mask_cp9;
	u32 ref_and_mask_sdma0;
	u32 ref_and_mask_sdma1;
};

struct amdgpu_nbio_funcs {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
	u32 (*get_rev_id)(struct amdgpu_device *adev);
	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	u32 (*get_memsize)(struct amdgpu_device *adev);
	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index);
	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
					 bool enable);
	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
						  bool enable);
	void (*ih_doorbell_range)(struct amdgpu_device *adev,
				  bool use_doorbell, int doorbell_index);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
						bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*detect_hw_virt)(struct amdgpu_device *adev);
};

struct amdgpu_df_funcs {
	void (*init)(struct amdgpu_device *adev);
	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
				      bool enable);
	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
					    bool enable);
};
/* Define the HW IP blocks that will be used in the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	CLK_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE	6

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
	uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
struct amdgpu_device {
	struct device			*dev;
	struct drm_device		*ddev;
	struct pci_dev			*pdev;

#ifdef CONFIG_DRM_AMD_ACP
	struct amdgpu_acp		acp;
#endif

	/* ASIC */
	enum amd_asic_type		asic_type;
	uint32_t			family;
	uint32_t			rev_id;
	uint32_t			external_rev_id;
	unsigned long			flags;
	int				usec_timeout;
	const struct amdgpu_asic_funcs	*asic_funcs;
	bool				shutdown;
	bool				need_dma32;
	bool				need_swiotlb;
	bool				accel_working;
	struct work_struct		reset_work;
	struct notifier_block		acpi_nb;
	struct amdgpu_i2c_chan		*i2c_bus[AMDGPU_MAX_I2C_BUS];
	struct amdgpu_debugfs		debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
	unsigned			debugfs_count;
#if defined(CONFIG_DEBUG_FS)
	struct dentry			*debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
	struct amdgpu_atif		*atif;
	struct amdgpu_atcs		atcs;
	struct mutex			srbm_mutex;
	/* GRBM index mutex. Protects concurrent access to GRBM index */
	struct mutex			grbm_idx_mutex;
	struct dev_pm_domain		vga_pm_domain;
	bool				have_disp_power_ref;

	/* BIOS */
	bool				is_atom_fw;
	uint8_t				*bios;
	uint32_t			bios_size;
	struct amdgpu_bo		*stolen_vga_memory;
	uint32_t			bios_scratch_reg_offset;
	uint32_t			bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];

	/* Register/doorbell mmio */
	resource_size_t			rmmio_base;
	resource_size_t			rmmio_size;
	void __iomem			*rmmio;
	/* protects concurrent MM_INDEX/DATA based register access */
	spinlock_t mmio_idx_lock;
	/* protects concurrent SMC based register access */
	spinlock_t smc_idx_lock;
	amdgpu_rreg_t			smc_rreg;
	amdgpu_wreg_t			smc_wreg;
	/* protects concurrent PCIE register access */
	spinlock_t pcie_idx_lock;
	amdgpu_rreg_t			pcie_rreg;
	amdgpu_wreg_t			pcie_wreg;
	amdgpu_rreg_t			pciep_rreg;
	amdgpu_wreg_t			pciep_wreg;
	/* protects concurrent UVD register access */
	spinlock_t uvd_ctx_idx_lock;
	amdgpu_rreg_t			uvd_ctx_rreg;
	amdgpu_wreg_t			uvd_ctx_wreg;
	/* protects concurrent DIDT register access */
	spinlock_t didt_idx_lock;
	amdgpu_rreg_t			didt_rreg;
	amdgpu_wreg_t			didt_wreg;
	/* protects concurrent gc_cac register access */
	spinlock_t gc_cac_idx_lock;
	amdgpu_rreg_t			gc_cac_rreg;
	amdgpu_wreg_t			gc_cac_wreg;
	/* protects concurrent se_cac register access */
	spinlock_t se_cac_idx_lock;
	amdgpu_rreg_t			se_cac_rreg;
	amdgpu_wreg_t			se_cac_wreg;
	/* protects concurrent ENDPOINT (audio) register access */
	spinlock_t audio_endpt_idx_lock;
	amdgpu_block_rreg_t		audio_endpt_rreg;
	amdgpu_block_wreg_t		audio_endpt_wreg;
	void __iomem			*rio_mem;
	resource_size_t			rio_mem_size;
	struct amdgpu_doorbell		doorbell;

	/* clock/pll info */
	struct amdgpu_clock		clock;

	/* MC */
	struct amdgpu_gmc		gmc;
	struct amdgpu_gart		gart;
	dma_addr_t			dummy_page_addr;
	struct amdgpu_vm_manager	vm_manager;
	struct amdgpu_vmhub		vmhub[AMDGPU_MAX_VMHUBS];

	/* memory management */
	struct amdgpu_mman		mman;
	struct amdgpu_vram_scratch	vram_scratch;
	struct amdgpu_wb		wb;
	atomic64_t			num_bytes_moved;
	atomic64_t			num_evictions;
	atomic64_t			num_vram_cpu_page_faults;
	atomic_t			gpu_reset_counter;
	atomic_t			vram_lost_counter;

	/* data for buffer migration throttling */
	struct {
		spinlock_t		lock;
		s64			last_update_us;
		s64			accum_us; /* accumulated microseconds */
		s64			accum_us_vis; /* for visible VRAM */
		u32			log2_max_MBps;
	} mm_stats;

	/* display */
	bool				enable_virtual_display;
	struct amdgpu_mode_info		mode_info;
	/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
	struct work_struct		hotplug_work;
	struct amdgpu_irq_src		crtc_irq;
	struct amdgpu_irq_src		pageflip_irq;
	struct amdgpu_irq_src		hpd_irq;

	/* rings */
	u64				fence_context;
	unsigned			num_rings;
	struct amdgpu_ring		*rings[AMDGPU_MAX_RINGS];
	bool				ib_pool_ready;
	struct amdgpu_sa_manager	ring_tmp_bo;

	/* interrupts */
	struct amdgpu_irq		irq;

	/* powerplay */
	struct amd_powerplay		powerplay;
	bool				pp_force_state_enabled;

	/* dpm */
	struct amdgpu_pm		pm;
	u32				cg_flags;
	u32				pg_flags;

	/* amdgpu smumgr */
	struct amdgpu_smumgr		smu;

	/* gfx */
	struct amdgpu_gfx		gfx;

	/* sdma */
	struct amdgpu_sdma		sdma;

	/* uvd */
	struct amdgpu_uvd		uvd;

	/* vce */
	struct amdgpu_vce		vce;

	/* vcn */
	struct amdgpu_vcn		vcn;

	/* firmwares */
	struct amdgpu_firmware		firmware;

	/* PSP */
	struct psp_context		psp;

	/* GDS */
	struct amdgpu_gds		gds;

	/* display related functionality */
	struct amdgpu_display_manager	dm;

	struct amdgpu_ip_block		ip_blocks[AMDGPU_MAX_IP_NUM];
	int				num_ip_blocks;
	struct mutex			mn_lock;
	DECLARE_HASHTABLE(mn_hash, 7);

	/* tracking pinned memory */
	atomic64_t			vram_pin_size;
	atomic64_t			visible_pin_size;
	atomic64_t			gart_pin_size;

	/* amdkfd interface */
	struct kfd_dev			*kfd;

	/* soc15 register offset based on ip, instance and segment */
	uint32_t			*reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];

	const struct amdgpu_nbio_funcs	*nbio_funcs;
	const struct amdgpu_df_funcs	*df_funcs;

	/* delayed work_func for deferring clockgating during resume */
	struct delayed_work		late_init_work;

	struct amdgpu_virt		virt;
	/* firmware VRAM reservation */
	struct amdgpu_fw_vram_usage	fw_vram_usage;

	/* link all shadow bo */
	struct list_head		shadow_list;
	struct mutex			shadow_list_lock;
	/* keep an lru list of rings by HW IP */
	struct list_head		ring_lru_list;
	spinlock_t			ring_lru_list_lock;

	/* record that a hw reset is performed */
	bool				has_hw_reset;
	u8				reset_magic[AMDGPU_RESET_MAGIC_NUM];

	/* record last mm index being written through WREG32 */
	unsigned long			last_mm_index;
	bool				in_gpu_reset;
	struct mutex			lock_reset;
};

static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
{
	return container_of(bdev, struct amdgpu_device, mman.bdev);
}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX       (1<<0)
#define AMDGPU_REGS_NO_KIQ    (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32_PLL(reg);		\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32_PLL(reg, tmp_);				\
	} while (0)
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))
97b2e202 AD |
1381 | /* |
1382 | * BIOS helpers. | |
1383 | */ | |
1384 | #define RBIOS8(i) (adev->bios[i]) | |
1385 | #define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8)) | |
1386 | #define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16)) | |
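/*
 * The RBIOS helpers assemble little-endian values from the raw byte image
 * at adev->bios. Sketch (0x18 is the standard offset of the PCI data
 * structure pointer in a PCI option ROM header):
 *
 *	u16 pcir = RBIOS16(0x18);
 *	u32 sig = RBIOS32(pcir);	// should read back as "PCIR"
 */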
1387 | ||
c113ea1c AD |
1388 | static inline struct amdgpu_sdma_instance * |
1389 | amdgpu_get_sdma_instance(struct amdgpu_ring *ring) | |
4b2f7e2c JZ |
1390 | { |
1391 | struct amdgpu_device *adev = ring->adev; | |
1392 | int i; | |
1393 | ||
c113ea1c AD |
1394 | for (i = 0; i < adev->sdma.num_instances; i++) |
1395 | if (&adev->sdma.instance[i].ring == ring) | |
4b2f7e2c JZ |
1396 | break; |
1397 | ||
1398 | if (i < adev->sdma.num_instances) | |
c113ea1c | 1399 | return &adev->sdma.instance[i]; |
4b2f7e2c JZ |
1400 | else |
1401 | return NULL; | |
1402 | } | |
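/*
 * Callers are typically SDMA ring functions, e.g. a pad_ib implementation
 * picking the instance-specific burst NOP (sketch; burst_nop is a field of
 * struct amdgpu_sdma_instance):
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (!sdma)
 *		return;
 *	if (sdma->burst_nop)
 *		... pad with one burst NOP instead of single NOPs ...
 */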
1403 | ||
97b2e202 AD |
1404 | /* |
1405 | * ASICs macro. | |
1406 | */ | |
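/*
 * These wrappers dispatch through per-IP function tables that the
 * ASIC-specific setup code (e.g. vi.c, soc15.c) installs during early init,
 * keeping common code chip-agnostic. Call-site sketch:
 *
 *	if (amdgpu_asic_need_full_reset(adev))
 *		r = amdgpu_asic_reset(adev);
 */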
1407 | #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) | |
1408 | #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) | |
97b2e202 AD |
1409 | #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) |
1410 | #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) | |
1411 | #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) | |
841686df MB |
1412 | #define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev)) |
1413 | #define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l)) | |
1414 | #define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev)) | |
97b2e202 | 1415 | #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) |
7946b878 | 1416 | #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) |
97b2e202 | 1417 | #define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
bbf282d8 | 1418 | #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) |
69882565 CK |
1419 | #define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r)) |
1420 | #define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r)) | |
69070690 | 1421 | #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) |
132f34e4 | 1422 | #define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid)) |
c633c00b CK |
1423 | #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) |
1424 | #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) | |
132f34e4 CK |
1425 | #define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags)) |
1426 | #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) | |
1427 | #define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags)) | |
97b2e202 | 1428 | #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) |
de9ea7bd | 1429 | #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) |
97b2e202 | 1430 | #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) |
97b2e202 | 1431 | #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) |
9d248517 | 1432 | #define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib))) |
97b2e202 | 1433 | #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) |
bbec97aa | 1434 | #define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t)) |
97b2e202 AD |
1435 | #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) |
1436 | #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) | |
1437 | #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) | |
c4f46f22 | 1438 | #define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c)) |
b8c7b39e | 1439 | #define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r)) |
c633c00b | 1440 | #define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr)) |
890ee23f | 1441 | #define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags)) |
97b2e202 | 1442 | #define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as)) |
d2edb07b | 1443 | #define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r)) |
c2167a65 | 1444 | #define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r)) |
753ad49c | 1445 | #define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d)) |
b6091c12 XY |
1446 | #define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d)) |
1447 | #define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v)) | |
c1e877da | 1448 | #define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m)) |
82853638 | 1449 | #define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m)) |
3b4d68e9 | 1450 | #define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b)) |
9e5d5309 | 1451 | #define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib))) |
03ccf481 ML |
1452 | #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r)) |
1453 | #define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o)) | |
97b2e202 | 1454 | #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc)) |
97b2e202 AD |
1455 | #define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l)) |
1456 | #define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e)) | |
1457 | #define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h)) | |
1458 | #define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h)) | |
1459 | #define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev)) | |
1460 | #define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev)) | |
cb9e59d7 | 1461 | #define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async)) |
97b2e202 AD |
1462 | #define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos)) |
1463 | #define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c)) | |
1464 | #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r)) | |
c7ae72c0 | 1465 | #define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b)) |
6e7a3840 | 1466 | #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) |
97b2e202 | 1467 | #define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a)) |
0e5ca0d1 | 1468 | #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) |
97b2e202 AD |
1469 | |
1470 | /* Common functions */ | |
5f152b5e AD |
1471 | int amdgpu_device_gpu_recover(struct amdgpu_device *adev, |
1472 | struct amdgpu_job *job, bool force); | |
8111c387 | 1473 | void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); |
39c640c0 | 1474 | bool amdgpu_device_need_post(struct amdgpu_device *adev); |
166140fb | 1475 | void amdgpu_display_update_priority(struct amdgpu_device *adev); |
d5fc5e82 | 1476 | |
00f06b24 JB |
1477 | void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, |
1478 | u64 num_vis_bytes); | |
2543e28a | 1479 | void amdgpu_device_vram_location(struct amdgpu_device *adev, |
770d13b1 | 1480 | struct amdgpu_gmc *mc, u64 base); |
2543e28a | 1481 | void amdgpu_device_gart_location(struct amdgpu_device *adev, |
770d13b1 | 1482 | struct amdgpu_gmc *mc); |
d6895ad3 | 1483 | int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev); |
9c3f2b54 | 1484 | void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, |
97b2e202 AD |
1485 | const u32 *registers, |
1486 | const u32 array_size); | |
1487 | ||
1488 | bool amdgpu_device_is_px(struct drm_device *dev); | |
d23ee13f | 1489 | void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); |
97b2e202 AD |
1490 | /* atpx handler */ |
1491 | #if defined(CONFIG_VGA_SWITCHEROO) | |
1492 | void amdgpu_register_atpx_handler(void); | |
1493 | void amdgpu_unregister_atpx_handler(void); | |
a78fe133 | 1494 | bool amdgpu_has_atpx_dgpu_power_cntl(void); |
2f5af82e | 1495 | bool amdgpu_is_atpx_hybrid(void); |
efc83cf4 | 1496 | bool amdgpu_atpx_dgpu_req_power_for_displays(void); |
714f88e0 | 1497 | bool amdgpu_has_atpx(void); |
97b2e202 AD |
1498 | #else |
1499 | static inline void amdgpu_register_atpx_handler(void) {} | |
1500 | static inline void amdgpu_unregister_atpx_handler(void) {} | |
a78fe133 | 1501 | static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } |
2f5af82e | 1502 | static inline bool amdgpu_is_atpx_hybrid(void) { return false; } |
efc83cf4 | 1503 | static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } |
714f88e0 | 1504 | static inline bool amdgpu_has_atpx(void) { return false; } |
97b2e202 AD |
1505 | #endif |
1506 | ||
24aeefcd LP |
1507 | #if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) |
1508 | void *amdgpu_atpx_get_dhandle(void); | |
1509 | #else | |
1510 | static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } | |
1511 | #endif | |
1512 | ||
97b2e202 AD |
1513 | /* |
1514 | * KMS | |
1515 | */ | |
1516 | extern const struct drm_ioctl_desc amdgpu_ioctls_kms[]; | |
f498d9ed | 1517 | extern const int amdgpu_max_kms_ioctl; |
97b2e202 AD |
1518 | |
1519 | int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags); | |
11b3c20b | 1520 | void amdgpu_driver_unload_kms(struct drm_device *dev); |
97b2e202 AD |
1521 | void amdgpu_driver_lastclose_kms(struct drm_device *dev); |
1522 | int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); | |
1523 | void amdgpu_driver_postclose_kms(struct drm_device *dev, | |
1524 | struct drm_file *file_priv); | |
cdd61df6 | 1525 | int amdgpu_device_ip_suspend(struct amdgpu_device *adev); |
810ddc3a AD |
1526 | int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon); |
1527 | int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon); | |
88e72717 TR |
1528 | u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe); |
1529 | int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe); | |
1530 | void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe); | |
97b2e202 AD |
1531 | long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, |
1532 | unsigned long arg); | |
1533 | ||
97b2e202 AD |
1534 | /* |
1535 | * functions used by amdgpu_encoder.c | |
1536 | */ | |
1537 | struct amdgpu_afmt_acr { | |
1538 | u32 clock; | |
1539 | ||
1540 | int n_32khz; | |
1541 | int cts_32khz; | |
1542 | ||
1543 | int n_44_1khz; | |
1544 | int cts_44_1khz; | |
1545 | ||
1546 | int n_48khz; | |
1547 | int cts_48khz; | |
1548 | ||
1549 | }; | |
1550 | ||
1551 | struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); | |
1552 | ||
1553 | /* amdgpu_acpi.c */ | |
1554 | #if defined(CONFIG_ACPI) | |
1555 | int amdgpu_acpi_init(struct amdgpu_device *adev); | |
1556 | void amdgpu_acpi_fini(struct amdgpu_device *adev); | |
1557 | bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev); | |
1558 | int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, | |
1559 | u8 perf_req, bool advertise); | |
1560 | int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); | |
1561 | #else | |
1562 | static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } | |
1563 | static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } | |
1564 | #endif | |
1565 | ||
9cca0b8e CK |
1566 | int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, |
1567 | uint64_t addr, struct amdgpu_bo **bo, | |
1568 | struct amdgpu_bo_va_mapping **mapping); | |
97b2e202 | 1569 | |
4562236b HW |
1570 | #if defined(CONFIG_DRM_AMD_DC) |
1571 | int amdgpu_dm_display_resume(struct amdgpu_device *adev); | |
1572 | #else | |
1573 | static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; } | |
1574 | #endif | |
1575 | ||
97b2e202 | 1576 | #include "amdgpu_object.h" |
97b2e202 | 1577 | #endif |