/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#ifndef __AMDGPU_H__
#define __AMDGPU_H__

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/hashtable.h>
#include <linux/dma-fence.h>

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_execbuf_util.h>

#include <drm/drmP.h>
#include <drm/drm_gem.h>
#include <drm/amdgpu_drm.h>
#include <drm/gpu_scheduler.h>

#include <kgd_kfd_interface.h>
#include "dm_pp_interface.h"
#include "kgd_pp_interface.h"

#include "amd_shared.h"
#include "amdgpu_mode.h"
#include "amdgpu_ih.h"
#include "amdgpu_irq.h"
#include "amdgpu_ucode.h"
#include "amdgpu_ttm.h"
#include "amdgpu_psp.h"
#include "amdgpu_gds.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_vm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_acp.h"
#include "amdgpu_uvd.h"
#include "amdgpu_vce.h"
#include "amdgpu_vcn.h"
#include "amdgpu_mn.h"
#include "amdgpu_gmc.h"
#include "amdgpu_dm.h"
#include "amdgpu_virt.h"
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"

/*
 * Module parameters.
 */
extern int amdgpu_modeset;
extern int amdgpu_vram_limit;
extern int amdgpu_vis_vram_limit;
extern int amdgpu_gart_size;
extern int amdgpu_gtt_size;
extern int amdgpu_moverate;
extern int amdgpu_benchmarking;
extern int amdgpu_testing;
extern int amdgpu_audio;
extern int amdgpu_disp_priority;
extern int amdgpu_hw_i2c;
extern int amdgpu_pcie_gen2;
extern int amdgpu_msi;
extern int amdgpu_lockup_timeout;
extern int amdgpu_dpm;
extern int amdgpu_fw_load_type;
extern int amdgpu_aspm;
extern int amdgpu_runtime_pm;
extern uint amdgpu_ip_block_mask;
extern int amdgpu_bapm;
extern int amdgpu_deep_color;
extern int amdgpu_vm_size;
extern int amdgpu_vm_block_size;
extern int amdgpu_vm_fragment_size;
extern int amdgpu_vm_fault_stop;
extern int amdgpu_vm_debug;
extern int amdgpu_vm_update_mode;
extern int amdgpu_dc;
extern int amdgpu_dc_log;
extern int amdgpu_sched_jobs;
extern int amdgpu_sched_hw_submission;
extern int amdgpu_no_evict;
extern int amdgpu_direct_gma_size;
extern uint amdgpu_pcie_gen_cap;
extern uint amdgpu_pcie_lane_cap;
extern uint amdgpu_cg_mask;
extern uint amdgpu_pg_mask;
extern uint amdgpu_sdma_phase_quantum;
extern char *amdgpu_disable_cu;
extern char *amdgpu_virtual_display;
extern uint amdgpu_pp_feature_mask;
extern int amdgpu_vram_page_split;
extern int amdgpu_ngg;
extern int amdgpu_prim_buf_per_se;
extern int amdgpu_pos_buf_per_se;
extern int amdgpu_cntl_sb_buf_per_se;
extern int amdgpu_param_buf_per_se;
extern int amdgpu_job_hang_limit;
extern int amdgpu_lbpw;
extern int amdgpu_compute_multipipe;
extern int amdgpu_gpu_recovery;
extern int amdgpu_emu_mode;
extern uint amdgpu_smu_memory_pool_size;

#ifdef CONFIG_DRM_AMDGPU_SI
extern int amdgpu_si_support;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
extern int amdgpu_cik_support;
#endif

#define AMDGPU_SG_THRESHOLD (256*1024*1024)
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
#define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2)
/* AMDGPU_IB_POOL_SIZE must be a power of 2 */
#define AMDGPU_IB_POOL_SIZE 16
#define AMDGPU_DEBUGFS_MAX_COMPONENTS 32
#define AMDGPUFB_CONN_LIMIT 4
#define AMDGPU_BIOS_NUM_SCRATCH 16

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES 2

/* hard reset data */
#define AMDGPU_ASIC_RESET_DATA 0x39d5e86b

/* reset flags */
#define AMDGPU_RESET_GFX (1 << 0)
#define AMDGPU_RESET_COMPUTE (1 << 1)
#define AMDGPU_RESET_DMA (1 << 2)
#define AMDGPU_RESET_CP (1 << 3)
#define AMDGPU_RESET_GRBM (1 << 4)
#define AMDGPU_RESET_DMA1 (1 << 5)
#define AMDGPU_RESET_RLC (1 << 6)
#define AMDGPU_RESET_SEM (1 << 7)
#define AMDGPU_RESET_IH (1 << 8)
#define AMDGPU_RESET_VMC (1 << 9)
#define AMDGPU_RESET_MC (1 << 10)
#define AMDGPU_RESET_DISPLAY (1 << 11)
#define AMDGPU_RESET_UVD (1 << 12)
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)

/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
#define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L
#define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L
#define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L

/* max cursor sizes (in pixels) */
#define CIK_CURSOR_WIDTH 128
#define CIK_CURSOR_HEIGHT 128

struct amdgpu_device;
struct amdgpu_ib;
struct amdgpu_cs_parser;
struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;

enum amdgpu_cp_irq {
	AMDGPU_CP_IRQ_GFX_EOP = 0,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP,
	AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP,

	AMDGPU_CP_IRQ_LAST
};

enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_TRAP0 = 0,
	AMDGPU_SDMA_IRQ_TRAP1,

	AMDGPU_SDMA_IRQ_LAST
};

enum amdgpu_thermal_irq {
	AMDGPU_THERMAL_IRQ_LOW_TO_HIGH = 0,
	AMDGPU_THERMAL_IRQ_HIGH_TO_LOW,

	AMDGPU_THERMAL_IRQ_LAST
};

enum amdgpu_kiq_irq {
	AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
	AMDGPU_CP_KIQ_IRQ_LAST
};

int amdgpu_device_ip_set_clockgating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_clockgating_state state);
int amdgpu_device_ip_set_powergating_state(void *dev,
					   enum amd_ip_block_type block_type,
					   enum amd_powergating_state state);
void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags);
int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev,
				   enum amd_ip_block_type block_type);
bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev,
			      enum amd_ip_block_type block_type);

#define AMDGPU_MAX_IP_NUM 16

struct amdgpu_ip_block_status {
	bool valid;
	bool sw;
	bool hw;
	bool late_initialized;
	bool hang;
};

struct amdgpu_ip_block_version {
	const enum amd_ip_block_type type;
	const u32 major;
	const u32 minor;
	const u32 rev;
	const struct amd_ip_funcs *funcs;
};

struct amdgpu_ip_block {
	struct amdgpu_ip_block_status status;
	const struct amdgpu_ip_block_version *version;
};

int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev,
				       enum amd_ip_block_type type,
				       u32 major, u32 minor);

struct amdgpu_ip_block *
amdgpu_device_ip_get_ip_block(struct amdgpu_device *adev,
			      enum amd_ip_block_type type);

int amdgpu_device_ip_block_add(struct amdgpu_device *adev,
			       const struct amdgpu_ip_block_version *ip_block_version);
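
/*
 * Illustrative usage (an editorial sketch, not code from this file): each
 * ASIC's early-init code registers its IP blocks in order, e.g.
 *
 *	r = amdgpu_device_ip_block_add(adev, &vi_common_ip_block);
 *	if (r)
 *		return r;
 *
 * where vi_common_ip_block is assumed to be a const struct
 * amdgpu_ip_block_version exported by that IP's implementation file.
 */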

/* provided by hw blocks that can move/clear data. e.g., gfx or sdma */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to transfer */
				 uint32_t byte_count);

	/* maximum bytes in a single operation */
	uint32_t fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of bytes to fill */
				 uint32_t byte_count);
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

/* provided by the ih block */
struct amdgpu_ih_funcs {
	/* ring read/write ptr handling, called from interrupt context */
	u32 (*get_wptr)(struct amdgpu_device *adev);
	bool (*prescreen_iv)(struct amdgpu_device *adev);
	void (*decode_iv)(struct amdgpu_device *adev,
			  struct amdgpu_iv_entry *entry);
	void (*set_rptr)(struct amdgpu_device *adev);
};

/*
 * BIOS.
 */
bool amdgpu_get_bios(struct amdgpu_device *adev);
bool amdgpu_read_bios(struct amdgpu_device *adev);

/*
 * Clocks
 */

#define AMDGPU_MAX_PPLL 3

struct amdgpu_clock {
	struct amdgpu_pll ppll[AMDGPU_MAX_PPLL];
	struct amdgpu_pll spll;
	struct amdgpu_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
	uint32_t default_dispclk;
	uint32_t current_dispclk;
	uint32_t dp_extclk;
	uint32_t max_pixel_clock;
};

/*
 * GEM.
 */

#define AMDGPU_GEM_DOMAIN_MAX 0x3
#define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, gem_base)

void amdgpu_gem_object_free(struct drm_gem_object *obj);
int amdgpu_gem_object_open(struct drm_gem_object *obj,
			   struct drm_file *file_priv);
void amdgpu_gem_object_close(struct drm_gem_object *obj,
			     struct drm_file *file_priv);
unsigned long amdgpu_gem_timeout(uint64_t timeout_ns);
struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
struct drm_gem_object *
amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
				 struct dma_buf_attachment *attach,
				 struct sg_table *sg);
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags);
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);

/* sub-allocation manager; it has to be protected by another lock.
 * By design this is a helper for other parts of the driver
 * like the indirect buffer or semaphore, which both have their own
 * locking.
 *
 * The principle is simple: we keep a list of sub-allocations in offset
 * order (the first entry has offset == 0, the last entry has the
 * highest offset).
 *
 * When allocating a new object we first check if there is room at
 * the end: total_size - (last_object_offset + last_object_size) >=
 * alloc_size. If so we allocate the new object there.
 *
 * When there is not enough room at the end, we start waiting for
 * each sub object until we reach object_offset + object_size >=
 * alloc_size; this object then becomes the sub object we return.
 *
 * Alignment can't be bigger than page size.
 *
 * Holes are not considered for allocation to keep things simple.
 * The assumption is that there won't be holes (all objects use the
 * same alignment).
 */
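
/*
 * A minimal sketch of the "room at the end" check described above
 * (illustrative only; the real allocator lives in amdgpu_sa.c):
 *
 *	end = list_empty(&sa_manager->olist) ? 0 : last->eoffset;
 *	if (sa_manager->size - end >= ALIGN(alloc_size, align))
 *		new->soffset = end;	<-- place the new object at the end
 */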

#define AMDGPU_SA_NUM_FENCE_LISTS 32

struct amdgpu_sa_manager {
	wait_queue_head_t wq;
	struct amdgpu_bo *bo;
	struct list_head *hole;
	struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS];
	struct list_head olist;
	unsigned size;
	uint64_t gpu_addr;
	void *cpu_ptr;
	uint32_t domain;
	uint32_t align;
};

/* sub-allocation buffer */
struct amdgpu_sa_bo {
	struct list_head olist;
	struct list_head flist;
	struct amdgpu_sa_manager *manager;
	unsigned soffset;
	unsigned eoffset;
	struct dma_fence *fence;
};

/*
 * GEM objects.
 */
void amdgpu_gem_force_release(struct amdgpu_device *adev);
int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, enum ttm_bo_type type,
			     struct reservation_object *resv,
			     struct drm_gem_object **obj);

int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args);
int amdgpu_mode_dumb_mmap(struct drm_file *filp,
			  struct drm_device *dev,
			  uint32_t handle, uint64_t *offset_p);
int amdgpu_fence_slab_init(void);
void amdgpu_fence_slab_fini(void);

/*
 * GPU doorbell structures, functions & helpers
 */
typedef enum _AMDGPU_DOORBELL_ASSIGNMENT
{
	AMDGPU_DOORBELL_KIQ = 0x000,
	AMDGPU_DOORBELL_HIQ = 0x001,
	AMDGPU_DOORBELL_DIQ = 0x002,
	AMDGPU_DOORBELL_MEC_RING0 = 0x010,
	AMDGPU_DOORBELL_MEC_RING1 = 0x011,
	AMDGPU_DOORBELL_MEC_RING2 = 0x012,
	AMDGPU_DOORBELL_MEC_RING3 = 0x013,
	AMDGPU_DOORBELL_MEC_RING4 = 0x014,
	AMDGPU_DOORBELL_MEC_RING5 = 0x015,
	AMDGPU_DOORBELL_MEC_RING6 = 0x016,
	AMDGPU_DOORBELL_MEC_RING7 = 0x017,
	AMDGPU_DOORBELL_GFX_RING0 = 0x020,
	AMDGPU_DOORBELL_sDMA_ENGINE0 = 0x1E0,
	AMDGPU_DOORBELL_sDMA_ENGINE1 = 0x1E1,
	AMDGPU_DOORBELL_IH = 0x1E8,
	AMDGPU_DOORBELL_MAX_ASSIGNMENT = 0x3FF,
	AMDGPU_DOORBELL_INVALID = 0xFFFF
} AMDGPU_DOORBELL_ASSIGNMENT;

struct amdgpu_doorbell {
	/* doorbell mmio */
	resource_size_t base;
	resource_size_t size;
	u32 __iomem *ptr;
	u32 num_doorbells;	/* Number of doorbells actually reserved for amdgpu. */
};
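
/*
 * Illustrative sketch (not code from this file): ringing doorbell 'index'
 * is a single 32-bit write into the mapped doorbell BAR, roughly
 *
 *	writel(wptr, adev->doorbell.ptr + index);
 *
 * which is what the amdgpu_mm_wdoorbell() helper declared later in this
 * header does, after range-checking 'index' against num_doorbells.
 */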

/*
 * 64-bit doorbells; offsets are in QWORDs, occupying 2KB of doorbell space.
 */
typedef enum _AMDGPU_DOORBELL64_ASSIGNMENT
{
	/*
	 * All compute related doorbells: kiq, hiq, diq, traditional compute
	 * queues, and user queues should be located in a contiguous range so
	 * that programming CP_MEC_DOORBELL_RANGE_LOWER/UPPER can cover the
	 * whole range. Compute related doorbells are allocated from 0x00 to 0x8a.
	 */

	/* kernel scheduling */
	AMDGPU_DOORBELL64_KIQ = 0x00,

	/* HSA interface queue and debug queue */
	AMDGPU_DOORBELL64_HIQ = 0x01,
	AMDGPU_DOORBELL64_DIQ = 0x02,

	/* Compute engines */
	AMDGPU_DOORBELL64_MEC_RING0 = 0x03,
	AMDGPU_DOORBELL64_MEC_RING1 = 0x04,
	AMDGPU_DOORBELL64_MEC_RING2 = 0x05,
	AMDGPU_DOORBELL64_MEC_RING3 = 0x06,
	AMDGPU_DOORBELL64_MEC_RING4 = 0x07,
	AMDGPU_DOORBELL64_MEC_RING5 = 0x08,
	AMDGPU_DOORBELL64_MEC_RING6 = 0x09,
	AMDGPU_DOORBELL64_MEC_RING7 = 0x0a,

	/* User queue doorbell range (128 doorbells) */
	AMDGPU_DOORBELL64_USERQUEUE_START = 0x0b,
	AMDGPU_DOORBELL64_USERQUEUE_END = 0x8a,

	/* Graphics engine */
	AMDGPU_DOORBELL64_GFX_RING0 = 0x8b,

	/*
	 * Other graphics doorbells can be allocated here: from 0x8c to 0xef.
	 * Graphics voltage island aperture 1:
	 * the default non-graphics QWORD index range is 0xF0 - 0xFF inclusive.
	 */

	/* sDMA engines */
	AMDGPU_DOORBELL64_sDMA_ENGINE0 = 0xF0,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE0 = 0xF1,
	AMDGPU_DOORBELL64_sDMA_ENGINE1 = 0xF2,
	AMDGPU_DOORBELL64_sDMA_HI_PRI_ENGINE1 = 0xF3,

	/* Interrupt handler */
	AMDGPU_DOORBELL64_IH = 0xF4,		/* For legacy interrupt ring buffer */
	AMDGPU_DOORBELL64_IH_RING1 = 0xF5,	/* For page migration request log */
	AMDGPU_DOORBELL64_IH_RING2 = 0xF6,	/* For page migration translation/invalidation log */

	/* VCN engines use 32-bit doorbells */
	AMDGPU_DOORBELL64_VCN0_1 = 0xF8,	/* lower 32 bits for VCN0, upper 32 bits for VCN1 */
	AMDGPU_DOORBELL64_VCN2_3 = 0xF9,
	AMDGPU_DOORBELL64_VCN4_5 = 0xFA,
	AMDGPU_DOORBELL64_VCN6_7 = 0xFB,

	/* These overlap the doorbell assignment with VCN as they are mutually
	 * exclusive. VCE doorbells are 32-bit, so two VCE rings share one QWORD.
	 */
	AMDGPU_DOORBELL64_UVD_RING0_1 = 0xF8,
	AMDGPU_DOORBELL64_UVD_RING2_3 = 0xF9,
	AMDGPU_DOORBELL64_UVD_RING4_5 = 0xFA,
	AMDGPU_DOORBELL64_UVD_RING6_7 = 0xFB,

	AMDGPU_DOORBELL64_VCE_RING0_1 = 0xFC,
	AMDGPU_DOORBELL64_VCE_RING2_3 = 0xFD,
	AMDGPU_DOORBELL64_VCE_RING4_5 = 0xFE,
	AMDGPU_DOORBELL64_VCE_RING6_7 = 0xFF,

	AMDGPU_DOORBELL64_MAX_ASSIGNMENT = 0xFF,
	AMDGPU_DOORBELL64_INVALID = 0xFFFF
} AMDGPU_DOORBELL64_ASSIGNMENT;

/*
 * IRQS.
 */

struct amdgpu_flip_work {
	struct delayed_work flip_work;
	struct work_struct unpin_work;
	struct amdgpu_device *adev;
	int crtc_id;
	u32 target_vblank;
	uint64_t base;
	struct drm_pending_vblank_event *event;
	struct amdgpu_bo *old_abo;
	struct dma_fence *excl;
	unsigned shared_count;
	struct dma_fence **shared;
	struct dma_fence_cb cb;
	bool async;
};


/*
 * CP & rings.
 */

struct amdgpu_ib {
	struct amdgpu_sa_bo *sa_bo;
	uint32_t length_dw;
	uint64_t gpu_addr;
	uint32_t *ptr;
	uint32_t flags;
};

extern const struct drm_sched_backend_ops amdgpu_sched_ops;

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct drm_sched_entity *entity, void *owner,
		      struct dma_fence **f);

/*
 * Queue manager
 */
struct amdgpu_queue_mapper {
	int		hw_ip;
	struct mutex	lock;
	/* protected by lock */
	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
};

struct amdgpu_queue_mgr {
	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
};

int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
			  struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
			 struct amdgpu_queue_mgr *mgr,
			 u32 hw_ip, u32 instance, u32 ring,
			 struct amdgpu_ring **out_ring);

/*
 * context related structures
 */

struct amdgpu_ctx_ring {
	uint64_t sequence;
	struct dma_fence **fences;
	struct drm_sched_entity entity;
};

struct amdgpu_ctx {
	struct kref refcount;
	struct amdgpu_device *adev;
	struct amdgpu_queue_mgr queue_mgr;
	unsigned reset_counter;
	unsigned reset_counter_query;
	uint32_t vram_lost_counter;
	spinlock_t ring_lock;
	struct dma_fence **fences;
	struct amdgpu_ctx_ring rings[AMDGPU_MAX_RINGS];
	bool preamble_presented;
	enum drm_sched_priority init_priority;
	enum drm_sched_priority override_priority;
	struct mutex lock;
	atomic_t guilty;
};

struct amdgpu_ctx_mgr {
	struct amdgpu_device *adev;
	struct mutex lock;
	/* protected by lock */
	struct idr ctx_handles;
};

struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
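
/*
 * Note (editorial sketch): amdgpu_ctx_get() takes a reference on the
 * looked-up context and must be balanced by amdgpu_ctx_put(), e.g.
 *
 *	ctx = amdgpu_ctx_get(fpriv, id);
 *	if (!ctx)
 *		return -EINVAL;
 *	... use ctx ...
 *	amdgpu_ctx_put(ctx);
 */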

int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			 struct dma_fence *fence, uint64_t *seq);
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
				  enum drm_sched_priority priority);

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);

int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id);

void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);

/*
 * file private structure
 */

struct amdgpu_fpriv {
	struct amdgpu_vm vm;
	struct amdgpu_bo_va *prt_va;
	struct amdgpu_bo_va *csa_va;
	struct mutex bo_list_lock;
	struct idr bo_list_handles;
	struct amdgpu_ctx_mgr ctx_mgr;
};

/*
 * residency list
 */
struct amdgpu_bo_list_entry {
	struct amdgpu_bo *robj;
	struct ttm_validate_buffer tv;
	struct amdgpu_bo_va *bo_va;
	uint32_t priority;
	struct page **user_pages;
	int user_invalidated;
};

struct amdgpu_bo_list {
	struct mutex lock;
	struct rcu_head rhead;
	struct kref refcount;
	struct amdgpu_bo *gds_obj;
	struct amdgpu_bo *gws_obj;
	struct amdgpu_bo *oa_obj;
	unsigned first_userptr;
	unsigned num_entries;
	struct amdgpu_bo_list_entry *array;
};

struct amdgpu_bo_list *
amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
			     struct list_head *validated);
void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
void amdgpu_bo_list_free(struct amdgpu_bo_list *list);

/*
 * GFX stuff
 */
#include "clearstate_defs.h"

struct amdgpu_rlc_funcs {
	void (*enter_safe_mode)(struct amdgpu_device *adev);
	void (*exit_safe_mode)(struct amdgpu_device *adev);
};

struct amdgpu_rlc {
	/* for power gating */
	struct amdgpu_bo *save_restore_obj;
	uint64_t save_restore_gpu_addr;
	volatile uint32_t *sr_ptr;
	const u32 *reg_list;
	u32 reg_list_size;
	/* for clear state */
	struct amdgpu_bo *clear_state_obj;
	uint64_t clear_state_gpu_addr;
	volatile uint32_t *cs_ptr;
	const struct cs_section_def *cs_data;
	u32 clear_state_size;
	/* for cp tables */
	struct amdgpu_bo *cp_table_obj;
	uint64_t cp_table_gpu_addr;
	volatile uint32_t *cp_table_ptr;
	u32 cp_table_size;

	/* safe mode for updating CG/PG state */
	bool in_safe_mode;
	const struct amdgpu_rlc_funcs *funcs;

	/* for firmware data */
	u32 save_and_restore_offset;
	u32 clear_state_descriptor_offset;
	u32 avail_scratch_ram_locations;
	u32 reg_restore_list_size;
	u32 reg_list_format_start;
	u32 reg_list_format_separate_start;
	u32 starting_offsets_start;
	u32 reg_list_format_size_bytes;
	u32 reg_list_size_bytes;
	u32 reg_list_format_direct_reg_list_length;
	u32 save_restore_list_cntl_size_bytes;
	u32 save_restore_list_gpm_size_bytes;
	u32 save_restore_list_srm_size_bytes;

	u32 *register_list_format;
	u32 *register_restore;
	u8 *save_restore_list_cntl;
	u8 *save_restore_list_gpm;
	u8 *save_restore_list_srm;

	bool is_rlc_v2_1;
};

#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES

struct amdgpu_mec {
	struct amdgpu_bo *hpd_eop_obj;
	u64 hpd_eop_gpu_addr;
	struct amdgpu_bo *mec_fw_obj;
	u64 mec_fw_gpu_addr;
	u32 num_mec;
	u32 num_pipe_per_mec;
	u32 num_queue_per_pipe;
	void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS + 1];

	/* These are the resources for which amdgpu takes ownership */
	DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

struct amdgpu_kiq {
	u64 eop_gpu_addr;
	struct amdgpu_bo *eop_obj;
	spinlock_t ring_lock;
	struct amdgpu_ring ring;
	struct amdgpu_irq_src irq;
};

/*
 * GPU scratch registers structures, functions & helpers
 */
struct amdgpu_scratch {
	unsigned num_reg;
	uint32_t reg_base;
	uint32_t free_mask;
};

/*
 * GFX configurations
 */
#define AMDGPU_GFX_MAX_SE 4
#define AMDGPU_GFX_MAX_SH_PER_SE 2

struct amdgpu_rb_config {
	uint32_t rb_backend_disable;
	uint32_t user_rb_backend_disable;
	uint32_t raster_config;
	uint32_t raster_config_1;
};

struct gb_addr_config {
	uint16_t pipe_interleave_size;
	uint8_t num_pipes;
	uint8_t max_compress_frags;
	uint8_t num_banks;
	uint8_t num_se;
	uint8_t num_rb_per_se;
};

struct amdgpu_gfx_config {
	unsigned max_shader_engines;
	unsigned max_tile_pipes;
	unsigned max_cu_per_sh;
	unsigned max_sh_per_se;
	unsigned max_backends_per_se;
	unsigned max_texture_channel_caches;
	unsigned max_gprs;
	unsigned max_gs_threads;
	unsigned max_hw_contexts;
	unsigned sc_prim_fifo_size_frontend;
	unsigned sc_prim_fifo_size_backend;
	unsigned sc_hiz_tile_fifo_size;
	unsigned sc_earlyz_tile_fifo_size;

	unsigned num_tile_pipes;
	unsigned backend_enable_mask;
	unsigned mem_max_burst_length_bytes;
	unsigned mem_row_size_in_kb;
	unsigned shader_engine_tile_size;
	unsigned num_gpus;
	unsigned multi_gpu_tile_size;
	unsigned mc_arb_ramcfg;
	unsigned gb_addr_config;
	unsigned num_rbs;
	unsigned gs_vgt_table_depth;
	unsigned gs_prim_buffer_depth;

	uint32_t tile_mode_array[32];
	uint32_t macrotile_mode_array[16];

	struct gb_addr_config gb_addr_config_fields;
	struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE];

	/* gfx configure feature */
	uint32_t double_offchip_lds_buf;
	/* cached value of DB_DEBUG2 */
	uint32_t db_debug2;
};

struct amdgpu_cu_info {
	uint32_t simd_per_cu;
	uint32_t max_waves_per_simd;
	uint32_t wave_front_size;
	uint32_t max_scratch_slots_per_cu;
	uint32_t lds_size;

	/* total active CU number */
	uint32_t number;
	uint32_t ao_cu_mask;
	uint32_t ao_cu_bitmap[4][4];
	uint32_t bitmap[4][4];
};

struct amdgpu_gfx_funcs {
	/* get the gpu clock counter */
	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
	void (*read_wave_data)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields);
	void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst);
	void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst);
	void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue);
};

struct amdgpu_ngg_buf {
	struct amdgpu_bo *bo;
	uint64_t gpu_addr;
	uint32_t size;
	uint32_t bo_size;
};

enum {
	NGG_PRIM = 0,
	NGG_POS,
	NGG_CNTL,
	NGG_PARAM,
	NGG_BUF_MAX
};

struct amdgpu_ngg {
	struct amdgpu_ngg_buf buf[NGG_BUF_MAX];
	uint32_t gds_reserve_addr;
	uint32_t gds_reserve_size;
	bool init;
};

struct sq_work {
	struct work_struct work;
	unsigned ih_data;
};

struct amdgpu_gfx {
	struct mutex gpu_clock_mutex;
	struct amdgpu_gfx_config config;
	struct amdgpu_rlc rlc;
	struct amdgpu_mec mec;
	struct amdgpu_kiq kiq;
	struct amdgpu_scratch scratch;
	const struct firmware *me_fw;	/* ME firmware */
	uint32_t me_fw_version;
	const struct firmware *pfp_fw;	/* PFP firmware */
	uint32_t pfp_fw_version;
	const struct firmware *ce_fw;	/* CE firmware */
	uint32_t ce_fw_version;
	const struct firmware *rlc_fw;	/* RLC firmware */
	uint32_t rlc_fw_version;
	const struct firmware *mec_fw;	/* MEC firmware */
	uint32_t mec_fw_version;
	const struct firmware *mec2_fw;	/* MEC2 firmware */
	uint32_t mec2_fw_version;
	uint32_t me_feature_version;
	uint32_t ce_feature_version;
	uint32_t pfp_feature_version;
	uint32_t rlc_feature_version;
	uint32_t rlc_srlc_fw_version;
	uint32_t rlc_srlc_feature_version;
	uint32_t rlc_srlg_fw_version;
	uint32_t rlc_srlg_feature_version;
	uint32_t rlc_srls_fw_version;
	uint32_t rlc_srls_feature_version;
	uint32_t mec_feature_version;
	uint32_t mec2_feature_version;
	struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
	unsigned num_gfx_rings;
	struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
	unsigned num_compute_rings;
	struct amdgpu_irq_src eop_irq;
	struct amdgpu_irq_src priv_reg_irq;
	struct amdgpu_irq_src priv_inst_irq;
	struct amdgpu_irq_src cp_ecc_error_irq;
	struct amdgpu_irq_src sq_irq;
	struct sq_work sq_work;

	/* gfx status */
	uint32_t gfx_current_status;
	/* ce ram size */
	unsigned ce_ram_size;
	struct amdgpu_cu_info cu_info;
	const struct amdgpu_gfx_funcs *funcs;

	/* reset mask */
	uint32_t grbm_soft_reset;
	uint32_t srbm_soft_reset;
	/* s3/s4 mask */
	bool in_suspend;
	/* NGG */
	struct amdgpu_ngg ngg;

	/* pipe reservation */
	struct mutex pipe_reserve_mutex;
	DECLARE_BITMAP(pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
};

int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		  unsigned size, struct amdgpu_ib *ib);
void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib,
		    struct dma_fence *f);
int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
		       struct amdgpu_ib *ibs, struct amdgpu_job *job,
		       struct dma_fence **f);
int amdgpu_ib_pool_init(struct amdgpu_device *adev);
void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
int amdgpu_ib_ring_tests(struct amdgpu_device *adev);

/*
 * CS.
 */
struct amdgpu_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	void *kdata;
};

struct amdgpu_cs_parser {
	struct amdgpu_device *adev;
	struct drm_file *filp;
	struct amdgpu_ctx *ctx;

	/* chunks */
	unsigned nchunks;
	struct amdgpu_cs_chunk *chunks;

	/* scheduler job object */
	struct amdgpu_job *job;

	/* buffer objects */
	struct ww_acquire_ctx ticket;
	struct amdgpu_bo_list *bo_list;
	struct amdgpu_mn *mn;
	struct amdgpu_bo_list_entry vm_pd;
	struct list_head validated;
	struct dma_fence *fence;
	uint64_t bytes_moved_threshold;
	uint64_t bytes_moved_vis_threshold;
	uint64_t bytes_moved;
	uint64_t bytes_moved_vis;
	struct amdgpu_bo_list_entry *evictable;

	/* user fence */
	struct amdgpu_bo_list_entry uf_entry;

	unsigned num_post_dep_syncobjs;
	struct drm_syncobj **post_dep_syncobjs;
};

#define AMDGPU_PREAMBLE_IB_PRESENT		(1 << 0) /* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST	(1 << 1) /* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_HAVE_CTX_SWITCH			(1 << 2) /* bit set means context switch occurred */

struct amdgpu_job {
	struct drm_sched_job base;
	struct amdgpu_device *adev;
	struct amdgpu_vm *vm;
	struct amdgpu_ring *ring;
	struct amdgpu_sync sync;
	struct amdgpu_sync sched_sync;
	struct amdgpu_ib *ibs;
	struct dma_fence *fence;	/* the hw fence */
	uint32_t preamble_status;
	uint32_t num_ibs;
	void *owner;
	uint64_t fence_ctx;	/* the fence_context this job uses */
	bool vm_needs_flush;
	uint64_t vm_pd_addr;
	unsigned vmid;
	unsigned pasid;
	uint32_t gds_base, gds_size;
	uint32_t gws_base, gws_size;
	uint32_t oa_base, oa_size;
	uint32_t vram_lost_counter;

	/* user fence handling */
	uint64_t uf_addr;
	uint64_t uf_sequence;
};
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p,
				      uint32_t ib_idx, int idx)
{
	return p->job->ibs[ib_idx].ptr[idx];
}

static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p,
				       uint32_t ib_idx, int idx,
				       uint32_t value)
{
	p->job->ibs[ib_idx].ptr[idx] = value;
}
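
/*
 * Illustrative usage (a sketch, not code from this file): CS parsing code
 * uses these helpers to patch command stream dwords in place, e.g.
 *
 *	reg = amdgpu_get_ib_value(p, ib_idx, idx);
 *	amdgpu_set_ib_value(p, ib_idx, idx, reg | new_bits);
 *
 * where 'new_bits' is a hypothetical value computed by the caller.
 */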

/*
 * Writeback
 */
#define AMDGPU_MAX_WB 128	/* Reserve at most 128 WB slots for amdgpu-owned rings. */

struct amdgpu_wb {
	struct amdgpu_bo *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
	u32 num_wb;	/* Number of wb slots actually reserved for amdgpu. */
	unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)];
};

int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb);
void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb);
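
/*
 * Illustrative usage (a sketch, simplified): ring code grabs a slot with
 * amdgpu_device_wb_get(), points the GPU at the slot's address inside
 * wb_obj, and reads the CPU-side mirror through adev->wb.wb[]; the slot
 * is returned with amdgpu_device_wb_free() on teardown.
 */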

/*
 * SDMA
 */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware *fw;
	uint32_t fw_version;
	uint32_t feature_version;

	struct amdgpu_ring ring;
	bool burst_nop;
};

struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
#ifdef CONFIG_DRM_AMDGPU_SI
	/* SI DMA has a different trap irq number for the second engine */
	struct amdgpu_irq_src trap_irq_1;
#endif
	struct amdgpu_irq_src trap_irq;
	struct amdgpu_irq_src illegal_inst_irq;
	int num_instances;
	uint32_t srbm_soft_reset;
};

/*
 * Firmware
 */
enum amdgpu_firmware_load_type {
	AMDGPU_FW_LOAD_DIRECT = 0,
	AMDGPU_FW_LOAD_SMU,
	AMDGPU_FW_LOAD_PSP,
};

struct amdgpu_firmware {
	struct amdgpu_firmware_info ucode[AMDGPU_UCODE_ID_MAXIMUM];
	enum amdgpu_firmware_load_type load_type;
	struct amdgpu_bo *fw_buf;
	unsigned int fw_size;
	unsigned int max_ucodes;
	/* from Vega10 onward, firmware is loaded by the PSP instead of the SMU */
	const struct amdgpu_psp_funcs *funcs;
	struct amdgpu_bo *rbuf;
	struct mutex mutex;

	/* gpu info firmware data pointer */
	const struct firmware *gpu_info_fw;

	void *fw_buf_ptr;
	uint64_t fw_buf_mc;
};

/*
 * Benchmarking
 */
void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);


/*
 * Testing
 */
void amdgpu_test_moves(struct amdgpu_device *adev);


/*
 * amdgpu smumgr functions
 */
struct amdgpu_smumgr_funcs {
	int (*check_fw_load_finish)(struct amdgpu_device *adev, uint32_t fwtype);
	int (*request_smu_load_fw)(struct amdgpu_device *adev);
	int (*request_smu_specific_fw)(struct amdgpu_device *adev, uint32_t fwtype);
};

/*
 * amdgpu smumgr
 */
struct amdgpu_smumgr {
	struct amdgpu_bo *toc_buf;
	struct amdgpu_bo *smu_buf;
	/* asic priv smu data */
	void *priv;
	spinlock_t smu_lock;
	/* smumgr functions */
	const struct amdgpu_smumgr_funcs *smumgr_funcs;
	/* ucode loading complete flag */
	uint32_t fw_flags;
};

/*
 * ASIC specific register table accessible by UMD
 */
struct amdgpu_allowed_register_entry {
	uint32_t reg_offset;
	bool grbm_indexed;
};

/*
 * ASIC specific functions.
 */
struct amdgpu_asic_funcs {
	bool (*read_disabled_bios)(struct amdgpu_device *adev);
	bool (*read_bios_from_rom)(struct amdgpu_device *adev,
				   u8 *bios, u32 length_bytes);
	int (*read_register)(struct amdgpu_device *adev, u32 se_num,
			     u32 sh_num, u32 reg_offset, u32 *value);
	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
	int (*reset)(struct amdgpu_device *adev);
	/* get the reference clock */
	u32 (*get_xclk)(struct amdgpu_device *adev);
	/* MM block clocks */
	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
	/* static power management */
	int (*get_pcie_lanes)(struct amdgpu_device *adev);
	void (*set_pcie_lanes)(struct amdgpu_device *adev, int lanes);
	/* get config memsize register */
	u32 (*get_config_memsize)(struct amdgpu_device *adev);
	/* flush hdp write queue */
	void (*flush_hdp)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	/* invalidate hdp read cache */
	void (*invalidate_hdp)(struct amdgpu_device *adev,
			       struct amdgpu_ring *ring);
	/* check if the asic needs a full reset or if soft reset will work */
	bool (*need_full_reset)(struct amdgpu_device *adev);
};

/*
 * IOCTL.
 */
int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp);

int amdgpu_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp);
int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp);
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *filp);

/* VRAM scratch page for HDP bug, default vram page */
struct amdgpu_vram_scratch {
	struct amdgpu_bo *robj;
	volatile uint32_t *ptr;
	u64 gpu_addr;
};

/*
 * ACPI
 */
struct amdgpu_atif_notification_cfg {
	bool enabled;
	int command_code;
};

struct amdgpu_atif_notifications {
	bool display_switch;
	bool expansion_mode_change;
	bool thermal_state;
	bool forced_power_state;
	bool system_power_state;
	bool display_conf_change;
	bool px_gfx_switch;
	bool brightness_change;
	bool dgpu_display_event;
};

struct amdgpu_atif_functions {
	bool system_params;
	bool sbios_requests;
	bool select_active_disp;
	bool lid_state;
	bool get_tv_standard;
	bool set_tv_standard;
	bool get_panel_expansion_mode;
	bool set_panel_expansion_mode;
	bool temperature_change;
	bool graphics_device_types;
};

struct amdgpu_atif {
	struct amdgpu_atif_notifications notifications;
	struct amdgpu_atif_functions functions;
	struct amdgpu_atif_notification_cfg notification_cfg;
	struct amdgpu_encoder *encoder_for_bl;
};

struct amdgpu_atcs_functions {
	bool get_ext_state;
	bool pcie_perf_req;
	bool pcie_dev_rdy;
	bool pcie_bus_width;
};

struct amdgpu_atcs {
	struct amdgpu_atcs_functions functions;
};

/*
 * Firmware VRAM reservation
 */
struct amdgpu_fw_vram_usage {
	u64 start_offset;
	u64 size;
	struct amdgpu_bo *reserved_bo;
	void *va;
};

/*
 * CGS
 */
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev);
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t);
typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);

typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);


/*
 * amdgpu nbio functions
 */
struct nbio_hdp_flush_reg {
	u32 ref_and_mask_cp0;
	u32 ref_and_mask_cp1;
	u32 ref_and_mask_cp2;
	u32 ref_and_mask_cp3;
	u32 ref_and_mask_cp4;
	u32 ref_and_mask_cp5;
	u32 ref_and_mask_cp6;
	u32 ref_and_mask_cp7;
	u32 ref_and_mask_cp8;
	u32 ref_and_mask_cp9;
	u32 ref_and_mask_sdma0;
	u32 ref_and_mask_sdma1;
};

struct amdgpu_nbio_funcs {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
	u32 (*get_rev_id)(struct amdgpu_device *adev);
	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	u32 (*get_memsize)(struct amdgpu_device *adev);
	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index);
	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
					 bool enable);
	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
						  bool enable);
	void (*ih_doorbell_range)(struct amdgpu_device *adev,
				  bool use_doorbell, int doorbell_index);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
						bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*detect_hw_virt)(struct amdgpu_device *adev);
};

struct amdgpu_df_funcs {
	void (*init)(struct amdgpu_device *adev);
	void (*enable_broadcast_mode)(struct amdgpu_device *adev,
				      bool enable);
	u32 (*get_fb_channel_number)(struct amdgpu_device *adev);
	u32 (*get_hbm_channel_number)(struct amdgpu_device *adev);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*enable_ecc_force_par_wr_rmw)(struct amdgpu_device *adev,
					    bool enable);
};

/* HW IP blocks used by the driver; add more if necessary */
enum amd_hw_ip_block_type {
	GC_HWIP = 1,
	HDP_HWIP,
	SDMA0_HWIP,
	SDMA1_HWIP,
	MMHUB_HWIP,
	ATHUB_HWIP,
	NBIO_HWIP,
	MP0_HWIP,
	MP1_HWIP,
	UVD_HWIP,
	VCN_HWIP = UVD_HWIP,
	VCE_HWIP,
	DF_HWIP,
	DCE_HWIP,
	OSSSYS_HWIP,
	SMUIO_HWIP,
	PWR_HWIP,
	NBIF_HWIP,
	THM_HWIP,
	MAX_HWIP
};

#define HWIP_MAX_INSTANCE 6

struct amd_powerplay {
	void *pp_handle;
	const struct amd_pm_funcs *pp_funcs;
	uint32_t pp_feature;
};

#define AMDGPU_RESET_MAGIC_NUM 64
97b2e202
AD
1449struct amdgpu_device {
1450 struct device *dev;
1451 struct drm_device *ddev;
1452 struct pci_dev *pdev;
97b2e202 1453
a8fe58ce
MB
1454#ifdef CONFIG_DRM_AMD_ACP
1455 struct amdgpu_acp acp;
1456#endif
1457
97b2e202 1458 /* ASIC */
2f7d10b3 1459 enum amd_asic_type asic_type;
97b2e202
AD
1460 uint32_t family;
1461 uint32_t rev_id;
1462 uint32_t external_rev_id;
1463 unsigned long flags;
1464 int usec_timeout;
1465 const struct amdgpu_asic_funcs *asic_funcs;
1466 bool shutdown;
97b2e202 1467 bool need_dma32;
fd5fd480 1468 bool need_swiotlb;
97b2e202 1469 bool accel_working;
edf600da 1470 struct work_struct reset_work;
97b2e202
AD
1471 struct notifier_block acpi_nb;
1472 struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
1473 struct amdgpu_debugfs debugfs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
edf600da 1474 unsigned debugfs_count;
97b2e202 1475#if defined(CONFIG_DEBUG_FS)
adcec288 1476 struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
97b2e202
AD
1477#endif
1478 struct amdgpu_atif atif;
1479 struct amdgpu_atcs atcs;
1480 struct mutex srbm_mutex;
1481 /* GRBM index mutex. Protects concurrent access to GRBM index */
1482 struct mutex grbm_idx_mutex;
1483 struct dev_pm_domain vga_pm_domain;
1484 bool have_disp_power_ref;
1485
1486 /* BIOS */
0cdd5005 1487 bool is_atom_fw;
97b2e202 1488 uint8_t *bios;
a9f5db9c 1489 uint32_t bios_size;
5af2c10d 1490 struct amdgpu_bo *stolen_vga_memory;
a5bde2f9 1491 uint32_t bios_scratch_reg_offset;
97b2e202
AD
1492 uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH];
1493
1494 /* Register/doorbell mmio */
1495 resource_size_t rmmio_base;
1496 resource_size_t rmmio_size;
1497 void __iomem *rmmio;
1498 /* protects concurrent MM_INDEX/DATA based register access */
1499 spinlock_t mmio_idx_lock;
1500 /* protects concurrent SMC based register access */
1501 spinlock_t smc_idx_lock;
1502 amdgpu_rreg_t smc_rreg;
1503 amdgpu_wreg_t smc_wreg;
1504 /* protects concurrent PCIE register access */
1505 spinlock_t pcie_idx_lock;
1506 amdgpu_rreg_t pcie_rreg;
1507 amdgpu_wreg_t pcie_wreg;
36b9a952
HR
1508 amdgpu_rreg_t pciep_rreg;
1509 amdgpu_wreg_t pciep_wreg;
97b2e202
AD
1510 /* protects concurrent UVD register access */
1511 spinlock_t uvd_ctx_idx_lock;
1512 amdgpu_rreg_t uvd_ctx_rreg;
1513 amdgpu_wreg_t uvd_ctx_wreg;
1514 /* protects concurrent DIDT register access */
1515 spinlock_t didt_idx_lock;
1516 amdgpu_rreg_t didt_rreg;
1517 amdgpu_wreg_t didt_wreg;
ccdbb20a
RZ
1518 /* protects concurrent gc_cac register access */
1519 spinlock_t gc_cac_idx_lock;
1520 amdgpu_rreg_t gc_cac_rreg;
1521 amdgpu_wreg_t gc_cac_wreg;
16abb5d2
EQ
1522 /* protects concurrent se_cac register access */
1523 spinlock_t se_cac_idx_lock;
1524 amdgpu_rreg_t se_cac_rreg;
1525 amdgpu_wreg_t se_cac_wreg;
97b2e202
AD
1526 /* protects concurrent ENDPOINT (audio) register access */
1527 spinlock_t audio_endpt_idx_lock;
1528 amdgpu_block_rreg_t audio_endpt_rreg;
1529 amdgpu_block_wreg_t audio_endpt_wreg;
1530 void __iomem *rio_mem;
1531 resource_size_t rio_mem_size;
1532 struct amdgpu_doorbell doorbell;
1533
1534 /* clock/pll info */
1535 struct amdgpu_clock clock;
1536
1537 /* MC */
770d13b1 1538 struct amdgpu_gmc gmc;
97b2e202 1539 struct amdgpu_gart gart;
92e71b06 1540 dma_addr_t dummy_page_addr;
97b2e202 1541 struct amdgpu_vm_manager vm_manager;
e60f8db5 1542 struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS];
97b2e202
AD
1543
1544 /* memory management */
1545 struct amdgpu_mman mman;
97b2e202
AD
1546 struct amdgpu_vram_scratch vram_scratch;
1547 struct amdgpu_wb wb;
97b2e202 1548 atomic64_t num_bytes_moved;
dbd5ed60 1549 atomic64_t num_evictions;
68e2c5ff 1550 atomic64_t num_vram_cpu_page_faults;
d94aed5a 1551 atomic_t gpu_reset_counter;
f1892138 1552 atomic_t vram_lost_counter;
97b2e202 1553
95844d20
MO
1554 /* data for buffer migration throttling */
1555 struct {
1556 spinlock_t lock;
1557 s64 last_update_us;
1558 s64 accum_us; /* accumulated microseconds */
00f06b24 1559 s64 accum_us_vis; /* for visible VRAM */
95844d20
MO
1560 u32 log2_max_MBps;
1561 } mm_stats;
1562
97b2e202 1563 /* display */
9accf2fd 1564 bool enable_virtual_display;
97b2e202 1565 struct amdgpu_mode_info mode_info;
4562236b 1566 /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
97b2e202
AD
1567 struct work_struct hotplug_work;
1568 struct amdgpu_irq_src crtc_irq;
1569 struct amdgpu_irq_src pageflip_irq;
1570 struct amdgpu_irq_src hpd_irq;
1571
1572 /* rings */
76bf0db5 1573 u64 fence_context;
97b2e202
AD
1574 unsigned num_rings;
1575 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
1576 bool ib_pool_ready;
1577 struct amdgpu_sa_manager ring_tmp_bo;
1578
1579 /* interrupts */
1580 struct amdgpu_irq irq;
1581
1f7371b2
AD
1582 /* powerplay */
1583 struct amd_powerplay powerplay;
f3898ea1 1584 bool pp_force_state_enabled;
1f7371b2 1585
97b2e202
AD
1586 /* dpm */
1587 struct amdgpu_pm pm;
1588 u32 cg_flags;
1589 u32 pg_flags;
1590
1591 /* amdgpu smumgr */
1592 struct amdgpu_smumgr smu;
1593
1594 /* gfx */
1595 struct amdgpu_gfx gfx;
1596
1597 /* sdma */
c113ea1c 1598 struct amdgpu_sdma sdma;
97b2e202 1599
b43aaee6
LL
1600 /* uvd */
1601 struct amdgpu_uvd uvd;
1602
1603 /* vce */
1604 struct amdgpu_vce vce;
1605
1606 /* vcn */
1607 struct amdgpu_vcn vcn;
97b2e202
AD
1608
1609 /* firmwares */
1610 struct amdgpu_firmware firmware;
1611
0e5ca0d1
HR
1612 /* PSP */
1613 struct psp_context psp;
1614
97b2e202
AD
1615 /* GDS */
1616 struct amdgpu_gds gds;
1617
4562236b
HW
1618 /* display related functionality */
1619 struct amdgpu_display_manager dm;
1620
a1255107 1621 struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
97b2e202 1622 int num_ip_blocks;
97b2e202
AD
1623 struct mutex mn_lock;
1624 DECLARE_HASHTABLE(mn_hash, 7);
1625
1626 /* tracking pinned memory */
1627 u64 vram_pin_size;
e131b914 1628 u64 invisible_pin_size;
97b2e202 1629 u64 gart_pin_size;
130e0371
OG
1630
1631 /* amdkfd interface */
1632 struct kfd_dev *kfd;
23ca0e4e 1633
4522824c
SL
1634 /* soc15 register offset based on ip, instance and segment */
1635 uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE];
1636
946a4d5b 1637 const struct amdgpu_nbio_funcs *nbio_funcs;
634c96e3 1638 const struct amdgpu_df_funcs *df_funcs;
946a4d5b 1639
2dc80b00
S
1640 /* delayed work_func for deferring clockgating during resume */
1641 struct delayed_work late_init_work;
1642
5a5099cb 1643 struct amdgpu_virt virt;
a05502e5
HC
1644 /* firmware VRAM reservation */
1645 struct amdgpu_fw_vram_usage fw_vram_usage;
0c4e7fa5
CZ
1646
1647 /* link all shadow bo */
1648 struct list_head shadow_list;
1649 struct mutex shadow_list_lock;
795f2813
AR
1650 /* keep an lru list of rings by HW IP */
1651 struct list_head ring_lru_list;
1652 spinlock_t ring_lru_list_lock;
5c1354bd 1653
c836fec5
JQ
1654 /* record hw reset is performed */
1655 bool has_hw_reset;
0c49e0b8 1656 u8 reset_magic[AMDGPU_RESET_MAGIC_NUM];
c836fec5 1657
47ed4e1c
KW
1658 /* record last mm index being written through WREG32*/
1659 unsigned long last_mm_index;
13a752e3
ML
1660 bool in_gpu_reset;
1661 struct mutex lock_reset;
97b2e202
AD
1662};
1663
a7d64de6
CK
1664static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
1665{
1666 return container_of(bdev, struct amdgpu_device, mman.bdev);
1667}

int amdgpu_device_init(struct amdgpu_device *adev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void amdgpu_device_fini(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);

uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
			uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
		    uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);

u32 amdgpu_io_rreg(struct amdgpu_device *adev, u32 reg);
void amdgpu_io_wreg(struct amdgpu_device *adev, u32 reg, u32 v);

u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index);
void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v);

bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);

int emu_soc_asic_init(struct amdgpu_device *adev);

/*
 * Registers read & write functions.
 */

#define AMDGPU_REGS_IDX (1<<0)
#define AMDGPU_REGS_NO_KIQ (1<<1)

#define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ)
#define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ)
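/*
 * On SR-IOV virtual functions the plain MMIO accessors may be routed
 * through the KIQ ring while the GPU is shared; the _NO_KIQ variants force
 * a direct register access, which is what e.g. reset paths need when the
 * KIQ itself cannot be assumed to be alive.
 */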

#define RREG8(reg) amdgpu_mm_rreg8(adev, (reg))
#define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))

#define RREG32(reg) amdgpu_mm_rreg(adev, (reg), 0)
#define RREG32_IDX(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_IDX)
#define DREG32(reg) printk(KERN_INFO "REGISTER: " #reg " : 0x%08X\n", amdgpu_mm_rreg(adev, (reg), 0))
#define WREG32(reg, v) amdgpu_mm_wreg(adev, (reg), (v), 0)
#define WREG32_IDX(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_IDX)
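/*
 * Note that these macros expand to calls using a variable named "adev"
 * from the surrounding scope. A minimal sketch (mmSOME_REG standing in
 * for any real register offset define):
 *
 *	u32 tmp = RREG32(mmSOME_REG);
 *	WREG32(mmSOME_REG, tmp | 0x1);
 */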
#define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
#define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
#define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg))
#define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v))
#define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg))
#define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v))
#define RREG32_SMC(reg) adev->smc_rreg(adev, (reg))
#define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v))
#define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg))
#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
#define RREG32_SE_CAC(reg) adev->se_cac_rreg(adev, (reg))
#define WREG32_SE_CAC(reg, v) adev->se_cac_wreg(adev, (reg), (v))
#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
#define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
#define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
#define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)
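/*
 * WREG32_P() is a read-modify-write: bits set in "mask" are preserved from
 * the current register value, everything else is taken from "val". So,
 * illustratively, clearing bit 0 while leaving all other bits untouched is
 *
 *	WREG32_P(reg, 0, ~1u);
 *
 * which is exactly what WREG32_AND(reg, ~1u) expands to.
 */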
#define DREG32_SYS(sqf, adev, reg) seq_printf((sqf), #reg " : 0x%08X\n", amdgpu_mm_rreg((adev), (reg), false))
#define RREG32_IO(reg) amdgpu_io_rreg(adev, (reg))
#define WREG32_IO(reg, v) amdgpu_io_wreg(adev, (reg), (v))

#define RDOORBELL32(index) amdgpu_mm_rdoorbell(adev, (index))
#define WDOORBELL32(index, v) amdgpu_mm_wdoorbell(adev, (index), (v))
#define RDOORBELL64(index) amdgpu_mm_rdoorbell64(adev, (index))
#define WDOORBELL64(index, v) amdgpu_mm_wdoorbell64(adev, (index), (v))
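/*
 * Doorbells signal the hardware that new work was queued. A ring's
 * set_wptr callback typically ends in a doorbell write, along the lines of
 *
 *	WDOORBELL64(ring->doorbell_index, ring->wptr);
 *
 * (illustrative; the exact form depends on the ring type).
 */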

#define REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
#define REG_FIELD_MASK(reg, field) reg##__##field##_MASK

#define REG_SET_FIELD(orig_val, reg, field, field_val)			\
	(((orig_val) & ~REG_FIELD_MASK(reg, field)) |			\
	 (REG_FIELD_MASK(reg, field) & ((field_val) << REG_FIELD_SHIFT(reg, field))))

#define REG_GET_FIELD(value, reg, field)				\
	(((value) & REG_FIELD_MASK(reg, field)) >> REG_FIELD_SHIFT(reg, field))
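/*
 * These rely on the generated register headers defining, for a field FIELD
 * of register REG, both REG__FIELD_MASK and REG__FIELD__SHIFT. A typical
 * (illustrative) sequence from the gfx code, se_num being the target
 * shader engine:
 *
 *	u32 tmp = RREG32(mmGRBM_GFX_INDEX);
 *	tmp = REG_SET_FIELD(tmp, GRBM_GFX_INDEX, SE_INDEX, se_num);
 *	WREG32(mmGRBM_GFX_INDEX, tmp);
 *
 * and, for reading, REG_GET_FIELD(RREG32(mmGRBM_STATUS), GRBM_STATUS,
 * GUI_ACTIVE) extracts a single field from a register snapshot.
 */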

#define WREG32_FIELD(reg, field, val)	\
	WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

#define WREG32_FIELD_OFFSET(reg, offset, field, val)	\
	WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field))

/*
 * BIOS helpers.
 */
#define RBIOS8(i) (adev->bios[i])
#define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
#define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))
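/*
 * The BIOS image is little-endian, so the multi-byte readers compose the
 * value from individual bytes, i.e.
 *
 *	RBIOS32(i) == bios[i] | bios[i+1] << 8 | bios[i+2] << 16 | bios[i+3] << 24
 */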

static inline struct amdgpu_sdma_instance *
amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		if (&adev->sdma.instance[i].ring == ring)
			break;

	if (i < adev->sdma.num_instances)
		return &adev->sdma.instance[i];
	else
		return NULL;
}
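/*
 * Illustrative use from SDMA ring code: map a ring back to its instance to
 * reach per-instance state, e.g.
 *
 *	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
 *
 *	if (sdma && sdma->burst_nop)
 *		...
 */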

/*
 * ASIC macros.
 */
#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
#define amdgpu_get_pcie_lanes(adev) (adev)->asic_funcs->get_pcie_lanes((adev))
#define amdgpu_set_pcie_lanes(adev, l) (adev)->asic_funcs->set_pcie_lanes((adev), (l))
#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
#define amdgpu_asic_read_register(adev, se, sh, offset, v) ((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
#define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev))
#define amdgpu_asic_flush_hdp(adev, r) (adev)->asic_funcs->flush_hdp((adev), (r))
#define amdgpu_asic_invalidate_hdp(adev, r) (adev)->asic_funcs->invalidate_hdp((adev), (r))
#define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev))
#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags))
#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
#define amdgpu_ring_emit_ib(r, ib, vmid, c) (r)->funcs->emit_ib((r), (ib), (vmid), (c))
#define amdgpu_ring_emit_pipeline_sync(r) (r)->funcs->emit_pipeline_sync((r))
#define amdgpu_ring_emit_vm_flush(r, vmid, addr) (r)->funcs->emit_vm_flush((r), (vmid), (addr))
#define amdgpu_ring_emit_fence(r, addr, seq, flags) (r)->funcs->emit_fence((r), (addr), (seq), (flags))
#define amdgpu_ring_emit_gds_switch(r, v, db, ds, wb, ws, ab, as) (r)->funcs->emit_gds_switch((r), (v), (db), (ds), (wb), (ws), (ab), (as))
#define amdgpu_ring_emit_hdp_flush(r) (r)->funcs->emit_hdp_flush((r))
#define amdgpu_ring_emit_switch_buffer(r) (r)->funcs->emit_switch_buffer((r))
#define amdgpu_ring_emit_cntxcntl(r, d) (r)->funcs->emit_cntxcntl((r), (d))
#define amdgpu_ring_emit_rreg(r, d) (r)->funcs->emit_rreg((r), (d))
#define amdgpu_ring_emit_wreg(r, d, v) (r)->funcs->emit_wreg((r), (d), (v))
#define amdgpu_ring_emit_reg_wait(r, d, v, m) (r)->funcs->emit_reg_wait((r), (d), (v), (m))
#define amdgpu_ring_emit_reg_write_reg_wait(r, d0, d1, v, m) (r)->funcs->emit_reg_write_reg_wait((r), (d0), (d1), (v), (m))
#define amdgpu_ring_emit_tmz(r, b) (r)->funcs->emit_tmz((r), (b))
#define amdgpu_ring_pad_ib(r, ib) ((r)->funcs->pad_ib((r), (ib)))
#define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
#define amdgpu_ring_patch_cond_exec(r, o) (r)->funcs->patch_cond_exec((r), (o))
#define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
#define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
#define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
#define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
#define amdgpu_display_backlight_set_level(adev, e, l) (adev)->mode_info.funcs->backlight_set_level((e), (l))
#define amdgpu_display_backlight_get_level(adev, e) (adev)->mode_info.funcs->backlight_get_level((e))
#define amdgpu_display_hpd_sense(adev, h) (adev)->mode_info.funcs->hpd_sense((adev), (h))
#define amdgpu_display_hpd_set_polarity(adev, h) (adev)->mode_info.funcs->hpd_set_polarity((adev), (h))
#define amdgpu_display_hpd_get_gpio_reg(adev) (adev)->mode_info.funcs->hpd_get_gpio_reg((adev))
#define amdgpu_display_bandwidth_update(adev) (adev)->mode_info.funcs->bandwidth_update((adev))
#define amdgpu_display_page_flip(adev, crtc, base, async) (adev)->mode_info.funcs->page_flip((adev), (crtc), (base), (async))
#define amdgpu_display_page_flip_get_scanoutpos(adev, crtc, vbl, pos) (adev)->mode_info.funcs->page_flip_get_scanoutpos((adev), (crtc), (vbl), (pos))
#define amdgpu_display_add_encoder(adev, e, s, c) (adev)->mode_info.funcs->add_encoder((adev), (e), (s), (c))
#define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))
#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
#define amdgpu_gds_switch(adev, r, v, d, w, a) (adev)->gds.funcs->patch_gds_switch((r), (v), (d), (w), (a))
#define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i))
#define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q) (adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q))
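/*
 * All of the macros above are thin wrappers around per-ASIC/per-IP
 * function tables, keeping common code chip-agnostic. For example,
 * amdgpu_asic_get_xclk(adev) expands to
 *
 *	(adev)->asic_funcs->get_xclk((adev))
 *
 * and dispatches to whichever implementation (soc15, vi, ...) was
 * installed during early init.
 */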

/* Common functions */
int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
			      struct amdgpu_job *job, bool force);
void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
bool amdgpu_device_need_post(struct amdgpu_device *adev);
void amdgpu_display_update_priority(struct amdgpu_device *adev);

void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
				 struct amdgpu_gmc *mc);
int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev);
void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
					     const u32 *registers,
					     const u32 array_size);

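/*
 * The "registers" array for amdgpu_device_program_register_sequence() is
 * consumed as (offset, and_mask, or_mask) triplets: the bits in and_mask
 * are cleared, those in or_mask are set, and an and_mask of 0xffffffff
 * turns the entry into a plain write of or_mask. Illustrative table
 * (register names hypothetical):
 *
 *	static const u32 golden_settings[] = {
 *		mmREG_A, 0xffffffff, 0x00000100,
 *		mmREG_B, 0x0000000f, 0x00000001,
 *	};
 */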
bool amdgpu_device_is_px(struct drm_device *dev);
/* atpx handler */
#if defined(CONFIG_VGA_SWITCHEROO)
void amdgpu_register_atpx_handler(void);
void amdgpu_unregister_atpx_handler(void);
bool amdgpu_has_atpx_dgpu_power_cntl(void);
bool amdgpu_is_atpx_hybrid(void);
bool amdgpu_atpx_dgpu_req_power_for_displays(void);
bool amdgpu_has_atpx(void);
#else
static inline void amdgpu_register_atpx_handler(void) {}
static inline void amdgpu_unregister_atpx_handler(void) {}
static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; }
static inline bool amdgpu_has_atpx(void) { return false; }
#endif

/*
 * KMS
 */
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;

int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv);
void amdgpu_driver_postclose_kms(struct drm_device *dev,
				 struct drm_file *file_priv);
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon);
u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
			     unsigned long arg);

/*
 * functions used by amdgpu_encoder.c
 */
struct amdgpu_afmt_acr {
	u32 clock;

	int n_32khz;
	int cts_32khz;

	int n_44_1khz;
	int cts_44_1khz;

	int n_48khz;
	int cts_48khz;
};

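/*
 * Returns the HDMI audio clock regeneration parameters for a given pixel
 * clock: the sink recovers the audio clock as 128*fs = f_TMDS * N / CTS,
 * hence one N/CTS pair per supported sample rate (32, 44.1 and 48 kHz).
 */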
struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock);

/* amdgpu_acpi.c */
#if defined(CONFIG_ACPI)
int amdgpu_acpi_init(struct amdgpu_device *adev);
void amdgpu_acpi_fini(struct amdgpu_device *adev);
bool amdgpu_acpi_is_pcie_performance_request_supported(struct amdgpu_device *adev);
int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
					 u8 perf_req, bool advertise);
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
#endif

int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **mapping);

#if defined(CONFIG_DRM_AMD_DC)
int amdgpu_dm_display_resume(struct amdgpu_device *adev);
#else
static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; }
#endif

#include "amdgpu_object.h"
#endif