/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/idr.h>
#include <linux/kfifo.h>
#include <linux/rbtree.h>
#include <drm/gpu_scheduler.h>
#include <drm/drm_file.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <linux/sched/mm.h>

#include "amdgpu_sync.h"
#include "amdgpu_ring.h"
#include "amdgpu_ids.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE	0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size)

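/*
 * Worked example (illustrative, assuming a block_size of 9, which is a
 * common configuration): AMDGPU_VM_PTE_COUNT(adev) = 1 << 9 = 512 entries,
 * so a single page table block then covers 512 * 4KB = 2MB of address space.
 */
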
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* RV+ */
#define AMDGPU_PTE_TMZ		(1ULL << 3)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

/* TILED for VEGA10, reserved for older ASICs */
#define AMDGPU_PTE_PRT		(1ULL << 51)

/* PDE is handled as PTE for VEGA10 */
#define AMDGPU_PDE_PTE		(1ULL << 54)

#define AMDGPU_PTE_LOG		(1ULL << 55)

/* PTE is handled as PDE for VEGA10 (Translate Further) */
#define AMDGPU_PTE_TF		(1ULL << 56)

/* MALL noalloc for sienna_cichlid, reserved for older ASICs */
#define AMDGPU_PTE_NOALLOC	(1ULL << 58)

/* PDE Block Fragment Size for VEGA10 */
#define AMDGPU_PDE_BFS(a)	((uint64_t)a << 59)

/* For GFX9 */
#define AMDGPU_PTE_MTYPE_VG10(a)	((uint64_t)(a) << 57)
#define AMDGPU_PTE_MTYPE_VG10_MASK	AMDGPU_PTE_MTYPE_VG10(3ULL)

#define AMDGPU_MTYPE_NC 0
#define AMDGPU_MTYPE_CC 2

#define AMDGPU_PTE_DEFAULT_ATC  (AMDGPU_PTE_SYSTEM	\
				| AMDGPU_PTE_SNOOPED	\
				| AMDGPU_PTE_EXECUTABLE	\
				| AMDGPU_PTE_READABLE	\
				| AMDGPU_PTE_WRITEABLE	\
				| AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC))

/* gfx10 */
#define AMDGPU_PTE_MTYPE_NV10(a)	((uint64_t)(a) << 48)
#define AMDGPU_PTE_MTYPE_NV10_MASK	AMDGPU_PTE_MTYPE_NV10(7ULL)

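/*
 * Example (illustrative only): the flag bits above are OR'ed together to
 * form the "flags" value handed to the PTE update callbacks. A read/write,
 * snooped system page with a fragment-size hint could be described as:
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE | AMDGPU_PTE_FRAG(x);
 *
 * where AMDGPU_PTE_FRAG(x) encodes a power-of-two contiguity hint for the
 * mapping.
 */
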
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER	0
#define AMDGPU_VM_FAULT_STOP_FIRST	1
#define AMDGPU_VM_FAULT_STOP_ALWAYS	2

/* Reserve 8MB VRAM for page tables */
#define AMDGPU_VM_RESERVED_VRAM		(8ULL << 20)

/* max number of VMHUB */
#define AMDGPU_MAX_VMHUBS		3
#define AMDGPU_GFXHUB_0			0
#define AMDGPU_MMHUB_0			1
#define AMDGPU_MMHUB_1			2

/* Reserve 2MB at top/bottom of address space for kernel use */
#define AMDGPU_VA_RESERVED_SIZE		(2ULL << 20)

/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID	1

#define AMDGPU_VM_CONTEXT_GFX		0
#define AMDGPU_VM_CONTEXT_COMPUTE	1

/* See vm_update_mode */
#define AMDGPU_VM_USE_CPU_FOR_GFX	(1 << 0)
#define AMDGPU_VM_USE_CPU_FOR_COMPUTE	(1 << 1)

/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
enum amdgpu_vm_level {
	AMDGPU_VM_PDB2,
	AMDGPU_VM_PDB1,
	AMDGPU_VM_PDB0,
	AMDGPU_VM_PTB
};

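/*
 * Worked example (illustrative, assuming 4KB pages and 9 address bits per
 * level): a PTB maps 512 * 4KB = 2MB, a PDB0 entry points to one PTB and
 * therefore covers 2MB, a PDB1 entry covers 1GB and a PDB2 entry covers
 * 512GB. How many of these levels are actually used depends on
 * vm_manager.root_level and max_pfn.
 */
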
/* base structure for tracking BO usage in a VM */
struct amdgpu_vm_bo_base {
	/* constant after initialization */
	struct amdgpu_vm		*vm;
	struct amdgpu_bo		*bo;

	/* protected by bo being reserved */
	struct amdgpu_vm_bo_base	*next;

	/* protected by spinlock */
	struct list_head		vm_status;

	/* protected by the BO being reserved */
	bool				moved;
};

struct amdgpu_vm_pt {
	struct amdgpu_vm_bo_base	base;

	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt		*entries;
};

/* provided by hw blocks that can write ptes, e.g., sdma */
struct amdgpu_vm_pte_funcs {
	/* number of dw to reserve per operation */
	unsigned	copy_pte_num_dw;

	/* copy pte entries from GART */
	void (*copy_pte)(struct amdgpu_ib *ib,
			 uint64_t pe, uint64_t src,
			 unsigned count);

	/* write pte one entry at a time with addr mapping */
	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
			  uint64_t value, unsigned count,
			  uint32_t incr);
	/* for linear pte/pde updates without addr mapping */
	void (*set_pte_pde)(struct amdgpu_ib *ib,
			    uint64_t pe,
			    uint64_t addr, unsigned count,
			    uint32_t incr, uint64_t flags);
};

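/*
 * Hypothetical provider (illustrative sketch only; the my_* names and the
 * dw count are placeholders, real instances live in the SDMA/engine code):
 *
 *	static const struct amdgpu_vm_pte_funcs my_vm_pte_funcs = {
 *		.copy_pte_num_dw = 7,
 *		.copy_pte	 = my_vm_copy_pte,
 *		.write_pte	 = my_vm_write_pte,
 *		.set_pte_pde	 = my_vm_set_pte_pde,
 *	};
 *
 * The pointer ends up in adev->vm_manager.vm_pte_funcs and is invoked
 * through the amdgpu_vm_*_pte() wrapper macros further below.
 */
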
struct amdgpu_task_info {
	char	process_name[TASK_COMM_LEN];
	char	task_name[TASK_COMM_LEN];
	pid_t	pid;
	pid_t	tgid;
};

/**
 * struct amdgpu_vm_update_params
 *
 * Encapsulate some VM table update parameters to reduce
 * the number of function parameters
 *
 */
struct amdgpu_vm_update_params {

	/**
	 * @adev: amdgpu device we do this update for
	 */
	struct amdgpu_device *adev;

	/**
	 * @vm: optional amdgpu_vm we do this update for
	 */
	struct amdgpu_vm *vm;

	/**
	 * @immediate: if changes should be made immediately
	 */
	bool immediate;

	/**
	 * @unlocked: true if the root BO is not locked
	 */
	bool unlocked;

	/**
	 * @pages_addr:
	 *
	 * DMA addresses to use for mapping
	 */
	dma_addr_t *pages_addr;

	/**
	 * @job: job to use for hw submission
	 */
	struct amdgpu_job *job;

	/**
	 * @num_dw_left: number of dw left for the IB
	 */
	unsigned int num_dw_left;
};

struct amdgpu_vm_update_funcs {
	int (*map_table)(struct amdgpu_bo *bo);
	int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
		       enum amdgpu_sync_mode sync_mode);
	int (*update)(struct amdgpu_vm_update_params *p,
		      struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
		      unsigned count, uint32_t incr, uint64_t flags);
	int (*commit)(struct amdgpu_vm_update_params *p,
		      struct dma_fence **fence);
};

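/*
 * Illustrative call sequence (a sketch, not a verbatim copy of the update
 * code): a page table update through one of these backends roughly follows
 *
 *	params.vm = vm;
 *	vm->update_funcs->map_table(bo);	// make the PT BO accessible
 *	vm->update_funcs->prepare(&params, resv, sync_mode);
 *	vm->update_funcs->update(&params, bo, pe, addr, count, incr, flags);
 *	vm->update_funcs->commit(&params, &fence);	// submit, return fence
 *
 * with amdgpu_vm_cpu_funcs or amdgpu_vm_sdma_funcs (declared below) plugged
 * in as the backend.
 */
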
struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached	va;

	/* Lock to prevent eviction while we are updating page tables;
	 * use vm_eviction_lock/unlock(vm)
	 */
	struct mutex		eviction_lock;
	bool			evicting;
	unsigned int		saved_flags;

	/* BOs which need a validation */
	struct list_head	evicted;

	/* PT BOs which have been relocated and whose parent needs an update */
	struct list_head	relocated;

	/* per VM BOs moved, but not yet updated in the PT */
	struct list_head	moved;

	/* All BOs of this VM not currently in the state machine */
	struct list_head	idle;

	/* regular invalidated BOs, but not yet updated in the PT */
	struct list_head	invalidated;
	spinlock_t		invalidated_lock;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;

	/* BOs which were invalidated and have already been updated in the PTs */
	struct list_head	done;

	/* contains the page directory */
	struct amdgpu_vm_pt	root;
	struct dma_fence	*last_update;

	/* Scheduler entities for page table updates */
	struct drm_sched_entity	immediate;
	struct drm_sched_entity	delayed;

	/* Last unlocked submission to the scheduler entities */
	struct dma_fence	*last_unlocked;

	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vmid	*reserved_vmid[AMDGPU_MAX_VMHUBS];

	/* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */
	bool			use_cpu_for_update;

	/* Functions to use for VM table updates */
	const struct amdgpu_vm_update_funcs	*update_funcs;

	/* Flag to indicate ATS support from PTE for GFX9 */
	bool			pte_support_ats;

	/* Up to 128 pending retry page faults */
	DECLARE_KFIFO(faults, u64, 128);

	/* Points to the KFD process VM info */
	struct amdkfd_process_info *process_info;

	/* List node in amdkfd_process_info.vm_list_head */
	struct list_head	vm_list_node;

	/* Valid while the PD is reserved or fenced */
	uint64_t		pd_phys_addr;

	/* Some basic info about the task */
	struct amdgpu_task_info	task_info;

	/* Store positions of group of BOs */
	struct ttm_lru_bulk_move lru_bulk_move;
	/* mark whether the bulk move can be done */
	bool			bulk_moveable;
	/* Flag to indicate if VM is used for compute */
	bool			is_compute_context;
};

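/*
 * Sketch (illustrative, assuming helpers of this shape live in amdgpu_vm.c):
 * the eviction_lock/evicting/saved_flags trio above is meant to be used
 * through small wrappers that also block FS reclaim, since reclaim could
 * recurse back into the driver while page tables are being updated:
 *
 *	static void vm_eviction_lock(struct amdgpu_vm *vm)
 *	{
 *		mutex_lock(&vm->eviction_lock);
 *		vm->saved_flags = memalloc_nofs_save();
 *	}
 *
 *	static void vm_eviction_unlock(struct amdgpu_vm *vm)
 *	{
 *		memalloc_nofs_restore(vm->saved_flags);
 *		mutex_unlock(&vm->eviction_lock);
 *	}
 */
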
struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct amdgpu_vmid_mgr			id_mgr[AMDGPU_MAX_VMHUBS];
	unsigned int				first_kfd_vmid;

	/* Handling of VM fences */
	u64					fence_context;
	unsigned				seqno[AMDGPU_MAX_RINGS];

	uint64_t				max_pfn;
	uint32_t				num_level;
	uint32_t				block_size;
	uint32_t				fragment_size;
	enum amdgpu_vm_level			root_level;
	/* vram base address for page table entry */
	u64					vram_base_offset;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs	*vm_pte_funcs;
	struct drm_gpu_scheduler		*vm_pte_scheds[AMDGPU_MAX_RINGS];
	unsigned				vm_pte_num_scheds;
	struct amdgpu_ring			*page_fault;

	/* partial resident texture handling */
	spinlock_t				prt_lock;
	atomic_t				num_prt_users;

	/* controls how VM page tables are updated for Graphics and Compute.
	 * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;

	/* PASID to VM mapping, will be used in interrupt context to
	 * look up the VM of a page fault
	 */
	struct idr				pasid_idr;
	spinlock_t				pasid_lock;
};
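
/*
 * Example (illustrative sketch): translating a faulting PASID back to its
 * VM with the pasid_idr/pasid_lock pair above. Because this runs in
 * interrupt context, the irqsave spinlock variants are used:
 *
 *	unsigned long flags;
 *	struct amdgpu_vm *vm;
 *
 *	spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
 *	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
 *	spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 */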

#define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count)))
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))

extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs;
extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs;

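/*
 * Example (sketch; the actual selection happens during VM init/compute
 * conversion): a VM's use_cpu_for_update flag decides which of the two
 * backend tables above it uses:
 *
 *	if (vm->use_cpu_for_update)
 *		vm->update_funcs = &amdgpu_vm_cpu_funcs;
 *	else
 *		vm->update_funcs = &amdgpu_vm_sdma_funcs;
 */
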
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);

long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
		   int vm_context, u32 pasid);
int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, u32 pasid);
void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
bool amdgpu_vm_ready(struct amdgpu_vm *vm);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync);
int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm, bool immediate);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
			   struct amdgpu_vm *vm);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
bool amdgpu_vm_evictable(struct amdgpu_bo *bo);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo, bool evicted);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
							 uint64_t addr);
void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
			   uint32_t fragment_size_default, unsigned max_level,
			   unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
				  struct amdgpu_job *job);
void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev);

void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid,
			     struct amdgpu_task_info *task_info);
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
			    uint64_t addr);

void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);

void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
				struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);

#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
#endif

#endif