drm/amdgpu: add num_level to the VM manager
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */
#ifndef __AMDGPU_VM_H__
#define __AMDGPU_VM_H__

#include <linux/rbtree.h>

#include "gpu_scheduler.h"
#include "amdgpu_sync.h"
#include "amdgpu_ring.h"

struct amdgpu_bo_va;
struct amdgpu_job;
struct amdgpu_bo_list_entry;

/*
 * GPUVM handling
 */

/* maximum number of VMIDs */
#define AMDGPU_NUM_VM 16

/* Maximum number of PTEs the hardware can write with one command */
#define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF

/* number of entries in page table */
#define AMDGPU_VM_PTE_COUNT (1 << amdgpu_vm_block_size)

/* PTBs (Page Table Blocks) need to be aligned to 32K */
#define AMDGPU_VM_PTB_ALIGN_SIZE 32768

/* LOG2 number of continuous pages for the fragment field */
#define AMDGPU_LOG2_PAGES_PER_FRAG 4

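/*
 * Illustrative arithmetic (not part of the header; the block size value is
 * only an example): with amdgpu_vm_block_size set to 9, a single page table
 * holds AMDGPU_VM_PTE_COUNT = 1 << 9 = 512 entries, and a fragment of
 * AMDGPU_LOG2_PAGES_PER_FRAG = 4 covers 1 << 4 = 16 contiguous pages.
 */
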
#define AMDGPU_PTE_VALID	(1ULL << 0)
#define AMDGPU_PTE_SYSTEM	(1ULL << 1)
#define AMDGPU_PTE_SNOOPED	(1ULL << 2)

/* VI only */
#define AMDGPU_PTE_EXECUTABLE	(1ULL << 4)

#define AMDGPU_PTE_READABLE	(1ULL << 5)
#define AMDGPU_PTE_WRITEABLE	(1ULL << 6)

#define AMDGPU_PTE_FRAG(x)	((x & 0x1fULL) << 7)

#define AMDGPU_PTE_PRT		(1ULL << 63)

/* VEGA10 only */
#define AMDGPU_PTE_MTYPE(a)	((uint64_t)a << 57)
#define AMDGPU_PTE_MTYPE_MASK	AMDGPU_PTE_MTYPE(3ULL)

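/*
 * Illustrative example (a sketch, not part of the header): a PTE for a
 * readable and writeable system page with CPU cache snooping is built by
 * OR-ing the flags above, e.g.
 *
 *	uint64_t flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM |
 *			 AMDGPU_PTE_SNOOPED | AMDGPU_PTE_READABLE |
 *			 AMDGPU_PTE_WRITEABLE;
 *
 * AMDGPU_PTE_FRAG() and AMDGPU_PTE_MTYPE() encode multi-bit fields and are
 * OR-ed in the same way on the hardware generations that support them.
 */
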
/* How to program VM fault handling */
#define AMDGPU_VM_FAULT_STOP_NEVER 0
#define AMDGPU_VM_FAULT_STOP_FIRST 1
#define AMDGPU_VM_FAULT_STOP_ALWAYS 2

struct amdgpu_vm_pt {
	struct amdgpu_bo *bo;
	uint64_t addr;
};

struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root va;

	/* protecting invalidated */
	spinlock_t status_lock;

	/* BOs moved, but not yet updated in the PT */
	struct list_head invalidated;

	/* BOs cleared in the PT because of a move */
	struct list_head cleared;

	/* BO mappings freed, but not yet updated in the PT */
	struct list_head freed;

	/* contains the page directory */
	struct amdgpu_bo *page_directory;
	unsigned max_pde_used;
	struct dma_fence *last_dir_update;
	uint64_t last_eviction_counter;

	/* array of page tables, one for each page directory entry */
	struct amdgpu_vm_pt *page_tables;

	/* for id and flush management per ring */
	struct amdgpu_vm_id *ids[AMDGPU_MAX_RINGS];

	/* protecting freed */
	spinlock_t freed_lock;

	/* Scheduler entity for page table updates */
	struct amd_sched_entity entity;

	/* client id */
	u64 client_id;

	/* each VM will map on CSA */
	struct amdgpu_bo_va *csa_bo_va;
};

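/*
 * Rough picture of the structure above (illustrative, derived from the
 * field comments only): page_directory is one BO whose entries each point
 * to a page table; page_tables[i] tracks the BO and address backing
 * directory entry i, and each page table holds AMDGPU_VM_PTE_COUNT PTEs
 * built from the AMDGPU_PTE_* flags.
 */
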
struct amdgpu_vm_id {
	struct list_head list;
	struct dma_fence *first;
	struct amdgpu_sync active;
	struct dma_fence *last_flush;
	atomic64_t owner;

	uint64_t pd_gpu_addr;
	/* last flushed PD/PT update */
	struct dma_fence *flushed_updates;

	uint32_t current_gpu_reset_count;

	uint32_t gds_base;
	uint32_t gds_size;
	uint32_t gws_base;
	uint32_t gws_size;
	uint32_t oa_base;
	uint32_t oa_size;
};

struct amdgpu_vm_manager {
	/* Handling of VMIDs */
	struct mutex lock;
	unsigned num_ids;
	struct list_head ids_lru;
	struct amdgpu_vm_id ids[AMDGPU_NUM_VM];

	/* Handling of VM fences */
	u64 fence_context;
	unsigned seqno[AMDGPU_MAX_RINGS];

	uint32_t max_pfn;
	/* number of VM page table levels */
	uint32_t num_level;
	/* vram base address for page table entry */
	u64 vram_base_offset;
	/* is vm enabled? */
	bool enabled;
	/* vm pte handling */
	const struct amdgpu_vm_pte_funcs *vm_pte_funcs;
	struct amdgpu_ring *vm_pte_rings[AMDGPU_MAX_RINGS];
	unsigned vm_pte_num_rings;
	atomic_t vm_pte_next_ring;
	/* client id counter */
	atomic64_t client_counter;

	/* partial resident texture handling */
	spinlock_t prt_lock;
	atomic_t num_prt_users;
};

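/*
 * Illustrative flow (a sketch, not a definition of the API contract): the
 * manager keeps the AMDGPU_NUM_VM hardware VMIDs in ids[] and recycles
 * them through ids_lru; amdgpu_vm_grab_id() below assigns one to a VM for
 * a submission and amdgpu_vm_flush() makes its page directory current on
 * the ring.
 */
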
void amdgpu_vm_manager_init(struct amdgpu_device *adev);
void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
				  struct amdgpu_vm *vm);
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
			struct amdgpu_vm *vm,
			uint64_t saddr, uint64_t size);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync, struct dma_fence *fence,
		      struct amdgpu_job *job);
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			     struct amdgpu_sync *sync);
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo);
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t addr, uint64_t offset,
		     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
			     struct amdgpu_bo_va *bo_va,
			     uint64_t addr, uint64_t offset,
			     uint64_t size, uint64_t flags);
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t addr);
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);

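/*
 * Typical per-BO mapping flow, sketched from the declarations above only
 * (error handling and locking omitted; adev, vm, bo, gpu_addr and size are
 * placeholders, and the exact ordering is up to the callers in the rest of
 * the driver):
 *
 *	struct amdgpu_bo_va *bo_va;
 *	int r;
 *
 *	bo_va = amdgpu_vm_bo_add(adev, vm, bo);
 *	r = amdgpu_vm_bo_map(adev, bo_va, gpu_addr, 0, size,
 *			     AMDGPU_PTE_READABLE | AMDGPU_PTE_WRITEABLE);
 *	r = amdgpu_vm_alloc_pts(adev, vm, gpu_addr, size);
 *	r = amdgpu_vm_bo_update(adev, bo_va, false);
 *	...
 *	r = amdgpu_vm_bo_unmap(adev, bo_va, gpu_addr);
 *	amdgpu_vm_bo_rmv(adev, bo_va);
 */
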
#endif