/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_vm;

#define TEST_VM_ASYNC_OPS_ERROR
#define FORCE_ASYNC_OP_ERROR		BIT(31)

#define XE_VMA_READ_ONLY		DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED		(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT		(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND		(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND		(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K			(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M			(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G			(DRM_GPUVA_USERBITS << 7)

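/*
 * Illustrative sketch (not compiled): the XE_VMA_* values above extend
 * DRM_GPUVA_USERBITS, so they live in the flags word of the drm_gpuva
 * embedded in each xe_vma and can be tested directly on it. The helper
 * name below is hypothetical and only shows the intended usage.
 */
#if 0
static bool example_gpuva_is_read_only(struct drm_gpuva *gpuva)
{
        /* User bits sit alongside the core DRM_GPUVA_* flags */
        return gpuva->flags & XE_VMA_READ_ONLY;
}
#endif
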
/** struct xe_userptr - User pointer */
struct xe_userptr {
        /** @invalidate_link: Link for the vm::userptr.invalidated list */
        struct list_head invalidate_link;
        /**
         * @notifier: MMU notifier for user pointer (invalidation call back)
         */
        struct mmu_interval_notifier notifier;
        /** @sgt: storage for a scatter gather table */
        struct sg_table sgt;
        /** @sg: allocated scatter gather table */
        struct sg_table *sg;
        /** @notifier_seq: notifier sequence number */
        unsigned long notifier_seq;
        /**
         * @initial_bind: user pointer has been bound at least once.
         * write: vm->userptr.notifier_lock in read mode and vm->resv held.
         * read: vm->userptr.notifier_lock in write mode or vm->resv held.
         */
        bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
        u32 divisor;
#endif
};

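/*
 * Illustrative sketch (not compiled): the fields above follow the standard
 * mmu_interval_notifier pattern. A repin path would sample @notifier_seq
 * before pinning pages and later, under vm->userptr.notifier_lock, check
 * that no invalidation raced with it. The function below is hypothetical.
 */
#if 0
static int example_userptr_repin(struct xe_userptr *uptr)
{
        /* Sample the notifier sequence before (re)pinning pages */
        uptr->notifier_seq = mmu_interval_read_begin(&uptr->notifier);

        /* ... pin the user pages and rebuild uptr->sg here ... */

        /* Recheck: a racing invalidation means the pages must be redone */
        if (mmu_interval_read_retry(&uptr->notifier, uptr->notifier_seq))
                return -EAGAIN;

        return 0;
}
#endif
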
struct xe_vma {
        /** @gpuva: Base GPUVA object */
        struct drm_gpuva gpuva;

        /**
         * @combined_links: links into lists which are mutually exclusive.
         * Locking: vm lock in write mode OR vm lock in read mode and the vm's
         * resv.
         */
        union {
                /** @userptr: link into VM repin list if userptr. */
                struct list_head userptr;
                /** @rebind: link into VM if this VMA needs rebinding. */
                struct list_head rebind;
                /** @destroy: link to contested list when VM is being closed. */
                struct list_head destroy;
        } combined_links;

        union {
                /** @destroy_cb: callback to destroy VMA when unbind job is done */
                struct dma_fence_cb destroy_cb;
                /** @destroy_work: worker to destroy this VMA */
                struct work_struct destroy_work;
        };

        /** @usm: unified shared memory state */
        struct {
                /** @tile_invalidated: Tile mask of bindings that have been invalidated */
                u8 tile_invalidated;
        } usm;

        /** @tile_mask: Tile mask of where to create binding for this VMA */
        u8 tile_mask;

        /**
         * @tile_present: Tile mask of bindings present for this VMA.
         * Protected by vm->lock, vm->resv and, for userptrs,
         * vm->userptr.notifier_lock for writing. Needs either for reading,
         * but if reading is done under the vm->lock only, it needs to be held
         * in write mode.
         */
        u8 tile_present;

        /**
         * @pat_index: The pat index to use when encoding the PTEs for this vma.
         */
        u16 pat_index;

        /**
         * @userptr: user pointer state, only allocated for VMAs that are
         * user pointers
         */
        struct xe_userptr userptr;
};

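/*
 * Illustrative sketch (not compiled): xe_vma carries no start/size fields of
 * its own; the mapped range lives in the embedded drm_gpuva. The hypothetical
 * helpers below show how that range would be recovered from @gpuva.
 */
#if 0
static u64 example_vma_start(struct xe_vma *vma)
{
        return vma->gpuva.va.addr;
}

static u64 example_vma_size(struct xe_vma *vma)
{
        return vma->gpuva.va.range;
}
#endif
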
struct xe_device;

struct xe_vm {
        /** @gpuvm: base GPUVM used to track VMAs */
        struct drm_gpuvm gpuvm;

        /** @xe: xe device */
        struct xe_device *xe;

        /** @q: exec queues used for (un)binding VMAs, one per tile */
        struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

        /** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
        struct ttm_lru_bulk_move lru_bulk_move;

        /** @size: size of this VM's address space */
        u64 size;

        /** @pt_root: page table roots, one per tile */
        struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
        /** @scratch_pt: scratch page tables, per tile and per page-table level */
        struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

        /**
         * @flags: flags for this VM, statically set up at creation time aside
         * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
         */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
        unsigned long flags;

        /** @composite_fence_ctx: context for composite fence */
        u64 composite_fence_ctx;
        /** @composite_fence_seqno: seqno for composite fence */
        u32 composite_fence_seqno;

        /**
         * @lock: outermost lock, protects anything attached to this VM
         */
        struct rw_semaphore lock;

        /**
         * @rebind_list: list of VMAs that need rebinding. Protected by the
         * vm->lock in write mode, OR (the vm->lock in read mode and the
         * vm resv).
         */
        struct list_head rebind_list;

        /** @rebind_fence: rebind fence from execbuf */
        struct dma_fence *rebind_fence;

        /**
         * @destroy_work: worker to destroy VM, needed because the last
         * reference can be put from dma-fence signaling in irq context and
         * the destroy path needs to be able to sleep.
         */
        struct work_struct destroy_work;

        /**
         * @rftree: range fence tree to track updates to page table structure.
         * Used to implement conflict tracking between independent bind engines.
         */
        struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

        /** @async_ops: async VM operations (bind / unbinds) */
        struct {
                /** @pending: list of pending async VM ops */
                struct list_head pending;
                /** @work: worker to execute async VM ops */
                struct work_struct work;
                /** @lock: protects list of pending async VM ops and fences */
                spinlock_t lock;
                /** @fence: fence state */
                struct {
                        /** @context: context of async fence */
                        u64 context;
                        /** @seqno: seqno of async fence */
                        u32 seqno;
                } fence;
                /** @error: error state for async VM ops */
                int error;
                /**
                 * @munmap_rebind_inflight: a munmap style VM bind is in the
                 * middle of a set of ops which requires a rebind at the end.
                 */
                bool munmap_rebind_inflight;
        } async_ops;

        /** @pt_ops: page table operations */
        const struct xe_pt_ops *pt_ops;

        /** @userptr: user pointer state */
        struct {
                /**
                 * @userptr.repin_list: list of VMAs which are user pointers,
                 * and need repinning. Protected by @lock.
                 */
                struct list_head repin_list;
                /**
                 * @userptr.notifier_lock: protects notifier in write mode and
                 * submission in read mode.
                 */
                struct rw_semaphore notifier_lock;
                /**
                 * @userptr.invalidated_lock: Protects the
                 * @userptr.invalidated list.
                 */
                spinlock_t invalidated_lock;
                /**
                 * @userptr.invalidated: List of invalidated userptrs, not yet
                 * picked up for revalidation. Protected from access with the
                 * @invalidated_lock. Removing items from the list
                 * additionally requires @lock in write mode, and adding
                 * items to the list requires the @userptr.notifier_lock in
                 * write mode.
                 */
                struct list_head invalidated;
        } userptr;

        /** @preempt: preempt state */
        struct {
                /**
                 * @min_run_period_ms: The minimum run period before preempting
                 * an engine again
                 */
                s64 min_run_period_ms;
                /** @exec_queues: list of exec queues attached to this VM */
                struct list_head exec_queues;
                /** @num_exec_queues: number of exec queues attached to this VM */
                int num_exec_queues;
                /**
                 * @rebind_deactivated: Whether rebind has been temporarily
                 * deactivated due to no work available. Protected by the vm resv.
                 */
                bool rebind_deactivated;
                /**
                 * @rebind_work: worker to rebind invalidated userptrs / evicted
                 * BOs
                 */
                struct work_struct rebind_work;
        } preempt;

        /** @usm: unified shared memory state */
        struct {
                /** @asid: address space ID, unique to each VM */
                u32 asid;
                /**
                 * @last_fault_vma: Last fault VMA, used for fast lookup when we
                 * get a flood of faults to the same VMA
                 */
                struct xe_vma *last_fault_vma;
        } usm;

        /** @error_capture: allows tracking of errors */
        struct {
                /** @capture_once: capture only one error per VM */
                bool capture_once;
        } error_capture;

        /** @batch_invalidate_tlb: Always invalidate TLB before batch start */
        bool batch_invalidate_tlb;
        /** @xef: XE file handle for tracking this VM's drm client */
        struct xe_file *xef;
};

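/*
 * Illustrative sketch (not compiled): how the XE_VM_FLAG_* tile-id helpers
 * above pack and recover a tile id in bits 7:6 of vm->flags for migration
 * VMs. The function names are hypothetical; FIELD_PREP()/FIELD_GET() come
 * from <linux/bitfield.h>.
 */
#if 0
static void example_mark_migration_vm(struct xe_vm *vm, struct xe_tile *tile)
{
        vm->flags |= XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
}

static u8 example_migration_tile_id(struct xe_vm *vm)
{
        /* FIELD_GET() undoes the FIELD_PREP() in XE_VM_FLAG_SET_TILE_ID() */
        return XE_VM_FLAG_TILE_ID(vm->flags);
}
#endif
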
/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
        /** @vma: VMA to map */
        struct xe_vma *vma;
        /** @immediate: Immediate bind */
        bool immediate;
        /** @read_only: Read only */
        bool read_only;
        /** @is_null: is NULL binding */
        bool is_null;
        /** @pat_index: The pat index to use for this operation. */
        u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
        /** @prev: VMA preceding part of a split mapping */
        struct xe_vma *prev;
        /** @next: VMA subsequent part of a split mapping */
        struct xe_vma *next;
        /** @start: start of the VMA unmap */
        u64 start;
        /** @range: range of the VMA unmap */
        u64 range;
        /** @skip_prev: skip prev rebind */
        bool skip_prev;
        /** @skip_next: skip next rebind */
        bool skip_next;
        /** @unmap_done: unmap operation is done */
        bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
        /** @region: memory region to prefetch to */
        u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
        /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
        XE_VMA_OP_FIRST = BIT(0),
        /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
        XE_VMA_OP_LAST = BIT(1),
        /** @XE_VMA_OP_COMMITTED: VMA operation committed */
        XE_VMA_OP_COMMITTED = BIT(2),
        /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
        XE_VMA_OP_PREV_COMMITTED = BIT(3),
        /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
        XE_VMA_OP_NEXT_COMMITTED = BIT(4),
};

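/*
 * Illustrative sketch (not compiled): the values above are bit flags and are
 * OR'd together on a single operation; e.g. an error-unwind path would only
 * roll back operations that were actually committed. The helper below is
 * hypothetical.
 */
#if 0
static bool example_op_needs_unwind(enum xe_vma_op_flags flags)
{
        return flags & XE_VMA_OP_COMMITTED;
}
#endif
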
/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
        /** @base: GPUVA base operation */
        struct drm_gpuva_op base;
        /**
         * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
         * operation is processed
         */
        struct drm_gpuva_ops *ops;
        /** @q: exec queue for this operation */
        struct xe_exec_queue *q;
        /**
         * @syncs: syncs for this operation, only used on first and last
         * operation
         */
        struct xe_sync_entry *syncs;
        /** @num_syncs: number of syncs */
        u32 num_syncs;
        /** @link: async operation link */
        struct list_head link;
        /** @flags: operation flags */
        enum xe_vma_op_flags flags;

#ifdef TEST_VM_ASYNC_OPS_ERROR
        /** @inject_error: inject error to test async op error handling */
        bool inject_error;
#endif

        union {
                /** @map: VMA map operation specific data */
                struct xe_vma_op_map map;
                /** @remap: VMA remap operation specific data */
                struct xe_vma_op_remap remap;
                /** @prefetch: VMA prefetch operation specific data */
                struct xe_vma_op_prefetch prefetch;
        };
};
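
/*
 * Illustrative sketch (not compiled): the trailing anonymous union in
 * struct xe_vma_op is selected by the type of the embedded GPUVA base
 * operation, so consumers are expected to switch on op->base.op before
 * touching @map, @remap or @prefetch. The helper name is hypothetical.
 */
#if 0
static void example_process_op(struct xe_vma_op *op)
{
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                /* op->map is valid here */
                break;
        case DRM_GPUVA_OP_REMAP:
                /* op->remap is valid here */
                break;
        case DRM_GPUVA_OP_UNMAP:
                /* unmap carries no xe-specific payload */
                break;
        case DRM_GPUVA_OP_PREFETCH:
                /* op->prefetch is valid here */
                break;
        default:
                break;
        }
}
#endif
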
#endif /* _XE_VM_TYPES_H_ */