/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
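
/*
 * Illustrative sketch (not part of this header): these driver-private bits
 * occupy the DRM_GPUVA_USERBITS range of the base drm_gpuva flags word, so
 * they are read and written through the embedded struct drm_gpuva, e.g.
 * rejecting a write fault on a read-only VMA in a hypothetical check:
 *
 *	if (vma->gpuva.flags & XE_VMA_READ_ONLY)
 *		return -EACCES;
 *
 *	vma->gpuva.flags |= XE_VMA_FIRST_REBIND;
 */
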
/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/** @repin_link: link into VM repin list if userptr */
	struct list_head repin_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation call back)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/** @divisor: divisor for userptr invalidation fault injection */
	u32 divisor;
#endif
};
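
/*
 * Illustrative sketch, assuming the standard mmu_interval_notifier pattern
 * from <linux/mmu_notifier.h> (pinning code elided): page pinning samples a
 * sequence number, and a later check under vm->userptr.notifier_lock retries
 * if the interval was invalidated in between:
 *
 *	seq = mmu_interval_read_begin(&userptr->notifier);
 *	... pin pages and build userptr->sgt / userptr->sg ...
 *	userptr->notifier_seq = seq;
 *
 *	down_read(&vm->userptr.notifier_lock);
 *	if (mmu_interval_read_retry(&userptr->notifier, userptr->notifier_seq))
 *		... unwind and repin ...
 *	up_read(&vm->userptr.notifier_lock);
 */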

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @usm: unified shared memory state */
	struct {
		/** @tile_invalidated: Tile mask of where the VMA has been invalidated */
		u8 tile_invalidated;
	} usm;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of where bindings are present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Needs either for reading,
	 * but if reading is done under the vm->lock only, it needs to be held
	 * in write mode.
	 */
	u8 tile_present;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;

	/**
	 * @ufence: The user fence that was provided with MAP.
	 * Needs to be signalled before UNMAP can be processed.
	 */
	struct xe_user_fence *ufence;
};
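
/*
 * Illustrative sketch (not part of this header): @tile_mask, @tile_present
 * and @usm.tile_invalidated are bitmasks indexed by tile id, e.g.:
 *
 *	if (vma->tile_present & BIT(tile->id))
 *		... a binding is already present on this tile ...
 */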

/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
	struct xe_vma vma;
	struct xe_userptr userptr;
};
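
/*
 * Illustrative sketch: since @vma is the first member, the subclass can be
 * recovered from a generic struct xe_vma pointer with container_of(); a
 * helper along these lines (name is a stand-in) is the usual pattern:
 *
 *	static inline struct xe_userptr_vma *to_userptr_vma(struct xe_vma *vma)
 *	{
 *		return container_of(vma, struct xe_userptr_vma, vma);
 *	}
 */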

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @xe: xe device */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of the VM's address space */
	u64 size;

	/** @pt_root: page table roots, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/** @scratch_pt: scratch page tables, per tile and per level */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely.
	 * The tile id is encoded in bits 7:6; see the example after this
	 * struct.
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
	unsigned long flags;

	/** @composite_fence_ctx: context for the composite fence */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for the composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects objects of anything attached to this
	 * VM
	 */
	struct rw_semaphore lock;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/** @rebind_fence: rebind fence from execbuf */
	struct dma_fence *rebind_fence;

	/**
	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
	 * from an irq context can be last put and the destroy needs to be able
	 * to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @async_ops: async VM operations (binds / unbinds) */
	struct {
		/** @pending: list of pending async VM ops */
		struct list_head pending;
		/** @work: worker to execute async VM ops */
		struct work_struct work;
		/** @lock: protects list of pending async VM ops and fences */
		spinlock_t lock;
		/** @fence: fence state */
		struct {
			/** @context: context of async fence */
			u64 context;
			/** @seqno: seqno of async fence */
			u32 seqno;
		} fence;
		/** @error: error state for async VM ops */
		int error;
		/**
		 * @munmap_rebind_inflight: a munmap style VM bind is in the
		 * middle of a set of ops which requires a rebind at the end.
		 */
		bool munmap_rebind_inflight;
	} async_ops;

	/** @pt_ops: page table operations for this VM */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily
		 * deactivated due to no work available. Protected by the vm
		 * resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: error capture state */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
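
/*
 * Illustrative sketch for the tile-id encoding in xe_vm::flags above: a
 * migration VM, for example, records the tile it serves in bits 7:6 via
 * FIELD_PREP()/FIELD_GET():
 *
 *	flags = XE_VM_FLAG_MIGRATION | XE_VM_FLAG_SET_TILE_ID(tile);
 *	...
 *	if (vm->flags & XE_VM_FLAG_MIGRATION)
 *		tile_id = XE_VM_FLAG_TILE_ID(vm->flags);
 */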

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};
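
/*
 * Worked example (illustrative): unmapping [0x20000, 0x30000) out of an
 * existing mapping covering [0x10000, 0x40000) produces a remap op where
 * @prev covers [0x10000, 0x20000), @next covers [0x30000, 0x40000),
 * @start == 0x20000 and @range == 0x10000. @skip_prev / @skip_next let the
 * rebind of a kept side be elided when its page table entries are unchanged.
 */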

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
	XE_VMA_OP_FIRST = BIT(0),
	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
	XE_VMA_OP_LAST = BIT(1),
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED = BIT(2),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED = BIT(3),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED = BIT(4),
};
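
/*
 * Illustrative sketch (not part of this header): the *_COMMITTED flags let
 * an error path unwind only what was actually applied, e.g.:
 *
 *	op->flags |= XE_VMA_OP_COMMITTED;
 *	...
 *	if (op->flags & XE_VMA_OP_COMMITTED)
 *		... undo this operation on failure ...
 */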

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/**
	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
	 * operation is processed
	 */
	struct drm_gpuva_ops *ops;
	/** @q: exec queue for this operation */
	struct xe_exec_queue *q;
	/**
	 * @syncs: syncs for this operation, only used on first and last
	 * operation
	 */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
	};
};
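
/*
 * Illustrative sketch (not part of this header): @base.op selects which
 * union member above is valid, following the drm_gpuvm operation types:
 *
 *	switch (op->base.op) {
 *	case DRM_GPUVA_OP_MAP:
 *		... use op->map ...
 *		break;
 *	case DRM_GPUVA_OP_REMAP:
 *		... use op->remap ...
 *		break;
 *	case DRM_GPUVA_OP_UNMAP:
 *		... no op-specific union data ...
 *		break;
 *	case DRM_GPUVA_OP_PREFETCH:
 *		... use op->prefetch ...
 *		break;
 *	}
 */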
#endif