/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_user_fence;
struct xe_vm;

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)
#define XE_VMA_PTE_64K		(DRM_GPUVA_USERBITS << 8)
#define XE_VMA_PTE_COMPACT	(DRM_GPUVA_USERBITS << 9)
#define XE_VMA_DUMPABLE		(DRM_GPUVA_USERBITS << 10)

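/*
 * Illustrative sketch (not part of the upstream header): the XE_VMA_* values
 * above occupy the driver-reserved DRM_GPUVA_USERBITS range of the
 * drm_gpuva.flags field embedded in each VMA, so a per-VMA property can be
 * tested with a plain bitwise AND. The helper name below is hypothetical.
 */
static inline bool xe_vma_example_gpuva_read_only(const struct drm_gpuva *gpuva)
{
        /* Driver bits live alongside the core DRM_GPUVA_* flag bits. */
        return gpuva->flags & XE_VMA_READ_ONLY;
}
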
/** struct xe_userptr - User pointer */
struct xe_userptr {
        /** @invalidate_link: Link for the vm::userptr.invalidated list */
        struct list_head invalidate_link;
        /** @repin_link: link into VM repin list if userptr. */
        struct list_head repin_link;
        /**
         * @notifier: MMU notifier for user pointer (invalidation call back)
         */
        struct mmu_interval_notifier notifier;
        /** @sgt: storage for a scatter gather table */
        struct sg_table sgt;
        /** @sg: allocated scatter gather table */
        struct sg_table *sg;
        /** @notifier_seq: notifier sequence number */
        unsigned long notifier_seq;
        /**
         * @initial_bind: user pointer has been bound at least once.
         * write: vm->userptr.notifier_lock in read mode and vm->resv held.
         * read: vm->userptr.notifier_lock in write mode or vm->resv held.
         */
        bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
        /** @divisor: invalidation error-injection divisor (test config only) */
        u32 divisor;
#endif
};

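/*
 * Illustrative sketch (not part of the upstream header): @notifier and
 * @notifier_seq follow the standard mmu_interval_notifier pattern, where a
 * stored sequence number is compared against the notifier to decide whether
 * the pinned pages are still valid. The helper name is hypothetical.
 */
static inline bool xe_userptr_example_needs_repin(struct xe_userptr *userptr)
{
        /* Returns true if an invalidation raced the last pin/bind. */
        return mmu_interval_read_retry(&userptr->notifier,
                                       userptr->notifier_seq);
}
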
struct xe_vma {
        /** @gpuva: Base GPUVA object */
        struct drm_gpuva gpuva;

        /**
         * @combined_links: links into lists which are mutually exclusive.
         * Locking: vm lock in write mode OR vm lock in read mode and the vm's
         * resv.
         */
        union {
                /** @rebind: link into VM if this VMA needs rebinding. */
                struct list_head rebind;
                /** @destroy: link to contested list when VM is being closed. */
                struct list_head destroy;
        } combined_links;

        union {
                /** @destroy_cb: callback to destroy VMA when unbind job is done */
                struct dma_fence_cb destroy_cb;
                /** @destroy_work: worker to destroy this VMA */
                struct work_struct destroy_work;
        };

        /** @tile_invalidated: VMA has been invalidated */
        u8 tile_invalidated;

        /** @tile_mask: Tile mask of where to create binding for this VMA */
        u8 tile_mask;

        /**
         * @tile_present: Tile mask of bindings present for this VMA.
         * Protected by vm->lock, vm->resv and, for userptrs,
         * vm->userptr.notifier_lock for writing. Needs either for reading,
         * but if reading is done under the vm->lock only, it needs to be held
         * in write mode.
         */
        u8 tile_present;

        /**
         * @pat_index: The pat index to use when encoding the PTEs for this vma.
         */
        u16 pat_index;

        /**
         * @ufence: The user fence that was provided with MAP.
         * Needs to be signalled before UNMAP can be processed.
         */
        struct xe_user_fence *ufence;
};

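/*
 * Illustrative sketch (not part of the upstream header): since @gpuva is
 * embedded as the base object, a struct xe_vma can be recovered from the
 * drm_gpuva pointers handed out by the GPUVM core, and the mapping's address
 * and range come from the base object's va field. Helper names are
 * hypothetical; the real driver keeps its accessors elsewhere.
 */
static inline struct xe_vma *xe_vma_example_from_gpuva(struct drm_gpuva *gpuva)
{
        return container_of(gpuva, struct xe_vma, gpuva);
}

static inline u64 xe_vma_example_end(const struct xe_vma *vma)
{
        /* End of the mapping: GPU VA start plus range. */
        return vma->gpuva.va.addr + vma->gpuva.va.range;
}
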
/**
 * struct xe_userptr_vma - A userptr vma subclass
 * @vma: The vma.
 * @userptr: Additional userptr information.
 */
struct xe_userptr_vma {
        struct xe_vma vma;
        struct xe_userptr userptr;
};

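/*
 * Illustrative sketch (not part of the upstream header): because @vma is the
 * first member, the userptr subclass can be recovered from a generic VMA
 * pointer with container_of(). Whether a given VMA really is a userptr VMA
 * has to be checked separately; the helper name is hypothetical.
 */
static inline struct xe_userptr_vma *
xe_userptr_vma_example(struct xe_vma *vma)
{
        return container_of(vma, struct xe_userptr_vma, vma);
}
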
struct xe_device;

struct xe_vm {
        /** @gpuvm: base GPUVM used to track VMAs */
        struct drm_gpuvm gpuvm;

        /** @xe: xe device */
        struct xe_device *xe;

        /** @q: exec queues used for (un)binding VMAs, one per tile */
        struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

        /** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
        struct ttm_lru_bulk_move lru_bulk_move;

        /** @size: size of this VM's address space */
        u64 size;

        /** @pt_root: page table roots, one per tile */
        struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
        /** @scratch_pt: scratch page tables, per tile and per level */
        struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

        /**
         * @flags: flags for this VM, statically set up at creation time, aside
         * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
         */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_MIGRATION		BIT(2)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(3)
#define XE_VM_FLAG_FAULT_MODE		BIT(4)
#define XE_VM_FLAG_BANNED		BIT(5)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(7, 6), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(7, 6), (tile)->id)
        unsigned long flags;

        /** @composite_fence_ctx: context composite fence */
        u64 composite_fence_ctx;
        /** @composite_fence_seqno: seqno for composite fence */
        u32 composite_fence_seqno;

        /**
         * @lock: outermost lock, protects anything attached to this VM
         */
        struct rw_semaphore lock;
        /**
         * @snap_mutex: Mutex used to guard insertions and removals from gpuva,
         * so we can take a snapshot safely from devcoredump.
         */
        struct mutex snap_mutex;

        /**
         * @rebind_list: list of VMAs that need rebinding. Protected by the
         * vm->lock in write mode, OR (the vm->lock in read mode and the
         * vm resv).
         */
        struct list_head rebind_list;

        /**
         * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
         * from an irq context can be last put and the destroy needs to be able
         * to sleep.
         */
        struct work_struct destroy_work;

        /**
         * @rftree: range fence tree to track updates to page table structure.
         * Used to implement conflict tracking between independent bind engines.
         */
        struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

        /** @pt_ops: page table operations */
        const struct xe_pt_ops *pt_ops;

        /** @userptr: user pointer state */
        struct {
                /**
                 * @userptr.repin_list: list of VMAs which are user pointers,
                 * and need repinning. Protected by @lock.
                 */
                struct list_head repin_list;
                /**
                 * @notifier_lock: protects notifier in write mode and
                 * submission in read mode.
                 */
                struct rw_semaphore notifier_lock;
                /**
                 * @userptr.invalidated_lock: Protects the
                 * @userptr.invalidated list.
                 */
                spinlock_t invalidated_lock;
                /**
                 * @userptr.invalidated: List of invalidated userptrs, not yet
                 * picked up for revalidation. Protected from access with the
                 * @invalidated_lock. Removing items from the list
                 * additionally requires @lock in write mode, and adding
                 * items to the list requires the @userptr.notifier_lock in
                 * write mode.
                 */
                struct list_head invalidated;
        } userptr;

        /** @preempt: preempt state */
        struct {
                /**
                 * @min_run_period_ms: The minimum run period before preempting
                 * an engine again
                 */
                s64 min_run_period_ms;
                /** @exec_queues: list of exec queues attached to this VM */
                struct list_head exec_queues;
                /** @num_exec_queues: number of exec queues attached to this VM */
                int num_exec_queues;
                /**
                 * @rebind_deactivated: Whether rebind has been temporarily deactivated
                 * due to no work available. Protected by the vm resv.
                 */
                bool rebind_deactivated;
                /**
                 * @rebind_work: worker to rebind invalidated userptrs / evicted
                 * BOs
                 */
                struct work_struct rebind_work;
        } preempt;

        /** @usm: unified memory state */
        struct {
                /** @asid: address space ID, unique to each VM */
                u32 asid;
                /**
                 * @last_fault_vma: Last fault VMA, used for fast lookup when we
                 * get a flood of faults to the same VMA
                 */
                struct xe_vma *last_fault_vma;
        } usm;

        /** @error_capture: allows tracking of errors */
        struct {
                /** @capture_once: capture only one error per VM */
                bool capture_once;
        } error_capture;

        /**
         * @tlb_flush_seqno: Required TLB flush seqno for the next exec.
         * Protected by the vm resv.
         */
        u64 tlb_flush_seqno;
        /** @batch_invalidate_tlb: Always invalidate TLB before batch start */
        bool batch_invalidate_tlb;
        /** @xef: XE file handle for tracking this VM's drm client */
        struct xe_file *xef;
};

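/*
 * Illustrative sketch (not part of the upstream header): the BIT()-based
 * XE_VM_FLAG_* values are simple mode bits in @flags, while the owning tile
 * id of a migration VM is packed into bits 7:6 and decoded with
 * XE_VM_FLAG_TILE_ID(), which expands to FIELD_GET() and therefore assumes
 * <linux/bitfield.h> is reachable at the point of use. Helper names are
 * hypothetical.
 */
static inline bool xe_vm_example_in_fault_mode(const struct xe_vm *vm)
{
        /* Mode flags are plain bits... */
        return vm->flags & XE_VM_FLAG_FAULT_MODE;
}

static inline u8 xe_vm_example_tile_id(const struct xe_vm *vm)
{
        /* ...while the tile id is a 2-bit field decoded with FIELD_GET(). */
        return XE_VM_FLAG_TILE_ID(vm->flags);
}
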
/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
        /** @vma: VMA to map */
        struct xe_vma *vma;
        /** @is_null: is NULL binding */
        bool is_null;
        /** @dumpable: whether BO is dumped on GPU hang */
        bool dumpable;
        /** @pat_index: The pat index to use for this operation. */
        u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
        /** @prev: VMA preceding part of a split mapping */
        struct xe_vma *prev;
        /** @next: VMA subsequent part of a split mapping */
        struct xe_vma *next;
        /** @start: start of the VMA unmap */
        u64 start;
        /** @range: range of the VMA unmap */
        u64 range;
        /** @skip_prev: skip prev rebind */
        bool skip_prev;
        /** @skip_next: skip next rebind */
        bool skip_next;
        /** @unmap_done: unmap operation is done */
        bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
        /** @region: memory region to prefetch to */
        u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
        /** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
        XE_VMA_OP_FIRST = BIT(0),
        /** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
        XE_VMA_OP_LAST = BIT(1),
        /** @XE_VMA_OP_COMMITTED: VMA operation committed */
        XE_VMA_OP_COMMITTED = BIT(2),
        /** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
        XE_VMA_OP_PREV_COMMITTED = BIT(3),
        /** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
        XE_VMA_OP_NEXT_COMMITTED = BIT(4),
};

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
        /** @base: GPUVA base operation */
        struct drm_gpuva_op base;
        /**
         * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
         * operation is processed
         */
        struct drm_gpuva_ops *ops;
        /** @q: exec queue for this operation */
        struct xe_exec_queue *q;
        /**
         * @syncs: syncs for this operation, only used on first and last
         * operation
         */
        struct xe_sync_entry *syncs;
        /** @num_syncs: number of syncs */
        u32 num_syncs;
        /** @link: async operation link */
        struct list_head link;
        /** @flags: operation flags */
        enum xe_vma_op_flags flags;

        union {
                /** @map: VMA map operation specific data */
                struct xe_vma_op_map map;
                /** @remap: VMA remap operation specific data */
                struct xe_vma_op_remap remap;
                /** @prefetch: VMA prefetch operation specific data */
                struct xe_vma_op_prefetch prefetch;
        };
};

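/*
 * Illustrative sketch (not part of the upstream header): an xe_vma_op wraps a
 * drm_gpuva_op, so consumers can switch on the base operation type and then
 * pick the matching member of the anonymous union above. The helper name is
 * hypothetical and only covers the cases that carry a VMA directly.
 */
static inline struct xe_vma *xe_vma_op_example_vma(struct xe_vma_op *op)
{
        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
                return op->map.vma;
        case DRM_GPUVA_OP_REMAP:
                /* A remap may create a new prev and/or next VMA. */
                return op->remap.prev ? op->remap.prev : op->remap.next;
        default:
                return NULL;
        }
}
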
#endif