/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <drm/drm_gpuvm.h>

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"
#include "xe_range_fence.h"

struct xe_bo;
struct xe_sync_entry;
struct xe_vm;

/* Error injection for testing async VM op error handling */
#define TEST_VM_ASYNC_OPS_ERROR
#define FORCE_ASYNC_OP_ERROR	BIT(31)

#define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
#define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
#define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
#define XE_VMA_FIRST_REBIND	(DRM_GPUVA_USERBITS << 3)
#define XE_VMA_LAST_REBIND	(DRM_GPUVA_USERBITS << 4)
#define XE_VMA_PTE_4K		(DRM_GPUVA_USERBITS << 5)
#define XE_VMA_PTE_2M		(DRM_GPUVA_USERBITS << 6)
#define XE_VMA_PTE_1G		(DRM_GPUVA_USERBITS << 7)

/** struct xe_userptr - User pointer */
struct xe_userptr {
	/** @invalidate_link: Link for the vm::userptr.invalidated list */
	struct list_head invalidate_link;
	/**
	 * @notifier: MMU notifier for user pointer (invalidation callback)
	 */
	struct mmu_interval_notifier notifier;
	/** @sgt: storage for a scatter gather table */
	struct sg_table sgt;
	/** @sg: allocated scatter gather table */
	struct sg_table *sg;
	/** @notifier_seq: notifier sequence number */
	unsigned long notifier_seq;
	/**
	 * @initial_bind: user pointer has been bound at least once.
	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
	 */
	bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
	/** @divisor: error-injection divisor for testing userptr invalidation */
	u32 divisor;
#endif
};
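
/*
 * Illustrative sketch, not part of the original header: the @notifier /
 * @notifier_seq pair above is the standard mmu_interval_notifier pattern,
 * so checking whether a userptr's pages were invalidated since they were
 * last pinned is a single retry check. The helper name is hypothetical,
 * and this assumes CONFIG_MMU_NOTIFIER is enabled.
 */
static inline bool xe_userptr_example_needs_repin(struct xe_userptr *userptr)
{
	return mmu_interval_check_retry(&userptr->notifier,
					userptr->notifier_seq);
}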

struct xe_vma {
	/** @gpuva: Base GPUVA object */
	struct drm_gpuva gpuva;

	/**
	 * @combined_links: links into lists which are mutually exclusive.
	 * Locking: vm lock in write mode OR vm lock in read mode and the vm's
	 * resv.
	 */
	union {
		/** @userptr: link into VM repin list if userptr. */
		struct list_head userptr;
		/** @rebind: link into VM if this VMA needs rebinding. */
		struct list_head rebind;
		/** @destroy: link to contested list when VM is being closed. */
		struct list_head destroy;
	} combined_links;

	union {
		/** @destroy_cb: callback to destroy VMA when unbind job is done */
		struct dma_fence_cb destroy_cb;
		/** @destroy_work: worker to destroy this VMA */
		struct work_struct destroy_work;
	};

	/** @usm: unified shared memory state */
	struct {
		/** @tile_invalidated: VMA has been invalidated */
		u8 tile_invalidated;
	} usm;

	/** @tile_mask: Tile mask of where to create binding for this VMA */
	u8 tile_mask;

	/**
	 * @tile_present: Tile mask of bindings present for this VMA.
	 * Protected by vm->lock, vm->resv and for userptrs,
	 * vm->userptr.notifier_lock for writing. Needs either for reading,
	 * but if reading is done under the vm->lock only, it needs to be held
	 * in write mode.
	 */
	u8 tile_present;

	/**
	 * @pat_index: The pat index to use when encoding the PTEs for this vma.
	 */
	u16 pat_index;

	/**
	 * @userptr: user pointer state, only allocated for VMAs that are
	 * user pointers
	 */
	struct xe_userptr userptr;
};
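
/*
 * Illustrative sketches, not part of the original header. The XE_VMA_*
 * values defined above live in the driver-private user bits of
 * @gpuva.flags, so testing one is a plain mask, and recovering the driver
 * VMA from a base &struct drm_gpuva is a container_of(). Both helper
 * names are hypothetical.
 */
static inline struct xe_vma *example_gpuva_to_vma(struct drm_gpuva *gpuva)
{
	return container_of(gpuva, struct xe_vma, gpuva);
}

static inline bool example_vma_read_only(struct xe_vma *vma)
{
	return vma->gpuva.flags & XE_VMA_READ_ONLY;
}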

struct xe_device;

struct xe_vm {
	/** @gpuvm: base GPUVM used to track VMAs */
	struct drm_gpuvm gpuvm;

	/** @xe: xe device */
	struct xe_device *xe;

	/** @q: exec queues used for (un)binding VMAs, one per tile */
	struct xe_exec_queue *q[XE_MAX_TILES_PER_DEVICE];

	/** @lru_bulk_move: Bulk LRU move list for this VM's BOs */
	struct ttm_lru_bulk_move lru_bulk_move;

	/** @size: size of this VM's address space */
	u64 size;

	/** @pt_root: page table roots, one per tile */
	struct xe_pt *pt_root[XE_MAX_TILES_PER_DEVICE];
	/** @scratch_pt: scratch page tables, per tile and per level */
	struct xe_pt *scratch_pt[XE_MAX_TILES_PER_DEVICE][XE_VM_MAX_LEVEL];

	/**
	 * @flags: flags for this VM, statically set up at creation time aside
	 * from XE_VM_FLAG_BANNED which requires vm->lock to set / read safely
	 */
#define XE_VM_FLAG_64K			BIT(0)
#define XE_VM_FLAG_LR_MODE		BIT(1)
#define XE_VM_FLAG_ASYNC_DEFAULT	BIT(2)
#define XE_VM_FLAG_MIGRATION		BIT(3)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(4)
#define XE_VM_FLAG_FAULT_MODE		BIT(5)
#define XE_VM_FLAG_BANNED		BIT(6)
#define XE_VM_FLAG_TILE_ID(flags)	FIELD_GET(GENMASK(8, 7), flags)
#define XE_VM_FLAG_SET_TILE_ID(tile)	FIELD_PREP(GENMASK(8, 7), (tile)->id)
	unsigned long flags;

	/** @composite_fence_ctx: context for composite fences */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects objects of anything attached to this
	 * VM
	 */
	struct rw_semaphore lock;

	/**
	 * @rebind_list: list of VMAs that need rebinding. Protected by the
	 * vm->lock in write mode, OR (the vm->lock in read mode and the
	 * vm resv).
	 */
	struct list_head rebind_list;

	/** @rebind_fence: rebind fence from execbuf */
	struct dma_fence *rebind_fence;

	/**
	 * @destroy_work: worker to destroy VM, needed as a dma_fence signaling
	 * from an irq context can be last put and the destroy needs to be able
	 * to sleep.
	 */
	struct work_struct destroy_work;

	/**
	 * @rftree: range fence tree to track updates to page table structure.
	 * Used to implement conflict tracking between independent bind engines.
	 */
	struct xe_range_fence_tree rftree[XE_MAX_TILES_PER_DEVICE];

	/** @async_ops: async VM operations (binds / unbinds) */
	struct {
		/** @pending: list of pending async VM ops */
		struct list_head pending;
		/** @work: worker to execute async VM ops */
		struct work_struct work;
		/** @lock: protects list of pending async VM ops and fences */
		spinlock_t lock;
		/** @fence: fence state */
		struct {
			/** @context: context of async fence */
			u64 context;
			/** @seqno: seqno of async fence */
			u32 seqno;
		} fence;
		/** @error: error state for async VM ops */
		int error;
		/**
		 * @munmap_rebind_inflight: an munmap style VM bind is in the
		 * middle of a set of ops which requires a rebind at the end.
		 */
		bool munmap_rebind_inflight;
	} async_ops;

	/** @pt_ops: page table operations */
	const struct xe_pt_ops *pt_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected from access with the
		 * @invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @exec_queues: list of exec queues attached to this VM */
		struct list_head exec_queues;
		/** @num_exec_queues: number of exec queues attached to this VM */
		int num_exec_queues;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/** @error_capture: error capture state */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;

	/** @batch_invalidate_tlb: Always invalidate TLB before batch start */
	bool batch_invalidate_tlb;
	/** @xef: XE file handle for tracking this VM's drm client */
	struct xe_file *xef;
};
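
/*
 * Illustrative sketch, not part of the original header: for migration VMs
 * the tile id is packed into bits 8:7 of @flags, so it is recovered with
 * the XE_VM_FLAG_TILE_ID() FIELD_GET() wrapper above. The helper name is
 * hypothetical.
 */
static inline u8 example_vm_tile_id(struct xe_vm *vm)
{
	return XE_VM_FLAG_TILE_ID(vm->flags);
}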

/** struct xe_vma_op_map - VMA map operation */
struct xe_vma_op_map {
	/** @vma: VMA to map */
	struct xe_vma *vma;
	/** @immediate: Immediate bind */
	bool immediate;
	/** @read_only: Read only */
	bool read_only;
	/** @is_null: is NULL binding */
	bool is_null;
	/** @pat_index: The pat index to use for this operation. */
	u16 pat_index;
};

/** struct xe_vma_op_remap - VMA remap operation */
struct xe_vma_op_remap {
	/** @prev: VMA preceding part of a split mapping */
	struct xe_vma *prev;
	/** @next: VMA subsequent part of a split mapping */
	struct xe_vma *next;
	/** @start: start of the VMA unmap */
	u64 start;
	/** @range: range of the VMA unmap */
	u64 range;
	/** @skip_prev: skip prev rebind */
	bool skip_prev;
	/** @skip_next: skip next rebind */
	bool skip_next;
	/** @unmap_done: unmap operation is done */
	bool unmap_done;
};

/** struct xe_vma_op_prefetch - VMA prefetch operation */
struct xe_vma_op_prefetch {
	/** @region: memory region to prefetch to */
	u32 region;
};

/** enum xe_vma_op_flags - flags for VMA operation */
enum xe_vma_op_flags {
	/** @XE_VMA_OP_FIRST: first VMA operation for a set of syncs */
	XE_VMA_OP_FIRST = BIT(0),
	/** @XE_VMA_OP_LAST: last VMA operation for a set of syncs */
	XE_VMA_OP_LAST = BIT(1),
	/** @XE_VMA_OP_COMMITTED: VMA operation committed */
	XE_VMA_OP_COMMITTED = BIT(2),
	/** @XE_VMA_OP_PREV_COMMITTED: Previous VMA operation committed */
	XE_VMA_OP_PREV_COMMITTED = BIT(3),
	/** @XE_VMA_OP_NEXT_COMMITTED: Next VMA operation committed */
	XE_VMA_OP_NEXT_COMMITTED = BIT(4),
};
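
/*
 * Illustrative sketch, not part of the original header: the op flags are
 * independent bits, so membership tests combine with plain masks, e.g.
 * "this op and both halves of its split are committed". The helper name
 * is hypothetical.
 */
static inline bool example_op_fully_committed(enum xe_vma_op_flags flags)
{
	const enum xe_vma_op_flags all = XE_VMA_OP_COMMITTED |
		XE_VMA_OP_PREV_COMMITTED | XE_VMA_OP_NEXT_COMMITTED;

	return (flags & all) == all;
}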

/** struct xe_vma_op - VMA operation */
struct xe_vma_op {
	/** @base: GPUVA base operation */
	struct drm_gpuva_op base;
	/**
	 * @ops: GPUVA ops, when set call drm_gpuva_ops_free after this
	 * operation is processed
	 */
	struct drm_gpuva_ops *ops;
	/** @q: exec queue for this operation */
	struct xe_exec_queue *q;
	/**
	 * @syncs: syncs for this operation, only used on first and last
	 * operation
	 */
	struct xe_sync_entry *syncs;
	/** @num_syncs: number of syncs */
	u32 num_syncs;
	/** @link: async operation link */
	struct list_head link;
	/** @flags: operation flags */
	enum xe_vma_op_flags flags;

#ifdef TEST_VM_ASYNC_OPS_ERROR
	/** @inject_error: inject error to test async op error handling */
	bool inject_error;
#endif

	union {
		/** @map: VMA map operation specific data */
		struct xe_vma_op_map map;
		/** @remap: VMA remap operation specific data */
		struct xe_vma_op_remap remap;
		/** @prefetch: VMA prefetch operation specific data */
		struct xe_vma_op_prefetch prefetch;
	};
};
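
/*
 * Illustrative sketch, not part of the original header: @base.op selects
 * which member of the trailing union is valid, so consumers dispatch on
 * it with a switch, as DRM GPUVA op walkers do. The helper name is
 * hypothetical and only a few op types are handled here.
 */
static inline struct xe_vma *example_op_get_vma(struct xe_vma_op *op)
{
	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		return op->map.vma;
	case DRM_GPUVA_OP_REMAP:
		return op->remap.prev ? op->remap.prev : op->remap.next;
	default:
		return NULL;
	}
}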
#endif