/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef _XE_VM_TYPES_H_
#define _XE_VM_TYPES_H_

#include <linux/dma-resv.h>
#include <linux/kref.h>
#include <linux/mmu_notifier.h>
#include <linux/scatterlist.h>

#include "xe_device_types.h"
#include "xe_pt_types.h"

struct xe_bo;
struct xe_vm;

struct xe_vma {
	/** @vm_node: RB tree node linking this VMA into the VM's VMA tree */
	struct rb_node vm_node;
	/** @vm: VM which this VMA belongs to */
	struct xe_vm *vm;

	/**
	 * @start: start address of this VMA within its address domain, end -
	 * start + 1 == VMA size
	 */
	u64 start;
	/** @end: end address of this VMA within its address domain */
	u64 end;
	/** @pte_flags: pte flags for this VMA */
	u32 pte_flags;

	/** @bo: BO if not a userptr, must be NULL if userptr */
	struct xe_bo *bo;
	/** @bo_offset: offset into BO if not a userptr, unused for userptr */
	u64 bo_offset;

	/** @gt_mask: GT mask of where to create binding for this VMA */
	u64 gt_mask;

	/**
	 * @gt_present: GT mask of where bindings are present for this VMA.
	 * Protected by vm->lock, vm->resv and, for userptrs,
	 * vm->userptr.notifier_lock for writing. Any one of these is
	 * sufficient for reading, but if reading is done under the vm->lock
	 * only, it must be held in write mode.
	 */
	u64 gt_present;

	/**
	 * @destroyed: VMA is destroyed, in the sense that it shouldn't be
	 * subject to rebind anymore. This field must be written under
	 * the vm lock in write mode and the userptr.notifier_lock in
	 * either mode. Read under the vm lock or the userptr.notifier_lock in
	 * write mode.
	 */
	bool destroyed;
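
	/*
	 * Illustrative sketch (not driver code) of the write-side rule
	 * documented above for @destroyed, assuming the caller already
	 * holds a reference to the VMA:
	 *
	 *	down_write(&vma->vm->lock);
	 *	down_read(&vma->vm->userptr.notifier_lock);
	 *	vma->destroyed = true;
	 *	up_read(&vma->vm->userptr.notifier_lock);
	 *	up_write(&vma->vm->lock);
	 */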

	/**
	 * @first_munmap_rebind: VMA is first in a sequence of ops that triggers
	 * a rebind (munmap style VM unbinds). This indicates the operation
	 * using this VMA must wait on all dma-resv slots (wait for pending jobs
	 * / trigger preempt fences).
	 */
	bool first_munmap_rebind;

	/**
	 * @last_munmap_rebind: VMA is last in a sequence of ops that triggers
	 * a rebind (munmap style VM unbinds). This indicates the operation
	 * using this VMA must install itself into the kernel dma-resv slot
	 * (blocks future jobs) and kick the rebind work in compute mode.
	 */
	bool last_munmap_rebind;

	/** @use_atomic_access_pte_bit: Set atomic access bit in PTE */
	bool use_atomic_access_pte_bit;

	union {
		/** @bo_link: link into BO if not a userptr */
		struct list_head bo_link;
		/** @userptr_link: link into VM repin list if userptr */
		struct list_head userptr_link;
	};

	/**
	 * @rebind_link: link into VM if this VMA needs rebinding, and
	 * if it's a bo (not userptr) needs validation after a possible
	 * eviction. Protected by the vm's resv lock.
	 */
	struct list_head rebind_link;

	/**
	 * @unbind_link: link or list head if an unbind of multiple VMAs, in
	 * a single unbind op, is being done.
	 */
	struct list_head unbind_link;

	/** @destroy_cb: callback to destroy VMA when unbind job is done */
	struct dma_fence_cb destroy_cb;

	/** @destroy_work: worker to destroy this VMA */
	struct work_struct destroy_work;

	/** @userptr: user pointer state */
	struct {
		/** @ptr: user pointer */
		uintptr_t ptr;
		/** @invalidate_link: Link for the vm::userptr.invalidated list */
		struct list_head invalidate_link;
		/**
		 * @notifier: MMU notifier for user pointer (invalidation callback)
		 */
		struct mmu_interval_notifier notifier;
		/** @sgt: storage for a scatter gather table */
		struct sg_table sgt;
		/** @sg: allocated scatter gather table */
		struct sg_table *sg;
		/** @notifier_seq: notifier sequence number */
		unsigned long notifier_seq;
		/**
		 * @initial_bind: user pointer has been bound at least once.
		 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
		 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
		 */
		bool initial_bind;
#if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
		/** @divisor: userptr invalidation injection divisor (debug only) */
		u32 divisor;
#endif
	} userptr;
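
	/*
	 * The @userptr.notifier / @userptr.notifier_seq pair is intended
	 * for the standard mmu_interval_notifier retry pattern; a minimal
	 * sketch (not the driver's actual pin path, which must also take
	 * the notifier lock for the retry check):
	 *
	 *	unsigned long seq;
	 *
	 *	do {
	 *		seq = mmu_interval_read_begin(&vma->userptr.notifier);
	 *		// ... pin pages and build vma->userptr.sgt here ...
	 *	} while (mmu_interval_read_retry(&vma->userptr.notifier, seq));
	 *	vma->userptr.notifier_seq = seq;
	 */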

	/** @usm: unified shared memory state */
	struct {
		/** @gt_invalidated: GT mask of where this VMA has been invalidated */
		u64 gt_invalidated;
	} usm;

	/** @notifier: VMA state used within MMU notifiers */
	struct {
		/** @notifier.rebind_link: link into the VM's notifier rebind_list */
		struct list_head rebind_link;
	} notifier;

	struct {
		/**
		 * @extobj.link: Link into vm's external object list.
		 * Protected by the vm lock.
		 */
		struct list_head link;
	} extobj;
};

struct xe_device;

#define xe_vm_assert_held(vm) dma_resv_assert_held(&(vm)->resv)

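/*
 * Example (illustrative only, not driver code): functions that touch state
 * protected by the VM's reservation object, such as the rebind list or the
 * page tables, can document that requirement with the assert above:
 *
 *	static void example_walk_rebind_list(struct xe_vm *vm)
 *	{
 *		xe_vm_assert_held(vm);
 *		// safe to walk vm->rebind_list here
 *	}
 */
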
struct xe_vm {
	/** @xe: xe device this VM belongs to */
	struct xe_device *xe;

	/** @refcount: reference count on this VM */
	struct kref refcount;

	/** @eng: engines used for (un)binding VMAs, one per GT */
	struct xe_engine *eng[XE_MAX_GT];

	/** @resv: Protects @rebind_list and the page-table structures */
	struct dma_resv resv;

	/** @size: size of this VM's address space */
	u64 size;
	/** @vmas: RB tree of VMAs in this VM */
	struct rb_root vmas;

	/** @pt_root: per-GT root page table */
	struct xe_pt *pt_root[XE_MAX_GT];
	/** @scratch_bo: per-GT scratch page BO */
	struct xe_bo *scratch_bo[XE_MAX_GT];
	/** @scratch_pt: per-GT, per-level scratch page tables */
	struct xe_pt *scratch_pt[XE_MAX_GT][XE_VM_MAX_LEVEL];

	/** @flags: flags for this VM, statically set up at creation time */
#define XE_VM_FLAGS_64K			BIT(0)
#define XE_VM_FLAG_COMPUTE_MODE		BIT(1)
#define XE_VM_FLAG_ASYNC_BIND_OPS	BIT(2)
#define XE_VM_FLAG_MIGRATION		BIT(3)
#define XE_VM_FLAG_SCRATCH_PAGE		BIT(4)
#define XE_VM_FLAG_FAULT_MODE		BIT(5)
#define XE_VM_FLAG_GT_ID(flags)		(((flags) >> 6) & 0x3)
#define XE_VM_FLAG_SET_GT_ID(gt)	((gt)->info.id << 6)
	unsigned long flags;

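	/*
	 * Illustrative use of the helpers above (hypothetical values, not
	 * driver code): a migration VM created for a given gt packs the GT
	 * id into bits 7:6 of @flags and recovers it later:
	 *
	 *	unsigned long flags = XE_VM_FLAG_MIGRATION |
	 *			      XE_VM_FLAG_SET_GT_ID(gt);
	 *	u8 gt_id = XE_VM_FLAG_GT_ID(flags);	// == gt->info.id
	 */
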
	/** @composite_fence_ctx: context for the composite fence */
	u64 composite_fence_ctx;
	/** @composite_fence_seqno: seqno for the composite fence */
	u32 composite_fence_seqno;

	/**
	 * @lock: outermost lock, protects the objects attached to this
	 * VM
	 */
	struct rw_semaphore lock;

	/**
	 * @rebind_list: list of VMAs that need rebinding, and if they are
	 * bos (not userptr), need validation after a possible eviction. The
	 * list is protected by @resv.
	 */
	struct list_head rebind_list;

	/** @rebind_fence: rebind fence from execbuf */
	struct dma_fence *rebind_fence;

	/**
	 * @destroy_work: worker to destroy the VM, needed because the final
	 * put can come from dma_fence signaling in an irq context, while the
	 * destroy needs to be able to sleep.
	 */
	struct work_struct destroy_work;

	/** @extobj: bookkeeping for external objects. Protected by the vm lock */
	struct {
		/** @entries: number of external BOs attached to this VM */
		u32 entries;
		/** @list: list of vmas with external bos attached */
		struct list_head list;
	} extobj;

	/** @async_ops: async VM operations (binds / unbinds) */
	struct {
		/** @pending: list of pending async VM ops */
		struct list_head pending;
		/** @work: worker to execute async VM ops */
		struct work_struct work;
		/** @lock: protects list of pending async VM ops and fences */
		spinlock_t lock;
		/** @error_capture: error capture state */
		struct {
			/** @mm: user MM */
			struct mm_struct *mm;
			/**
			 * @addr: user pointer to copy error capture state to
			 */
			u64 addr;
			/** @wq: user fence wait queue for VM errors */
			wait_queue_head_t wq;
		} error_capture;
		/** @fence: fence state */
		struct {
			/** @context: context of async fence */
			u64 context;
			/** @seqno: seqno of async fence */
			u32 seqno;
		} fence;
		/** @error: error state for async VM ops */
		int error;
		/**
		 * @munmap_rebind_inflight: a munmap style VM bind is in the
		 * middle of a set of ops which requires a rebind at the end.
		 */
		bool munmap_rebind_inflight;
	} async_ops;

	/** @userptr: user pointer state */
	struct {
		/**
		 * @userptr.repin_list: list of VMAs which are user pointers,
		 * and need repinning. Protected by @lock.
		 */
		struct list_head repin_list;
		/**
		 * @notifier_lock: protects notifier in write mode and
		 * submission in read mode.
		 */
		struct rw_semaphore notifier_lock;
		/**
		 * @userptr.invalidated_lock: Protects the
		 * @userptr.invalidated list.
		 */
		spinlock_t invalidated_lock;
		/**
		 * @userptr.invalidated: List of invalidated userptrs, not yet
		 * picked up for revalidation. Protected by the
		 * @invalidated_lock. Removing items from the list
		 * additionally requires @lock in write mode, and adding
		 * items to the list requires the @userptr.notifier_lock in
		 * write mode.
		 */
		struct list_head invalidated;
	} userptr;
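
	/*
	 * Sketch of the @userptr.invalidated rules documented above (not
	 * driver code): the notifier adds entries with @userptr.notifier_lock
	 * held in write mode, the repin path removes them with @lock held in
	 * write mode, and both take @userptr.invalidated_lock around the list
	 * manipulation itself:
	 *
	 *	spin_lock(&vm->userptr.invalidated_lock);
	 *	list_move_tail(&vma->userptr.invalidate_link,
	 *		       &vm->userptr.invalidated);
	 *	spin_unlock(&vm->userptr.invalidated_lock);
	 */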

	/** @preempt: preempt state */
	struct {
		/**
		 * @min_run_period_ms: The minimum run period before preempting
		 * an engine again
		 */
		s64 min_run_period_ms;
		/** @engines: list of engines attached to this VM */
		struct list_head engines;
		/** @num_engines: number of user engines attached to this VM */
		int num_engines;
		/**
		 * @rebind_deactivated: Whether rebind has been temporarily deactivated
		 * due to no work available. Protected by the vm resv.
		 */
		bool rebind_deactivated;
		/**
		 * @rebind_work: worker to rebind invalidated userptrs / evicted
		 * BOs
		 */
		struct work_struct rebind_work;
	} preempt;

	/** @usm: unified shared memory state */
	struct {
		/** @asid: address space ID, unique to each VM */
		u32 asid;
		/**
		 * @last_fault_vma: Last fault VMA, used for fast lookup when we
		 * get a flood of faults to the same VMA
		 */
		struct xe_vma *last_fault_vma;
	} usm;

	/**
	 * @notifier: Lists and locks for temporary usage within notifiers where
	 * we either can't grab the vm lock or the vm resv.
	 */
	struct {
		/** @notifier.list_lock: lock protecting @rebind_list */
		spinlock_t list_lock;
		/**
		 * @notifier.rebind_list: list of vmas that we want to put on the
		 * main @rebind_list. This list is protected for writing by both
		 * notifier.list_lock, and the resv of the bo the vma points to,
		 * and for reading by the notifier.list_lock only.
		 */
		struct list_head rebind_list;
	} notifier;

	/** @error_capture: state for tracking errors */
	struct {
		/** @capture_once: capture only one error per VM */
		bool capture_once;
	} error_capture;
};

#endif